1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
/* Compile-time tunables; each may be overridden from the build system. */
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Master thread: data plane thread context
38 struct rte_ring *msgq_req; /* Request ring: master -> data plane thread. */
39 struct rte_ring *msgq_rsp; /* Response ring: data plane thread -> master. */
/* Per-lcore master-side records, indexed by lcore ID. */
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
/* NOTE(review): enclosing struct table_data opens on a line not visible here. */
50 struct rte_table_action *a; /* Table action handle for this table. */
/* Run-time state of one pipeline owned by a data plane thread. */
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req; /* Pipeline request ring (from master). */
59 struct rte_ring *msgq_rsp; /* Pipeline response ring (to master). */
60 uint64_t timer_period; /* Measured in CPU cycles. */
/* Scratch area used to build table entries before insertion. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
/* Per-thread context; p[] mirrors pipeline_data[] for the fast path. */
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req; /* Thread-level request ring. */
72 struct rte_ring *msgq_rsp; /* Thread-level response ring. */
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min; /* Earliest deadline over thread + pipelines. */
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
/* Tear-down path: release both message rings of every enabled lcore.
 * rte_ring_free(NULL) is a no-op, so unallocated rings are safe here. */
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
/* Init path: create a REQ/RSP ring pair per slave lcore and seed both the
 * master-side and data-plane-side records for that thread. */
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
/* NOTE(review): cpu_id actually holds a NUMA socket ID; presumably passed
 * to rte_ring_create() for local allocation — confirm on the hidden lines. */
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
159 * Master thread & data plane threads: message passing
161 enum thread_req_type {
162 THREAD_REQ_PIPELINE_ENABLE = 0,
163 THREAD_REQ_PIPELINE_DISABLE,
/* Request descriptor sent from master to a data plane thread. */
167 struct thread_msg_req {
168 enum thread_req_type type;
172 struct rte_pipeline *p;
174 struct rte_table_action *a;
175 } table[RTE_PIPELINE_TABLE_MAX];
176 struct rte_ring *msgq_req;
177 struct rte_ring *msgq_rsp;
178 uint32_t timer_period_ms;
183 struct rte_pipeline *p;
188 struct thread_msg_rsp {
195 static struct thread_msg_req *
196 thread_msg_alloc(void)
/* One buffer sized for the larger of req/rsp, so the data plane thread can
 * overwrite the request in place to build the response (zero-copy reply). */
198 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
199 sizeof(struct thread_msg_rsp));
201 return calloc(1, size);
205 thread_msg_free(struct thread_msg_rsp *rsp)
/* Blocking request/response exchange; busy-waits on both rings.
 * Only called from the master thread, never from the fast path. */
210 static struct thread_msg_rsp *
211 thread_msg_send_recv(uint32_t thread_id,
212 struct thread_msg_req *req)
214 struct thread *t = &thread[thread_id];
215 struct rte_ring *msgq_req = t->msgq_req;
216 struct rte_ring *msgq_rsp = t->msgq_rsp;
217 struct thread_msg_rsp *rsp;
222 status = rte_ring_sp_enqueue(msgq_req, req);
223 } while (status == -ENOBUFS);
227 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
228 } while (status != 0);
/* Master API: hand pipeline <pipeline_name> over to data plane thread
 * <thread_id>. Validates inputs, then ships an ENABLE request and waits. */
234 thread_pipeline_enable(uint32_t thread_id,
235 const char *pipeline_name)
237 struct pipeline *p = pipeline_find(pipeline_name);
239 struct thread_msg_req *req;
240 struct thread_msg_rsp *rsp;
244 /* Check input params */
245 if ((thread_id >= RTE_MAX_LCORE) ||
247 (p->n_ports_in == 0) ||
248 (p->n_ports_out == 0) ||
252 t = &thread[thread_id];
253 if ((t->enabled == 0) ||
257 /* Allocate request */
258 req = thread_msg_alloc();
/* Snapshot everything the data plane thread needs to run the pipeline. */
263 req->type = THREAD_REQ_PIPELINE_ENABLE;
264 req->pipeline_enable.p = p->p;
265 for (i = 0; i < p->n_tables; i++)
266 req->pipeline_enable.table[i].a =
268 req->pipeline_enable.msgq_req = p->msgq_req;
269 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
270 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
271 req->pipeline_enable.n_tables = p->n_tables;
273 /* Send request and wait for response */
274 rsp = thread_msg_send_recv(thread_id, req);
/* Response buffer is the recycled request; free it after reading status. */
279 status = rsp->status;
282 thread_msg_free(rsp);
284 /* Request completion */
/* Record ownership only after the data plane thread has accepted it. */
288 p->thread_id = thread_id;
/* Master API: detach pipeline <pipeline_name> from data plane thread
 * <thread_id>. Mirror image of thread_pipeline_enable(). */
295 thread_pipeline_disable(uint32_t thread_id,
296 const char *pipeline_name)
298 struct pipeline *p = pipeline_find(pipeline_name);
300 struct thread_msg_req *req;
301 struct thread_msg_rsp *rsp;
304 /* Check input params */
305 if ((thread_id >= RTE_MAX_LCORE) ||
309 t = &thread[thread_id];
/* Refuse to disable a pipeline owned by a different thread. */
316 if (p->thread_id != thread_id)
319 /* Allocate request */
320 req = thread_msg_alloc();
325 req->type = THREAD_REQ_PIPELINE_DISABLE;
326 req->pipeline_disable.p = p->p;
328 /* Send request and wait for response */
329 rsp = thread_msg_send_recv(thread_id, req);
334 status = rsp->status;
337 thread_msg_free(rsp);
339 /* Request completion */
349 * Data plane threads: message handling
/* Non-blocking dequeue of the next thread-level request; presumably returns
 * NULL when the ring is empty (hidden lines) — called from the slow path. */
351 static inline struct thread_msg_req *
352 thread_msg_recv(struct rte_ring *msgq_req)
354 struct thread_msg_req *req;
356 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Blocking enqueue of the response; spins while the ring is full. */
365 thread_msg_send(struct rte_ring *msgq_rsp,
366 struct thread_msg_rsp *rsp)
371 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
372 } while (status == -ENOBUFS);
/* Data plane handler: register a new pipeline with this thread.
 * The response is built in place over the request buffer. */
375 static struct thread_msg_rsp *
376 thread_msg_handle_pipeline_enable(struct thread_data *t,
377 struct thread_msg_req *req)
379 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
380 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Reject when this thread already runs the maximum number of pipelines. */
384 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
389 t->p[t->n_pipelines] = req->pipeline_enable.p;
391 p->p = req->pipeline_enable.p;
392 for (i = 0; i < req->pipeline_enable.n_tables; i++)
394 req->pipeline_enable.table[i].a;
396 p->n_tables = req->pipeline_enable.n_tables;
398 p->msgq_req = req->pipeline_enable.msgq_req;
399 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
/* Convert the ms period to TSC cycles and arm the first deadline. */
401 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
402 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/* Data plane handler: remove a pipeline from this thread. The pipeline array
 * is kept dense by moving the last entry into the freed slot. */
411 static struct thread_msg_rsp *
412 thread_msg_handle_pipeline_disable(struct thread_data *t,
413 struct thread_msg_req *req)
415 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
416 uint32_t n_pipelines = t->n_pipelines;
417 struct rte_pipeline *pipeline = req->pipeline_disable.p;
/* Linear scan for the target pipeline. */
421 for (i = 0; i < n_pipelines; i++) {
422 struct pipeline_data *p = &t->pipeline_data[i];
424 if (p->p != pipeline)
/* Swap-with-last compaction: only needed when not already the last slot. */
427 if (i < n_pipelines - 1) {
428 struct rte_pipeline *pipeline_last =
429 t->p[n_pipelines - 1];
430 struct pipeline_data *p_last =
431 &t->pipeline_data[n_pipelines - 1];
433 t->p[i] = pipeline_last;
434 memcpy(p, p_last, sizeof(*p));
443 /* should not get here */
/* Data plane slow path: drain and dispatch pending thread-level requests,
 * sending each response back on the thread's response ring. */
449 thread_msg_handle(struct thread_data *t)
452 struct thread_msg_req *req;
453 struct thread_msg_rsp *rsp;
455 req = thread_msg_recv(t->msgq_req);
460 case THREAD_REQ_PIPELINE_ENABLE:
461 rsp = thread_msg_handle_pipeline_enable(t, req);
464 case THREAD_REQ_PIPELINE_DISABLE:
465 rsp = thread_msg_handle_pipeline_disable(t, req);
/* Unknown request type: echo the request back as an (error) response. */
469 rsp = (struct thread_msg_rsp *) req;
473 thread_msg_send(t->msgq_rsp, rsp);
478 * Master thread & data plane threads: message passing
/* Pipeline-level request types (port, table and rule management). */
480 enum pipeline_req_type {
482 PIPELINE_REQ_PORT_IN_STATS_READ,
483 PIPELINE_REQ_PORT_IN_ENABLE,
484 PIPELINE_REQ_PORT_IN_DISABLE,
487 PIPELINE_REQ_PORT_OUT_STATS_READ,
490 PIPELINE_REQ_TABLE_STATS_READ,
491 PIPELINE_REQ_TABLE_RULE_ADD,
492 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
493 PIPELINE_REQ_TABLE_RULE_DELETE,
494 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
499 struct pipeline_msg_req_port_in_stats_read {
503 struct pipeline_msg_req_port_out_stats_read {
507 struct pipeline_msg_req_table_stats_read {
511 struct pipeline_msg_req_table_rule_add {
512 struct table_rule_match match;
513 struct table_rule_action action;
516 struct pipeline_msg_req_table_rule_add_default {
517 struct table_rule_action action;
520 struct pipeline_msg_req_table_rule_delete {
521 struct table_rule_match match;
524 struct pipeline_msg_req {
525 enum pipeline_req_type type;
526 uint32_t id; /* Port IN, port OUT or table ID */
530 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
531 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
532 struct pipeline_msg_req_table_stats_read table_stats_read;
533 struct pipeline_msg_req_table_rule_add table_rule_add;
534 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
535 struct pipeline_msg_req_table_rule_delete table_rule_delete;
/* Per-type response payloads. */
539 struct pipeline_msg_rsp_port_in_stats_read {
540 struct rte_pipeline_port_in_stats stats;
543 struct pipeline_msg_rsp_port_out_stats_read {
544 struct rte_pipeline_port_out_stats stats;
547 struct pipeline_msg_rsp_table_stats_read {
548 struct rte_pipeline_table_stats stats;
551 struct pipeline_msg_rsp_table_rule_add {
555 struct pipeline_msg_rsp_table_rule_add_default {
559 struct pipeline_msg_rsp {
564 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
565 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
566 struct pipeline_msg_rsp_table_stats_read table_stats_read;
567 struct pipeline_msg_rsp_table_rule_add table_rule_add;
568 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
575 static struct pipeline_msg_req *
576 pipeline_msg_alloc(void)
/* Same zero-copy trick as thread_msg_alloc(): one buffer fits req or rsp. */
578 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
579 sizeof(struct pipeline_msg_rsp));
581 return calloc(1, size);
585 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
/* Blocking request/response exchange on the pipeline's ring pair. */
590 static struct pipeline_msg_rsp *
591 pipeline_msg_send_recv(struct pipeline *p,
592 struct pipeline_msg_req *req)
594 struct rte_ring *msgq_req = p->msgq_req;
595 struct rte_ring *msgq_rsp = p->msgq_rsp;
596 struct pipeline_msg_rsp *rsp;
601 status = rte_ring_sp_enqueue(msgq_req, req);
602 } while (status == -ENOBUFS);
606 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
607 } while (status != 0);
/* Master API: read (and optionally clear) input port statistics by proxying
 * the request to the data plane thread that owns the pipeline. */
613 pipeline_port_in_stats_read(const char *pipeline_name,
615 struct rte_pipeline_port_in_stats *stats,
619 struct pipeline_msg_req *req;
620 struct pipeline_msg_rsp *rsp;
623 /* Check input params */
624 if ((pipeline_name == NULL) ||
628 p = pipeline_find(pipeline_name);
631 (port_id >= p->n_ports_in))
634 /* Allocate request */
635 req = pipeline_msg_alloc();
640 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
642 req->port_in_stats_read.clear = clear;
644 /* Send request and wait for response */
645 rsp = pipeline_msg_send_recv(p, req);
/* Copy stats out only on success; caller's buffer untouched otherwise. */
650 status = rsp->status;
652 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
655 pipeline_msg_free(rsp);
/* Master API: enable pipeline input port <port_id>. */
661 pipeline_port_in_enable(const char *pipeline_name,
665 struct pipeline_msg_req *req;
666 struct pipeline_msg_rsp *rsp;
669 /* Check input params */
670 if (pipeline_name == NULL)
673 p = pipeline_find(pipeline_name);
676 (port_id >= p->n_ports_in))
679 /* Allocate request */
680 req = pipeline_msg_alloc();
685 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
688 /* Send request and wait for response */
689 rsp = pipeline_msg_send_recv(p, req);
694 status = rsp->status;
697 pipeline_msg_free(rsp);
/* Master API: disable pipeline input port <port_id>. Same flow as enable. */
703 pipeline_port_in_disable(const char *pipeline_name,
707 struct pipeline_msg_req *req;
708 struct pipeline_msg_rsp *rsp;
711 /* Check input params */
712 if (pipeline_name == NULL)
715 p = pipeline_find(pipeline_name);
718 (port_id >= p->n_ports_in))
721 /* Allocate request */
722 req = pipeline_msg_alloc();
727 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
730 /* Send request and wait for response */
731 rsp = pipeline_msg_send_recv(p, req);
736 status = rsp->status;
739 pipeline_msg_free(rsp);
/* Master API: read (and optionally clear) output port statistics. */
745 pipeline_port_out_stats_read(const char *pipeline_name,
747 struct rte_pipeline_port_out_stats *stats,
751 struct pipeline_msg_req *req;
752 struct pipeline_msg_rsp *rsp;
755 /* Check input params */
756 if ((pipeline_name == NULL) ||
760 p = pipeline_find(pipeline_name);
763 (port_id >= p->n_ports_out))
766 /* Allocate request */
767 req = pipeline_msg_alloc();
772 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
774 req->port_out_stats_read.clear = clear;
776 /* Send request and wait for response */
777 rsp = pipeline_msg_send_recv(p, req);
782 status = rsp->status;
784 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
787 pipeline_msg_free(rsp);
/* Master API: read (and optionally clear) table statistics. */
793 pipeline_table_stats_read(const char *pipeline_name,
795 struct rte_pipeline_table_stats *stats,
799 struct pipeline_msg_req *req;
800 struct pipeline_msg_rsp *rsp;
803 /* Check input params */
804 if ((pipeline_name == NULL) ||
808 p = pipeline_find(pipeline_name);
811 (table_id >= p->n_tables))
814 /* Allocate request */
815 req = pipeline_msg_alloc();
820 req->type = PIPELINE_REQ_TABLE_STATS_READ;
822 req->table_stats_read.clear = clear;
824 /* Send request and wait for response */
825 rsp = pipeline_msg_send_recv(p, req);
830 status = rsp->status;
832 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
835 pipeline_msg_free(rsp);
/* Validate a high-level rule match against the target table's configuration:
 * the match type must agree with the table and all fields must be in range. */
841 match_check(struct table_rule_match *match,
847 if ((match == NULL) ||
849 (table_id >= p->n_tables))
852 table = &p->table[table_id];
853 if (match->match_type != table->params.match_type)
856 switch (match->match_type) {
859 struct table_acl_params *t = &table->params.match.acl;
860 struct table_rule_match_acl *r = &match->match.acl;
/* ACL: rule and table must agree on IP version (both v4 or both v6). */
862 if ((r->ip_version && (t->ip_version == 0)) ||
863 ((r->ip_version == 0) && t->ip_version))
/* Prefix depth limits: 32 bits for IPv4, 128 bits for IPv6. */
867 if ((r->sa_depth > 32) ||
871 if ((r->sa_depth > 128) ||
886 struct table_lpm_params *t = &table->params.match.lpm;
887 struct table_rule_match_lpm *r = &match->match.lpm;
/* LPM: table key size (4 or 16 bytes) must match the rule's IP version. */
889 if ((r->ip_version && (t->key_size != 4)) ||
890 ((r->ip_version == 0) && (t->key_size != 16)))
/* Validate a rule action against the table's action profile: the action mask
 * must match the profile exactly, and every enabled action's parameters must
 * be within the ranges configured for this pipeline/profile. */
912 action_check(struct table_rule_action *action,
916 struct table_action_profile *ap;
918 if ((action == NULL) ||
920 (table_id >= p->n_tables))
923 ap = p->table[table_id].ap;
924 if (action->action_mask != ap->params.action_mask)
927 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
/* FWD target must be a valid output port or a valid table. */
928 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
929 (action->fwd.id >= p->n_ports_out))
932 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
933 (action->fwd.id >= p->n_tables))
937 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
/* Meter: traffic-class mask must cover exactly the profile's n_tc classes. */
938 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
939 uint32_t tc_mask1 = action->mtr.tc_mask;
941 if (tc_mask1 != tc_mask0)
945 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
946 uint32_t n_subports_per_port =
947 ap->params.tm.n_subports_per_port;
948 uint32_t n_pipes_per_subport =
949 ap->params.tm.n_pipes_per_subport;
950 uint32_t subport_id = action->tm.subport_id;
951 uint32_t pipe_id = action->tm.pipe_id;
953 if ((subport_id >= n_subports_per_port) ||
954 (pipe_id >= n_pipes_per_subport))
958 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
/* Encap type must be one of the types enabled in the profile mask. */
959 uint64_t encap_mask = ap->params.encap.encap_mask;
960 enum rte_table_action_encap_type type = action->encap.type;
962 if ((encap_mask & (1LLU << type)) == 0)
966 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
/* NAT: action and profile must agree on IP version. */
967 int ip_version0 = ap->params.common.ip_version;
968 int ip_version1 = action->nat.ip_version;
970 if ((ip_version1 && (ip_version0 == 0)) ||
971 ((ip_version1 == 0) && ip_version0))
/* Validate the default-rule action: only FWD is permitted for the default
 * entry, and its target must be a valid output port or table. */
979 action_default_check(struct table_rule_action *action,
983 if ((action == NULL) ||
984 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
986 (table_id >= p->n_tables))
989 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
990 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
991 (action->fwd.id >= p->n_ports_out))
994 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
995 (action->fwd.id >= p->n_tables))
/* Master API: add a (match, action) rule to a pipeline table. On success the
 * data plane returns the table entry handle through <data>. */
1003 pipeline_table_rule_add(const char *pipeline_name,
1005 struct table_rule_match *match,
1006 struct table_rule_action *action,
1010 struct pipeline_msg_req *req;
1011 struct pipeline_msg_rsp *rsp;
1014 /* Check input params */
1015 if ((pipeline_name == NULL) ||
1021 p = pipeline_find(pipeline_name);
1023 (p->enabled == 0) ||
1024 (table_id >= p->n_tables) ||
/* Full validation happens master-side before the request is sent. */
1025 match_check(match, p, table_id) ||
1026 action_check(action, p, table_id))
1029 /* Allocate request */
1030 req = pipeline_msg_alloc();
1035 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1037 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1038 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1040 /* Send request and wait for response */
1041 rsp = pipeline_msg_send_recv(p, req);
1046 status = rsp->status;
1048 *data = rsp->table_rule_add.data;
1051 pipeline_msg_free(rsp);
/* Master API: set the default (miss) rule of a pipeline table. */
1057 pipeline_table_rule_add_default(const char *pipeline_name,
1059 struct table_rule_action *action,
1063 struct pipeline_msg_req *req;
1064 struct pipeline_msg_rsp *rsp;
1067 /* Check input params */
1068 if ((pipeline_name == NULL) ||
1073 p = pipeline_find(pipeline_name);
1075 (p->enabled == 0) ||
1076 (table_id >= p->n_tables) ||
1077 action_default_check(action, p, table_id))
1080 /* Allocate request */
1081 req = pipeline_msg_alloc();
1086 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1088 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1090 /* Send request and wait for response */
1091 rsp = pipeline_msg_send_recv(p, req);
1096 status = rsp->status;
1098 *data = rsp->table_rule_add_default.data;
1101 pipeline_msg_free(rsp);
/* Master API: delete the rule matching <match> from a pipeline table. */
1107 pipeline_table_rule_delete(const char *pipeline_name,
1109 struct table_rule_match *match)
1112 struct pipeline_msg_req *req;
1113 struct pipeline_msg_rsp *rsp;
1116 /* Check input params */
1117 if ((pipeline_name == NULL) ||
1121 p = pipeline_find(pipeline_name);
1123 (p->enabled == 0) ||
1124 (table_id >= p->n_tables) ||
1125 match_check(match, p, table_id))
1128 /* Allocate request */
1129 req = pipeline_msg_alloc();
1134 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1136 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1138 /* Send request and wait for response */
1139 rsp = pipeline_msg_send_recv(p, req);
1144 status = rsp->status;
1147 pipeline_msg_free(rsp);
/* Master API: delete the default (miss) rule of a pipeline table. */
1153 pipeline_table_rule_delete_default(const char *pipeline_name,
1157 struct pipeline_msg_req *req;
1158 struct pipeline_msg_rsp *rsp;
1161 /* Check input params */
1162 if (pipeline_name == NULL)
1165 p = pipeline_find(pipeline_name);
1167 (p->enabled == 0) ||
1168 (table_id >= p->n_tables))
1171 /* Allocate request */
1172 req = pipeline_msg_alloc();
1177 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1180 /* Send request and wait for response */
1181 rsp = pipeline_msg_send_recv(p, req);
1186 status = rsp->status;
1189 pipeline_msg_free(rsp);
1195 * Data plane threads: message handling
/* Non-blocking dequeue of the next pipeline-level request. */
1197 static inline struct pipeline_msg_req *
1198 pipeline_msg_recv(struct rte_ring *msgq_req)
1200 struct pipeline_msg_req *req;
1202 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Blocking enqueue of the response; spins while the ring is full. */
1211 pipeline_msg_send(struct rte_ring *msgq_rsp,
1212 struct pipeline_msg_rsp *rsp)
1217 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
1218 } while (status == -ENOBUFS);
/* Data plane handlers: each reuses the request buffer as the response and
 * delegates to the corresponding librte_pipeline run-time call. */
1221 static struct pipeline_msg_rsp *
1222 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
1223 struct pipeline_msg_req *req)
1225 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1226 uint32_t port_id = req->id;
1227 int clear = req->port_in_stats_read.clear;
1229 rsp->status = rte_pipeline_port_in_stats_read(p->p,
1231 &rsp->port_in_stats_read.stats,
1237 static struct pipeline_msg_rsp *
1238 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
1239 struct pipeline_msg_req *req)
1241 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1242 uint32_t port_id = req->id;
1244 rsp->status = rte_pipeline_port_in_enable(p->p,
1250 static struct pipeline_msg_rsp *
1251 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
1252 struct pipeline_msg_req *req)
1254 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1255 uint32_t port_id = req->id;
1257 rsp->status = rte_pipeline_port_in_disable(p->p,
1263 static struct pipeline_msg_rsp *
1264 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
1265 struct pipeline_msg_req *req)
1267 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1268 uint32_t port_id = req->id;
1269 int clear = req->port_out_stats_read.clear;
1271 rsp->status = rte_pipeline_port_out_stats_read(p->p,
1273 &rsp->port_out_stats_read.stats,
1279 static struct pipeline_msg_rsp *
1280 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
1281 struct pipeline_msg_req *req)
1283 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
/* NOTE(review): req->id is a table ID here; the local is misnamed
 * "port_id" — consider renaming to table_id for clarity. */
1284 uint32_t port_id = req->id;
1285 int clear = req->table_stats_read.clear;
1287 rsp->status = rte_pipeline_table_stats_read(p->p,
1289 &rsp->table_stats_read.stats,
/* Low-level (librte_table) representation of a rule match; one member per
 * supported table type. */
1295 union table_rule_match_low_level {
1296 struct rte_table_acl_rule_add_params acl_add;
1297 struct rte_table_acl_rule_delete_params acl_delete;
1298 struct rte_table_array_key array;
1299 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1300 struct rte_table_lpm_key lpm_ipv4;
1301 struct rte_table_lpm_ipv6_key lpm_ipv6;
/* Split a 0..128-bit IPv6 prefix depth into four per-32-bit-word depths,
 * as required by the ACL table's u32 field ranges. */
1305 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
1310 switch (depth / 32) {
/* Each case fills the fully-covered words with 32 and the partial word
 * with the remainder; later words presumably get 0 (hidden lines). */
1320 depth32[1] = depth - 32;
1328 depth32[2] = depth - 64;
1336 depth32[3] = depth - 96;
/* Convert a high-level rule match (mh) to the low-level librte_table form
 * (ml). The third parameter selects add vs delete layout for ACL tables
 * (acl_add carries a priority; acl_delete does not). */
1352 match_convert(struct table_rule_match *mh,
1353 union table_rule_match_low_level *ml,
1356 memset(ml, 0, sizeof(*ml));
1358 switch (mh->match_type) {
/* ACL / IPv4: fields are proto, SA, DA, SP, DP (+ priority for add). */
1360 if (mh->match.acl.ip_version)
1362 ml->acl_add.field_value[0].value.u8 =
1363 mh->match.acl.proto;
1364 ml->acl_add.field_value[0].mask_range.u8 =
1365 mh->match.acl.proto_mask;
1367 ml->acl_add.field_value[1].value.u32 =
1368 mh->match.acl.ipv4.sa;
1369 ml->acl_add.field_value[1].mask_range.u32 =
1370 mh->match.acl.sa_depth;
1372 ml->acl_add.field_value[2].value.u32 =
1373 mh->match.acl.ipv4.da;
1374 ml->acl_add.field_value[2].mask_range.u32 =
1375 mh->match.acl.da_depth;
1377 ml->acl_add.field_value[3].value.u16 =
1379 ml->acl_add.field_value[3].mask_range.u16 =
1382 ml->acl_add.field_value[4].value.u16 =
1384 ml->acl_add.field_value[4].mask_range.u16 =
1387 ml->acl_add.priority =
1388 (int32_t) mh->match.acl.priority;
/* ACL / IPv4 delete: same field layout, no priority. */
1390 ml->acl_delete.field_value[0].value.u8 =
1391 mh->match.acl.proto;
1392 ml->acl_delete.field_value[0].mask_range.u8 =
1393 mh->match.acl.proto_mask;
1395 ml->acl_delete.field_value[1].value.u32 =
1396 mh->match.acl.ipv4.sa;
1397 ml->acl_delete.field_value[1].mask_range.u32 =
1398 mh->match.acl.sa_depth;
1400 ml->acl_delete.field_value[2].value.u32 =
1401 mh->match.acl.ipv4.da;
1402 ml->acl_delete.field_value[2].mask_range.u32 =
1403 mh->match.acl.da_depth;
1405 ml->acl_delete.field_value[3].value.u16 =
1407 ml->acl_delete.field_value[3].mask_range.u16 =
1410 ml->acl_delete.field_value[4].value.u16 =
1412 ml->acl_delete.field_value[4].mask_range.u16 =
/* ACL / IPv6 add: 128-bit addresses are viewed as four u32 words each.
 * NOTE(review): casting the byte array to uint32_t* assumes suitable
 * alignment and byte order — confirm against the original file. */
1418 (uint32_t *) mh->match.acl.ipv6.sa;
1420 (uint32_t *) mh->match.acl.ipv6.da;
1421 uint32_t sa32_depth[4], da32_depth[4];
1424 status = match_convert_ipv6_depth(
1425 mh->match.acl.sa_depth,
1430 status = match_convert_ipv6_depth(
1431 mh->match.acl.da_depth,
1436 ml->acl_add.field_value[0].value.u8 =
1437 mh->match.acl.proto;
1438 ml->acl_add.field_value[0].mask_range.u8 =
1439 mh->match.acl.proto_mask;
1441 ml->acl_add.field_value[1].value.u32 = sa32[0];
1442 ml->acl_add.field_value[1].mask_range.u32 =
1444 ml->acl_add.field_value[2].value.u32 = sa32[1];
1445 ml->acl_add.field_value[2].mask_range.u32 =
1447 ml->acl_add.field_value[3].value.u32 = sa32[2];
1448 ml->acl_add.field_value[3].mask_range.u32 =
1450 ml->acl_add.field_value[4].value.u32 = sa32[3];
1451 ml->acl_add.field_value[4].mask_range.u32 =
1454 ml->acl_add.field_value[5].value.u32 = da32[0];
1455 ml->acl_add.field_value[5].mask_range.u32 =
1457 ml->acl_add.field_value[6].value.u32 = da32[1];
1458 ml->acl_add.field_value[6].mask_range.u32 =
1460 ml->acl_add.field_value[7].value.u32 = da32[2];
1461 ml->acl_add.field_value[7].mask_range.u32 =
1463 ml->acl_add.field_value[8].value.u32 = da32[3];
1464 ml->acl_add.field_value[8].mask_range.u32 =
1467 ml->acl_add.field_value[9].value.u16 =
1469 ml->acl_add.field_value[9].mask_range.u16 =
1472 ml->acl_add.field_value[10].value.u16 =
1474 ml->acl_add.field_value[10].mask_range.u16 =
1477 ml->acl_add.priority =
1478 (int32_t) mh->match.acl.priority;
/* ACL / IPv6 delete: mirrors the add layout without priority. */
1481 (uint32_t *) mh->match.acl.ipv6.sa;
1483 (uint32_t *) mh->match.acl.ipv6.da;
1484 uint32_t sa32_depth[4], da32_depth[4];
1487 status = match_convert_ipv6_depth(
1488 mh->match.acl.sa_depth,
1493 status = match_convert_ipv6_depth(
1494 mh->match.acl.da_depth,
1499 ml->acl_delete.field_value[0].value.u8 =
1500 mh->match.acl.proto;
1501 ml->acl_delete.field_value[0].mask_range.u8 =
1502 mh->match.acl.proto_mask;
1504 ml->acl_delete.field_value[1].value.u32 =
1506 ml->acl_delete.field_value[1].mask_range.u32 =
1508 ml->acl_delete.field_value[2].value.u32 =
1510 ml->acl_delete.field_value[2].mask_range.u32 =
1512 ml->acl_delete.field_value[3].value.u32 =
1514 ml->acl_delete.field_value[3].mask_range.u32 =
1516 ml->acl_delete.field_value[4].value.u32 =
1518 ml->acl_delete.field_value[4].mask_range.u32 =
1521 ml->acl_delete.field_value[5].value.u32 =
1523 ml->acl_delete.field_value[5].mask_range.u32 =
1525 ml->acl_delete.field_value[6].value.u32 =
1527 ml->acl_delete.field_value[6].mask_range.u32 =
1529 ml->acl_delete.field_value[7].value.u32 =
1531 ml->acl_delete.field_value[7].mask_range.u32 =
1533 ml->acl_delete.field_value[8].value.u32 =
1535 ml->acl_delete.field_value[8].mask_range.u32 =
1538 ml->acl_delete.field_value[9].value.u16 =
1540 ml->acl_delete.field_value[9].mask_range.u16 =
1543 ml->acl_delete.field_value[10].value.u16 =
1545 ml->acl_delete.field_value[10].mask_range.u16 =
/* Array table: position index only. */
1551 ml->array.pos = mh->match.array.pos;
/* Hash table: raw key bytes. */
1555 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
/* LPM table: IPv4 or IPv6 prefix + depth. */
1559 if (mh->match.lpm.ip_version) {
1560 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
1561 ml->lpm_ipv4.depth = mh->match.lpm.depth;
1563 memcpy(ml->lpm_ipv6.ip,
1564 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
1565 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/* Data plane handler: build a table entry from the rule action (applying each
 * enabled action to the scratch buffer), convert the match to its low-level
 * form, then insert the entry into the pipeline table. */
1575 static struct pipeline_msg_rsp *
1576 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
1577 struct pipeline_msg_req *req)
1579 union table_rule_match_low_level match_ll;
1580 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1581 struct table_rule_match *match = &req->table_rule_add.match;
1582 struct table_rule_action *action = &req->table_rule_add.action;
1583 struct rte_pipeline_table_entry *data_in, *data_out;
1584 uint32_t table_id = req->id;
1585 int key_found, status;
1586 struct rte_table_action *a = p->table_data[table_id].a;
/* The per-pipeline scratch buffer doubles as the entry being composed. */
1589 memset(p->buffer, 0, sizeof(p->buffer));
1590 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* Apply each enabled action type in turn; presumably each failure path
 * writes rsp->status and returns early (hidden lines). */
1592 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1593 status = rte_table_action_apply(a,
1595 RTE_TABLE_ACTION_FWD,
1604 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1605 status = rte_table_action_apply(a,
1607 RTE_TABLE_ACTION_MTR,
1616 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1617 status = rte_table_action_apply(a,
1619 RTE_TABLE_ACTION_TM,
1628 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1629 status = rte_table_action_apply(a,
1631 RTE_TABLE_ACTION_ENCAP,
1640 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1641 status = rte_table_action_apply(a,
1643 RTE_TABLE_ACTION_NAT,
1652 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
1653 status = rte_table_action_apply(a,
1655 RTE_TABLE_ACTION_TTL,
1664 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
1665 status = rte_table_action_apply(a,
1667 RTE_TABLE_ACTION_STATS,
1676 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
1677 status = rte_table_action_apply(a,
1679 RTE_TABLE_ACTION_TIME,
1688 /* Add rule (match, action) to table */
1689 status = match_convert(match, &match_ll, 1);
1695 status = rte_pipeline_table_entry_add(p->p,
1706 /* Write response */
1708 rsp->table_rule_add.data = data_out;
/* Data plane handler: install the default (miss) entry of a table. Only the
 * FWD action is supported for default entries (enforced master-side). */
1713 static struct pipeline_msg_rsp *
1714 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
1715 struct pipeline_msg_req *req)
1717 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1718 struct table_rule_action *action = &req->table_rule_add_default.action;
1719 struct rte_pipeline_table_entry *data_in, *data_out;
1720 uint32_t table_id = req->id;
1724 memset(p->buffer, 0, sizeof(p->buffer));
1725 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* Translate the FWD action into the librte_pipeline entry fields. */
1727 data_in->action = action->fwd.action;
1728 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1729 data_in->port_id = action->fwd.id;
1730 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1731 data_in->table_id = action->fwd.id;
1733 /* Add default rule to table */
1734 status = rte_pipeline_table_default_entry_add(p->p,
1743 /* Write response */
1745 rsp->table_rule_add_default.data = data_out;
/* Data plane handler: delete the entry matching the converted key. */
1750 static struct pipeline_msg_rsp *
1751 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
1752 struct pipeline_msg_req *req)
1754 union table_rule_match_low_level match_ll;
1755 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1756 struct table_rule_match *match = &req->table_rule_delete.match;
1757 uint32_t table_id = req->id;
1758 int key_found, status;
/* Third argument 0 selects the delete-side (acl_delete) layout. */
1760 status = match_convert(match, &match_ll, 0);
1766 rsp->status = rte_pipeline_table_entry_delete(p->p,
/* Data plane handler: remove the default (miss) entry of a table. */
1775 static struct pipeline_msg_rsp *
1776 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
1777 struct pipeline_msg_req *req)
1779 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1780 uint32_t table_id = req->id;
1782 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/* Data plane slow path: drain and dispatch pending pipeline-level requests,
 * replying on the pipeline's response ring. */
1790 pipeline_msg_handle(struct pipeline_data *p)
1793 struct pipeline_msg_req *req;
1794 struct pipeline_msg_rsp *rsp;
1796 req = pipeline_msg_recv(p->msgq_req);
1800 switch (req->type) {
1801 case PIPELINE_REQ_PORT_IN_STATS_READ:
1802 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
1805 case PIPELINE_REQ_PORT_IN_ENABLE:
1806 rsp = pipeline_msg_handle_port_in_enable(p, req);
1809 case PIPELINE_REQ_PORT_IN_DISABLE:
1810 rsp = pipeline_msg_handle_port_in_disable(p, req);
1813 case PIPELINE_REQ_PORT_OUT_STATS_READ:
1814 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
1817 case PIPELINE_REQ_TABLE_STATS_READ:
1818 rsp = pipeline_msg_handle_table_stats_read(p, req);
1821 case PIPELINE_REQ_TABLE_RULE_ADD:
1822 rsp = pipeline_msg_handle_table_rule_add(p, req);
1825 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
1826 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
1829 case PIPELINE_REQ_TABLE_RULE_DELETE:
1830 rsp = pipeline_msg_handle_table_rule_delete(p, req);
1833 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
1834 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
/* Unknown request type: echo the request back as an (error) response. */
1838 rsp = (struct pipeline_msg_rsp *) req;
1842 pipeline_msg_send(p->msgq_rsp, rsp);
1847 * Data plane threads: main
/* Per-lcore entry point: run all owned pipelines in a tight loop; every 16th
 * iteration, service timer-driven message handling for pipelines and the
 * thread itself. (Function end lies beyond this view.) */
1850 thread_main(void *arg __rte_unused)
1852 struct thread_data *t;
1853 uint32_t thread_id, i;
1855 thread_id = rte_lcore_id();
1856 t = &thread_data[thread_id];
1859 for (i = 0; ; i++) {
/* Fast path: run every pipeline assigned to this lcore. */
1863 for (j = 0; j < t->n_pipelines; j++)
1864 rte_pipeline_run(t->p[j]);
/* Slow path: amortized over 16 iterations to keep the fast path hot. */
1867 if ((i & 0xF) == 0) {
1868 uint64_t time = rte_get_tsc_cycles();
1869 uint64_t time_next_min = UINT64_MAX;
/* Skip all timer work until the earliest recorded deadline. */
1871 if (time < t->time_next_min)
1874 /* Pipeline message queues */
1875 for (j = 0; j < t->n_pipelines; j++) {
1876 struct pipeline_data *p =
1877 &t->pipeline_data[j];
1878 uint64_t time_next = p->time_next;
1880 if (time_next <= time) {
1881 pipeline_msg_handle(p);
1882 rte_pipeline_flush(p->p);
1883 time_next = time + p->timer_period;
1884 p->time_next = time_next;
1887 if (time_next < time_next_min)
1888 time_next_min = time_next;
1891 /* Thread message queues */
1893 uint64_t time_next = t->time_next;
1895 if (time_next <= time) {
1896 thread_msg_handle(t);
1897 time_next = time + t->timer_period;
1898 t->time_next = time_next;
1901 if (time_next < time_next_min)
1902 time_next_min = time_next;
/* Cache the minimum deadline so the next 15 iterations can early-out. */
1905 t->time_next_min = time_next_min;