1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Main thread: data plane thread context
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Main thread: data plane thread init
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
95 rte_ring_free(t->msgq_req);
97 rte_ring_free(t->msgq_rsp);
106 RTE_LCORE_FOREACH_WORKER(i) {
108 struct rte_ring *msgq_req, *msgq_rsp;
109 struct thread *t = &thread[i];
110 struct thread_data *t_data = &thread_data[i];
111 uint32_t cpu_id = rte_lcore_to_socket_id(i);
114 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
116 msgq_req = rte_ring_create(name,
119 RING_F_SP_ENQ | RING_F_SC_DEQ);
121 if (msgq_req == NULL) {
126 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
128 msgq_rsp = rte_ring_create(name,
131 RING_F_SP_ENQ | RING_F_SC_DEQ);
133 if (msgq_rsp == NULL) {
138 /* Main thread records */
139 t->msgq_req = msgq_req;
140 t->msgq_rsp = msgq_rsp;
143 /* Data plane thread records */
144 t_data->n_pipelines = 0;
145 t_data->msgq_req = msgq_req;
146 t_data->msgq_rsp = msgq_rsp;
147 t_data->timer_period =
148 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
149 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
150 t_data->time_next_min = t_data->time_next;
157 thread_is_running(uint32_t thread_id)
159 enum rte_lcore_state_t thread_state;
161 thread_state = rte_eal_get_lcore_state(thread_id);
162 return (thread_state == RUNNING) ? 1 : 0;
166 * Pipeline is running when:
167 * (A) Pipeline is mapped to a data plane thread AND
168 * (B) Its data plane thread is in RUNNING state.
171 pipeline_is_running(struct pipeline *p)
176 return thread_is_running(p->thread_id);
180 * Main thread & data plane threads: message passing
/* Request types accepted by the data plane threads. */
enum thread_req_type {
	THREAD_REQ_PIPELINE_ENABLE = 0,
	THREAD_REQ_PIPELINE_DISABLE,
	THREAD_REQ_MAX
};
188 struct thread_msg_req {
189 enum thread_req_type type;
193 struct rte_pipeline *p;
195 struct rte_table_action *a;
196 } table[RTE_PIPELINE_TABLE_MAX];
197 struct rte_ring *msgq_req;
198 struct rte_ring *msgq_rsp;
199 uint32_t timer_period_ms;
204 struct rte_pipeline *p;
209 struct thread_msg_rsp {
216 static struct thread_msg_req *
217 thread_msg_alloc(void)
219 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
220 sizeof(struct thread_msg_rsp));
222 return calloc(1, size);
/**
 * Release a message buffer obtained from thread_msg_alloc() (requests
 * and responses share the same buffer, hence the rsp type here).
 */
static void
thread_msg_free(struct thread_msg_rsp *rsp)
{
	free(rsp);
}
231 static struct thread_msg_rsp *
232 thread_msg_send_recv(uint32_t thread_id,
233 struct thread_msg_req *req)
235 struct thread *t = &thread[thread_id];
236 struct rte_ring *msgq_req = t->msgq_req;
237 struct rte_ring *msgq_rsp = t->msgq_rsp;
238 struct thread_msg_rsp *rsp;
243 status = rte_ring_sp_enqueue(msgq_req, req);
244 } while (status == -ENOBUFS);
248 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
249 } while (status != 0);
255 thread_pipeline_enable(uint32_t thread_id,
256 const char *pipeline_name)
258 struct pipeline *p = pipeline_find(pipeline_name);
260 struct thread_msg_req *req;
261 struct thread_msg_rsp *rsp;
265 /* Check input params */
266 if ((thread_id >= RTE_MAX_LCORE) ||
268 (p->n_ports_in == 0) ||
269 (p->n_ports_out == 0) ||
273 t = &thread[thread_id];
274 if ((t->enabled == 0) ||
278 if (!thread_is_running(thread_id)) {
279 struct thread_data *td = &thread_data[thread_id];
280 struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
282 if (td->n_pipelines >= THREAD_PIPELINES_MAX)
285 /* Data plane thread */
286 td->p[td->n_pipelines] = p->p;
289 for (i = 0; i < p->n_tables; i++)
290 tdp->table_data[i].a = p->table[i].a;
292 tdp->n_tables = p->n_tables;
294 tdp->msgq_req = p->msgq_req;
295 tdp->msgq_rsp = p->msgq_rsp;
296 tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
297 tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
302 p->thread_id = thread_id;
308 /* Allocate request */
309 req = thread_msg_alloc();
314 req->type = THREAD_REQ_PIPELINE_ENABLE;
315 req->pipeline_enable.p = p->p;
316 for (i = 0; i < p->n_tables; i++)
317 req->pipeline_enable.table[i].a =
319 req->pipeline_enable.msgq_req = p->msgq_req;
320 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
321 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
322 req->pipeline_enable.n_tables = p->n_tables;
324 /* Send request and wait for response */
325 rsp = thread_msg_send_recv(thread_id, req);
328 status = rsp->status;
331 thread_msg_free(rsp);
333 /* Request completion */
337 p->thread_id = thread_id;
344 thread_pipeline_disable(uint32_t thread_id,
345 const char *pipeline_name)
347 struct pipeline *p = pipeline_find(pipeline_name);
349 struct thread_msg_req *req;
350 struct thread_msg_rsp *rsp;
353 /* Check input params */
354 if ((thread_id >= RTE_MAX_LCORE) ||
358 t = &thread[thread_id];
365 if (p->thread_id != thread_id)
368 if (!thread_is_running(thread_id)) {
369 struct thread_data *td = &thread_data[thread_id];
372 for (i = 0; i < td->n_pipelines; i++) {
373 struct pipeline_data *tdp = &td->pipeline_data[i];
378 /* Data plane thread */
379 if (i < td->n_pipelines - 1) {
380 struct rte_pipeline *pipeline_last =
381 td->p[td->n_pipelines - 1];
382 struct pipeline_data *tdp_last =
383 &td->pipeline_data[td->n_pipelines - 1];
385 td->p[i] = pipeline_last;
386 memcpy(tdp, tdp_last, sizeof(*tdp));
400 /* Allocate request */
401 req = thread_msg_alloc();
406 req->type = THREAD_REQ_PIPELINE_DISABLE;
407 req->pipeline_disable.p = p->p;
409 /* Send request and wait for response */
410 rsp = thread_msg_send_recv(thread_id, req);
413 status = rsp->status;
416 thread_msg_free(rsp);
418 /* Request completion */
428 * Data plane threads: message handling
430 static inline struct thread_msg_req *
431 thread_msg_recv(struct rte_ring *msgq_req)
433 struct thread_msg_req *req;
435 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/**
 * Send response *rsp* on *msgq_rsp*, retrying while the ring is full.
 * Ownership of *rsp* passes to the receiver.
 */
static inline void
thread_msg_send(struct rte_ring *msgq_rsp,
	struct thread_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
454 static struct thread_msg_rsp *
455 thread_msg_handle_pipeline_enable(struct thread_data *t,
456 struct thread_msg_req *req)
458 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
459 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
463 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
468 t->p[t->n_pipelines] = req->pipeline_enable.p;
470 p->p = req->pipeline_enable.p;
471 for (i = 0; i < req->pipeline_enable.n_tables; i++)
473 req->pipeline_enable.table[i].a;
475 p->n_tables = req->pipeline_enable.n_tables;
477 p->msgq_req = req->pipeline_enable.msgq_req;
478 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
480 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
481 p->time_next = rte_get_tsc_cycles() + p->timer_period;
490 static struct thread_msg_rsp *
491 thread_msg_handle_pipeline_disable(struct thread_data *t,
492 struct thread_msg_req *req)
494 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
495 uint32_t n_pipelines = t->n_pipelines;
496 struct rte_pipeline *pipeline = req->pipeline_disable.p;
500 for (i = 0; i < n_pipelines; i++) {
501 struct pipeline_data *p = &t->pipeline_data[i];
503 if (p->p != pipeline)
506 if (i < n_pipelines - 1) {
507 struct rte_pipeline *pipeline_last =
508 t->p[n_pipelines - 1];
509 struct pipeline_data *p_last =
510 &t->pipeline_data[n_pipelines - 1];
512 t->p[i] = pipeline_last;
513 memcpy(p, p_last, sizeof(*p));
522 /* should not get here */
528 thread_msg_handle(struct thread_data *t)
531 struct thread_msg_req *req;
532 struct thread_msg_rsp *rsp;
534 req = thread_msg_recv(t->msgq_req);
539 case THREAD_REQ_PIPELINE_ENABLE:
540 rsp = thread_msg_handle_pipeline_enable(t, req);
543 case THREAD_REQ_PIPELINE_DISABLE:
544 rsp = thread_msg_handle_pipeline_disable(t, req);
548 rsp = (struct thread_msg_rsp *) req;
552 thread_msg_send(t->msgq_rsp, rsp);
557 * Main thread & data plane threads: message passing
559 enum pipeline_req_type {
561 PIPELINE_REQ_PORT_IN_STATS_READ,
562 PIPELINE_REQ_PORT_IN_ENABLE,
563 PIPELINE_REQ_PORT_IN_DISABLE,
566 PIPELINE_REQ_PORT_OUT_STATS_READ,
569 PIPELINE_REQ_TABLE_STATS_READ,
570 PIPELINE_REQ_TABLE_RULE_ADD,
571 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
572 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
573 PIPELINE_REQ_TABLE_RULE_DELETE,
574 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
575 PIPELINE_REQ_TABLE_RULE_STATS_READ,
576 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
577 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
578 PIPELINE_REQ_TABLE_RULE_MTR_READ,
579 PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
580 PIPELINE_REQ_TABLE_RULE_TTL_READ,
581 PIPELINE_REQ_TABLE_RULE_TIME_READ,
585 struct pipeline_msg_req_port_in_stats_read {
589 struct pipeline_msg_req_port_out_stats_read {
593 struct pipeline_msg_req_table_stats_read {
597 struct pipeline_msg_req_table_rule_add {
598 struct table_rule_match match;
599 struct table_rule_action action;
602 struct pipeline_msg_req_table_rule_add_default {
603 struct table_rule_action action;
606 struct pipeline_msg_req_table_rule_add_bulk {
607 struct table_rule_list *list;
611 struct pipeline_msg_req_table_rule_delete {
612 struct table_rule_match match;
615 struct pipeline_msg_req_table_rule_stats_read {
620 struct pipeline_msg_req_table_mtr_profile_add {
621 uint32_t meter_profile_id;
622 struct rte_table_action_meter_profile profile;
625 struct pipeline_msg_req_table_mtr_profile_delete {
626 uint32_t meter_profile_id;
629 struct pipeline_msg_req_table_rule_mtr_read {
635 struct pipeline_msg_req_table_dscp_table_update {
637 struct rte_table_action_dscp_table dscp_table;
640 struct pipeline_msg_req_table_rule_ttl_read {
645 struct pipeline_msg_req_table_rule_time_read {
649 struct pipeline_msg_req {
650 enum pipeline_req_type type;
651 uint32_t id; /* Port IN, port OUT or table ID */
655 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
656 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
657 struct pipeline_msg_req_table_stats_read table_stats_read;
658 struct pipeline_msg_req_table_rule_add table_rule_add;
659 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
660 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
661 struct pipeline_msg_req_table_rule_delete table_rule_delete;
662 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
663 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
664 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
665 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
666 struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
667 struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
668 struct pipeline_msg_req_table_rule_time_read table_rule_time_read;
672 struct pipeline_msg_rsp_port_in_stats_read {
673 struct rte_pipeline_port_in_stats stats;
676 struct pipeline_msg_rsp_port_out_stats_read {
677 struct rte_pipeline_port_out_stats stats;
680 struct pipeline_msg_rsp_table_stats_read {
681 struct rte_pipeline_table_stats stats;
684 struct pipeline_msg_rsp_table_rule_add {
688 struct pipeline_msg_rsp_table_rule_add_default {
692 struct pipeline_msg_rsp_table_rule_add_bulk {
696 struct pipeline_msg_rsp_table_rule_stats_read {
697 struct rte_table_action_stats_counters stats;
700 struct pipeline_msg_rsp_table_rule_mtr_read {
701 struct rte_table_action_mtr_counters stats;
704 struct pipeline_msg_rsp_table_rule_ttl_read {
705 struct rte_table_action_ttl_counters stats;
708 struct pipeline_msg_rsp_table_rule_time_read {
712 struct pipeline_msg_rsp {
717 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
718 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
719 struct pipeline_msg_rsp_table_stats_read table_stats_read;
720 struct pipeline_msg_rsp_table_rule_add table_rule_add;
721 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
722 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
723 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
724 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
725 struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
726 struct pipeline_msg_rsp_table_rule_time_read table_rule_time_read;
733 static struct pipeline_msg_req *
734 pipeline_msg_alloc(void)
736 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
737 sizeof(struct pipeline_msg_rsp));
739 return calloc(1, size);
/**
 * Release a message buffer obtained from pipeline_msg_alloc()
 * (requests and responses share the same buffer).
 */
static void
pipeline_msg_free(struct pipeline_msg_rsp *rsp)
{
	free(rsp);
}
748 static struct pipeline_msg_rsp *
749 pipeline_msg_send_recv(struct pipeline *p,
750 struct pipeline_msg_req *req)
752 struct rte_ring *msgq_req = p->msgq_req;
753 struct rte_ring *msgq_rsp = p->msgq_rsp;
754 struct pipeline_msg_rsp *rsp;
759 status = rte_ring_sp_enqueue(msgq_req, req);
760 } while (status == -ENOBUFS);
764 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
765 } while (status != 0);
771 pipeline_port_in_stats_read(const char *pipeline_name,
773 struct rte_pipeline_port_in_stats *stats,
777 struct pipeline_msg_req *req;
778 struct pipeline_msg_rsp *rsp;
781 /* Check input params */
782 if ((pipeline_name == NULL) ||
786 p = pipeline_find(pipeline_name);
788 (port_id >= p->n_ports_in))
791 if (!pipeline_is_running(p)) {
792 status = rte_pipeline_port_in_stats_read(p->p,
800 /* Allocate request */
801 req = pipeline_msg_alloc();
806 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
808 req->port_in_stats_read.clear = clear;
810 /* Send request and wait for response */
811 rsp = pipeline_msg_send_recv(p, req);
814 status = rsp->status;
816 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
819 pipeline_msg_free(rsp);
825 pipeline_port_in_enable(const char *pipeline_name,
829 struct pipeline_msg_req *req;
830 struct pipeline_msg_rsp *rsp;
833 /* Check input params */
834 if (pipeline_name == NULL)
837 p = pipeline_find(pipeline_name);
839 (port_id >= p->n_ports_in))
842 if (!pipeline_is_running(p)) {
843 status = rte_pipeline_port_in_enable(p->p, port_id);
847 /* Allocate request */
848 req = pipeline_msg_alloc();
853 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
856 /* Send request and wait for response */
857 rsp = pipeline_msg_send_recv(p, req);
860 status = rsp->status;
863 pipeline_msg_free(rsp);
869 pipeline_port_in_disable(const char *pipeline_name,
873 struct pipeline_msg_req *req;
874 struct pipeline_msg_rsp *rsp;
877 /* Check input params */
878 if (pipeline_name == NULL)
881 p = pipeline_find(pipeline_name);
883 (port_id >= p->n_ports_in))
886 if (!pipeline_is_running(p)) {
887 status = rte_pipeline_port_in_disable(p->p, port_id);
891 /* Allocate request */
892 req = pipeline_msg_alloc();
897 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
900 /* Send request and wait for response */
901 rsp = pipeline_msg_send_recv(p, req);
904 status = rsp->status;
907 pipeline_msg_free(rsp);
913 pipeline_port_out_stats_read(const char *pipeline_name,
915 struct rte_pipeline_port_out_stats *stats,
919 struct pipeline_msg_req *req;
920 struct pipeline_msg_rsp *rsp;
923 /* Check input params */
924 if ((pipeline_name == NULL) ||
928 p = pipeline_find(pipeline_name);
930 (port_id >= p->n_ports_out))
933 if (!pipeline_is_running(p)) {
934 status = rte_pipeline_port_out_stats_read(p->p,
942 /* Allocate request */
943 req = pipeline_msg_alloc();
948 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
950 req->port_out_stats_read.clear = clear;
952 /* Send request and wait for response */
953 rsp = pipeline_msg_send_recv(p, req);
956 status = rsp->status;
958 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
961 pipeline_msg_free(rsp);
967 pipeline_table_stats_read(const char *pipeline_name,
969 struct rte_pipeline_table_stats *stats,
973 struct pipeline_msg_req *req;
974 struct pipeline_msg_rsp *rsp;
977 /* Check input params */
978 if ((pipeline_name == NULL) ||
982 p = pipeline_find(pipeline_name);
984 (table_id >= p->n_tables))
987 if (!pipeline_is_running(p)) {
988 status = rte_pipeline_table_stats_read(p->p,
996 /* Allocate request */
997 req = pipeline_msg_alloc();
1002 req->type = PIPELINE_REQ_TABLE_STATS_READ;
1004 req->table_stats_read.clear = clear;
1006 /* Send request and wait for response */
1007 rsp = pipeline_msg_send_recv(p, req);
1010 status = rsp->status;
1012 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
1015 pipeline_msg_free(rsp);
1021 match_check(struct table_rule_match *match,
1025 struct table *table;
1027 if ((match == NULL) ||
1029 (table_id >= p->n_tables))
1032 table = &p->table[table_id];
1033 if (match->match_type != table->params.match_type)
1036 switch (match->match_type) {
1039 struct table_acl_params *t = &table->params.match.acl;
1040 struct table_rule_match_acl *r = &match->match.acl;
1042 if ((r->ip_version && (t->ip_version == 0)) ||
1043 ((r->ip_version == 0) && t->ip_version))
1046 if (r->ip_version) {
1047 if ((r->sa_depth > 32) ||
1051 if ((r->sa_depth > 128) ||
1052 (r->da_depth > 128))
1066 struct table_lpm_params *t = &table->params.match.lpm;
1067 struct table_rule_match_lpm *r = &match->match.lpm;
1069 if ((r->ip_version && (t->key_size != 4)) ||
1070 ((r->ip_version == 0) && (t->key_size != 16)))
1073 if (r->ip_version) {
1092 action_check(struct table_rule_action *action,
1096 struct table_action_profile *ap;
1098 if ((action == NULL) ||
1100 (table_id >= p->n_tables))
1103 ap = p->table[table_id].ap;
1104 if (action->action_mask != ap->params.action_mask)
1107 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1108 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1109 (action->fwd.id >= p->n_ports_out))
1112 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1113 (action->fwd.id >= p->n_tables))
1117 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1118 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
1119 uint32_t tc_mask1 = action->mtr.tc_mask;
1121 if (tc_mask1 != tc_mask0)
1125 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1126 uint32_t n_subports_per_port =
1127 ap->params.tm.n_subports_per_port;
1128 uint32_t n_pipes_per_subport =
1129 ap->params.tm.n_pipes_per_subport;
1130 uint32_t subport_id = action->tm.subport_id;
1131 uint32_t pipe_id = action->tm.pipe_id;
1133 if ((subport_id >= n_subports_per_port) ||
1134 (pipe_id >= n_pipes_per_subport))
1138 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1139 uint64_t encap_mask = ap->params.encap.encap_mask;
1140 enum rte_table_action_encap_type type = action->encap.type;
1142 if ((encap_mask & (1LLU << type)) == 0)
1146 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1147 int ip_version0 = ap->params.common.ip_version;
1148 int ip_version1 = action->nat.ip_version;
1150 if ((ip_version1 && (ip_version0 == 0)) ||
1151 ((ip_version1 == 0) && ip_version0))
1159 action_default_check(struct table_rule_action *action,
1163 if ((action == NULL) ||
1164 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1166 (table_id >= p->n_tables))
1169 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1170 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1171 (action->fwd.id >= p->n_ports_out))
1174 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1175 (action->fwd.id >= p->n_tables))
1182 union table_rule_match_low_level {
1183 struct rte_table_acl_rule_add_params acl_add;
1184 struct rte_table_acl_rule_delete_params acl_delete;
1185 struct rte_table_array_key array;
1186 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1187 struct rte_table_lpm_key lpm_ipv4;
1188 struct rte_table_lpm_ipv6_key lpm_ipv6;
1192 match_convert(struct table_rule_match *mh,
1193 union table_rule_match_low_level *ml,
1197 action_convert(struct rte_table_action *a,
1198 struct table_rule_action *action,
1199 struct rte_pipeline_table_entry *data);
1202 struct rte_pipeline *p;
1204 struct rte_table_action *a;
1209 table_rule_add_bulk_ll(struct table_ll *table,
1210 struct table_rule_list *list,
1213 union table_rule_match_low_level *match_ll = NULL;
1214 uint8_t *action_ll = NULL;
1215 void **match_ll_ptr = NULL;
1216 struct rte_pipeline_table_entry **action_ll_ptr = NULL;
1217 struct rte_pipeline_table_entry **entries_ptr = NULL;
1219 struct table_rule *rule;
1224 TAILQ_FOREACH(rule, list, node)
1227 /* Memory allocation */
1228 match_ll = calloc(n, sizeof(union table_rule_match_low_level));
1229 action_ll = calloc(n, TABLE_RULE_ACTION_SIZE_MAX);
1231 match_ll_ptr = calloc(n, sizeof(void *));
1232 action_ll_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1234 entries_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1235 found = calloc(n, sizeof(int));
1237 if (match_ll == NULL ||
1238 action_ll == NULL ||
1239 match_ll_ptr == NULL ||
1240 action_ll_ptr == NULL ||
1241 entries_ptr == NULL ||
1244 goto table_rule_add_bulk_ll_free;
1248 for (i = 0; i < n; i++) {
1249 match_ll_ptr[i] = (void *)&match_ll[i];
1250 action_ll_ptr[i] = (struct rte_pipeline_table_entry *)
1251 &action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
1254 /* Rule (match, action) conversion */
1256 TAILQ_FOREACH(rule, list, node) {
1257 status = match_convert(&rule->match, match_ll_ptr[i], 1);
1259 goto table_rule_add_bulk_ll_free;
1261 status = action_convert(table->a, &rule->action, action_ll_ptr[i]);
1263 goto table_rule_add_bulk_ll_free;
1268 /* Add rule (match, action) to table */
1269 if (table->bulk_supported) {
1270 status = rte_pipeline_table_entry_add_bulk(table->p,
1278 goto table_rule_add_bulk_ll_free;
1280 for (i = 0; i < n; i++) {
1281 status = rte_pipeline_table_entry_add(table->p,
1289 goto table_rule_add_bulk_ll_free;
1298 /* Write back to the rule list. */
1300 TAILQ_FOREACH(rule, list, node) {
1304 rule->data = entries_ptr[i];
1312 table_rule_add_bulk_ll_free:
1315 free(action_ll_ptr);
1324 pipeline_table_rule_add(const char *pipeline_name,
1326 struct table_rule_match *match,
1327 struct table_rule_action *action)
1330 struct table *table;
1331 struct pipeline_msg_req *req;
1332 struct pipeline_msg_rsp *rsp;
1333 struct table_rule *rule;
1336 /* Check input params */
1337 if ((pipeline_name == NULL) ||
1342 p = pipeline_find(pipeline_name);
1344 (table_id >= p->n_tables) ||
1345 match_check(match, p, table_id) ||
1346 action_check(action, p, table_id))
1349 table = &p->table[table_id];
1351 rule = calloc(1, sizeof(struct table_rule));
1355 memcpy(&rule->match, match, sizeof(*match));
1356 memcpy(&rule->action, action, sizeof(*action));
1358 if (!pipeline_is_running(p)) {
1359 union table_rule_match_low_level match_ll;
1360 struct rte_pipeline_table_entry *data_in, *data_out;
1364 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1365 if (buffer == NULL) {
1370 /* Table match-action rule conversion */
1371 data_in = (struct rte_pipeline_table_entry *)buffer;
1373 status = match_convert(match, &match_ll, 1);
1380 status = action_convert(table->a, action, data_in);
1387 /* Add rule (match, action) to table */
1388 status = rte_pipeline_table_entry_add(p->p,
1400 /* Write Response */
1401 rule->data = data_out;
1402 table_rule_add(table, rule);
1408 /* Allocate request */
1409 req = pipeline_msg_alloc();
1416 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1418 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1419 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1421 /* Send request and wait for response */
1422 rsp = pipeline_msg_send_recv(p, req);
1425 status = rsp->status;
1427 rule->data = rsp->table_rule_add.data;
1428 table_rule_add(table, rule);
1433 pipeline_msg_free(rsp);
1439 pipeline_table_rule_add_default(const char *pipeline_name,
1441 struct table_rule_action *action)
1444 struct table *table;
1445 struct pipeline_msg_req *req;
1446 struct pipeline_msg_rsp *rsp;
1447 struct table_rule *rule;
1450 /* Check input params */
1451 if ((pipeline_name == NULL) ||
1455 p = pipeline_find(pipeline_name);
1457 (table_id >= p->n_tables) ||
1458 action_default_check(action, p, table_id))
1461 table = &p->table[table_id];
1463 rule = calloc(1, sizeof(struct table_rule));
1467 memcpy(&rule->action, action, sizeof(*action));
1469 if (!pipeline_is_running(p)) {
1470 struct rte_pipeline_table_entry *data_in, *data_out;
1473 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1474 if (buffer == NULL) {
1480 data_in = (struct rte_pipeline_table_entry *)buffer;
1482 data_in->action = action->fwd.action;
1483 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1484 data_in->port_id = action->fwd.id;
1485 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1486 data_in->table_id = action->fwd.id;
1488 /* Add default rule to table */
1489 status = rte_pipeline_table_default_entry_add(p->p,
1499 /* Write Response */
1500 rule->data = data_out;
1501 table_rule_default_add(table, rule);
1507 /* Allocate request */
1508 req = pipeline_msg_alloc();
1515 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1517 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1519 /* Send request and wait for response */
1520 rsp = pipeline_msg_send_recv(p, req);
1523 status = rsp->status;
1525 rule->data = rsp->table_rule_add_default.data;
1526 table_rule_default_add(table, rule);
1531 pipeline_msg_free(rsp);
1537 table_rule_list_free(struct table_rule_list *list)
1545 struct table_rule *rule;
1547 rule = TAILQ_FIRST(list);
1551 TAILQ_REMOVE(list, rule, node);
1561 pipeline_table_rule_add_bulk(const char *pipeline_name,
1563 struct table_rule_list *list,
1564 uint32_t *n_rules_added,
1565 uint32_t *n_rules_not_added)
1568 struct table *table;
1569 struct pipeline_msg_req *req;
1570 struct pipeline_msg_rsp *rsp;
1571 struct table_rule *rule;
1574 /* Check input params */
1575 if ((pipeline_name == NULL) ||
1577 TAILQ_EMPTY(list) ||
1578 (n_rules_added == NULL) ||
1579 (n_rules_not_added == NULL)) {
1580 table_rule_list_free(list);
1584 p = pipeline_find(pipeline_name);
1586 (table_id >= p->n_tables)) {
1587 table_rule_list_free(list);
1591 table = &p->table[table_id];
1593 TAILQ_FOREACH(rule, list, node)
1594 if (match_check(&rule->match, p, table_id) ||
1595 action_check(&rule->action, p, table_id)) {
1596 table_rule_list_free(list);
1600 if (!pipeline_is_running(p)) {
1601 struct table_ll table_ll = {
1603 .table_id = table_id,
1605 .bulk_supported = table->params.match_type == TABLE_ACL,
1608 status = table_rule_add_bulk_ll(&table_ll, list, n_rules_added);
1610 table_rule_list_free(list);
1614 table_rule_add_bulk(table, list, *n_rules_added);
1615 *n_rules_not_added = table_rule_list_free(list);
1619 /* Allocate request */
1620 req = pipeline_msg_alloc();
1622 table_rule_list_free(list);
1627 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1629 req->table_rule_add_bulk.list = list;
1630 req->table_rule_add_bulk.bulk = table->params.match_type == TABLE_ACL;
1632 /* Send request and wait for response */
1633 rsp = pipeline_msg_send_recv(p, req);
1636 status = rsp->status;
1638 *n_rules_added = rsp->table_rule_add_bulk.n_rules;
1640 table_rule_add_bulk(table, list, *n_rules_added);
1641 *n_rules_not_added = table_rule_list_free(list);
1643 table_rule_list_free(list);
1647 pipeline_msg_free(rsp);
1653 pipeline_table_rule_delete(const char *pipeline_name,
1655 struct table_rule_match *match)
1658 struct table *table;
1659 struct pipeline_msg_req *req;
1660 struct pipeline_msg_rsp *rsp;
1663 /* Check input params */
1664 if ((pipeline_name == NULL) ||
1668 p = pipeline_find(pipeline_name);
1670 (table_id >= p->n_tables) ||
1671 match_check(match, p, table_id))
1674 table = &p->table[table_id];
1676 if (!pipeline_is_running(p)) {
1677 union table_rule_match_low_level match_ll;
1680 status = match_convert(match, &match_ll, 0);
1684 status = rte_pipeline_table_entry_delete(p->p,
1691 table_rule_delete(table, match);
1696 /* Allocate request */
1697 req = pipeline_msg_alloc();
1702 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1704 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1706 /* Send request and wait for response */
1707 rsp = pipeline_msg_send_recv(p, req);
1710 status = rsp->status;
1712 table_rule_delete(table, match);
1715 pipeline_msg_free(rsp);
1721 pipeline_table_rule_delete_default(const char *pipeline_name,
1725 struct table *table;
1726 struct pipeline_msg_req *req;
1727 struct pipeline_msg_rsp *rsp;
1730 /* Check input params */
1731 if (pipeline_name == NULL)
1734 p = pipeline_find(pipeline_name);
1736 (table_id >= p->n_tables))
1739 table = &p->table[table_id];
1741 if (!pipeline_is_running(p)) {
1742 status = rte_pipeline_table_default_entry_delete(p->p,
1747 table_rule_default_delete(table);
1752 /* Allocate request */
1753 req = pipeline_msg_alloc();
1758 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1761 /* Send request and wait for response */
1762 rsp = pipeline_msg_send_recv(p, req);
1765 status = rsp->status;
1767 table_rule_default_delete(table);
1770 pipeline_msg_free(rsp);
1776 pipeline_table_rule_stats_read(const char *pipeline_name,
1778 struct table_rule_match *match,
1779 struct rte_table_action_stats_counters *stats,
1783 struct table *table;
1784 struct pipeline_msg_req *req;
1785 struct pipeline_msg_rsp *rsp;
1786 struct table_rule *rule;
1789 /* Check input params */
1790 if ((pipeline_name == NULL) ||
1795 p = pipeline_find(pipeline_name);
1797 (table_id >= p->n_tables) ||
1798 match_check(match, p, table_id))
1801 table = &p->table[table_id];
1802 rule = table_rule_find(table, match);
1806 if (!pipeline_is_running(p)) {
1807 status = rte_table_action_stats_read(table->a,
1815 /* Allocate request */
1816 req = pipeline_msg_alloc();
1821 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1823 req->table_rule_stats_read.data = rule->data;
1824 req->table_rule_stats_read.clear = clear;
1826 /* Send request and wait for response */
1827 rsp = pipeline_msg_send_recv(p, req);
1830 status = rsp->status;
1832 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1835 pipeline_msg_free(rsp);
1841 pipeline_table_mtr_profile_add(const char *pipeline_name,
1843 uint32_t meter_profile_id,
1844 struct rte_table_action_meter_profile *profile)
1847 struct pipeline_msg_req *req;
1848 struct pipeline_msg_rsp *rsp;
1851 /* Check input params */
1852 if ((pipeline_name == NULL) ||
1856 p = pipeline_find(pipeline_name);
1858 (table_id >= p->n_tables))
1861 if (!pipeline_is_running(p)) {
1862 struct rte_table_action *a = p->table[table_id].a;
1864 status = rte_table_action_meter_profile_add(a,
1871 /* Allocate request */
1872 req = pipeline_msg_alloc();
1877 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1879 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1880 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1882 /* Send request and wait for response */
1883 rsp = pipeline_msg_send_recv(p, req);
1886 status = rsp->status;
1889 pipeline_msg_free(rsp);
/* Main-thread API: delete a meter profile from the given pipeline table.
 * Direct call when the pipeline is stopped, request/response message to the
 * owning data plane thread when it is running. Returns the operation status.
 * (Some lines are elided in this excerpt.) */
1895 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1897 uint32_t meter_profile_id)
1900 struct pipeline_msg_req *req;
1901 struct pipeline_msg_rsp *rsp;
1904 /* Check input params */
1905 if (pipeline_name == NULL)
1908 p = pipeline_find(pipeline_name);
1910 (table_id >= p->n_tables))
1913 if (!pipeline_is_running(p)) {
1914 struct rte_table_action *a = p->table[table_id].a;
1916 status = rte_table_action_meter_profile_delete(a,
1922 /* Allocate request */
1923 req = pipeline_msg_alloc();
1928 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1930 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1932 /* Send request and wait for response */
1933 rsp = pipeline_msg_send_recv(p, req);
1936 status = rsp->status;
1939 pipeline_msg_free(rsp);
/* Main-thread API: read (and optionally clear) the meter counters of one
 * table rule, identified by its match. Results are copied into *stats.
 * Direct read when the pipeline is stopped, message round-trip otherwise.
 * (Some lines are elided in this excerpt.) */
1945 pipeline_table_rule_mtr_read(const char *pipeline_name,
1947 struct table_rule_match *match,
1948 struct rte_table_action_mtr_counters *stats,
1952 struct table *table;
1953 struct pipeline_msg_req *req;
1954 struct pipeline_msg_rsp *rsp;
1955 struct table_rule *rule;
1959 /* Check input params */
1960 if ((pipeline_name == NULL) ||
1965 p = pipeline_find(pipeline_name);
1967 (table_id >= p->n_tables) ||
1968 match_check(match, p, table_id))
1971 table = &p->table[table_id];
/* Build a mask covering all traffic classes configured for this table's
 * meter action (n_tc bits set). */
1972 tc_mask = (1 << table->ap->params.mtr.n_tc) - 1;
1974 rule = table_rule_find(table, match);
1978 if (!pipeline_is_running(p)) {
1979 status = rte_table_action_meter_read(table->a,
1988 /* Allocate request */
1989 req = pipeline_msg_alloc();
1994 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
1996 req->table_rule_mtr_read.data = rule->data;
1997 req->table_rule_mtr_read.tc_mask = tc_mask;
1998 req->table_rule_mtr_read.clear = clear;
2000 /* Send request and wait for response */
2001 rsp = pipeline_msg_send_recv(p, req);
2004 status = rsp->status;
/* NOTE(review): stats appear to be copied unconditionally from the
 * response; the elided lines presumably gate this on status — confirm. */
2006 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
2009 pipeline_msg_free(rsp);
/* Main-thread API: update the DSCP translation table entries selected by
 * dscp_mask for the given pipeline table. Direct call when the pipeline is
 * stopped, message round-trip to the data plane thread otherwise.
 * (Some lines are elided in this excerpt.) */
2015 pipeline_table_dscp_table_update(const char *pipeline_name,
2018 struct rte_table_action_dscp_table *dscp_table)
2021 struct pipeline_msg_req *req;
2022 struct pipeline_msg_rsp *rsp;
2025 /* Check input params */
2026 if ((pipeline_name == NULL) ||
2027 (dscp_table == NULL))
2030 p = pipeline_find(pipeline_name);
2032 (table_id >= p->n_tables))
2035 if (!pipeline_is_running(p)) {
2036 struct rte_table_action *a = p->table[table_id].a;
2038 status = rte_table_action_dscp_table_update(a,
2045 /* Allocate request */
2046 req = pipeline_msg_alloc();
2051 req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
/* DSCP table is copied by value into the request message. */
2053 req->table_dscp_table_update.dscp_mask = dscp_mask;
2054 memcpy(&req->table_dscp_table_update.dscp_table,
2055 dscp_table, sizeof(*dscp_table));
2057 /* Send request and wait for response */
2058 rsp = pipeline_msg_send_recv(p, req);
2061 status = rsp->status;
2064 pipeline_msg_free(rsp);
/* Main-thread API: read (and optionally clear) the TTL violation counters
 * of one table rule. Fails early if the table's TTL action was not
 * configured with packet counters enabled. Direct read when the pipeline is
 * stopped, message round-trip otherwise.
 * (Some lines are elided in this excerpt.) */
2070 pipeline_table_rule_ttl_read(const char *pipeline_name,
2072 struct table_rule_match *match,
2073 struct rte_table_action_ttl_counters *stats,
2077 struct table *table;
2078 struct pipeline_msg_req *req;
2079 struct pipeline_msg_rsp *rsp;
2080 struct table_rule *rule;
2083 /* Check input params */
2084 if ((pipeline_name == NULL) ||
2089 p = pipeline_find(pipeline_name);
2091 (table_id >= p->n_tables) ||
2092 match_check(match, p, table_id))
2095 table = &p->table[table_id];
/* TTL counters only exist when packet counting was enabled in the
 * table action profile. */
2096 if (!table->ap->params.ttl.n_packets_enabled)
2099 rule = table_rule_find(table, match);
2103 if (!pipeline_is_running(p)) {
2104 status = rte_table_action_ttl_read(table->a,
2112 /* Allocate request */
2113 req = pipeline_msg_alloc();
2118 req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
2120 req->table_rule_ttl_read.data = rule->data;
2121 req->table_rule_ttl_read.clear = clear;
2123 /* Send request and wait for response */
2124 rsp = pipeline_msg_send_recv(p, req);
2127 status = rsp->status;
2129 memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
2132 pipeline_msg_free(rsp);
/* Main-thread API: read the timestamp recorded by the TIME action for one
 * table rule into *timestamp. Direct read when the pipeline is stopped,
 * message round-trip to the data plane thread otherwise.
 * (Some lines are elided in this excerpt.) */
2138 pipeline_table_rule_time_read(const char *pipeline_name,
2140 struct table_rule_match *match,
2141 uint64_t *timestamp)
2144 struct table *table;
2145 struct pipeline_msg_req *req;
2146 struct pipeline_msg_rsp *rsp;
2147 struct table_rule *rule;
2150 /* Check input params */
2151 if ((pipeline_name == NULL) ||
2153 (timestamp == NULL))
2156 p = pipeline_find(pipeline_name);
2158 (table_id >= p->n_tables) ||
2159 match_check(match, p, table_id))
2162 table = &p->table[table_id];
2164 rule = table_rule_find(table, match);
2168 if (!pipeline_is_running(p)) {
2169 status = rte_table_action_time_read(table->a,
2176 /* Allocate request */
2177 req = pipeline_msg_alloc();
2182 req->type = PIPELINE_REQ_TABLE_RULE_TIME_READ;
2184 req->table_rule_time_read.data = rule->data;
2186 /* Send request and wait for response */
2187 rsp = pipeline_msg_send_recv(p, req);
2190 status = rsp->status;
2192 *timestamp = rsp->table_rule_time_read.timestamp;
2195 pipeline_msg_free(rsp);
2201 * Data plane threads: message handling
/* Non-blocking dequeue of one request from the pipeline's request ring.
 * Single-consumer dequeue: only the owning data plane thread may call this.
 * (Elided lines presumably return NULL when the ring is empty — confirm.) */
2203 static inline struct pipeline_msg_req *
2204 pipeline_msg_recv(struct rte_ring *msgq_req)
2206 struct pipeline_msg_req *req;
2208 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Post a response on the response ring, busy-retrying while the ring is
 * full (-ENOBUFS). Single-producer enqueue: only the owning data plane
 * thread may call this. */
2217 pipeline_msg_send(struct rte_ring *msgq_rsp,
2218 struct pipeline_msg_rsp *rsp)
2223 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
2224 } while (status == -ENOBUFS);
/* Data-plane handler: read (and optionally clear) input port statistics.
 * The response reuses the request's buffer in place (rsp aliases req). */
2227 static struct pipeline_msg_rsp *
2228 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
2229 struct pipeline_msg_req *req)
2231 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2232 uint32_t port_id = req->id;
2233 int clear = req->port_in_stats_read.clear;
2235 rsp->status = rte_pipeline_port_in_stats_read(p->p,
2237 &rsp->port_in_stats_read.stats,
/* Data-plane handler: enable an input port. Response reuses the request
 * buffer in place (rsp aliases req). */
2243 static struct pipeline_msg_rsp *
2244 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
2245 struct pipeline_msg_req *req)
2247 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2248 uint32_t port_id = req->id;
2250 rsp->status = rte_pipeline_port_in_enable(p->p,
/* Data-plane handler: disable an input port. Response reuses the request
 * buffer in place (rsp aliases req). */
2256 static struct pipeline_msg_rsp *
2257 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
2258 struct pipeline_msg_req *req)
2260 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2261 uint32_t port_id = req->id;
2263 rsp->status = rte_pipeline_port_in_disable(p->p,
/* Data-plane handler: read (and optionally clear) output port statistics.
 * Response reuses the request buffer in place (rsp aliases req). */
2269 static struct pipeline_msg_rsp *
2270 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
2271 struct pipeline_msg_req *req)
2273 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2274 uint32_t port_id = req->id;
2275 int clear = req->port_out_stats_read.clear;
2277 rsp->status = rte_pipeline_port_out_stats_read(p->p,
2279 &rsp->port_out_stats_read.stats,
/* Data-plane handler: read (and optionally clear) table statistics.
 * Response reuses the request buffer in place (rsp aliases req).
 * NOTE(review): the local is named port_id but req->id here is used as a
 * table id — consider renaming for clarity. */
2285 static struct pipeline_msg_rsp *
2286 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
2287 struct pipeline_msg_req *req)
2289 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2290 uint32_t port_id = req->id;
2291 int clear = req->table_stats_read.clear;
2293 rsp->status = rte_pipeline_table_stats_read(p->p,
2295 &rsp->table_stats_read.stats,
/* Split an IPv6 prefix depth (0..128) into four per-32-bit-word depths for
 * the ACL field encoding: words fully covered by the prefix get depth 32,
 * the partially covered word gets the remainder, the rest get 0 (elided
 * branches presumably fill the remaining entries — confirm). */
2302 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
2307 switch (depth / 32) {
2317 depth32[1] = depth - 32;
2325 depth32[2] = depth - 64;
2333 depth32[3] = depth - 96;
/* Translate a high-level rule match (mh) into the low-level union (ml)
 * consumed by the rte_table back ends. The output is fully zeroed first,
 * then filled according to the match type: ACL (IPv4 or IPv6, with separate
 * add/delete encodings), array, hash, or LPM. The third parameter (elided
 * here) selects between the acl_add and acl_delete layouts for ACL matches.
 * (Some lines are elided in this excerpt.) */
2349 match_convert(struct table_rule_match *mh,
2350 union table_rule_match_low_level *ml,
2353 memset(ml, 0, sizeof(*ml));
2355 switch (mh->match_type) {
2357 if (mh->match.acl.ip_version)
/* ACL IPv4, add layout: field 0 = protocol, 1 = source address,
 * 2 = destination address, 3/4 = L4 ports; mask_range carries the
 * prefix depth or port range bound. */
2359 ml->acl_add.field_value[0].value.u8 =
2360 mh->match.acl.proto;
2361 ml->acl_add.field_value[0].mask_range.u8 =
2362 mh->match.acl.proto_mask;
2364 ml->acl_add.field_value[1].value.u32 =
2365 mh->match.acl.ipv4.sa;
2366 ml->acl_add.field_value[1].mask_range.u32 =
2367 mh->match.acl.sa_depth;
2369 ml->acl_add.field_value[2].value.u32 =
2370 mh->match.acl.ipv4.da;
2371 ml->acl_add.field_value[2].mask_range.u32 =
2372 mh->match.acl.da_depth;
2374 ml->acl_add.field_value[3].value.u16 =
2376 ml->acl_add.field_value[3].mask_range.u16 =
2379 ml->acl_add.field_value[4].value.u16 =
2381 ml->acl_add.field_value[4].mask_range.u16 =
2384 ml->acl_add.priority =
2385 (int32_t) mh->match.acl.priority;
/* ACL IPv4, delete layout: same field encoding as acl_add but no
 * priority. */
2387 ml->acl_delete.field_value[0].value.u8 =
2388 mh->match.acl.proto;
2389 ml->acl_delete.field_value[0].mask_range.u8 =
2390 mh->match.acl.proto_mask;
2392 ml->acl_delete.field_value[1].value.u32 =
2393 mh->match.acl.ipv4.sa;
2394 ml->acl_delete.field_value[1].mask_range.u32 =
2395 mh->match.acl.sa_depth;
2397 ml->acl_delete.field_value[2].value.u32 =
2398 mh->match.acl.ipv4.da;
2399 ml->acl_delete.field_value[2].mask_range.u32 =
2400 mh->match.acl.da_depth;
2402 ml->acl_delete.field_value[3].value.u16 =
2404 ml->acl_delete.field_value[3].mask_range.u16 =
2407 ml->acl_delete.field_value[4].value.u16 =
2409 ml->acl_delete.field_value[4].mask_range.u16 =
/* ACL IPv6, add layout: each 128-bit address is viewed as four
 * big-endian 32-bit words; the prefix depth is split per word by
 * match_convert_ipv6_depth(). Fields 1-4 = source address words,
 * 5-8 = destination address words, 9/10 = L4 ports. */
2415 (uint32_t *) mh->match.acl.ipv6.sa;
2417 (uint32_t *) mh->match.acl.ipv6.da;
2418 uint32_t sa32_depth[4], da32_depth[4];
2421 status = match_convert_ipv6_depth(
2422 mh->match.acl.sa_depth,
2427 status = match_convert_ipv6_depth(
2428 mh->match.acl.da_depth,
2433 ml->acl_add.field_value[0].value.u8 =
2434 mh->match.acl.proto;
2435 ml->acl_add.field_value[0].mask_range.u8 =
2436 mh->match.acl.proto_mask;
2438 ml->acl_add.field_value[1].value.u32 =
2439 rte_be_to_cpu_32(sa32[0]);
2440 ml->acl_add.field_value[1].mask_range.u32 =
2442 ml->acl_add.field_value[2].value.u32 =
2443 rte_be_to_cpu_32(sa32[1]);
2444 ml->acl_add.field_value[2].mask_range.u32 =
2446 ml->acl_add.field_value[3].value.u32 =
2447 rte_be_to_cpu_32(sa32[2]);
2448 ml->acl_add.field_value[3].mask_range.u32 =
2450 ml->acl_add.field_value[4].value.u32 =
2451 rte_be_to_cpu_32(sa32[3]);
2452 ml->acl_add.field_value[4].mask_range.u32 =
2455 ml->acl_add.field_value[5].value.u32 =
2456 rte_be_to_cpu_32(da32[0]);
2457 ml->acl_add.field_value[5].mask_range.u32 =
2459 ml->acl_add.field_value[6].value.u32 =
2460 rte_be_to_cpu_32(da32[1]);
2461 ml->acl_add.field_value[6].mask_range.u32 =
2463 ml->acl_add.field_value[7].value.u32 =
2464 rte_be_to_cpu_32(da32[2]);
2465 ml->acl_add.field_value[7].mask_range.u32 =
2467 ml->acl_add.field_value[8].value.u32 =
2468 rte_be_to_cpu_32(da32[3]);
2469 ml->acl_add.field_value[8].mask_range.u32 =
2472 ml->acl_add.field_value[9].value.u16 =
2474 ml->acl_add.field_value[9].mask_range.u16 =
2477 ml->acl_add.field_value[10].value.u16 =
2479 ml->acl_add.field_value[10].mask_range.u16 =
2482 ml->acl_add.priority =
2483 (int32_t) mh->match.acl.priority;
/* ACL IPv6, delete layout: mirrors the add layout above without the
 * priority field. */
2486 (uint32_t *) mh->match.acl.ipv6.sa;
2488 (uint32_t *) mh->match.acl.ipv6.da;
2489 uint32_t sa32_depth[4], da32_depth[4];
2492 status = match_convert_ipv6_depth(
2493 mh->match.acl.sa_depth,
2498 status = match_convert_ipv6_depth(
2499 mh->match.acl.da_depth,
2504 ml->acl_delete.field_value[0].value.u8 =
2505 mh->match.acl.proto;
2506 ml->acl_delete.field_value[0].mask_range.u8 =
2507 mh->match.acl.proto_mask;
2509 ml->acl_delete.field_value[1].value.u32 =
2510 rte_be_to_cpu_32(sa32[0]);
2511 ml->acl_delete.field_value[1].mask_range.u32 =
2513 ml->acl_delete.field_value[2].value.u32 =
2514 rte_be_to_cpu_32(sa32[1]);
2515 ml->acl_delete.field_value[2].mask_range.u32 =
2517 ml->acl_delete.field_value[3].value.u32 =
2518 rte_be_to_cpu_32(sa32[2]);
2519 ml->acl_delete.field_value[3].mask_range.u32 =
2521 ml->acl_delete.field_value[4].value.u32 =
2522 rte_be_to_cpu_32(sa32[3]);
2523 ml->acl_delete.field_value[4].mask_range.u32 =
2526 ml->acl_delete.field_value[5].value.u32 =
2527 rte_be_to_cpu_32(da32[0]);
2528 ml->acl_delete.field_value[5].mask_range.u32 =
2530 ml->acl_delete.field_value[6].value.u32 =
2531 rte_be_to_cpu_32(da32[1]);
2532 ml->acl_delete.field_value[6].mask_range.u32 =
2534 ml->acl_delete.field_value[7].value.u32 =
2535 rte_be_to_cpu_32(da32[2]);
2536 ml->acl_delete.field_value[7].mask_range.u32 =
2538 ml->acl_delete.field_value[8].value.u32 =
2539 rte_be_to_cpu_32(da32[3]);
2540 ml->acl_delete.field_value[8].mask_range.u32 =
2543 ml->acl_delete.field_value[9].value.u16 =
2545 ml->acl_delete.field_value[9].mask_range.u16 =
2548 ml->acl_delete.field_value[10].value.u16 =
2550 ml->acl_delete.field_value[10].mask_range.u16 =
/* Array match: plain position index. */
2556 ml->array.pos = mh->match.array.pos;
/* Hash match: raw key bytes, copied verbatim. */
2560 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
/* LPM match: IPv4 or IPv6 prefix + depth, selected by ip_version. */
2564 if (mh->match.lpm.ip_version) {
2565 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
2566 ml->lpm_ipv4.depth = mh->match.lpm.depth;
2568 memcpy(ml->lpm_ipv6.ip,
2569 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip))
2570 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/* Apply every action present in action->action_mask onto the pipeline table
 * entry *data via rte_table_action_apply(), one action type at a time in a
 * fixed order (FWD, LB, MTR, TM, ENCAP, NAT, TTL, STATS, TIME, SYM_CRYPTO,
 * TAG, DECAP). Elided lines presumably propagate a non-zero status from any
 * apply call — confirm. */
2581 action_convert(struct rte_table_action *a,
2582 struct table_rule_action *action,
2583 struct rte_pipeline_table_entry *data)
2588 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2589 status = rte_table_action_apply(a,
2591 RTE_TABLE_ACTION_FWD,
2598 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2599 status = rte_table_action_apply(a,
2601 RTE_TABLE_ACTION_LB,
2608 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2609 status = rte_table_action_apply(a,
2611 RTE_TABLE_ACTION_MTR,
2618 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2619 status = rte_table_action_apply(a,
2621 RTE_TABLE_ACTION_TM,
2628 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2629 status = rte_table_action_apply(a,
2631 RTE_TABLE_ACTION_ENCAP,
2638 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2639 status = rte_table_action_apply(a,
2641 RTE_TABLE_ACTION_NAT,
2648 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2649 status = rte_table_action_apply(a,
2651 RTE_TABLE_ACTION_TTL,
2658 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2659 status = rte_table_action_apply(a,
2661 RTE_TABLE_ACTION_STATS,
2668 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2669 status = rte_table_action_apply(a,
2671 RTE_TABLE_ACTION_TIME,
2678 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2679 status = rte_table_action_apply(a,
2681 RTE_TABLE_ACTION_SYM_CRYPTO,
2682 &action->sym_crypto);
2688 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
2689 status = rte_table_action_apply(a,
2691 RTE_TABLE_ACTION_TAG,
2698 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2699 status = rte_table_action_apply(a,
2701 RTE_TABLE_ACTION_DECAP,
/* Data-plane handler: add one rule to a table. Converts the high-level
 * match/action carried in the request into low-level form, builds the entry
 * in p->buffer (scratch space sized TABLE_RULE_ACTION_SIZE_MAX), then adds
 * it to the pipeline table. The handle of the installed entry (data_out) is
 * returned in the response, which reuses the request buffer in place. */
2711 static struct pipeline_msg_rsp *
2712 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
2713 struct pipeline_msg_req *req)
2715 union table_rule_match_low_level match_ll;
2716 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2717 struct table_rule_match *match = &req->table_rule_add.match;
2718 struct table_rule_action *action = &req->table_rule_add.action;
2719 struct rte_pipeline_table_entry *data_in, *data_out;
2720 uint32_t table_id = req->id;
2721 int key_found, status;
2722 struct rte_table_action *a = p->table_data[table_id].a;
/* Build the entry in the per-pipeline scratch buffer. */
2725 memset(p->buffer, 0, sizeof(p->buffer));
2726 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* 1 selects the "add" low-level match layout. */
2728 status = match_convert(match, &match_ll, 1);
2734 status = action_convert(a, action, data_in);
2740 status = rte_pipeline_table_entry_add(p->p,
2751 /* Write response */
2753 rsp->table_rule_add.data = data_out;
/* Data-plane handler: install a table's default (miss) entry. Only the FWD
 * action is honored here: the entry's port_id or table_id is filled from
 * action->fwd.id depending on the forward action type. The installed entry
 * handle is returned in the response (which aliases the request buffer). */
2758 static struct pipeline_msg_rsp *
2759 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2760 struct pipeline_msg_req *req)
2762 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2763 struct table_rule_action *action = &req->table_rule_add_default.action;
2764 struct rte_pipeline_table_entry *data_in, *data_out;
2765 uint32_t table_id = req->id;
2769 memset(p->buffer, 0, sizeof(p->buffer));
2770 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2772 data_in->action = action->fwd.action;
2773 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2774 data_in->port_id = action->fwd.id;
2775 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2776 data_in->table_id = action->fwd.id;
2778 /* Add default rule to table */
2779 status = rte_pipeline_table_default_entry_add(p->p,
2788 /* Write response */
2790 rsp->table_rule_add_default.data = data_out;
/* Data-plane handler: add a list of rules to a table in one operation,
 * delegating to table_rule_add_bulk_ll(). On failure the response reports
 * zero rules added; on success it reports how many were installed. The
 * response reuses the request buffer in place. */
2795 static struct pipeline_msg_rsp *
2796 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2797 struct pipeline_msg_req *req)
2799 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2801 uint32_t table_id = req->id;
2802 struct table_rule_list *list = req->table_rule_add_bulk.list;
2803 uint32_t bulk = req->table_rule_add_bulk.bulk;
2805 uint32_t n_rules_added;
/* Low-level table descriptor handed to the bulk-add helper. */
2808 struct table_ll table_ll = {
2810 .table_id = table_id,
2811 .a = p->table_data[table_id].a,
2812 .bulk_supported = bulk,
2815 status = table_rule_add_bulk_ll(&table_ll, list, &n_rules_added);
2818 rsp->table_rule_add_bulk.n_rules = 0;
2822 /* Write response */
2824 rsp->table_rule_add_bulk.n_rules = n_rules_added;
/* Data-plane handler: delete one rule from a table. Converts the match to
 * the low-level "delete" layout (third match_convert argument = 0) and
 * removes the entry. Response reuses the request buffer in place. */
2828 static struct pipeline_msg_rsp *
2829 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2830 struct pipeline_msg_req *req)
2832 union table_rule_match_low_level match_ll;
2833 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2834 struct table_rule_match *match = &req->table_rule_delete.match;
2835 uint32_t table_id = req->id;
2836 int key_found, status;
2838 status = match_convert(match, &match_ll, 0);
2844 rsp->status = rte_pipeline_table_entry_delete(p->p,
/* Data-plane handler: delete a table's default (miss) entry. Response
 * reuses the request buffer in place. */
2853 static struct pipeline_msg_rsp *
2854 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2855 struct pipeline_msg_req *req)
2857 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2858 uint32_t table_id = req->id;
2860 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/* Data-plane handler: read (and optionally clear) a rule's STATS action
 * counters, using the entry handle (data) captured at rule add time.
 * Response reuses the request buffer in place. */
2867 static struct pipeline_msg_rsp *
2868 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2869 struct pipeline_msg_req *req)
2871 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2872 uint32_t table_id = req->id;
2873 void *data = req->table_rule_stats_read.data;
2874 int clear = req->table_rule_stats_read.clear;
2875 struct rte_table_action *a = p->table_data[table_id].a;
2877 rsp->status = rte_table_action_stats_read(a,
2879 &rsp->table_rule_stats_read.stats,
/* Data-plane handler: add a meter profile to a table's action profile.
 * Response reuses the request buffer in place. */
2885 static struct pipeline_msg_rsp *
2886 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2887 struct pipeline_msg_req *req)
2889 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2890 uint32_t table_id = req->id;
2891 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2892 struct rte_table_action_meter_profile *profile =
2893 &req->table_mtr_profile_add.profile;
2894 struct rte_table_action *a = p->table_data[table_id].a;
2896 rsp->status = rte_table_action_meter_profile_add(a,
/* Data-plane handler: delete a meter profile from a table's action profile.
 * Response reuses the request buffer in place. */
2903 static struct pipeline_msg_rsp *
2904 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2905 struct pipeline_msg_req *req)
2907 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2908 uint32_t table_id = req->id;
2909 uint32_t meter_profile_id =
2910 req->table_mtr_profile_delete.meter_profile_id;
2911 struct rte_table_action *a = p->table_data[table_id].a;
2913 rsp->status = rte_table_action_meter_profile_delete(a,
/* Data-plane handler: read (and optionally clear) a rule's meter counters
 * for the traffic classes selected by tc_mask. Response reuses the request
 * buffer in place. */
2919 static struct pipeline_msg_rsp *
2920 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2921 struct pipeline_msg_req *req)
2923 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2924 uint32_t table_id = req->id;
2925 void *data = req->table_rule_mtr_read.data;
2926 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2927 int clear = req->table_rule_mtr_read.clear;
2928 struct rte_table_action *a = p->table_data[table_id].a;
2930 rsp->status = rte_table_action_meter_read(a,
2933 &rsp->table_rule_mtr_read.stats,
/* Data-plane handler: update the DSCP translation entries selected by
 * dscp_mask for a table. Response reuses the request buffer in place. */
2939 static struct pipeline_msg_rsp *
2940 pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
2941 struct pipeline_msg_req *req)
2943 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2944 uint32_t table_id = req->id;
2945 uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
2946 struct rte_table_action_dscp_table *dscp_table =
2947 &req->table_dscp_table_update.dscp_table;
2948 struct rte_table_action *a = p->table_data[table_id].a;
2950 rsp->status = rte_table_action_dscp_table_update(a,
/* Data-plane handler: read (and optionally clear) a rule's TTL violation
 * counters. Response reuses the request buffer in place. */
2957 static struct pipeline_msg_rsp *
2958 pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
2959 struct pipeline_msg_req *req)
2961 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2962 uint32_t table_id = req->id;
2963 void *data = req->table_rule_ttl_read.data;
2964 int clear = req->table_rule_ttl_read.clear;
2965 struct rte_table_action *a = p->table_data[table_id].a;
2967 rsp->status = rte_table_action_ttl_read(a,
2969 &rsp->table_rule_ttl_read.stats,
/* Data-plane handler: read a rule's TIME action timestamp into the
 * response. Response reuses the request buffer in place. */
2975 static struct pipeline_msg_rsp *
2976 pipeline_msg_handle_table_rule_time_read(struct pipeline_data *p,
2977 struct pipeline_msg_req *req)
2979 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2980 uint32_t table_id = req->id;
2981 void *data = req->table_rule_time_read.data;
2982 struct rte_table_action *a = p->table_data[table_id].a;
2984 rsp->status = rte_table_action_time_read(a,
2986 &rsp->table_rule_time_read.timestamp);
/* Data-plane dispatch: drain the pipeline's request ring, routing each
 * request to the matching handler by req->type and sending the resulting
 * response back on the response ring. Unknown request types fall through
 * to the default case, which replies using the request buffer itself
 * (elided lines presumably set an error status — confirm). */
2992 pipeline_msg_handle(struct pipeline_data *p)
2995 struct pipeline_msg_req *req;
2996 struct pipeline_msg_rsp *rsp;
2998 req = pipeline_msg_recv(p->msgq_req);
3002 switch (req->type) {
3003 case PIPELINE_REQ_PORT_IN_STATS_READ:
3004 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
3007 case PIPELINE_REQ_PORT_IN_ENABLE:
3008 rsp = pipeline_msg_handle_port_in_enable(p, req);
3011 case PIPELINE_REQ_PORT_IN_DISABLE:
3012 rsp = pipeline_msg_handle_port_in_disable(p, req);
3015 case PIPELINE_REQ_PORT_OUT_STATS_READ:
3016 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
3019 case PIPELINE_REQ_TABLE_STATS_READ:
3020 rsp = pipeline_msg_handle_table_stats_read(p, req);
3023 case PIPELINE_REQ_TABLE_RULE_ADD:
3024 rsp = pipeline_msg_handle_table_rule_add(p, req);
3027 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
3028 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
3031 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
3032 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
3035 case PIPELINE_REQ_TABLE_RULE_DELETE:
3036 rsp = pipeline_msg_handle_table_rule_delete(p, req);
3039 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
3040 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
3043 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
3044 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
3047 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
3048 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
3051 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
3052 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
3055 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
3056 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
3059 case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
3060 rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
3063 case PIPELINE_REQ_TABLE_RULE_TTL_READ:
3064 rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
3067 case PIPELINE_REQ_TABLE_RULE_TIME_READ:
3068 rsp = pipeline_msg_handle_table_rule_time_read(p, req);
/* Unrecognized request: reply in place using the request buffer. */
3072 rsp = (struct pipeline_msg_rsp *) req;
3076 pipeline_msg_send(p->msgq_rsp, rsp);
3081 * Data plane threads: main
3084 thread_main(void *arg __rte_unused)
3086 struct thread_data *t;
3087 uint32_t thread_id, i;
3089 thread_id = rte_lcore_id();
3090 t = &thread_data[thread_id];
3093 for (i = 0; ; i++) {
3097 for (j = 0; j < t->n_pipelines; j++)
3098 rte_pipeline_run(t->p[j]);
3101 if ((i & 0xF) == 0) {
3102 uint64_t time = rte_get_tsc_cycles();
3103 uint64_t time_next_min = UINT64_MAX;
3105 if (time < t->time_next_min)
3108 /* Pipeline message queues */
3109 for (j = 0; j < t->n_pipelines; j++) {
3110 struct pipeline_data *p =
3111 &t->pipeline_data[j];
3112 uint64_t time_next = p->time_next;
3114 if (time_next <= time) {
3115 pipeline_msg_handle(p);
3116 rte_pipeline_flush(p->p);
3117 time_next = time + p->timer_period;
3118 p->time_next = time_next;
3121 if (time_next < time_next_min)
3122 time_next_min = time_next;
3125 /* Thread message queues */
3127 uint64_t time_next = t->time_next;
3129 if (time_next <= time) {
3130 thread_msg_handle(t);
3131 time_next = time + t->timer_period;
3132 t->time_next = time_next;
3135 if (time_next < time_next_min)
3136 time_next_min = time_next;
3139 t->time_next_min = time_next_min;