1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
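/*
 * The timer period bounds how often a data plane thread polls its message
 * queues. It is converted to TSC cycles at init time; for example, assuming
 * a 2 GHz TSC, (2000000000 * 100) / 1000 = 200000000 cycles between polls.
 */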
35 * Main thread: data plane thread context
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
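/*
 * The buffer above is per-pipeline scratch space used by the data plane
 * thread to build the low-level table entry when handling rule add requests.
 */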
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
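/*
 * Both arrays are indexed by lcore ID: thread[] is the main thread's view of
 * each worker (message rings plus enabled flag), while thread_data[] is the
 * run-time context touched only by the owning data plane thread while it
 * runs its pipelines.
 */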
81 * Main thread: data plane thread init
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
108 RTE_LCORE_FOREACH_WORKER(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
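/*
 * Each message ring has exactly one producer and one consumer: the main
 * thread enqueues requests and the owning worker dequeues them, with the
 * roles reversed for responses, hence the SP/SC ring flags.
 */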
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Main thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
159 thread_is_running(uint32_t thread_id)
161 enum rte_lcore_state_t thread_state;
163 thread_state = rte_eal_get_lcore_state(thread_id);
164 return (thread_state == RUNNING) ? 1 : 0;
168 * Pipeline is running when:
169 * (A) Pipeline is mapped to a data plane thread AND
170 * (B) Its data plane thread is in RUNNING state.
173 pipeline_is_running(struct pipeline *p)
178 return thread_is_running(p->thread_id);
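/*
 * Every control operation below follows the same pattern: when the target
 * data plane thread is not running, the main thread applies the change to
 * the pipeline directly; otherwise it marshals the request into a message
 * and the data plane thread applies it between pipeline runs.
 */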
182 * Main thread &amp; data plane threads: thread message passing
184 enum thread_req_type {
185 THREAD_REQ_PIPELINE_ENABLE = 0,
186 THREAD_REQ_PIPELINE_DISABLE,
190 struct thread_msg_req {
191 enum thread_req_type type;
195 struct rte_pipeline *p;
197 struct rte_table_action *a;
198 } table[RTE_PIPELINE_TABLE_MAX];
199 struct rte_ring *msgq_req;
200 struct rte_ring *msgq_rsp;
201 uint32_t timer_period_ms;
206 struct rte_pipeline *p;
211 struct thread_msg_rsp {
218 static struct thread_msg_req *
219 thread_msg_alloc(void)
221 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
222 sizeof(struct thread_msg_rsp));
224 return calloc(1, size);
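/*
 * The request buffer is reused in place for the response (the handlers cast
 * thread_msg_req to thread_msg_rsp), hence the allocation is sized to the
 * larger of the two structures.
 */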
228 thread_msg_free(struct thread_msg_rsp *rsp)
233 static struct thread_msg_rsp *
234 thread_msg_send_recv(uint32_t thread_id,
235 struct thread_msg_req *req)
237 struct thread *t = &thread[thread_id];
238 struct rte_ring *msgq_req = t->msgq_req;
239 struct rte_ring *msgq_rsp = t->msgq_rsp;
240 struct thread_msg_rsp *rsp;
245 status = rte_ring_sp_enqueue(msgq_req, req);
246 } while (status == -ENOBUFS);
250 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
251 } while (status != 0);
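/*
 * Illustrative sketch (not part of the build): the main thread drives a
 * blocking request/response exchange over the two rings, busy-waiting on
 * both the enqueue and the dequeue side:
 *
 *    req = thread_msg_alloc();
 *    req->type = THREAD_REQ_PIPELINE_ENABLE;
 *    ... fill in the rest of the request ...
 *    rsp = thread_msg_send_recv(thread_id, req);
 *    status = rsp->status;
 *    thread_msg_free(rsp);
 */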
257 thread_pipeline_enable(uint32_t thread_id,
258 const char *pipeline_name)
260 struct pipeline *p = pipeline_find(pipeline_name);
262 struct thread_msg_req *req;
263 struct thread_msg_rsp *rsp;
267 /* Check input params */
268 if ((thread_id >= RTE_MAX_LCORE) ||
270 (p->n_ports_in == 0) ||
271 (p->n_ports_out == 0) ||
275 t = &thread[thread_id];
276 if ((t->enabled == 0) ||
280 if (!thread_is_running(thread_id)) {
281 struct thread_data *td = &thread_data[thread_id];
282 struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
284 if (td->n_pipelines >= THREAD_PIPELINES_MAX)
287 /* Data plane thread */
288 td->p[td->n_pipelines] = p->p;
291 for (i = 0; i < p->n_tables; i++)
292 tdp->table_data[i].a = p->table[i].a;
294 tdp->n_tables = p->n_tables;
296 tdp->msgq_req = p->msgq_req;
297 tdp->msgq_rsp = p->msgq_rsp;
298 tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
299 tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
304 p->thread_id = thread_id;
310 /* Allocate request */
311 req = thread_msg_alloc();
316 req->type = THREAD_REQ_PIPELINE_ENABLE;
317 req->pipeline_enable.p = p->p;
318 for (i = 0; i < p->n_tables; i++)
319 req->pipeline_enable.table[i].a =
321 req->pipeline_enable.msgq_req = p->msgq_req;
322 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
323 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
324 req->pipeline_enable.n_tables = p->n_tables;
326 /* Send request and wait for response */
327 rsp = thread_msg_send_recv(thread_id, req);
330 status = rsp->status;
333 thread_msg_free(rsp);
335 /* Request completion */
339 p->thread_id = thread_id;
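/*
 * Illustrative usage sketch (assumes a pipeline named "PIPELINE0" has already
 * been created and configured with at least one input and one output port):
 *
 *    if (thread_pipeline_enable(2, "PIPELINE0"))
 *        printf("Cannot map pipeline to data plane thread 2\n");
 */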
346 thread_pipeline_disable(uint32_t thread_id,
347 const char *pipeline_name)
349 struct pipeline *p = pipeline_find(pipeline_name);
351 struct thread_msg_req *req;
352 struct thread_msg_rsp *rsp;
355 /* Check input params */
356 if ((thread_id >= RTE_MAX_LCORE) ||
360 t = &thread[thread_id];
367 if (p->thread_id != thread_id)
370 if (!thread_is_running(thread_id)) {
371 struct thread_data *td = &thread_data[thread_id];
374 for (i = 0; i < td->n_pipelines; i++) {
375 struct pipeline_data *tdp = &td->pipeline_data[i];
380 /* Data plane thread */
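/*
 * Pipelines are stored densely per thread: remove an entry by moving the
 * last one into the freed slot (pipeline order is not preserved).
 */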
381 if (i < td->n_pipelines - 1) {
382 struct rte_pipeline *pipeline_last =
383 td->p[td->n_pipelines - 1];
384 struct pipeline_data *tdp_last =
385 &td->pipeline_data[td->n_pipelines - 1];
387 td->p[i] = pipeline_last;
388 memcpy(tdp, tdp_last, sizeof(*tdp));
402 /* Allocate request */
403 req = thread_msg_alloc();
408 req->type = THREAD_REQ_PIPELINE_DISABLE;
409 req->pipeline_disable.p = p->p;
411 /* Send request and wait for response */
412 rsp = thread_msg_send_recv(thread_id, req);
415 status = rsp->status;
418 thread_msg_free(rsp);
420 /* Request completion */
430 * Data plane threads: thread message handling
432 static inline struct thread_msg_req *
433 thread_msg_recv(struct rte_ring *msgq_req)
435 struct thread_msg_req *req;
437 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
446 thread_msg_send(struct rte_ring *msgq_rsp,
447 struct thread_msg_rsp *rsp)
452 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
453 } while (status == -ENOBUFS);
456 static struct thread_msg_rsp *
457 thread_msg_handle_pipeline_enable(struct thread_data *t,
458 struct thread_msg_req *req)
460 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
461 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
465 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
470 t->p[t->n_pipelines] = req->pipeline_enable.p;
472 p->p = req->pipeline_enable.p;
473 for (i = 0; i < req->pipeline_enable.n_tables; i++)
475 req->pipeline_enable.table[i].a;
477 p->n_tables = req->pipeline_enable.n_tables;
479 p->msgq_req = req->pipeline_enable.msgq_req;
480 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
482 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
483 p->time_next = rte_get_tsc_cycles() + p->timer_period;
492 static struct thread_msg_rsp *
493 thread_msg_handle_pipeline_disable(struct thread_data *t,
494 struct thread_msg_req *req)
496 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
497 uint32_t n_pipelines = t->n_pipelines;
498 struct rte_pipeline *pipeline = req->pipeline_disable.p;
502 for (i = 0; i < n_pipelines; i++) {
503 struct pipeline_data *p = &t->pipeline_data[i];
505 if (p->p != pipeline)
508 if (i < n_pipelines - 1) {
509 struct rte_pipeline *pipeline_last =
510 t->p[n_pipelines - 1];
511 struct pipeline_data *p_last =
512 &t->pipeline_data[n_pipelines - 1];
514 t->p[i] = pipeline_last;
515 memcpy(p, p_last, sizeof(*p));
524 /* should not get here */
530 thread_msg_handle(struct thread_data *t)
533 struct thread_msg_req *req;
534 struct thread_msg_rsp *rsp;
536 req = thread_msg_recv(t->msgq_req);
541 case THREAD_REQ_PIPELINE_ENABLE:
542 rsp = thread_msg_handle_pipeline_enable(t, req);
545 case THREAD_REQ_PIPELINE_DISABLE:
546 rsp = thread_msg_handle_pipeline_disable(t, req);
550 rsp = (struct thread_msg_rsp *) req;
554 thread_msg_send(t->msgq_rsp, rsp);
559 * Main thread &amp; data plane threads: pipeline message passing
561 enum pipeline_req_type {
563 PIPELINE_REQ_PORT_IN_STATS_READ,
564 PIPELINE_REQ_PORT_IN_ENABLE,
565 PIPELINE_REQ_PORT_IN_DISABLE,
568 PIPELINE_REQ_PORT_OUT_STATS_READ,
571 PIPELINE_REQ_TABLE_STATS_READ,
572 PIPELINE_REQ_TABLE_RULE_ADD,
573 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
574 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
575 PIPELINE_REQ_TABLE_RULE_DELETE,
576 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
577 PIPELINE_REQ_TABLE_RULE_STATS_READ,
578 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
579 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
580 PIPELINE_REQ_TABLE_RULE_MTR_READ,
581 PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
582 PIPELINE_REQ_TABLE_RULE_TTL_READ,
583 PIPELINE_REQ_TABLE_RULE_TIME_READ,
587 struct pipeline_msg_req_port_in_stats_read {
591 struct pipeline_msg_req_port_out_stats_read {
595 struct pipeline_msg_req_table_stats_read {
599 struct pipeline_msg_req_table_rule_add {
600 struct table_rule_match match;
601 struct table_rule_action action;
604 struct pipeline_msg_req_table_rule_add_default {
605 struct table_rule_action action;
608 struct pipeline_msg_req_table_rule_add_bulk {
609 struct table_rule_list *list;
613 struct pipeline_msg_req_table_rule_delete {
614 struct table_rule_match match;
617 struct pipeline_msg_req_table_rule_stats_read {
622 struct pipeline_msg_req_table_mtr_profile_add {
623 uint32_t meter_profile_id;
624 struct rte_table_action_meter_profile profile;
627 struct pipeline_msg_req_table_mtr_profile_delete {
628 uint32_t meter_profile_id;
631 struct pipeline_msg_req_table_rule_mtr_read {
637 struct pipeline_msg_req_table_dscp_table_update {
639 struct rte_table_action_dscp_table dscp_table;
642 struct pipeline_msg_req_table_rule_ttl_read {
647 struct pipeline_msg_req_table_rule_time_read {
651 struct pipeline_msg_req {
652 enum pipeline_req_type type;
653 uint32_t id; /* Input port, output port or table ID, depending on request type */
657 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
658 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
659 struct pipeline_msg_req_table_stats_read table_stats_read;
660 struct pipeline_msg_req_table_rule_add table_rule_add;
661 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
662 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
663 struct pipeline_msg_req_table_rule_delete table_rule_delete;
664 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
665 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
666 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
667 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
668 struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
669 struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
670 struct pipeline_msg_req_table_rule_time_read table_rule_time_read;
674 struct pipeline_msg_rsp_port_in_stats_read {
675 struct rte_pipeline_port_in_stats stats;
678 struct pipeline_msg_rsp_port_out_stats_read {
679 struct rte_pipeline_port_out_stats stats;
682 struct pipeline_msg_rsp_table_stats_read {
683 struct rte_pipeline_table_stats stats;
686 struct pipeline_msg_rsp_table_rule_add {
690 struct pipeline_msg_rsp_table_rule_add_default {
694 struct pipeline_msg_rsp_table_rule_add_bulk {
698 struct pipeline_msg_rsp_table_rule_stats_read {
699 struct rte_table_action_stats_counters stats;
702 struct pipeline_msg_rsp_table_rule_mtr_read {
703 struct rte_table_action_mtr_counters stats;
706 struct pipeline_msg_rsp_table_rule_ttl_read {
707 struct rte_table_action_ttl_counters stats;
710 struct pipeline_msg_rsp_table_rule_time_read {
714 struct pipeline_msg_rsp {
719 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
720 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
721 struct pipeline_msg_rsp_table_stats_read table_stats_read;
722 struct pipeline_msg_rsp_table_rule_add table_rule_add;
723 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
724 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
725 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
726 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
727 struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
728 struct pipeline_msg_rsp_table_rule_time_read table_rule_time_read;
735 static struct pipeline_msg_req *
736 pipeline_msg_alloc(void)
738 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
739 sizeof(struct pipeline_msg_rsp));
741 return calloc(1, size);
745 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
750 static struct pipeline_msg_rsp *
751 pipeline_msg_send_recv(struct pipeline *p,
752 struct pipeline_msg_req *req)
754 struct rte_ring *msgq_req = p->msgq_req;
755 struct rte_ring *msgq_rsp = p->msgq_rsp;
756 struct pipeline_msg_rsp *rsp;
761 status = rte_ring_sp_enqueue(msgq_req, req);
762 } while (status == -ENOBUFS);
766 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
767 } while (status != 0);
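/*
 * Same synchronous request/response pattern as thread_msg_send_recv(), but
 * addressed to the message rings of a specific pipeline rather than to a
 * data plane thread.
 */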
773 pipeline_port_in_stats_read(const char *pipeline_name,
775 struct rte_pipeline_port_in_stats *stats,
779 struct pipeline_msg_req *req;
780 struct pipeline_msg_rsp *rsp;
783 /* Check input params */
784 if ((pipeline_name == NULL) ||
788 p = pipeline_find(pipeline_name);
790 (port_id >= p->n_ports_in))
793 if (!pipeline_is_running(p)) {
794 status = rte_pipeline_port_in_stats_read(p->p,
802 /* Allocate request */
803 req = pipeline_msg_alloc();
808 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
810 req->port_in_stats_read.clear = clear;
812 /* Send request and wait for response */
813 rsp = pipeline_msg_send_recv(p, req);
816 status = rsp->status;
818 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
821 pipeline_msg_free(rsp);
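/*
 * Illustrative usage sketch (assumes a pipeline named "PIPELINE0" with at
 * least one input port):
 *
 *    struct rte_pipeline_port_in_stats stats;
 *
 *    if (pipeline_port_in_stats_read("PIPELINE0", 0, &stats, 1) == 0)
 *        ... stats now holds the port 0 counters, cleared after the read ...
 */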
827 pipeline_port_in_enable(const char *pipeline_name,
831 struct pipeline_msg_req *req;
832 struct pipeline_msg_rsp *rsp;
835 /* Check input params */
836 if (pipeline_name == NULL)
839 p = pipeline_find(pipeline_name);
841 (port_id >= p->n_ports_in))
844 if (!pipeline_is_running(p)) {
845 status = rte_pipeline_port_in_enable(p->p, port_id);
849 /* Allocate request */
850 req = pipeline_msg_alloc();
855 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
858 /* Send request and wait for response */
859 rsp = pipeline_msg_send_recv(p, req);
862 status = rsp->status;
865 pipeline_msg_free(rsp);
871 pipeline_port_in_disable(const char *pipeline_name,
875 struct pipeline_msg_req *req;
876 struct pipeline_msg_rsp *rsp;
879 /* Check input params */
880 if (pipeline_name == NULL)
883 p = pipeline_find(pipeline_name);
885 (port_id >= p->n_ports_in))
888 if (!pipeline_is_running(p)) {
889 status = rte_pipeline_port_in_disable(p->p, port_id);
893 /* Allocate request */
894 req = pipeline_msg_alloc();
899 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
902 /* Send request and wait for response */
903 rsp = pipeline_msg_send_recv(p, req);
906 status = rsp->status;
909 pipeline_msg_free(rsp);
915 pipeline_port_out_stats_read(const char *pipeline_name,
917 struct rte_pipeline_port_out_stats *stats,
921 struct pipeline_msg_req *req;
922 struct pipeline_msg_rsp *rsp;
925 /* Check input params */
926 if ((pipeline_name == NULL) ||
930 p = pipeline_find(pipeline_name);
932 (port_id >= p->n_ports_out))
935 if (!pipeline_is_running(p)) {
936 status = rte_pipeline_port_out_stats_read(p->p,
944 /* Allocate request */
945 req = pipeline_msg_alloc();
950 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
952 req->port_out_stats_read.clear = clear;
954 /* Send request and wait for response */
955 rsp = pipeline_msg_send_recv(p, req);
958 status = rsp->status;
960 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
963 pipeline_msg_free(rsp);
969 pipeline_table_stats_read(const char *pipeline_name,
971 struct rte_pipeline_table_stats *stats,
975 struct pipeline_msg_req *req;
976 struct pipeline_msg_rsp *rsp;
979 /* Check input params */
980 if ((pipeline_name == NULL) ||
984 p = pipeline_find(pipeline_name);
986 (table_id >= p->n_tables))
989 if (!pipeline_is_running(p)) {
990 status = rte_pipeline_table_stats_read(p->p,
998 /* Allocate request */
999 req = pipeline_msg_alloc();
1004 req->type = PIPELINE_REQ_TABLE_STATS_READ;
1006 req->table_stats_read.clear = clear;
1008 /* Send request and wait for response */
1009 rsp = pipeline_msg_send_recv(p, req);
1012 status = rsp->status;
1014 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
1017 pipeline_msg_free(rsp);
1023 match_check(struct table_rule_match *match,
1027 struct table *table;
1029 if ((match == NULL) ||
1031 (table_id >= p->n_tables))
1034 table = &p->table[table_id];
1035 if (match->match_type != table->params.match_type)
1038 switch (match->match_type) {
1041 struct table_acl_params *t = &table->params.match.acl;
1042 struct table_rule_match_acl *r = &match->match.acl;
1044 if ((r->ip_version && (t->ip_version == 0)) ||
1045 ((r->ip_version == 0) && t->ip_version))
1048 if (r->ip_version) {
1049 if ((r->sa_depth > 32) ||
1053 if ((r->sa_depth > 128) ||
1054 (r->da_depth > 128))
1068 struct table_lpm_params *t = &table->params.match.lpm;
1069 struct table_rule_match_lpm *r = &match->match.lpm;
1071 if ((r->ip_version && (t->key_size != 4)) ||
1072 ((r->ip_version == 0) && (t->key_size != 16)))
1075 if (r->ip_version) {
1094 action_check(struct table_rule_action *action,
1098 struct table_action_profile *ap;
1100 if ((action == NULL) ||
1102 (table_id >= p->n_tables))
1105 ap = p->table[table_id].ap;
1106 if (action->action_mask != ap->params.action_mask)
1109 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1110 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1111 (action->fwd.id >= p->n_ports_out))
1114 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1115 (action->fwd.id >= p->n_tables))
1119 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1120 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
1121 uint32_t tc_mask1 = action->mtr.tc_mask;
1123 if (tc_mask1 != tc_mask0)
1127 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1128 uint32_t n_subports_per_port =
1129 ap->params.tm.n_subports_per_port;
1130 uint32_t n_pipes_per_subport =
1131 ap->params.tm.n_pipes_per_subport;
1132 uint32_t subport_id = action->tm.subport_id;
1133 uint32_t pipe_id = action->tm.pipe_id;
1135 if ((subport_id >= n_subports_per_port) ||
1136 (pipe_id >= n_pipes_per_subport))
1140 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1141 uint64_t encap_mask = ap->params.encap.encap_mask;
1142 enum rte_table_action_encap_type type = action->encap.type;
1144 if ((encap_mask & (1LLU << type)) == 0)
1148 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1149 int ip_version0 = ap->params.common.ip_version;
1150 int ip_version1 = action->nat.ip_version;
1152 if ((ip_version1 && (ip_version0 == 0)) ||
1153 ((ip_version1 == 0) && ip_version0))
1161 action_default_check(struct table_rule_action *action,
1165 if ((action == NULL) ||
1166 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1168 (table_id >= p->n_tables))
1171 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1172 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1173 (action->fwd.id >= p->n_ports_out))
1176 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1177 (action->fwd.id >= p->n_tables))
1184 union table_rule_match_low_level {
1185 struct rte_table_acl_rule_add_params acl_add;
1186 struct rte_table_acl_rule_delete_params acl_delete;
1187 struct rte_table_array_key array;
1188 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1189 struct rte_table_lpm_key lpm_ipv4;
1190 struct rte_table_lpm_ipv6_key lpm_ipv6;
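/*
 * The union above is the low-level (table library specific) form of a rule
 * match; match_convert() fills it in from the generic table_rule_match.
 */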
1194 match_convert(struct table_rule_match *mh,
1195 union table_rule_match_low_level *ml,
1199 action_convert(struct rte_table_action *a,
1200 struct table_rule_action *action,
1201 struct rte_pipeline_table_entry *data);
1204 struct rte_pipeline *p;
1206 struct rte_table_action *a;
1211 table_rule_add_bulk_ll(struct table_ll *table,
1212 struct table_rule_list *list,
1215 union table_rule_match_low_level *match_ll = NULL;
1216 uint8_t *action_ll = NULL;
1217 void **match_ll_ptr = NULL;
1218 struct rte_pipeline_table_entry **action_ll_ptr = NULL;
1219 struct rte_pipeline_table_entry **entries_ptr = NULL;
1221 struct table_rule *rule;
1226 TAILQ_FOREACH(rule, list, node)
1229 /* Memory allocation */
1230 match_ll = calloc(n, sizeof(union table_rule_match_low_level));
1231 action_ll = calloc(n, TABLE_RULE_ACTION_SIZE_MAX);
1233 match_ll_ptr = calloc(n, sizeof(void *));
1234 action_ll_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1236 entries_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1237 found = calloc(n, sizeof(int));
1239 if (match_ll == NULL ||
1240 action_ll == NULL ||
1241 match_ll_ptr == NULL ||
1242 action_ll_ptr == NULL ||
1243 entries_ptr == NULL ||
1246 goto table_rule_add_bulk_ll_free;
1250 for (i = 0; i < n; i++) {
1251 match_ll_ptr[i] = (void *)&match_ll[i];
1252 action_ll_ptr[i] = (struct rte_pipeline_table_entry *)
1253 &action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
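/*
 * The add calls below take pointers to each low-level match and action
 * buffer, so every rule gets its own pointer slot.
 */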
1256 /* Rule (match, action) conversion */
1258 TAILQ_FOREACH(rule, list, node) {
1259 status = match_convert(&rule->match, match_ll_ptr[i], 1);
1261 goto table_rule_add_bulk_ll_free;
1263 status = action_convert(table->a, &rule->action, action_ll_ptr[i]);
1265 goto table_rule_add_bulk_ll_free;
1270 /* Add rule (match, action) to table */
1271 if (table->bulk_supported) {
1272 status = rte_pipeline_table_entry_add_bulk(table->p,
1280 goto table_rule_add_bulk_ll_free;
1282 for (i = 0; i < n; i++) {
1283 status = rte_pipeline_table_entry_add(table->p,
1291 goto table_rule_add_bulk_ll_free;
1300 /* Write back to the rule list. */
1302 TAILQ_FOREACH(rule, list, node) {
1306 rule->data = entries_ptr[i];
1314 table_rule_add_bulk_ll_free:
1317 free(action_ll_ptr);
1326 pipeline_table_rule_add(const char *pipeline_name,
1328 struct table_rule_match *match,
1329 struct table_rule_action *action)
1332 struct table *table;
1333 struct pipeline_msg_req *req;
1334 struct pipeline_msg_rsp *rsp;
1335 struct table_rule *rule;
1338 /* Check input params */
1339 if ((pipeline_name == NULL) ||
1344 p = pipeline_find(pipeline_name);
1346 (table_id >= p->n_tables) ||
1347 match_check(match, p, table_id) ||
1348 action_check(action, p, table_id))
1351 table = &p->table[table_id];
1353 rule = calloc(1, sizeof(struct table_rule));
1357 memcpy(&rule->match, match, sizeof(*match));
1358 memcpy(&rule->action, action, sizeof(*action));
1360 if (!pipeline_is_running(p)) {
1361 union table_rule_match_low_level match_ll;
1362 struct rte_pipeline_table_entry *data_in, *data_out;
1366 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1367 if (buffer == NULL) {
1372 /* Table match-action rule conversion */
1373 data_in = (struct rte_pipeline_table_entry *)buffer;
1375 status = match_convert(match, &match_ll, 1);
1382 status = action_convert(table->a, action, data_in);
1389 /* Add rule (match, action) to table */
1390 status = rte_pipeline_table_entry_add(p->p,
1402 /* Write response */
1403 rule->data = data_out;
1404 table_rule_add(table, rule);
1410 /* Allocate request */
1411 req = pipeline_msg_alloc();
1418 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1420 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1421 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1423 /* Send request and wait for response */
1424 rsp = pipeline_msg_send_recv(p, req);
1427 status = rsp->status;
1429 rule->data = rsp->table_rule_add.data;
1430 table_rule_add(table, rule);
1435 pipeline_msg_free(rsp);
1441 pipeline_table_rule_add_default(const char *pipeline_name,
1443 struct table_rule_action *action)
1446 struct table *table;
1447 struct pipeline_msg_req *req;
1448 struct pipeline_msg_rsp *rsp;
1449 struct table_rule *rule;
1452 /* Check input params */
1453 if ((pipeline_name == NULL) ||
1457 p = pipeline_find(pipeline_name);
1459 (table_id >= p->n_tables) ||
1460 action_default_check(action, p, table_id))
1463 table = &p->table[table_id];
1465 rule = calloc(1, sizeof(struct table_rule));
1469 memcpy(&rule->action, action, sizeof(*action));
1471 if (!pipeline_is_running(p)) {
1472 struct rte_pipeline_table_entry *data_in, *data_out;
1475 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1476 if (buffer == NULL) {
1482 data_in = (struct rte_pipeline_table_entry *)buffer;
1484 data_in->action = action->fwd.action;
1485 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1486 data_in->port_id = action->fwd.id;
1487 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1488 data_in->table_id = action->fwd.id;
1490 /* Add default rule to table */
1491 status = rte_pipeline_table_default_entry_add(p->p,
1501 /* Write response */
1502 rule->data = data_out;
1503 table_rule_default_add(table, rule);
1509 /* Allocate request */
1510 req = pipeline_msg_alloc();
1517 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1519 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1521 /* Send request and wait for response */
1522 rsp = pipeline_msg_send_recv(p, req);
1525 status = rsp->status;
1527 rule->data = rsp->table_rule_add_default.data;
1528 table_rule_default_add(table, rule);
1533 pipeline_msg_free(rsp);
1539 table_rule_list_free(struct table_rule_list *list)
1547 struct table_rule *rule;
1549 rule = TAILQ_FIRST(list);
1553 TAILQ_REMOVE(list, rule, node);
1563 pipeline_table_rule_add_bulk(const char *pipeline_name,
1565 struct table_rule_list *list,
1566 uint32_t *n_rules_added,
1567 uint32_t *n_rules_not_added)
1570 struct table *table;
1571 struct pipeline_msg_req *req;
1572 struct pipeline_msg_rsp *rsp;
1573 struct table_rule *rule;
1576 /* Check input params */
1577 if ((pipeline_name == NULL) ||
1579 TAILQ_EMPTY(list) ||
1580 (n_rules_added == NULL) ||
1581 (n_rules_not_added == NULL)) {
1582 table_rule_list_free(list);
1586 p = pipeline_find(pipeline_name);
1588 (table_id >= p->n_tables)) {
1589 table_rule_list_free(list);
1593 table = &p->table[table_id];
1595 TAILQ_FOREACH(rule, list, node)
1596 if (match_check(&rule->match, p, table_id) ||
1597 action_check(&rule->action, p, table_id)) {
1598 table_rule_list_free(list);
1602 if (!pipeline_is_running(p)) {
1603 struct table_ll table_ll = {
1605 .table_id = table_id,
1607 .bulk_supported = table->params.match_type == TABLE_ACL,
1610 status = table_rule_add_bulk_ll(&table_ll, list, n_rules_added);
1612 table_rule_list_free(list);
1616 table_rule_add_bulk(table, list, *n_rules_added);
1617 *n_rules_not_added = table_rule_list_free(list);
1621 /* Allocate request */
1622 req = pipeline_msg_alloc();
1624 table_rule_list_free(list);
1629 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1631 req->table_rule_add_bulk.list = list;
1632 req->table_rule_add_bulk.bulk = table->params.match_type == TABLE_ACL;
1634 /* Send request and wait for response */
1635 rsp = pipeline_msg_send_recv(p, req);
1638 status = rsp->status;
1640 *n_rules_added = rsp->table_rule_add_bulk.n_rules;
1642 table_rule_add_bulk(table, list, *n_rules_added);
1643 *n_rules_not_added = table_rule_list_free(list);
1645 table_rule_list_free(list);
1649 pipeline_msg_free(rsp);
1655 pipeline_table_rule_delete(const char *pipeline_name,
1657 struct table_rule_match *match)
1660 struct table *table;
1661 struct pipeline_msg_req *req;
1662 struct pipeline_msg_rsp *rsp;
1665 /* Check input params */
1666 if ((pipeline_name == NULL) ||
1670 p = pipeline_find(pipeline_name);
1672 (table_id >= p->n_tables) ||
1673 match_check(match, p, table_id))
1676 table = &p->table[table_id];
1678 if (!pipeline_is_running(p)) {
1679 union table_rule_match_low_level match_ll;
1682 status = match_convert(match, &match_ll, 0);
1686 status = rte_pipeline_table_entry_delete(p->p,
1693 table_rule_delete(table, match);
1698 /* Allocate request */
1699 req = pipeline_msg_alloc();
1704 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1706 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1708 /* Send request and wait for response */
1709 rsp = pipeline_msg_send_recv(p, req);
1712 status = rsp->status;
1714 table_rule_delete(table, match);
1717 pipeline_msg_free(rsp);
1723 pipeline_table_rule_delete_default(const char *pipeline_name,
1727 struct table *table;
1728 struct pipeline_msg_req *req;
1729 struct pipeline_msg_rsp *rsp;
1732 /* Check input params */
1733 if (pipeline_name == NULL)
1736 p = pipeline_find(pipeline_name);
1738 (table_id >= p->n_tables))
1741 table = &p->table[table_id];
1743 if (!pipeline_is_running(p)) {
1744 status = rte_pipeline_table_default_entry_delete(p->p,
1749 table_rule_default_delete(table);
1754 /* Allocate request */
1755 req = pipeline_msg_alloc();
1760 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1763 /* Send request and wait for response */
1764 rsp = pipeline_msg_send_recv(p, req);
1767 status = rsp->status;
1769 table_rule_default_delete(table);
1772 pipeline_msg_free(rsp);
1778 pipeline_table_rule_stats_read(const char *pipeline_name,
1780 struct table_rule_match *match,
1781 struct rte_table_action_stats_counters *stats,
1785 struct table *table;
1786 struct pipeline_msg_req *req;
1787 struct pipeline_msg_rsp *rsp;
1788 struct table_rule *rule;
1791 /* Check input params */
1792 if ((pipeline_name == NULL) ||
1797 p = pipeline_find(pipeline_name);
1799 (table_id >= p->n_tables) ||
1800 match_check(match, p, table_id))
1803 table = &p->table[table_id];
1804 rule = table_rule_find(table, match);
1808 if (!pipeline_is_running(p)) {
1809 status = rte_table_action_stats_read(table->a,
1817 /* Allocate request */
1818 req = pipeline_msg_alloc();
1823 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1825 req->table_rule_stats_read.data = rule->data;
1826 req->table_rule_stats_read.clear = clear;
1828 /* Send request and wait for response */
1829 rsp = pipeline_msg_send_recv(p, req);
1832 status = rsp->status;
1834 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1837 pipeline_msg_free(rsp);
1843 pipeline_table_mtr_profile_add(const char *pipeline_name,
1845 uint32_t meter_profile_id,
1846 struct rte_table_action_meter_profile *profile)
1849 struct pipeline_msg_req *req;
1850 struct pipeline_msg_rsp *rsp;
1853 /* Check input params */
1854 if ((pipeline_name == NULL) ||
1858 p = pipeline_find(pipeline_name);
1860 (table_id >= p->n_tables))
1863 if (!pipeline_is_running(p)) {
1864 struct rte_table_action *a = p->table[table_id].a;
1866 status = rte_table_action_meter_profile_add(a,
1873 /* Allocate request */
1874 req = pipeline_msg_alloc();
1879 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1881 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1882 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1884 /* Send request and wait for response */
1885 rsp = pipeline_msg_send_recv(p, req);
1888 status = rsp->status;
1891 pipeline_msg_free(rsp);
1897 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1899 uint32_t meter_profile_id)
1902 struct pipeline_msg_req *req;
1903 struct pipeline_msg_rsp *rsp;
1906 /* Check input params */
1907 if (pipeline_name == NULL)
1910 p = pipeline_find(pipeline_name);
1912 (table_id >= p->n_tables))
1915 if (!pipeline_is_running(p)) {
1916 struct rte_table_action *a = p->table[table_id].a;
1918 status = rte_table_action_meter_profile_delete(a,
1924 /* Allocate request */
1925 req = pipeline_msg_alloc();
1930 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1932 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1934 /* Send request and wait for response */
1935 rsp = pipeline_msg_send_recv(p, req);
1938 status = rsp->status;
1941 pipeline_msg_free(rsp);
1947 pipeline_table_rule_mtr_read(const char *pipeline_name,
1949 struct table_rule_match *match,
1950 struct rte_table_action_mtr_counters *stats,
1954 struct table *table;
1955 struct pipeline_msg_req *req;
1956 struct pipeline_msg_rsp *rsp;
1957 struct table_rule *rule;
1961 /* Check input params */
1962 if ((pipeline_name == NULL) ||
1967 p = pipeline_find(pipeline_name);
1969 (table_id >= p->n_tables) ||
1970 match_check(match, p, table_id))
1973 table = &p->table[table_id];
1974 tc_mask = (1 << table->ap->params.mtr.n_tc) - 1;
1976 rule = table_rule_find(table, match);
1980 if (!pipeline_is_running(p)) {
1981 status = rte_table_action_meter_read(table->a,
1990 /* Allocate request */
1991 req = pipeline_msg_alloc();
1996 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
1998 req->table_rule_mtr_read.data = rule->data;
1999 req->table_rule_mtr_read.tc_mask = tc_mask;
2000 req->table_rule_mtr_read.clear = clear;
2002 /* Send request and wait for response */
2003 rsp = pipeline_msg_send_recv(p, req);
2006 status = rsp->status;
2008 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
2011 pipeline_msg_free(rsp);
2017 pipeline_table_dscp_table_update(const char *pipeline_name,
2020 struct rte_table_action_dscp_table *dscp_table)
2023 struct pipeline_msg_req *req;
2024 struct pipeline_msg_rsp *rsp;
2027 /* Check input params */
2028 if ((pipeline_name == NULL) ||
2029 (dscp_table == NULL))
2032 p = pipeline_find(pipeline_name);
2034 (table_id >= p->n_tables))
2037 if (!pipeline_is_running(p)) {
2038 struct rte_table_action *a = p->table[table_id].a;
2040 status = rte_table_action_dscp_table_update(a,
2047 /* Allocate request */
2048 req = pipeline_msg_alloc();
2053 req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
2055 req->table_dscp_table_update.dscp_mask = dscp_mask;
2056 memcpy(&req->table_dscp_table_update.dscp_table,
2057 dscp_table, sizeof(*dscp_table));
2059 /* Send request and wait for response */
2060 rsp = pipeline_msg_send_recv(p, req);
2063 status = rsp->status;
2066 pipeline_msg_free(rsp);
2072 pipeline_table_rule_ttl_read(const char *pipeline_name,
2074 struct table_rule_match *match,
2075 struct rte_table_action_ttl_counters *stats,
2079 struct table *table;
2080 struct pipeline_msg_req *req;
2081 struct pipeline_msg_rsp *rsp;
2082 struct table_rule *rule;
2085 /* Check input params */
2086 if ((pipeline_name == NULL) ||
2091 p = pipeline_find(pipeline_name);
2093 (table_id >= p->n_tables) ||
2094 match_check(match, p, table_id))
2097 table = &p->table[table_id];
2098 if (!table->ap->params.ttl.n_packets_enabled)
2101 rule = table_rule_find(table, match);
2105 if (!pipeline_is_running(p)) {
2106 status = rte_table_action_ttl_read(table->a,
2114 /* Allocate request */
2115 req = pipeline_msg_alloc();
2120 req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
2122 req->table_rule_ttl_read.data = rule->data;
2123 req->table_rule_ttl_read.clear = clear;
2125 /* Send request and wait for response */
2126 rsp = pipeline_msg_send_recv(p, req);
2129 status = rsp->status;
2131 memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
2134 pipeline_msg_free(rsp);
2140 pipeline_table_rule_time_read(const char *pipeline_name,
2142 struct table_rule_match *match,
2143 uint64_t *timestamp)
2146 struct table *table;
2147 struct pipeline_msg_req *req;
2148 struct pipeline_msg_rsp *rsp;
2149 struct table_rule *rule;
2152 /* Check input params */
2153 if ((pipeline_name == NULL) ||
2155 (timestamp == NULL))
2158 p = pipeline_find(pipeline_name);
2160 (table_id >= p->n_tables) ||
2161 match_check(match, p, table_id))
2164 table = &p->table[table_id];
2166 rule = table_rule_find(table, match);
2170 if (!pipeline_is_running(p)) {
2171 status = rte_table_action_time_read(table->a,
2178 /* Allocate request */
2179 req = pipeline_msg_alloc();
2184 req->type = PIPELINE_REQ_TABLE_RULE_TIME_READ;
2186 req->table_rule_time_read.data = rule->data;
2188 /* Send request and wait for response */
2189 rsp = pipeline_msg_send_recv(p, req);
2192 status = rsp->status;
2194 *timestamp = rsp->table_rule_time_read.timestamp;
2197 pipeline_msg_free(rsp);
2203 * Data plane threads: pipeline message handling
2205 static inline struct pipeline_msg_req *
2206 pipeline_msg_recv(struct rte_ring *msgq_req)
2208 struct pipeline_msg_req *req;
2210 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
2219 pipeline_msg_send(struct rte_ring *msgq_rsp,
2220 struct pipeline_msg_rsp *rsp)
2225 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
2226 } while (status == -ENOBUFS);
2229 static struct pipeline_msg_rsp *
2230 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
2231 struct pipeline_msg_req *req)
2233 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2234 uint32_t port_id = req->id;
2235 int clear = req->port_in_stats_read.clear;
2237 rsp->status = rte_pipeline_port_in_stats_read(p->p,
2239 &rsp->port_in_stats_read.stats,
2245 static struct pipeline_msg_rsp *
2246 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
2247 struct pipeline_msg_req *req)
2249 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2250 uint32_t port_id = req->id;
2252 rsp->status = rte_pipeline_port_in_enable(p->p,
2258 static struct pipeline_msg_rsp *
2259 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
2260 struct pipeline_msg_req *req)
2262 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2263 uint32_t port_id = req->id;
2265 rsp->status = rte_pipeline_port_in_disable(p->p,
2271 static struct pipeline_msg_rsp *
2272 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
2273 struct pipeline_msg_req *req)
2275 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2276 uint32_t port_id = req->id;
2277 int clear = req->port_out_stats_read.clear;
2279 rsp->status = rte_pipeline_port_out_stats_read(p->p,
2281 &rsp->port_out_stats_read.stats,
2287 static struct pipeline_msg_rsp *
2288 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
2289 struct pipeline_msg_req *req)
2291 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2292 uint32_t port_id = req->id;
2293 int clear = req->table_stats_read.clear;
2295 rsp->status = rte_pipeline_table_stats_read(p->p,
2297 &rsp->table_stats_read.stats,
2304 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
2309 switch (depth / 32) {
2319 depth32[1] = depth - 32;
2327 depth32[2] = depth - 64;
2335 depth32[3] = depth - 96;
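/*
 * Worked example: an IPv6 prefix depth of 70 is split into the per-32-bit
 * word depths {32, 32, 70 - 64 = 6, 0}, i.e. two full words plus 6 bits of
 * the third word.
 */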
2351 match_convert(struct table_rule_match *mh,
2352 union table_rule_match_low_level *ml,
2355 memset(ml, 0, sizeof(*ml));
2357 switch (mh->match_type) {
2359 if (mh->match.acl.ip_version)
2361 ml->acl_add.field_value[0].value.u8 =
2362 mh->match.acl.proto;
2363 ml->acl_add.field_value[0].mask_range.u8 =
2364 mh->match.acl.proto_mask;
2366 ml->acl_add.field_value[1].value.u32 =
2367 mh->match.acl.ipv4.sa;
2368 ml->acl_add.field_value[1].mask_range.u32 =
2369 mh->match.acl.sa_depth;
2371 ml->acl_add.field_value[2].value.u32 =
2372 mh->match.acl.ipv4.da;
2373 ml->acl_add.field_value[2].mask_range.u32 =
2374 mh->match.acl.da_depth;
2376 ml->acl_add.field_value[3].value.u16 =
2378 ml->acl_add.field_value[3].mask_range.u16 =
2381 ml->acl_add.field_value[4].value.u16 =
2383 ml->acl_add.field_value[4].mask_range.u16 =
2386 ml->acl_add.priority =
2387 (int32_t) mh->match.acl.priority;
2389 ml->acl_delete.field_value[0].value.u8 =
2390 mh->match.acl.proto;
2391 ml->acl_delete.field_value[0].mask_range.u8 =
2392 mh->match.acl.proto_mask;
2394 ml->acl_delete.field_value[1].value.u32 =
2395 mh->match.acl.ipv4.sa;
2396 ml->acl_delete.field_value[1].mask_range.u32 =
2397 mh->match.acl.sa_depth;
2399 ml->acl_delete.field_value[2].value.u32 =
2400 mh->match.acl.ipv4.da;
2401 ml->acl_delete.field_value[2].mask_range.u32 =
2402 mh->match.acl.da_depth;
2404 ml->acl_delete.field_value[3].value.u16 =
2406 ml->acl_delete.field_value[3].mask_range.u16 =
2409 ml->acl_delete.field_value[4].value.u16 =
2411 ml->acl_delete.field_value[4].mask_range.u16 =
2417 (uint32_t *) mh->match.acl.ipv6.sa;
2419 (uint32_t *) mh->match.acl.ipv6.da;
2420 uint32_t sa32_depth[4], da32_depth[4];
2423 status = match_convert_ipv6_depth(
2424 mh->match.acl.sa_depth,
2429 status = match_convert_ipv6_depth(
2430 mh->match.acl.da_depth,
2435 ml->acl_add.field_value[0].value.u8 =
2436 mh->match.acl.proto;
2437 ml->acl_add.field_value[0].mask_range.u8 =
2438 mh->match.acl.proto_mask;
2440 ml->acl_add.field_value[1].value.u32 =
2441 rte_be_to_cpu_32(sa32[0]);
2442 ml->acl_add.field_value[1].mask_range.u32 =
2444 ml->acl_add.field_value[2].value.u32 =
2445 rte_be_to_cpu_32(sa32[1]);
2446 ml->acl_add.field_value[2].mask_range.u32 =
2448 ml->acl_add.field_value[3].value.u32 =
2449 rte_be_to_cpu_32(sa32[2]);
2450 ml->acl_add.field_value[3].mask_range.u32 =
2452 ml->acl_add.field_value[4].value.u32 =
2453 rte_be_to_cpu_32(sa32[3]);
2454 ml->acl_add.field_value[4].mask_range.u32 =
2457 ml->acl_add.field_value[5].value.u32 =
2458 rte_be_to_cpu_32(da32[0]);
2459 ml->acl_add.field_value[5].mask_range.u32 =
2461 ml->acl_add.field_value[6].value.u32 =
2462 rte_be_to_cpu_32(da32[1]);
2463 ml->acl_add.field_value[6].mask_range.u32 =
2465 ml->acl_add.field_value[7].value.u32 =
2466 rte_be_to_cpu_32(da32[2]);
2467 ml->acl_add.field_value[7].mask_range.u32 =
2469 ml->acl_add.field_value[8].value.u32 =
2470 rte_be_to_cpu_32(da32[3]);
2471 ml->acl_add.field_value[8].mask_range.u32 =
2474 ml->acl_add.field_value[9].value.u16 =
2476 ml->acl_add.field_value[9].mask_range.u16 =
2479 ml->acl_add.field_value[10].value.u16 =
2481 ml->acl_add.field_value[10].mask_range.u16 =
2484 ml->acl_add.priority =
2485 (int32_t) mh->match.acl.priority;
2488 (uint32_t *) mh->match.acl.ipv6.sa;
2490 (uint32_t *) mh->match.acl.ipv6.da;
2491 uint32_t sa32_depth[4], da32_depth[4];
2494 status = match_convert_ipv6_depth(
2495 mh->match.acl.sa_depth,
2500 status = match_convert_ipv6_depth(
2501 mh->match.acl.da_depth,
2506 ml->acl_delete.field_value[0].value.u8 =
2507 mh->match.acl.proto;
2508 ml->acl_delete.field_value[0].mask_range.u8 =
2509 mh->match.acl.proto_mask;
2511 ml->acl_delete.field_value[1].value.u32 =
2512 rte_be_to_cpu_32(sa32[0]);
2513 ml->acl_delete.field_value[1].mask_range.u32 =
2515 ml->acl_delete.field_value[2].value.u32 =
2516 rte_be_to_cpu_32(sa32[1]);
2517 ml->acl_delete.field_value[2].mask_range.u32 =
2519 ml->acl_delete.field_value[3].value.u32 =
2520 rte_be_to_cpu_32(sa32[2]);
2521 ml->acl_delete.field_value[3].mask_range.u32 =
2523 ml->acl_delete.field_value[4].value.u32 =
2524 rte_be_to_cpu_32(sa32[3]);
2525 ml->acl_delete.field_value[4].mask_range.u32 =
2528 ml->acl_delete.field_value[5].value.u32 =
2529 rte_be_to_cpu_32(da32[0]);
2530 ml->acl_delete.field_value[5].mask_range.u32 =
2532 ml->acl_delete.field_value[6].value.u32 =
2533 rte_be_to_cpu_32(da32[1]);
2534 ml->acl_delete.field_value[6].mask_range.u32 =
2536 ml->acl_delete.field_value[7].value.u32 =
2537 rte_be_to_cpu_32(da32[2]);
2538 ml->acl_delete.field_value[7].mask_range.u32 =
2540 ml->acl_delete.field_value[8].value.u32 =
2541 rte_be_to_cpu_32(da32[3]);
2542 ml->acl_delete.field_value[8].mask_range.u32 =
2545 ml->acl_delete.field_value[9].value.u16 =
2547 ml->acl_delete.field_value[9].mask_range.u16 =
2550 ml->acl_delete.field_value[10].value.u16 =
2552 ml->acl_delete.field_value[10].mask_range.u16 =
2558 ml->array.pos = mh->match.array.pos;
2562 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
2566 if (mh->match.lpm.ip_version) {
2567 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
2568 ml->lpm_ipv4.depth = mh->match.lpm.depth;
2570 memcpy(ml->lpm_ipv6.ip,
2571 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
2572 ml->lpm_ipv6.depth = mh->match.lpm.depth;
2583 action_convert(struct rte_table_action *a,
2584 struct table_rule_action *action,
2585 struct rte_pipeline_table_entry *data)
2590 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2591 status = rte_table_action_apply(a,
2593 RTE_TABLE_ACTION_FWD,
2600 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2601 status = rte_table_action_apply(a,
2603 RTE_TABLE_ACTION_LB,
2610 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2611 status = rte_table_action_apply(a,
2613 RTE_TABLE_ACTION_MTR,
2620 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2621 status = rte_table_action_apply(a,
2623 RTE_TABLE_ACTION_TM,
2630 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2631 status = rte_table_action_apply(a,
2633 RTE_TABLE_ACTION_ENCAP,
2640 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2641 status = rte_table_action_apply(a,
2643 RTE_TABLE_ACTION_NAT,
2650 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2651 status = rte_table_action_apply(a,
2653 RTE_TABLE_ACTION_TTL,
2660 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2661 status = rte_table_action_apply(a,
2663 RTE_TABLE_ACTION_STATS,
2670 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2671 status = rte_table_action_apply(a,
2673 RTE_TABLE_ACTION_TIME,
2680 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2681 status = rte_table_action_apply(a,
2683 RTE_TABLE_ACTION_SYM_CRYPTO,
2684 &action->sym_crypto);
2690 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
2691 status = rte_table_action_apply(a,
2693 RTE_TABLE_ACTION_TAG,
2700 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2701 status = rte_table_action_apply(a,
2703 RTE_TABLE_ACTION_DECAP,
2713 static struct pipeline_msg_rsp *
2714 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
2715 struct pipeline_msg_req *req)
2717 union table_rule_match_low_level match_ll;
2718 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2719 struct table_rule_match *match = &req->table_rule_add.match;
2720 struct table_rule_action *action = &req->table_rule_add.action;
2721 struct rte_pipeline_table_entry *data_in, *data_out;
2722 uint32_t table_id = req->id;
2723 int key_found, status;
2724 struct rte_table_action *a = p->table_data[table_id].a;
2727 memset(p->buffer, 0, sizeof(p->buffer));
2728 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2730 status = match_convert(match, &match_ll, 1);
2736 status = action_convert(a, action, data_in);
2742 status = rte_pipeline_table_entry_add(p->p,
2753 /* Write response */
2755 rsp->table_rule_add.data = data_out;
2760 static struct pipeline_msg_rsp *
2761 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2762 struct pipeline_msg_req *req)
2764 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2765 struct table_rule_action *action = &req->table_rule_add_default.action;
2766 struct rte_pipeline_table_entry *data_in, *data_out;
2767 uint32_t table_id = req->id;
2771 memset(p->buffer, 0, sizeof(p->buffer));
2772 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2774 data_in->action = action->fwd.action;
2775 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2776 data_in->port_id = action->fwd.id;
2777 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2778 data_in->table_id = action->fwd.id;
2780 /* Add default rule to table */
2781 status = rte_pipeline_table_default_entry_add(p->p,
2790 /* Write response */
2792 rsp->table_rule_add_default.data = data_out;
2797 static struct pipeline_msg_rsp *
2798 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2799 struct pipeline_msg_req *req)
2801 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2803 uint32_t table_id = req->id;
2804 struct table_rule_list *list = req->table_rule_add_bulk.list;
2805 uint32_t bulk = req->table_rule_add_bulk.bulk;
2807 uint32_t n_rules_added;
2810 struct table_ll table_ll = {
2812 .table_id = table_id,
2813 .a = p->table_data[table_id].a,
2814 .bulk_supported = bulk,
2817 status = table_rule_add_bulk_ll(&table_ll, list, &n_rules_added);
2820 rsp->table_rule_add_bulk.n_rules = 0;
2824 /* Write response */
2826 rsp->table_rule_add_bulk.n_rules = n_rules_added;
2830 static struct pipeline_msg_rsp *
2831 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2832 struct pipeline_msg_req *req)
2834 union table_rule_match_low_level match_ll;
2835 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2836 struct table_rule_match *match = &req->table_rule_delete.match;
2837 uint32_t table_id = req->id;
2838 int key_found, status;
2840 status = match_convert(match, &match_ll, 0);
2846 rsp->status = rte_pipeline_table_entry_delete(p->p,
2855 static struct pipeline_msg_rsp *
2856 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2857 struct pipeline_msg_req *req)
2859 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2860 uint32_t table_id = req->id;
2862 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
2869 static struct pipeline_msg_rsp *
2870 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2871 struct pipeline_msg_req *req)
2873 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2874 uint32_t table_id = req->id;
2875 void *data = req->table_rule_stats_read.data;
2876 int clear = req->table_rule_stats_read.clear;
2877 struct rte_table_action *a = p->table_data[table_id].a;
2879 rsp->status = rte_table_action_stats_read(a,
2881 &rsp->table_rule_stats_read.stats,
2887 static struct pipeline_msg_rsp *
2888 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2889 struct pipeline_msg_req *req)
2891 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2892 uint32_t table_id = req->id;
2893 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2894 struct rte_table_action_meter_profile *profile =
2895 &req->table_mtr_profile_add.profile;
2896 struct rte_table_action *a = p->table_data[table_id].a;
2898 rsp->status = rte_table_action_meter_profile_add(a,
2905 static struct pipeline_msg_rsp *
2906 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2907 struct pipeline_msg_req *req)
2909 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2910 uint32_t table_id = req->id;
2911 uint32_t meter_profile_id =
2912 req->table_mtr_profile_delete.meter_profile_id;
2913 struct rte_table_action *a = p->table_data[table_id].a;
2915 rsp->status = rte_table_action_meter_profile_delete(a,
2921 static struct pipeline_msg_rsp *
2922 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2923 struct pipeline_msg_req *req)
2925 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2926 uint32_t table_id = req->id;
2927 void *data = req->table_rule_mtr_read.data;
2928 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2929 int clear = req->table_rule_mtr_read.clear;
2930 struct rte_table_action *a = p->table_data[table_id].a;
2932 rsp->status = rte_table_action_meter_read(a,
2935 &rsp->table_rule_mtr_read.stats,
2941 static struct pipeline_msg_rsp *
2942 pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
2943 struct pipeline_msg_req *req)
2945 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2946 uint32_t table_id = req->id;
2947 uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
2948 struct rte_table_action_dscp_table *dscp_table =
2949 &req->table_dscp_table_update.dscp_table;
2950 struct rte_table_action *a = p->table_data[table_id].a;
2952 rsp->status = rte_table_action_dscp_table_update(a,
2959 static struct pipeline_msg_rsp *
2960 pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
2961 struct pipeline_msg_req *req)
2963 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2964 uint32_t table_id = req->id;
2965 void *data = req->table_rule_ttl_read.data;
2966 int clear = req->table_rule_ttl_read.clear;
2967 struct rte_table_action *a = p->table_data[table_id].a;
2969 rsp->status = rte_table_action_ttl_read(a,
2971 &rsp->table_rule_ttl_read.stats,
2977 static struct pipeline_msg_rsp *
2978 pipeline_msg_handle_table_rule_time_read(struct pipeline_data *p,
2979 struct pipeline_msg_req *req)
2981 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2982 uint32_t table_id = req->id;
2983 void *data = req->table_rule_time_read.data;
2984 struct rte_table_action *a = p->table_data[table_id].a;
2986 rsp->status = rte_table_action_time_read(a,
2988 &rsp->table_rule_time_read.timestamp);
2994 pipeline_msg_handle(struct pipeline_data *p)
2997 struct pipeline_msg_req *req;
2998 struct pipeline_msg_rsp *rsp;
3000 req = pipeline_msg_recv(p->msgq_req);
3004 switch (req->type) {
3005 case PIPELINE_REQ_PORT_IN_STATS_READ:
3006 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
3009 case PIPELINE_REQ_PORT_IN_ENABLE:
3010 rsp = pipeline_msg_handle_port_in_enable(p, req);
3013 case PIPELINE_REQ_PORT_IN_DISABLE:
3014 rsp = pipeline_msg_handle_port_in_disable(p, req);
3017 case PIPELINE_REQ_PORT_OUT_STATS_READ:
3018 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
3021 case PIPELINE_REQ_TABLE_STATS_READ:
3022 rsp = pipeline_msg_handle_table_stats_read(p, req);
3025 case PIPELINE_REQ_TABLE_RULE_ADD:
3026 rsp = pipeline_msg_handle_table_rule_add(p, req);
3029 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
3030 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
3033 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
3034 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
3037 case PIPELINE_REQ_TABLE_RULE_DELETE:
3038 rsp = pipeline_msg_handle_table_rule_delete(p, req);
3041 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
3042 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
3045 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
3046 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
3049 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
3050 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
3053 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
3054 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
3057 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
3058 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
3061 case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
3062 rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
3065 case PIPELINE_REQ_TABLE_RULE_TTL_READ:
3066 rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
3069 case PIPELINE_REQ_TABLE_RULE_TIME_READ:
3070 rsp = pipeline_msg_handle_table_rule_time_read(p, req);
3074 rsp = (struct pipeline_msg_rsp *) req;
3078 pipeline_msg_send(p->msgq_rsp, rsp);
3083 * Data plane threads: main
3086 thread_main(void *arg __rte_unused)
3088 struct thread_data *t;
3089 uint32_t thread_id, i;
3091 thread_id = rte_lcore_id();
3092 t = &thread_data[thread_id];
3095 for (i = 0; ; i++) {
3099 for (j = 0; j < t->n_pipelines; j++)
3100 rte_pipeline_run(t->p[j]);
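/*
 * Control messages are only polled every 16 loop iterations, and then only
 * for the pipelines (and the thread itself) whose timer has expired, so the
 * packet processing fast path above stays cheap.
 */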
3103 if ((i & 0xF) == 0) {
3104 uint64_t time = rte_get_tsc_cycles();
3105 uint64_t time_next_min = UINT64_MAX;
3107 if (time < t->time_next_min)
3110 /* Pipeline message queues */
3111 for (j = 0; j < t->n_pipelines; j++) {
3112 struct pipeline_data *p =
3113 &t->pipeline_data[j];
3114 uint64_t time_next = p->time_next;
3116 if (time_next <= time) {
3117 pipeline_msg_handle(p);
3118 rte_pipeline_flush(p->p);
3119 time_next = time + p->timer_period;
3120 p->time_next = time_next;
3123 if (time_next < time_next_min)
3124 time_next_min = time_next;
3127 /* Thread message queues */
3129 uint64_t time_next = t->time_next;
3131 if (time_next <= time) {
3132 thread_msg_handle(t);
3133 time_next = time + t->timer_period;
3134 t->time_next = time_next;
3137 if (time_next < time_next_min)
3138 time_next_min = time_next;
3141 t->time_next_min = time_next_min;
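/*
 * Illustrative launch sketch (not part of this file): once the init function
 * above has created the per-worker rings, the application would typically
 * start this loop on every worker lcore, for example:
 *
 *    unsigned int lcore_id;
 *
 *    RTE_LCORE_FOREACH_WORKER(lcore_id)
 *        rte_eal_remote_launch(thread_main, NULL, lcore_id);
 */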