1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
/* Build-time tunables; each #ifndef guard allows the build system to
 * override the default value. */
22 #ifndef THREAD_PIPELINES_MAX
/* Max number of pipelines that can be mapped to one data plane thread. */
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
/* Number of entries in each per-thread request/response ring. */
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
/* Data plane thread timer period, in milliseconds. */
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Master thread: data plane thread context
/* Per-lcore record kept by the master thread: the two rings used to talk
 * to that data plane thread. (Some fields elided in this view.) */
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
/* Per-table data cached by the data plane thread (table action handle). */
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
/* Rings for pipeline-level messages and the pipeline timer state. */
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
/* Scratch buffer for building table rule actions in the fast path. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
/* Per-thread context: pipelines currently mapped to this thread plus the
 * thread-level message rings and timer. Cache-aligned to avoid false
 * sharing between lcores. */
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
/* First pass: free any previously created rings for enabled lcores
 * (cleanup path; rte_ring_free(NULL) is a no-op). */
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
/* Second pass: for every slave lcore, create its REQ/RSP rings and
 * initialize both the master-side and data-plane-side records. */
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
/* NOTE(review): variable is named cpu_id but holds the NUMA socket id
 * of lcore i — presumably used for ring allocation locality; confirm
 * against the elided rte_ring_create() arguments. */
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
/* Convert the millisecond timer period to TSC cycles. */
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
/* Return 1 when the lcore identified by thread_id is in the RUNNING EAL
 * state, 0 otherwise. */
159 thread_is_running(uint32_t thread_id)
161 enum rte_lcore_state_t thread_state;
163 thread_state = rte_eal_get_lcore_state(thread_id);
164 return (thread_state == RUNNING) ? 1 : 0;
168 * Pipeline is running when:
169 * (A) Pipeline is mapped to a data plane thread AND
170 * (B) Its data plane thread is in RUNNING state.
173 pipeline_is_running(struct pipeline *p)
/* Delegate to the EAL state of the thread this pipeline is mapped to. */
178 return thread_is_running(p->thread_id);
182 * Master thread & data plane threads: message passing
184 enum thread_req_type {
185 THREAD_REQ_PIPELINE_ENABLE = 0,
186 THREAD_REQ_PIPELINE_DISABLE,
190 struct thread_msg_req {
191 enum thread_req_type type;
/* pipeline_enable payload: pipeline handle, its table action handles,
 * its message rings, and its timer period. */
195 struct rte_pipeline *p;
197 struct rte_table_action *a;
198 } table[RTE_PIPELINE_TABLE_MAX];
199 struct rte_ring *msgq_req;
200 struct rte_ring *msgq_rsp;
201 uint32_t timer_period_ms;
/* pipeline_disable payload: the pipeline handle to unmap. */
206 struct rte_pipeline *p;
211 struct thread_msg_rsp {
/* Allocate a message buffer big enough for either a request or a
 * response, so the data plane thread can reuse the same buffer for its
 * reply (see the req-to-rsp casts in the handlers below). */
218 static struct thread_msg_req *
219 thread_msg_alloc(void)
221 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
222 sizeof(struct thread_msg_rsp));
224 return calloc(1, size);
228 thread_msg_free(struct thread_msg_rsp *rsp)
/* Master thread: post req on the thread's REQ ring, then busy-wait for
 * the reply on the RSP ring. Ownership of req transfers to the data
 * plane thread; the returned rsp must be freed by the caller. */
233 static struct thread_msg_rsp *
234 thread_msg_send_recv(uint32_t thread_id,
235 struct thread_msg_req *req)
237 struct thread *t = &thread[thread_id];
238 struct rte_ring *msgq_req = t->msgq_req;
239 struct rte_ring *msgq_rsp = t->msgq_rsp;
240 struct thread_msg_rsp *rsp;
/* Retry until the ring has room (rings created with SP/SC flags). */
245 status = rte_ring_sp_enqueue(msgq_req, req);
246 } while (status == -ENOBUFS);
/* Spin until the response arrives. */
250 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
251 } while (status != 0);
/* Map pipeline pipeline_name to data plane thread thread_id.
 * If the target thread is not running, update its context directly;
 * otherwise send a PIPELINE_ENABLE message and wait for the reply. */
257 thread_pipeline_enable(uint32_t thread_id,
258 const char *pipeline_name)
260 struct pipeline *p = pipeline_find(pipeline_name);
262 struct thread_msg_req *req;
263 struct thread_msg_rsp *rsp;
267 /* Check input params */
268 if ((thread_id >= RTE_MAX_LCORE) ||
270 (p->n_ports_in == 0) ||
271 (p->n_ports_out == 0) ||
275 t = &thread[thread_id];
276 if ((t->enabled == 0) ||
/* Thread not running: safe to write its context from here. */
280 if (!thread_is_running(thread_id)) {
281 struct thread_data *td = &thread_data[thread_id];
282 struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
284 if (td->n_pipelines >= THREAD_PIPELINES_MAX)
287 /* Data plane thread */
288 td->p[td->n_pipelines] = p->p;
291 for (i = 0; i < p->n_tables; i++)
292 tdp->table_data[i].a = p->table[i].a;
294 tdp->n_tables = p->n_tables;
296 tdp->msgq_req = p->msgq_req;
297 tdp->msgq_rsp = p->msgq_rsp;
298 tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
299 tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
/* Record the mapping on the pipeline object (master-side state). */
304 p->thread_id = thread_id;
310 /* Allocate request */
311 req = thread_msg_alloc();
/* Thread running: marshal the same data into a message instead. */
316 req->type = THREAD_REQ_PIPELINE_ENABLE;
317 req->pipeline_enable.p = p->p;
318 for (i = 0; i < p->n_tables; i++)
319 req->pipeline_enable.table[i].a =
321 req->pipeline_enable.msgq_req = p->msgq_req;
322 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
323 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
324 req->pipeline_enable.n_tables = p->n_tables;
326 /* Send request and wait for response */
327 rsp = thread_msg_send_recv(thread_id, req);
332 status = rsp->status;
335 thread_msg_free(rsp);
337 /* Request completion */
341 p->thread_id = thread_id;
/* Unmap pipeline pipeline_name from data plane thread thread_id.
 * Mirrors thread_pipeline_enable(): direct context update when the
 * thread is stopped, PIPELINE_DISABLE message when it is running. */
348 thread_pipeline_disable(uint32_t thread_id,
349 const char *pipeline_name)
351 struct pipeline *p = pipeline_find(pipeline_name);
353 struct thread_msg_req *req;
354 struct thread_msg_rsp *rsp;
357 /* Check input params */
358 if ((thread_id >= RTE_MAX_LCORE) ||
362 t = &thread[thread_id];
/* Pipeline must currently be mapped to this very thread. */
369 if (p->thread_id != thread_id)
372 if (!thread_is_running(thread_id)) {
373 struct thread_data *td = &thread_data[thread_id];
/* Linear search for the pipeline slot. */
376 for (i = 0; i < td->n_pipelines; i++) {
377 struct pipeline_data *tdp = &td->pipeline_data[i];
382 /* Data plane thread */
/* Remove by swapping the last slot into position i (order of the
 * per-thread pipeline array is not preserved). */
383 if (i < td->n_pipelines - 1) {
384 struct rte_pipeline *pipeline_last =
385 td->p[td->n_pipelines - 1];
386 struct pipeline_data *tdp_last =
387 &td->pipeline_data[td->n_pipelines - 1];
389 td->p[i] = pipeline_last;
390 memcpy(tdp, tdp_last, sizeof(*tdp));
404 /* Allocate request */
405 req = thread_msg_alloc();
410 req->type = THREAD_REQ_PIPELINE_DISABLE;
411 req->pipeline_disable.p = p->p;
413 /* Send request and wait for response */
414 rsp = thread_msg_send_recv(thread_id, req);
419 status = rsp->status;
422 thread_msg_free(rsp);
424 /* Request completion */
434 * Data plane threads: message handling
/* Non-blocking dequeue of the next request from the master thread. */
436 static inline struct thread_msg_req *
437 thread_msg_recv(struct rte_ring *msgq_req)
439 struct thread_msg_req *req;
441 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Post a response back to the master; retry while the ring is full. */
450 thread_msg_send(struct rte_ring *msgq_rsp,
451 struct thread_msg_rsp *rsp)
456 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
457 } while (status == -ENOBUFS);
/* PIPELINE_ENABLE handler: copy the pipeline context out of the request
 * into this thread's next free pipeline slot. The request buffer is
 * reused in place as the response (alloc sized for both — see
 * thread_msg_alloc). */
460 static struct thread_msg_rsp *
461 thread_msg_handle_pipeline_enable(struct thread_data *t,
462 struct thread_msg_req *req)
464 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
465 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
469 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
474 t->p[t->n_pipelines] = req->pipeline_enable.p;
476 p->p = req->pipeline_enable.p;
477 for (i = 0; i < req->pipeline_enable.n_tables; i++)
479 req->pipeline_enable.table[i].a;
481 p->n_tables = req->pipeline_enable.n_tables;
483 p->msgq_req = req->pipeline_enable.msgq_req;
484 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
486 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
487 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/* PIPELINE_DISABLE handler: find the pipeline and remove it by moving
 * the last slot into its place (same swap-with-last scheme as
 * thread_pipeline_disable). */
496 static struct thread_msg_rsp *
497 thread_msg_handle_pipeline_disable(struct thread_data *t,
498 struct thread_msg_req *req)
500 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
501 uint32_t n_pipelines = t->n_pipelines;
502 struct rte_pipeline *pipeline = req->pipeline_disable.p;
506 for (i = 0; i < n_pipelines; i++) {
507 struct pipeline_data *p = &t->pipeline_data[i];
509 if (p->p != pipeline)
512 if (i < n_pipelines - 1) {
513 struct rte_pipeline *pipeline_last =
514 t->p[n_pipelines - 1];
515 struct pipeline_data *p_last =
516 &t->pipeline_data[n_pipelines - 1];
518 t->p[i] = pipeline_last;
519 memcpy(p, p_last, sizeof(*p));
528 /* should not get here */
/* Dispatch loop body: pull one request (if any) and route it to the
 * matching handler, then send the response back. */
534 thread_msg_handle(struct thread_data *t)
537 struct thread_msg_req *req;
538 struct thread_msg_rsp *rsp;
540 req = thread_msg_recv(t->msgq_req);
545 case THREAD_REQ_PIPELINE_ENABLE:
546 rsp = thread_msg_handle_pipeline_enable(t, req);
549 case THREAD_REQ_PIPELINE_DISABLE:
550 rsp = thread_msg_handle_pipeline_disable(t, req);
/* Unknown request type: echo the buffer back as an (error) response. */
554 rsp = (struct thread_msg_rsp *) req;
558 thread_msg_send(t->msgq_rsp, rsp);
563 * Master thread & data plane threads: message passing
/* Pipeline-level request types, handled by the data plane thread that
 * owns the target pipeline. */
565 enum pipeline_req_type {
567 PIPELINE_REQ_PORT_IN_STATS_READ,
568 PIPELINE_REQ_PORT_IN_ENABLE,
569 PIPELINE_REQ_PORT_IN_DISABLE,
572 PIPELINE_REQ_PORT_OUT_STATS_READ,
575 PIPELINE_REQ_TABLE_STATS_READ,
576 PIPELINE_REQ_TABLE_RULE_ADD,
577 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
578 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
579 PIPELINE_REQ_TABLE_RULE_DELETE,
580 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
581 PIPELINE_REQ_TABLE_RULE_STATS_READ,
582 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
583 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
584 PIPELINE_REQ_TABLE_RULE_MTR_READ,
585 PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
586 PIPELINE_REQ_TABLE_RULE_TTL_READ,
587 PIPELINE_REQ_TABLE_RULE_TIME_READ,
/* Per-request payload structures (one per request type). */
591 struct pipeline_msg_req_port_in_stats_read {
595 struct pipeline_msg_req_port_out_stats_read {
599 struct pipeline_msg_req_table_stats_read {
603 struct pipeline_msg_req_table_rule_add {
604 struct table_rule_match match;
605 struct table_rule_action action;
608 struct pipeline_msg_req_table_rule_add_default {
609 struct table_rule_action action;
612 struct pipeline_msg_req_table_rule_add_bulk {
/* Ownership of the rule list travels with the message. */
613 struct table_rule_list *list;
617 struct pipeline_msg_req_table_rule_delete {
618 struct table_rule_match match;
621 struct pipeline_msg_req_table_rule_stats_read {
626 struct pipeline_msg_req_table_mtr_profile_add {
627 uint32_t meter_profile_id;
628 struct rte_table_action_meter_profile profile;
631 struct pipeline_msg_req_table_mtr_profile_delete {
632 uint32_t meter_profile_id;
635 struct pipeline_msg_req_table_rule_mtr_read {
641 struct pipeline_msg_req_table_dscp_table_update {
643 struct rte_table_action_dscp_table dscp_table;
646 struct pipeline_msg_req_table_rule_ttl_read {
651 struct pipeline_msg_req_table_rule_time_read {
/* Request envelope: type + target id + type-specific payload union. */
655 struct pipeline_msg_req {
656 enum pipeline_req_type type;
657 uint32_t id; /* Port IN, port OUT or table ID */
661 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
662 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
663 struct pipeline_msg_req_table_stats_read table_stats_read;
664 struct pipeline_msg_req_table_rule_add table_rule_add;
665 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
666 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
667 struct pipeline_msg_req_table_rule_delete table_rule_delete;
668 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
669 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
670 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
671 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
672 struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
673 struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
674 struct pipeline_msg_req_table_rule_time_read table_rule_time_read;
/* Per-response payload structures. */
678 struct pipeline_msg_rsp_port_in_stats_read {
679 struct rte_pipeline_port_in_stats stats;
682 struct pipeline_msg_rsp_port_out_stats_read {
683 struct rte_pipeline_port_out_stats stats;
686 struct pipeline_msg_rsp_table_stats_read {
687 struct rte_pipeline_table_stats stats;
690 struct pipeline_msg_rsp_table_rule_add {
694 struct pipeline_msg_rsp_table_rule_add_default {
698 struct pipeline_msg_rsp_table_rule_add_bulk {
702 struct pipeline_msg_rsp_table_rule_stats_read {
703 struct rte_table_action_stats_counters stats;
706 struct pipeline_msg_rsp_table_rule_mtr_read {
707 struct rte_table_action_mtr_counters stats;
710 struct pipeline_msg_rsp_table_rule_ttl_read {
711 struct rte_table_action_ttl_counters stats;
714 struct pipeline_msg_rsp_table_rule_time_read {
/* Response envelope: status + type-specific payload union. */
718 struct pipeline_msg_rsp {
723 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
724 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
725 struct pipeline_msg_rsp_table_stats_read table_stats_read;
726 struct pipeline_msg_rsp_table_rule_add table_rule_add;
727 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
728 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
729 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
730 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
731 struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
732 struct pipeline_msg_rsp_table_rule_time_read table_rule_time_read;
/* Allocate a buffer big enough for either a pipeline request or
 * response, so the data plane thread can reply in the same buffer. */
739 static struct pipeline_msg_req *
740 pipeline_msg_alloc(void)
742 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
743 sizeof(struct pipeline_msg_rsp));
745 return calloc(1, size);
749 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
/* Master thread: post req on the pipeline's REQ ring, busy-wait for the
 * reply. Caller frees the returned response via pipeline_msg_free(). */
754 static struct pipeline_msg_rsp *
755 pipeline_msg_send_recv(struct pipeline *p,
756 struct pipeline_msg_req *req)
758 struct rte_ring *msgq_req = p->msgq_req;
759 struct rte_ring *msgq_rsp = p->msgq_rsp;
760 struct pipeline_msg_rsp *rsp;
/* Retry while the request ring is full. */
765 status = rte_ring_sp_enqueue(msgq_req, req);
766 } while (status == -ENOBUFS);
/* Spin until the response arrives. */
770 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
771 } while (status != 0);
/* Read (and optionally clear) input port statistics. Direct call when
 * the owning thread is stopped; otherwise via message round-trip. */
777 pipeline_port_in_stats_read(const char *pipeline_name,
779 struct rte_pipeline_port_in_stats *stats,
783 struct pipeline_msg_req *req;
784 struct pipeline_msg_rsp *rsp;
787 /* Check input params */
788 if ((pipeline_name == NULL) ||
792 p = pipeline_find(pipeline_name);
794 (port_id >= p->n_ports_in))
797 if (!pipeline_is_running(p)) {
798 status = rte_pipeline_port_in_stats_read(p->p,
806 /* Allocate request */
807 req = pipeline_msg_alloc();
812 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
814 req->port_in_stats_read.clear = clear;
816 /* Send request and wait for response */
817 rsp = pipeline_msg_send_recv(p, req);
822 status = rsp->status;
/* Copy stats out of the response before freeing it. */
824 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
827 pipeline_msg_free(rsp);
/* Enable an input port: direct rte_pipeline call when the owning thread
 * is stopped, PORT_IN_ENABLE message otherwise. */
833 pipeline_port_in_enable(const char *pipeline_name,
837 struct pipeline_msg_req *req;
838 struct pipeline_msg_rsp *rsp;
841 /* Check input params */
842 if (pipeline_name == NULL)
845 p = pipeline_find(pipeline_name);
847 (port_id >= p->n_ports_in))
850 if (!pipeline_is_running(p)) {
851 status = rte_pipeline_port_in_enable(p->p, port_id);
855 /* Allocate request */
856 req = pipeline_msg_alloc();
861 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
864 /* Send request and wait for response */
865 rsp = pipeline_msg_send_recv(p, req);
870 status = rsp->status;
873 pipeline_msg_free(rsp);
/* Disable an input port: same structure as pipeline_port_in_enable(). */
879 pipeline_port_in_disable(const char *pipeline_name,
883 struct pipeline_msg_req *req;
884 struct pipeline_msg_rsp *rsp;
887 /* Check input params */
888 if (pipeline_name == NULL)
891 p = pipeline_find(pipeline_name);
893 (port_id >= p->n_ports_in))
896 if (!pipeline_is_running(p)) {
897 status = rte_pipeline_port_in_disable(p->p, port_id);
901 /* Allocate request */
902 req = pipeline_msg_alloc();
907 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
910 /* Send request and wait for response */
911 rsp = pipeline_msg_send_recv(p, req);
916 status = rsp->status;
919 pipeline_msg_free(rsp);
/* Read (and optionally clear) output port statistics. Mirrors
 * pipeline_port_in_stats_read() for the output side. */
925 pipeline_port_out_stats_read(const char *pipeline_name,
927 struct rte_pipeline_port_out_stats *stats,
931 struct pipeline_msg_req *req;
932 struct pipeline_msg_rsp *rsp;
935 /* Check input params */
936 if ((pipeline_name == NULL) ||
940 p = pipeline_find(pipeline_name);
942 (port_id >= p->n_ports_out))
945 if (!pipeline_is_running(p)) {
946 status = rte_pipeline_port_out_stats_read(p->p,
954 /* Allocate request */
955 req = pipeline_msg_alloc();
960 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
962 req->port_out_stats_read.clear = clear;
964 /* Send request and wait for response */
965 rsp = pipeline_msg_send_recv(p, req);
970 status = rsp->status;
972 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
975 pipeline_msg_free(rsp);
/* Read (and optionally clear) table statistics for table_id. Same
 * direct-call / message-round-trip split as the port stats readers. */
981 pipeline_table_stats_read(const char *pipeline_name,
983 struct rte_pipeline_table_stats *stats,
987 struct pipeline_msg_req *req;
988 struct pipeline_msg_rsp *rsp;
991 /* Check input params */
992 if ((pipeline_name == NULL) ||
996 p = pipeline_find(pipeline_name);
998 (table_id >= p->n_tables))
1001 if (!pipeline_is_running(p)) {
1002 status = rte_pipeline_table_stats_read(p->p,
1010 /* Allocate request */
1011 req = pipeline_msg_alloc();
1016 req->type = PIPELINE_REQ_TABLE_STATS_READ;
1018 req->table_stats_read.clear = clear;
1020 /* Send request and wait for response */
1021 rsp = pipeline_msg_send_recv(p, req);
1026 status = rsp->status;
1028 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
1031 pipeline_msg_free(rsp);
/* Validate a table rule match against the table's configured match type
 * and parameters. Non-zero return means invalid (caller treats a truthy
 * result as failure). */
1037 match_check(struct table_rule_match *match,
1041 struct table *table;
1043 if ((match == NULL) ||
1045 (table_id >= p->n_tables))
/* The rule's match type must agree with the table's. */
1048 table = &p->table[table_id];
1049 if (match->match_type != table->params.match_type)
1052 switch (match->match_type) {
1055 struct table_acl_params *t = &table->params.match.acl;
1056 struct table_rule_match_acl *r = &match->match.acl;
/* Rule and table must agree on IPv4 vs IPv6. */
1058 if ((r->ip_version && (t->ip_version == 0)) ||
1059 ((r->ip_version == 0) && t->ip_version))
/* Prefix depth limits: 32 bits for IPv4, 128 for IPv6. */
1062 if (r->ip_version) {
1063 if ((r->sa_depth > 32) ||
1067 if ((r->sa_depth > 128) ||
1068 (r->da_depth > 128))
1082 struct table_lpm_params *t = &table->params.match.lpm;
1083 struct table_rule_match_lpm *r = &match->match.lpm;
/* LPM key size encodes IP version: 4 bytes = IPv4, 16 = IPv6. */
1085 if ((r->ip_version && (t->key_size != 4)) ||
1086 ((r->ip_version == 0) && (t->key_size != 16)))
1089 if (r->ip_version) {
/* Validate a table rule action against the table's action profile:
 * the action mask must match exactly, and each enabled action's
 * parameters must be in range. */
1108 action_check(struct table_rule_action *action,
1112 struct table_action_profile *ap;
1114 if ((action == NULL) ||
1116 (table_id >= p->n_tables))
1119 ap = p->table[table_id].ap;
1120 if (action->action_mask != ap->params.action_mask)
/* FWD: target port/table id must exist. */
1123 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1124 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1125 (action->fwd.id >= p->n_ports_out))
1128 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1129 (action->fwd.id >= p->n_tables))
/* MTR: traffic-class mask must cover exactly the profile's TCs. */
1133 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1134 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
1135 uint32_t tc_mask1 = action->mtr.tc_mask;
1137 if (tc_mask1 != tc_mask0)
/* TM: subport/pipe ids must be within the profile's hierarchy. */
1141 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1142 uint32_t n_subports_per_port =
1143 ap->params.tm.n_subports_per_port;
1144 uint32_t n_pipes_per_subport =
1145 ap->params.tm.n_pipes_per_subport;
1146 uint32_t subport_id = action->tm.subport_id;
1147 uint32_t pipe_id = action->tm.pipe_id;
1149 if ((subport_id >= n_subports_per_port) ||
1150 (pipe_id >= n_pipes_per_subport))
/* ENCAP: the requested encapsulation must be enabled in the profile. */
1154 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1155 uint64_t encap_mask = ap->params.encap.encap_mask;
1156 enum rte_table_action_encap_type type = action->encap.type;
1158 if ((encap_mask & (1LLU << type)) == 0)
/* NAT: IP version of the action must match the profile's. */
1162 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1163 int ip_version0 = ap->params.common.ip_version;
1164 int ip_version1 = action->nat.ip_version;
1166 if ((ip_version1 && (ip_version0 == 0)) ||
1167 ((ip_version1 == 0) && ip_version0))
/* Validate a table default-rule action: only FWD is permitted, and its
 * target must exist. */
1175 action_default_check(struct table_rule_action *action,
1179 if ((action == NULL) ||
1180 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1182 (table_id >= p->n_tables))
1185 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1186 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1187 (action->fwd.id >= p->n_ports_out))
1190 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1191 (action->fwd.id >= p->n_tables))
/* Low-level (librte_table) representation of a rule match — one member
 * per supported table type. */
1198 union table_rule_match_low_level {
1199 struct rte_table_acl_rule_add_params acl_add;
1200 struct rte_table_acl_rule_delete_params acl_delete;
1201 struct rte_table_array_key array;
1202 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1203 struct rte_table_lpm_key lpm_ipv4;
1204 struct rte_table_lpm_ipv6_key lpm_ipv6;
/* Forward declarations: high-level to low-level match/action converters
 * (definitions elided in this view). */
1208 match_convert(struct table_rule_match *mh,
1209 union table_rule_match_low_level *ml,
1213 action_convert(struct rte_table_action *a,
1214 struct table_rule_action *action,
1215 struct rte_pipeline_table_entry *data);
/* Minimal table handle used by the bulk-add helper below. */
1218 struct rte_pipeline *p;
1220 struct rte_table_action *a;
/* Add a list of rules to a table at the librte_table level.
 * Converts each (match, action) to its low-level form, then adds them
 * either in one bulk call (when the table supports it) or one by one.
 * All temporary arrays are released through a single cleanup label. */
1225 table_rule_add_bulk_ll(struct table_ll *table,
1226 struct table_rule_list *list,
1229 union table_rule_match_low_level *match_ll = NULL;
1230 uint8_t *action_ll = NULL;
1231 void **match_ll_ptr = NULL;
1232 struct rte_pipeline_table_entry **action_ll_ptr = NULL;
1233 struct rte_pipeline_table_entry **entries_ptr = NULL;
1235 struct table_rule *rule;
/* Count the rules to size the temporary arrays. */
1240 TAILQ_FOREACH(rule, list, node)
1243 /* Memory allocation */
1244 match_ll = calloc(n, sizeof(union table_rule_match_low_level));
1245 action_ll = calloc(n, TABLE_RULE_ACTION_SIZE_MAX);
1247 match_ll_ptr = calloc(n, sizeof(void *));
1248 action_ll_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1250 entries_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1251 found = calloc(n, sizeof(int));
1253 if (match_ll == NULL ||
1254 action_ll == NULL ||
1255 match_ll_ptr == NULL ||
1256 action_ll_ptr == NULL ||
1257 entries_ptr == NULL ||
1260 goto table_rule_add_bulk_ll_free;
/* Build pointer arrays into the flat match/action buffers. */
1264 for (i = 0; i < n; i++) {
1265 match_ll_ptr[i] = (void *)&match_ll[i];
1266 action_ll_ptr[i] = (struct rte_pipeline_table_entry *)
1267 &action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
1270 /* Rule (match, action) conversion */
1272 TAILQ_FOREACH(rule, list, node) {
1273 status = match_convert(&rule->match, match_ll_ptr[i], 1);
1275 goto table_rule_add_bulk_ll_free;
1277 status = action_convert(table->a, &rule->action, action_ll_ptr[i]);
1279 goto table_rule_add_bulk_ll_free;
1284 /* Add rule (match, action) to table */
1285 if (table->bulk_supported) {
1286 status = rte_pipeline_table_entry_add_bulk(table->p,
1294 goto table_rule_add_bulk_ll_free;
/* No bulk support: add entries one at a time. */
1296 for (i = 0; i < n; i++) {
1297 status = rte_pipeline_table_entry_add(table->p,
1305 goto table_rule_add_bulk_ll_free;
1314 /* Write back to the rule list. */
1316 TAILQ_FOREACH(rule, list, node) {
1320 rule->data = entries_ptr[i];
/* Single exit: free all temporaries (free(NULL) is a no-op). */
1328 table_rule_add_bulk_ll_free:
1331 free(action_ll_ptr);
/* Add one rule (match, action) to a pipeline table. When the owning
 * thread is stopped the rule is converted and added here; otherwise the
 * (match, action) pair is marshalled into a TABLE_RULE_ADD message.
 * On success the rule is also recorded in the master-side rule list. */
1340 pipeline_table_rule_add(const char *pipeline_name,
1342 struct table_rule_match *match,
1343 struct table_rule_action *action)
1346 struct table *table;
1347 struct pipeline_msg_req *req;
1348 struct pipeline_msg_rsp *rsp;
1349 struct table_rule *rule;
1352 /* Check input params */
1353 if ((pipeline_name == NULL) ||
1358 p = pipeline_find(pipeline_name);
1360 (table_id >= p->n_tables) ||
1361 match_check(match, p, table_id) ||
1362 action_check(action, p, table_id))
1365 table = &p->table[table_id];
/* Master-side shadow copy of the rule (owned by the rule list). */
1367 rule = calloc(1, sizeof(struct table_rule));
1371 memcpy(&rule->match, match, sizeof(*match));
1372 memcpy(&rule->action, action, sizeof(*action));
1374 if (!pipeline_is_running(p)) {
1375 union table_rule_match_low_level match_ll;
1376 struct rte_pipeline_table_entry *data_in, *data_out;
1380 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1381 if (buffer == NULL) {
1386 /* Table match-action rule conversion */
1387 data_in = (struct rte_pipeline_table_entry *)buffer;
1389 status = match_convert(match, &match_ll, 1);
1396 status = action_convert(table->a, action, data_in);
1403 /* Add rule (match, action) to table */
1404 status = rte_pipeline_table_entry_add(p->p,
1416 /* Write Response */
1417 rule->data = data_out;
1418 table_rule_add(table, rule);
1424 /* Allocate request */
1425 req = pipeline_msg_alloc();
1432 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1434 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1435 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1437 /* Send request and wait for response */
1438 rsp = pipeline_msg_send_recv(p, req);
1445 status = rsp->status;
/* On success, record the data-plane entry pointer in the shadow rule. */
1447 rule->data = rsp->table_rule_add.data;
1448 table_rule_add(table, rule);
1453 pipeline_msg_free(rsp);
/* Set a table's default rule (FWD action only — see
 * action_default_check). Direct call when the owning thread is stopped,
 * TABLE_RULE_ADD_DEFAULT message otherwise. */
1459 pipeline_table_rule_add_default(const char *pipeline_name,
1461 struct table_rule_action *action)
1464 struct table *table;
1465 struct pipeline_msg_req *req;
1466 struct pipeline_msg_rsp *rsp;
1467 struct table_rule *rule;
1470 /* Check input params */
1471 if ((pipeline_name == NULL) ||
1475 p = pipeline_find(pipeline_name);
1477 (table_id >= p->n_tables) ||
1478 action_default_check(action, p, table_id))
1481 table = &p->table[table_id];
/* Master-side shadow copy of the default rule. */
1483 rule = calloc(1, sizeof(struct table_rule));
1487 memcpy(&rule->action, action, sizeof(*action));
1489 if (!pipeline_is_running(p)) {
1490 struct rte_pipeline_table_entry *data_in, *data_out;
1493 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1494 if (buffer == NULL) {
/* Build the low-level entry by hand (FWD only: port or table id). */
1500 data_in = (struct rte_pipeline_table_entry *)buffer;
1502 data_in->action = action->fwd.action;
1503 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1504 data_in->port_id = action->fwd.id;
1505 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1506 data_in->table_id = action->fwd.id;
1508 /* Add default rule to table */
1509 status = rte_pipeline_table_default_entry_add(p->p,
1519 /* Write Response */
1520 rule->data = data_out;
1521 table_rule_default_add(table, rule);
1527 /* Allocate request */
1528 req = pipeline_msg_alloc();
1535 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1537 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1539 /* Send request and wait for response */
1540 rsp = pipeline_msg_send_recv(p, req);
1547 status = rsp->status;
1549 rule->data = rsp->table_rule_add_default.data;
1550 table_rule_default_add(table, rule);
1555 pipeline_msg_free(rsp);
/* Free every rule left on the list; used both for cleanup and (per the
 * callers below) to count rules that were not added. */
1561 table_rule_list_free(struct table_rule_list *list)
1569 struct table_rule *rule;
1571 rule = TAILQ_FIRST(list);
1575 TAILQ_REMOVE(list, rule, node);
/* Add a list of rules to a pipeline table. Takes ownership of list:
 * every early-return path frees it via table_rule_list_free(). Outputs
 * the number of rules added and not added. */
1585 pipeline_table_rule_add_bulk(const char *pipeline_name,
1587 struct table_rule_list *list,
1588 uint32_t *n_rules_added,
1589 uint32_t *n_rules_not_added)
1592 struct table *table;
1593 struct pipeline_msg_req *req;
1594 struct pipeline_msg_rsp *rsp;
1595 struct table_rule *rule;
1598 /* Check input params */
1599 if ((pipeline_name == NULL) ||
1601 TAILQ_EMPTY(list) ||
1602 (n_rules_added == NULL) ||
1603 (n_rules_not_added == NULL)) {
1604 table_rule_list_free(list);
1608 p = pipeline_find(pipeline_name);
1610 (table_id >= p->n_tables)) {
1611 table_rule_list_free(list);
1615 table = &p->table[table_id];
/* Validate every rule up front; reject the whole list on any failure. */
1617 TAILQ_FOREACH(rule, list, node)
1618 if (match_check(&rule->match, p, table_id) ||
1619 action_check(&rule->action, p, table_id) {
1620 table_rule_list_free(list);
1624 if (!pipeline_is_running(p)) {
/* Thread stopped: add directly via the low-level bulk helper.
 * Bulk add is only used for ACL tables. */
1625 struct table_ll table_ll = {
1627 .table_id = table_id,
1629 .bulk_supported = table->params.match_type == TABLE_ACL,
1632 status = table_rule_add_bulk_ll(&table_ll, list, n_rules_added);
1634 table_rule_list_free(list);
/* Move added rules into the master-side table; whatever remains on
 * the list was not added and is freed (and counted) here. */
1638 table_rule_add_bulk(table, list, *n_rules_added);
1639 *n_rules_not_added = table_rule_list_free(list);
1643 /* Allocate request */
1644 req = pipeline_msg_alloc();
1646 table_rule_list_free(list);
1651 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
/* List ownership transfers to the data plane thread via the message. */
1653 req->table_rule_add_bulk.list = list;
1654 req->table_rule_add_bulk.bulk = table->params.match_type == TABLE_ACL;
1656 /* Send request and wait for response */
1657 rsp = pipeline_msg_send_recv(p, req);
1659 table_rule_list_free(list);
1664 status = rsp->status;
1666 *n_rules_added = rsp->table_rule_add_bulk.n_rules;
1668 table_rule_add_bulk(table, list, *n_rules_added);
1669 *n_rules_not_added = table_rule_list_free(list);
1671 table_rule_list_free(list);
1675 pipeline_msg_free(rsp);
/* Delete the rule matching match from a pipeline table; on success the
 * master-side shadow rule is removed too. */
1681 pipeline_table_rule_delete(const char *pipeline_name,
1683 struct table_rule_match *match)
1686 struct table *table;
1687 struct pipeline_msg_req *req;
1688 struct pipeline_msg_rsp *rsp;
1691 /* Check input params */
1692 if ((pipeline_name == NULL) ||
1696 p = pipeline_find(pipeline_name);
1698 (table_id >= p->n_tables) ||
1699 match_check(match, p, table_id))
1702 table = &p->table[table_id];
1704 if (!pipeline_is_running(p)) {
1705 union table_rule_match_low_level match_ll;
/* key_found semantics (second arg 0 vs 1 in match_convert): delete
 * path uses the delete-oriented low-level conversion. */
1708 status = match_convert(match, &match_ll, 0);
1712 status = rte_pipeline_table_entry_delete(p->p,
1719 table_rule_delete(table, match);
1724 /* Allocate request */
1725 req = pipeline_msg_alloc();
1730 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1732 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1734 /* Send request and wait for response */
1735 rsp = pipeline_msg_send_recv(p, req);
1740 status = rsp->status;
1742 table_rule_delete(table, match);
1745 pipeline_msg_free(rsp);
/* Delete a table's default rule; same stopped/running split as above. */
1751 pipeline_table_rule_delete_default(const char *pipeline_name,
1755 struct table *table;
1756 struct pipeline_msg_req *req;
1757 struct pipeline_msg_rsp *rsp;
1760 /* Check input params */
1761 if (pipeline_name == NULL)
1764 p = pipeline_find(pipeline_name);
1766 (table_id >= p->n_tables))
1769 table = &p->table[table_id];
1771 if (!pipeline_is_running(p)) {
1772 status = rte_pipeline_table_default_entry_delete(p->p,
1777 table_rule_default_delete(table);
1782 /* Allocate request */
1783 req = pipeline_msg_alloc();
1788 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1791 /* Send request and wait for response */
1792 rsp = pipeline_msg_send_recv(p, req);
1797 status = rsp->status;
1799 table_rule_default_delete(table);
1802 pipeline_msg_free(rsp);
/* Read (and optionally clear) the action stats counters of the rule
 * matching match. The rule must already exist in the master-side list
 * (table_rule_find) so its data-plane entry pointer can be used. */
1808 pipeline_table_rule_stats_read(const char *pipeline_name,
1810 struct table_rule_match *match,
1811 struct rte_table_action_stats_counters *stats,
1815 struct table *table;
1816 struct pipeline_msg_req *req;
1817 struct pipeline_msg_rsp *rsp;
1818 struct table_rule *rule;
1821 /* Check input params */
1822 if ((pipeline_name == NULL) ||
1827 p = pipeline_find(pipeline_name);
1829 (table_id >= p->n_tables) ||
1830 match_check(match, p, table_id))
1833 table = &p->table[table_id];
1834 rule = table_rule_find(table, match);
1838 if (!pipeline_is_running(p)) {
1839 status = rte_table_action_stats_read(table->a,
1847 /* Allocate request */
1848 req = pipeline_msg_alloc();
1853 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
/* Pass the data-plane entry pointer; the handler reads stats off it. */
1855 req->table_rule_stats_read.data = rule->data;
1856 req->table_rule_stats_read.clear = clear;
1858 /* Send request and wait for response */
1859 rsp = pipeline_msg_send_recv(p, req);
1864 status = rsp->status;
1866 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1869 pipeline_msg_free(rsp);
/* Add a meter profile to a table's action handle. Direct
 * rte_table_action call when the owning thread is stopped,
 * TABLE_MTR_PROFILE_ADD message otherwise. */
1875 pipeline_table_mtr_profile_add(const char *pipeline_name,
1877 uint32_t meter_profile_id,
1878 struct rte_table_action_meter_profile *profile)
1881 struct pipeline_msg_req *req;
1882 struct pipeline_msg_rsp *rsp;
1885 /* Check input params */
1886 if ((pipeline_name == NULL) ||
1890 p = pipeline_find(pipeline_name);
1892 (table_id >= p->n_tables))
1895 if (!pipeline_is_running(p)) {
1896 struct rte_table_action *a = p->table[table_id].a;
1898 status = rte_table_action_meter_profile_add(a,
1905 /* Allocate request */
1906 req = pipeline_msg_alloc();
1911 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1913 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1914 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1916 /* Send request and wait for response */
1917 rsp = pipeline_msg_send_recv(p, req);
1922 status = rsp->status;
1925 pipeline_msg_free(rsp);
/* Delete a meter profile; mirrors pipeline_table_mtr_profile_add(). */
1931 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1933 uint32_t meter_profile_id)
1936 struct pipeline_msg_req *req;
1937 struct pipeline_msg_rsp *rsp;
1940 /* Check input params */
1941 if (pipeline_name == NULL)
1944 p = pipeline_find(pipeline_name);
1946 (table_id >= p->n_tables))
1949 if (!pipeline_is_running(p)) {
1950 struct rte_table_action *a = p->table[table_id].a;
1952 status = rte_table_action_meter_profile_delete(a,
1958 /* Allocate request */
1959 req = pipeline_msg_alloc();
1964 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1966 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1968 /* Send request and wait for response */
1969 rsp = pipeline_msg_send_recv(p, req);
1974 status = rsp->status;
1977 pipeline_msg_free(rsp);
/*
 * Master thread API: read (and optionally clear) the meter counters of
 * the rule identified by @match in table @table_id; results are copied
 * into @stats. Uses the direct table action call when the pipeline is
 * stopped, otherwise a message round-trip to the data plane thread.
 * NOTE(review): lines elided in this excerpt — confirm full source.
 */
1983 pipeline_table_rule_mtr_read(const char *pipeline_name,
1985 struct table_rule_match *match,
1986 struct rte_table_action_mtr_counters *stats,
1990 struct table *table;
1991 struct pipeline_msg_req *req;
1992 struct pipeline_msg_rsp *rsp;
1993 struct table_rule *rule;
1997 /* Check input params */
1998 if ((pipeline_name == NULL) ||
2003 p = pipeline_find(pipeline_name);
2005 (table_id >= p->n_tables) ||
2006 match_check(match, p, table_id))
2009 table = &p->table[table_id];
/* All traffic classes configured for this table action profile. */
2010 tc_mask = (1 << table->ap->params.mtr.n_tc) - 1;
2012 rule = table_rule_find(table, match);
2016 if (!pipeline_is_running(p)) {
2017 status = rte_table_action_meter_read(table->a,
2026 /* Allocate request */
2027 req = pipeline_msg_alloc();
2032 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
2034 req->table_rule_mtr_read.data = rule->data;
2035 req->table_rule_mtr_read.tc_mask = tc_mask;
2036 req->table_rule_mtr_read.clear = clear;
2038 /* Send request and wait for response */
2039 rsp = pipeline_msg_send_recv(p, req);
2044 status = rsp->status;
/* Copy counters out of the response before freeing it. */
2046 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
2049 pipeline_msg_free(rsp);
/*
 * Master thread API: update the DSCP translation table entries selected
 * by @dscp_mask for table @table_id of pipeline @pipeline_name.
 * Direct call when the pipeline is stopped; message round-trip to the
 * data plane thread otherwise.
 * NOTE(review): lines elided in this excerpt — confirm full source.
 */
2055 pipeline_table_dscp_table_update(const char *pipeline_name,
2058 struct rte_table_action_dscp_table *dscp_table)
2061 struct pipeline_msg_req *req;
2062 struct pipeline_msg_rsp *rsp;
2065 /* Check input params */
2066 if ((pipeline_name == NULL) ||
2067 (dscp_table == NULL))
2070 p = pipeline_find(pipeline_name);
2072 (table_id >= p->n_tables))
2075 if (!pipeline_is_running(p)) {
2076 struct rte_table_action *a = p->table[table_id].a;
2078 status = rte_table_action_dscp_table_update(a,
2085 /* Allocate request */
2086 req = pipeline_msg_alloc();
2091 req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
2093 req->table_dscp_table_update.dscp_mask = dscp_mask;
/* DSCP table copied by value; caller keeps ownership of @dscp_table. */
2094 memcpy(&req->table_dscp_table_update.dscp_table,
2095 dscp_table, sizeof(*dscp_table));
2097 /* Send request and wait for response */
2098 rsp = pipeline_msg_send_recv(p, req);
2103 status = rsp->status;
2106 pipeline_msg_free(rsp);
/*
 * Master thread API: read (and optionally clear) the TTL counters of
 * the rule identified by @match in table @table_id into @stats.
 * Fails early when the table action profile has TTL packet counting
 * disabled. Direct call when the pipeline is stopped, message
 * round-trip otherwise.
 * NOTE(review): lines elided in this excerpt — confirm full source.
 */
2112 pipeline_table_rule_ttl_read(const char *pipeline_name,
2114 struct table_rule_match *match,
2115 struct rte_table_action_ttl_counters *stats,
2119 struct table *table;
2120 struct pipeline_msg_req *req;
2121 struct pipeline_msg_rsp *rsp;
2122 struct table_rule *rule;
2125 /* Check input params */
2126 if ((pipeline_name == NULL) ||
2131 p = pipeline_find(pipeline_name);
2133 (table_id >= p->n_tables) ||
2134 match_check(match, p, table_id))
2137 table = &p->table[table_id];
/* TTL stats only exist when packet counting was enabled at profile creation. */
2138 if (!table->ap->params.ttl.n_packets_enabled)
2141 rule = table_rule_find(table, match);
2145 if (!pipeline_is_running(p)) {
2146 status = rte_table_action_ttl_read(table->a,
2154 /* Allocate request */
2155 req = pipeline_msg_alloc();
2160 req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
2162 req->table_rule_ttl_read.data = rule->data;
2163 req->table_rule_ttl_read.clear = clear;
2165 /* Send request and wait for response */
2166 rsp = pipeline_msg_send_recv(p, req);
2171 status = rsp->status;
2173 memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
2176 pipeline_msg_free(rsp);
/*
 * Master thread API: read the last-hit timestamp of the rule identified
 * by @match in table @table_id into @timestamp. Direct table action
 * call when the pipeline is stopped, message round-trip otherwise.
 * NOTE(review): lines elided in this excerpt — confirm full source.
 */
2182 pipeline_table_rule_time_read(const char *pipeline_name,
2184 struct table_rule_match *match,
2185 uint64_t *timestamp)
2188 struct table *table;
2189 struct pipeline_msg_req *req;
2190 struct pipeline_msg_rsp *rsp;
2191 struct table_rule *rule;
2194 /* Check input params */
2195 if ((pipeline_name == NULL) ||
2197 (timestamp == NULL))
2200 p = pipeline_find(pipeline_name);
2202 (table_id >= p->n_tables) ||
2203 match_check(match, p, table_id))
2206 table = &p->table[table_id];
2208 rule = table_rule_find(table, match);
2212 if (!pipeline_is_running(p)) {
2213 status = rte_table_action_time_read(table->a,
2220 /* Allocate request */
2221 req = pipeline_msg_alloc();
2226 req->type = PIPELINE_REQ_TABLE_RULE_TIME_READ;
2228 req->table_rule_time_read.data = rule->data;
2230 /* Send request and wait for response */
2231 rsp = pipeline_msg_send_recv(p, req);
2236 status = rsp->status;
2238 *timestamp = rsp->table_rule_time_read.timestamp;
2241 pipeline_msg_free(rsp);
2247 * Data plane threads: message handling
/*
 * Data plane thread: non-blocking dequeue of one request from the
 * pipeline's request ring (single-consumer dequeue).
 * NOTE(review): return paths elided in this excerpt — presumably NULL
 * on empty ring; confirm full source.
 */
2249 static inline struct pipeline_msg_req *
2250 pipeline_msg_recv(struct rte_ring *msgq_req)
2252 struct pipeline_msg_req *req;
2254 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/*
 * Data plane thread: enqueue a response on the response ring
 * (single-producer). Busy-retries while the ring is full (-ENOBUFS),
 * so the response is never dropped.
 */
2263 pipeline_msg_send(struct rte_ring *msgq_rsp,
2264 struct pipeline_msg_rsp *rsp)
2269 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
2270 } while (status == -ENOBUFS);
/*
 * Data plane handler: PORT_IN_STATS_READ. The request buffer is reused
 * in place as the response (req and rsp alias the same memory).
 */
2273 static struct pipeline_msg_rsp *
2274 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
2275 struct pipeline_msg_req *req)
2277 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2278 uint32_t port_id = req->id;
2279 int clear = req->port_in_stats_read.clear;
2281 rsp->status = rte_pipeline_port_in_stats_read(p->p,
2283 &rsp->port_in_stats_read.stats,
/* Data plane handler: PORT_IN_ENABLE. Response reuses the request buffer. */
2289 static struct pipeline_msg_rsp *
2290 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
2291 struct pipeline_msg_req *req)
2293 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2294 uint32_t port_id = req->id;
2296 rsp->status = rte_pipeline_port_in_enable(p->p,
/* Data plane handler: PORT_IN_DISABLE. Response reuses the request buffer. */
2302 static struct pipeline_msg_rsp *
2303 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
2304 struct pipeline_msg_req *req)
2306 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2307 uint32_t port_id = req->id;
2309 rsp->status = rte_pipeline_port_in_disable(p->p,
/*
 * Data plane handler: PORT_OUT_STATS_READ. Response reuses the request
 * buffer; counters land directly in the response payload.
 */
2315 static struct pipeline_msg_rsp *
2316 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
2317 struct pipeline_msg_req *req)
2319 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2320 uint32_t port_id = req->id;
2321 int clear = req->port_out_stats_read.clear;
2323 rsp->status = rte_pipeline_port_out_stats_read(p->p,
2325 &rsp->port_out_stats_read.stats,
/*
 * Data plane handler: TABLE_STATS_READ. Response reuses the request
 * buffer.
 * NOTE(review): the local is named port_id but actually carries a table
 * id (req->id is the table index here) — consider renaming to table_id.
 */
2331 static struct pipeline_msg_rsp *
2332 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
2333 struct pipeline_msg_req *req)
2335 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2336 uint32_t port_id = req->id;
2337 int clear = req->table_stats_read.clear;
2339 rsp->status = rte_pipeline_table_stats_read(p->p,
2341 &rsp->table_stats_read.stats,
/*
 * Split an IPv6 prefix depth (0..128) into four per-32-bit-word depths
 * written to depth32[0..3], one per 32-bit chunk of the address.
 * E.g. depth 48 -> {32, 16, 0, 0} (elided cases presumably fill the
 * remaining words with 32 or 0 — confirm full source).
 */
2348 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
2353 switch (depth / 32) {
2363 depth32[1] = depth - 32;
2371 depth32[2] = depth - 64;
2379 depth32[3] = depth - 96;
/*
 * Convert a high-level rule match (mh) into the low-level,
 * table-type-specific representation (ml) consumed by the librte_table
 * add/delete APIs. The third parameter (elided in this excerpt)
 * apparently selects the acl_add vs acl_delete variant for ACL tables.
 * Handles: ACL IPv4, ACL IPv6 (addresses split into four 32-bit words,
 * byte-swapped to host order, with per-word prefix depths from
 * match_convert_ipv6_depth()), array, hash and LPM (IPv4/IPv6) matches.
 * ml is always zeroed first so untouched fields are 0.
 * NOTE(review): many lines elided (field assignments for ports,
 * switch labels, returns) — confirm against the full source.
 */
2395 match_convert(struct table_rule_match *mh,
2396 union table_rule_match_low_level *ml,
2399 memset(ml, 0, sizeof(*ml));
2401 switch (mh->match_type) {
/* ACL IPv4: proto + src/dst address with prefix depth + ports. */
2403 if (mh->match.acl.ip_version)
2405 ml->acl_add.field_value[0].value.u8 =
2406 mh->match.acl.proto;
2407 ml->acl_add.field_value[0].mask_range.u8 =
2408 mh->match.acl.proto_mask;
2410 ml->acl_add.field_value[1].value.u32 =
2411 mh->match.acl.ipv4.sa;
2412 ml->acl_add.field_value[1].mask_range.u32 =
2413 mh->match.acl.sa_depth;
2415 ml->acl_add.field_value[2].value.u32 =
2416 mh->match.acl.ipv4.da;
2417 ml->acl_add.field_value[2].mask_range.u32 =
2418 mh->match.acl.da_depth;
2420 ml->acl_add.field_value[3].value.u16 =
2422 ml->acl_add.field_value[3].mask_range.u16 =
2425 ml->acl_add.field_value[4].value.u16 =
2427 ml->acl_add.field_value[4].mask_range.u16 =
2430 ml->acl_add.priority =
2431 (int32_t) mh->match.acl.priority;
/* Same IPv4 field layout for the delete variant (no priority). */
2433 ml->acl_delete.field_value[0].value.u8 =
2434 mh->match.acl.proto;
2435 ml->acl_delete.field_value[0].mask_range.u8 =
2436 mh->match.acl.proto_mask;
2438 ml->acl_delete.field_value[1].value.u32 =
2439 mh->match.acl.ipv4.sa;
2440 ml->acl_delete.field_value[1].mask_range.u32 =
2441 mh->match.acl.sa_depth;
2443 ml->acl_delete.field_value[2].value.u32 =
2444 mh->match.acl.ipv4.da;
2445 ml->acl_delete.field_value[2].mask_range.u32 =
2446 mh->match.acl.da_depth;
2448 ml->acl_delete.field_value[3].value.u16 =
2450 ml->acl_delete.field_value[3].mask_range.u16 =
2453 ml->acl_delete.field_value[4].value.u16 =
2455 ml->acl_delete.field_value[4].mask_range.u16 =
/* ACL IPv6 add: view the 128-bit addresses as four 32-bit words. */
2461 (uint32_t *) mh->match.acl.ipv6.sa;
2463 (uint32_t *) mh->match.acl.ipv6.da;
2464 uint32_t sa32_depth[4], da32_depth[4];
2467 status = match_convert_ipv6_depth(
2468 mh->match.acl.sa_depth,
2473 status = match_convert_ipv6_depth(
2474 mh->match.acl.da_depth,
2479 ml->acl_add.field_value[0].value.u8 =
2480 mh->match.acl.proto;
2481 ml->acl_add.field_value[0].mask_range.u8 =
2482 mh->match.acl.proto_mask;
/* Words 1..4: source address, big-endian to host order. */
2484 ml->acl_add.field_value[1].value.u32 =
2485 rte_be_to_cpu_32(sa32[0]);
2486 ml->acl_add.field_value[1].mask_range.u32 =
2488 ml->acl_add.field_value[2].value.u32 =
2489 rte_be_to_cpu_32(sa32[1]);
2490 ml->acl_add.field_value[2].mask_range.u32 =
2492 ml->acl_add.field_value[3].value.u32 =
2493 rte_be_to_cpu_32(sa32[2]);
2494 ml->acl_add.field_value[3].mask_range.u32 =
2496 ml->acl_add.field_value[4].value.u32 =
2497 rte_be_to_cpu_32(sa32[3]);
2498 ml->acl_add.field_value[4].mask_range.u32 =
/* Words 5..8: destination address. */
2501 ml->acl_add.field_value[5].value.u32 =
2502 rte_be_to_cpu_32(da32[0]);
2503 ml->acl_add.field_value[5].mask_range.u32 =
2505 ml->acl_add.field_value[6].value.u32 =
2506 rte_be_to_cpu_32(da32[1]);
2507 ml->acl_add.field_value[6].mask_range.u32 =
2509 ml->acl_add.field_value[7].value.u32 =
2510 rte_be_to_cpu_32(da32[2]);
2511 ml->acl_add.field_value[7].mask_range.u32 =
2513 ml->acl_add.field_value[8].value.u32 =
2514 rte_be_to_cpu_32(da32[3]);
2515 ml->acl_add.field_value[8].mask_range.u32 =
/* Fields 9..10: L4 ports. */
2518 ml->acl_add.field_value[9].value.u16 =
2520 ml->acl_add.field_value[9].mask_range.u16 =
2523 ml->acl_add.field_value[10].value.u16 =
2525 ml->acl_add.field_value[10].mask_range.u16 =
2528 ml->acl_add.priority =
2529 (int32_t) mh->match.acl.priority;
/* ACL IPv6 delete: same layout as the add variant above. */
2532 (uint32_t *) mh->match.acl.ipv6.sa;
2534 (uint32_t *) mh->match.acl.ipv6.da;
2535 uint32_t sa32_depth[4], da32_depth[4];
2538 status = match_convert_ipv6_depth(
2539 mh->match.acl.sa_depth,
2544 status = match_convert_ipv6_depth(
2545 mh->match.acl.da_depth,
2550 ml->acl_delete.field_value[0].value.u8 =
2551 mh->match.acl.proto;
2552 ml->acl_delete.field_value[0].mask_range.u8 =
2553 mh->match.acl.proto_mask;
2555 ml->acl_delete.field_value[1].value.u32 =
2556 rte_be_to_cpu_32(sa32[0]);
2557 ml->acl_delete.field_value[1].mask_range.u32 =
2559 ml->acl_delete.field_value[2].value.u32 =
2560 rte_be_to_cpu_32(sa32[1]);
2561 ml->acl_delete.field_value[2].mask_range.u32 =
2563 ml->acl_delete.field_value[3].value.u32 =
2564 rte_be_to_cpu_32(sa32[2]);
2565 ml->acl_delete.field_value[3].mask_range.u32 =
2567 ml->acl_delete.field_value[4].value.u32 =
2568 rte_be_to_cpu_32(sa32[3]);
2569 ml->acl_delete.field_value[4].mask_range.u32 =
2572 ml->acl_delete.field_value[5].value.u32 =
2573 rte_be_to_cpu_32(da32[0]);
2574 ml->acl_delete.field_value[5].mask_range.u32 =
2576 ml->acl_delete.field_value[6].value.u32 =
2577 rte_be_to_cpu_32(da32[1]);
2578 ml->acl_delete.field_value[6].mask_range.u32 =
2580 ml->acl_delete.field_value[7].value.u32 =
2581 rte_be_to_cpu_32(da32[2]);
2582 ml->acl_delete.field_value[7].mask_range.u32 =
2584 ml->acl_delete.field_value[8].value.u32 =
2585 rte_be_to_cpu_32(da32[3]);
2586 ml->acl_delete.field_value[8].mask_range.u32 =
2589 ml->acl_delete.field_value[9].value.u16 =
2591 ml->acl_delete.field_value[9].mask_range.u16 =
2594 ml->acl_delete.field_value[10].value.u16 =
2596 ml->acl_delete.field_value[10].mask_range.u16 =
/* Array table: position index only. */
2602 ml->array.pos = mh->match.array.pos;
/* Hash table: raw key bytes, truncated/padded to ml->hash size. */
2606 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
/* LPM: IPv4 or IPv6 prefix depending on ip_version flag. */
2610 if (mh->match.lpm.ip_version) {
2611 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
2612 ml->lpm_ipv4.depth = mh->match.lpm.depth;
2614 memcpy(ml->lpm_ipv6.ip,
2615 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
2616 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/*
 * Apply every action selected in action->action_mask onto the pipeline
 * table entry @data via rte_table_action_apply(), one action type at a
 * time (FWD, LB, MTR, TM, ENCAP, NAT, TTL, STATS, TIME, SYM_CRYPTO,
 * TAG, DECAP). Each bit of the mask gates one apply call.
 * NOTE(review): per-call error handling and the final return are elided
 * in this excerpt — presumably each failing status is propagated;
 * confirm full source.
 */
2627 action_convert(struct rte_table_action *a,
2628 struct table_rule_action *action,
2629 struct rte_pipeline_table_entry *data)
2634 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2635 status = rte_table_action_apply(a,
2637 RTE_TABLE_ACTION_FWD,
2644 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2645 status = rte_table_action_apply(a,
2647 RTE_TABLE_ACTION_LB,
2654 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2655 status = rte_table_action_apply(a,
2657 RTE_TABLE_ACTION_MTR,
2664 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2665 status = rte_table_action_apply(a,
2667 RTE_TABLE_ACTION_TM,
2674 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2675 status = rte_table_action_apply(a,
2677 RTE_TABLE_ACTION_ENCAP,
2684 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2685 status = rte_table_action_apply(a,
2687 RTE_TABLE_ACTION_NAT,
2694 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2695 status = rte_table_action_apply(a,
2697 RTE_TABLE_ACTION_TTL,
2704 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2705 status = rte_table_action_apply(a,
2707 RTE_TABLE_ACTION_STATS,
2714 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2715 status = rte_table_action_apply(a,
2717 RTE_TABLE_ACTION_TIME,
2724 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2725 status = rte_table_action_apply(a,
2727 RTE_TABLE_ACTION_SYM_CRYPTO,
2728 &action->sym_crypto);
2734 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
2735 status = rte_table_action_apply(a,
2737 RTE_TABLE_ACTION_TAG,
2744 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2745 status = rte_table_action_apply(a,
2747 RTE_TABLE_ACTION_DECAP,
/*
 * Data plane handler: TABLE_RULE_ADD. Converts the high-level match and
 * action into their low-level forms, builds the table entry in the
 * per-pipeline scratch buffer, then adds it to the running pipeline
 * table. Response reuses the request buffer; the entry handle is
 * returned in rsp->table_rule_add.data.
 * NOTE(review): error paths elided in this excerpt — confirm full source.
 */
2757 static struct pipeline_msg_rsp *
2758 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
2759 struct pipeline_msg_req *req)
2761 union table_rule_match_low_level match_ll;
2762 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2763 struct table_rule_match *match = &req->table_rule_add.match;
2764 struct table_rule_action *action = &req->table_rule_add.action;
2765 struct rte_pipeline_table_entry *data_in, *data_out;
2766 uint32_t table_id = req->id;
2767 int key_found, status;
2768 struct rte_table_action *a = p->table_data[table_id].a;
/* Stage the entry in the pipeline's scratch buffer, zeroed first. */
2771 memset(p->buffer, 0, sizeof(p->buffer));
2772 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2774 status = match_convert(match, &match_ll, 1);
2780 status = action_convert(a, action, data_in);
2786 status = rte_pipeline_table_entry_add(p->p,
2797 /* Write response */
2799 rsp->table_rule_add.data = data_out;
/*
 * Data plane handler: TABLE_RULE_ADD_DEFAULT. Builds the default entry
 * (forward-to-port or forward-to-table) in the scratch buffer and
 * installs it as the table's default entry. Response reuses the request
 * buffer; entry handle returned in rsp->table_rule_add_default.data.
 */
2804 static struct pipeline_msg_rsp *
2805 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2806 struct pipeline_msg_req *req)
2808 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2809 struct table_rule_action *action = &req->table_rule_add_default.action;
2810 struct rte_pipeline_table_entry *data_in, *data_out;
2811 uint32_t table_id = req->id;
2815 memset(p->buffer, 0, sizeof(p->buffer));
2816 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* Only the FWD action is meaningful for a default entry. */
2818 data_in->action = action->fwd.action;
2819 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2820 data_in->port_id = action->fwd.id;
2821 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2822 data_in->table_id = action->fwd.id;
2824 /* Add default rule to table */
2825 status = rte_pipeline_table_default_entry_add(p->p,
2834 /* Write response */
2836 rsp->table_rule_add_default.data = data_out;
/*
 * Data plane handler: TABLE_RULE_ADD_BULK. Delegates to the shared
 * low-level bulk-add helper; on failure reports zero rules added.
 * Response reuses the request buffer and carries the number of rules
 * actually installed.
 */
2841 static struct pipeline_msg_rsp *
2842 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2843 struct pipeline_msg_req *req)
2845 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2847 uint32_t table_id = req->id;
2848 struct table_rule_list *list = req->table_rule_add_bulk.list;
2849 uint32_t bulk = req->table_rule_add_bulk.bulk;
2851 uint32_t n_rules_added;
/* Describe the target table for the shared low-level helper. */
2854 struct table_ll table_ll = {
2856 .table_id = table_id,
2857 .a = p->table_data[table_id].a,
2858 .bulk_supported = bulk,
2861 status = table_rule_add_bulk_ll(&table_ll, list, &n_rules_added);
2864 rsp->table_rule_add_bulk.n_rules = 0;
2868 /* Write response */
2870 rsp->table_rule_add_bulk.n_rules = n_rules_added;
/*
 * Data plane handler: TABLE_RULE_DELETE. Converts the match to its
 * low-level form (delete variant, third arg 0) and removes the entry
 * from the running pipeline table. Response reuses the request buffer.
 */
2874 static struct pipeline_msg_rsp *
2875 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2876 struct pipeline_msg_req *req)
2878 union table_rule_match_low_level match_ll;
2879 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2880 struct table_rule_match *match = &req->table_rule_delete.match;
2881 uint32_t table_id = req->id;
2882 int key_found, status;
2884 status = match_convert(match, &match_ll, 0);
2890 rsp->status = rte_pipeline_table_entry_delete(p->p,
/*
 * Data plane handler: TABLE_RULE_DELETE_DEFAULT. Removes the table's
 * default entry. Response reuses the request buffer.
 */
2899 static struct pipeline_msg_rsp *
2900 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2901 struct pipeline_msg_req *req)
2903 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2904 uint32_t table_id = req->id;
2906 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/*
 * Data plane handler: TABLE_RULE_STATS_READ. Reads (and optionally
 * clears) per-rule stats via the table action; counters land directly
 * in the response payload. Response reuses the request buffer.
 */
2913 static struct pipeline_msg_rsp *
2914 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2915 struct pipeline_msg_req *req)
2917 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2918 uint32_t table_id = req->id;
2919 void *data = req->table_rule_stats_read.data;
2920 int clear = req->table_rule_stats_read.clear;
2921 struct rte_table_action *a = p->table_data[table_id].a;
2923 rsp->status = rte_table_action_stats_read(a,
2925 &rsp->table_rule_stats_read.stats,
/*
 * Data plane handler: TABLE_MTR_PROFILE_ADD. Adds the meter profile
 * carried in the request to the table's action object. Response reuses
 * the request buffer.
 */
2931 static struct pipeline_msg_rsp *
2932 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2933 struct pipeline_msg_req *req)
2935 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2936 uint32_t table_id = req->id;
2937 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2938 struct rte_table_action_meter_profile *profile =
2939 &req->table_mtr_profile_add.profile;
2940 struct rte_table_action *a = p->table_data[table_id].a;
2942 rsp->status = rte_table_action_meter_profile_add(a,
/*
 * Data plane handler: TABLE_MTR_PROFILE_DELETE. Removes the given meter
 * profile from the table's action object. Response reuses the request
 * buffer.
 */
2949 static struct pipeline_msg_rsp *
2950 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2951 struct pipeline_msg_req *req)
2953 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2954 uint32_t table_id = req->id;
2955 uint32_t meter_profile_id =
2956 req->table_mtr_profile_delete.meter_profile_id;
2957 struct rte_table_action *a = p->table_data[table_id].a;
2959 rsp->status = rte_table_action_meter_profile_delete(a,
/*
 * Data plane handler: TABLE_RULE_MTR_READ. Reads (and optionally
 * clears) the rule's meter counters for the requested traffic classes;
 * counters land directly in the response payload. Response reuses the
 * request buffer.
 */
2965 static struct pipeline_msg_rsp *
2966 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2967 struct pipeline_msg_req *req)
2969 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2970 uint32_t table_id = req->id;
2971 void *data = req->table_rule_mtr_read.data;
2972 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2973 int clear = req->table_rule_mtr_read.clear;
2974 struct rte_table_action *a = p->table_data[table_id].a;
2976 rsp->status = rte_table_action_meter_read(a,
2979 &rsp->table_rule_mtr_read.stats,
/*
 * Data plane handler: TABLE_DSCP_TABLE_UPDATE. Applies the DSCP table
 * entries selected by dscp_mask to the table's action object. Response
 * reuses the request buffer.
 */
2985 static struct pipeline_msg_rsp *
2986 pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
2987 struct pipeline_msg_req *req)
2989 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2990 uint32_t table_id = req->id;
2991 uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
2992 struct rte_table_action_dscp_table *dscp_table =
2993 &req->table_dscp_table_update.dscp_table;
2994 struct rte_table_action *a = p->table_data[table_id].a;
2996 rsp->status = rte_table_action_dscp_table_update(a,
/*
 * Data plane handler: TABLE_RULE_TTL_READ. Reads (and optionally
 * clears) the rule's TTL counters into the response payload. Response
 * reuses the request buffer.
 */
3003 static struct pipeline_msg_rsp *
3004 pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
3005 struct pipeline_msg_req *req)
3007 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
3008 uint32_t table_id = req->id;
3009 void *data = req->table_rule_ttl_read.data;
3010 int clear = req->table_rule_ttl_read.clear;
3011 struct rte_table_action *a = p->table_data[table_id].a;
3013 rsp->status = rte_table_action_ttl_read(a,
3015 &rsp->table_rule_ttl_read.stats,
/*
 * Data plane handler: TABLE_RULE_TIME_READ. Reads the rule's last-hit
 * timestamp into the response payload. Response reuses the request
 * buffer.
 */
3021 static struct pipeline_msg_rsp *
3022 pipeline_msg_handle_table_rule_time_read(struct pipeline_data *p,
3023 struct pipeline_msg_req *req)
3025 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
3026 uint32_t table_id = req->id;
3027 void *data = req->table_rule_time_read.data;
3028 struct rte_table_action *a = p->table_data[table_id].a;
3030 rsp->status = rte_table_action_time_read(a,
3032 &rsp->table_rule_time_read.timestamp);
/*
 * Data plane thread: drain the pipeline's request queue, dispatching
 * each request to its handler by req->type and sending the handler's
 * response back on the response queue. Handlers reuse the request
 * buffer as the response, so the master thread frees it after reading.
 * Unknown request types fall through to the default case, which echoes
 * the request back as the response.
 * NOTE(review): loop construct and break statements are elided in this
 * excerpt — presumably runs until pipeline_msg_recv() returns NULL.
 */
3038 pipeline_msg_handle(struct pipeline_data *p)
3041 struct pipeline_msg_req *req;
3042 struct pipeline_msg_rsp *rsp;
3044 req = pipeline_msg_recv(p->msgq_req);
3048 switch (req->type) {
3049 case PIPELINE_REQ_PORT_IN_STATS_READ:
3050 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
3053 case PIPELINE_REQ_PORT_IN_ENABLE:
3054 rsp = pipeline_msg_handle_port_in_enable(p, req);
3057 case PIPELINE_REQ_PORT_IN_DISABLE:
3058 rsp = pipeline_msg_handle_port_in_disable(p, req);
3061 case PIPELINE_REQ_PORT_OUT_STATS_READ:
3062 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
3065 case PIPELINE_REQ_TABLE_STATS_READ:
3066 rsp = pipeline_msg_handle_table_stats_read(p, req);
3069 case PIPELINE_REQ_TABLE_RULE_ADD:
3070 rsp = pipeline_msg_handle_table_rule_add(p, req);
3073 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
3074 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
3077 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
3078 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
3081 case PIPELINE_REQ_TABLE_RULE_DELETE:
3082 rsp = pipeline_msg_handle_table_rule_delete(p, req);
3085 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
3086 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
3089 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
3090 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
3093 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
3094 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
3097 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
3098 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
3101 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
3102 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
3105 case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
3106 rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
3109 case PIPELINE_REQ_TABLE_RULE_TTL_READ:
3110 rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
3113 case PIPELINE_REQ_TABLE_RULE_TIME_READ:
3114 rsp = pipeline_msg_handle_table_rule_time_read(p, req);
/* Unknown type: bounce the request back unchanged as the response. */
3118 rsp = (struct pipeline_msg_rsp *) req;
3122 pipeline_msg_send(p->msgq_rsp, rsp);
3127 * Data plane threads: main
3130 thread_main(void *arg __rte_unused)
3132 struct thread_data *t;
3133 uint32_t thread_id, i;
3135 thread_id = rte_lcore_id();
3136 t = &thread_data[thread_id];
3139 for (i = 0; ; i++) {
3143 for (j = 0; j < t->n_pipelines; j++)
3144 rte_pipeline_run(t->p[j]);
3147 if ((i & 0xF) == 0) {
3148 uint64_t time = rte_get_tsc_cycles();
3149 uint64_t time_next_min = UINT64_MAX;
3151 if (time < t->time_next_min)
3154 /* Pipeline message queues */
3155 for (j = 0; j < t->n_pipelines; j++) {
3156 struct pipeline_data *p =
3157 &t->pipeline_data[j];
3158 uint64_t time_next = p->time_next;
3160 if (time_next <= time) {
3161 pipeline_msg_handle(p);
3162 rte_pipeline_flush(p->p);
3163 time_next = time + p->timer_period;
3164 p->time_next = time_next;
3167 if (time_next < time_next_min)
3168 time_next_min = time_next;
3171 /* Thread message queues */
3173 uint64_t time_next = t->time_next;
3175 if (time_next <= time) {
3176 thread_msg_handle(t);
3177 time_next = time + t->timer_period;
3178 t->time_next = time_next;
3181 if (time_next < time_next_min)
3182 time_next_min = time_next;
3185 t->time_next_min = time_next_min;