1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
/* Compile-time tunables; each may be overridden from the build system. */
22 #ifndef THREAD_PIPELINES_MAX
/* Max number of pipelines a single data plane thread can run. */
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
/* Slot count of each master <-> data plane thread message ring. */
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
/* Message-queue polling timer period, in milliseconds. */
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Master thread: data plane thread context
/* Request/response rings used by the master thread to talk to this
 * data plane thread (one pair per lcore).
 */
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
/* Master-side view of all data plane threads, indexed by lcore ID. */
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
/* Table action handle; presumably part of struct table_data — the
 * struct's opening line is not visible here (TODO confirm).
 */
50 struct rte_table_action *a;
/* Per-pipeline state owned by the data plane thread running it. */
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
/* Rings for pipeline-level requests from the master thread. */
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
/* Scratch buffer large enough for any converted table rule action. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
/* Per-thread context: pipelines assigned to this lcore plus the
 * thread-level message rings and timer bookkeeping.
 */
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
/* Earliest of the pending timer deadlines. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
/* First pass: release any previously created rings for lcores that are
 * not enabled (cleanup path of thread init).
 */
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
/* rte_ring_free(NULL) is a no-op, so unconditional free is safe. */
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
/* Second pass: create the REQ/RSP ring pair for every worker lcore. */
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
/* NOTE(review): variable is named cpu_id but holds the SOCKET id of
 * the lcore — used for NUMA-local ring allocation; confirm intent.
 */
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
/* Single-producer (master) / single-consumer (worker) ring. */
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
/* Convert the millisecond timer period to TSC cycles. */
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
/* Return 1 when the lcore identified by thread_id is in the EAL
 * RUNNING state, 0 otherwise.
 */
159 thread_is_running(uint32_t thread_id)
161 enum rte_lcore_state_t thread_state;
163 thread_state = rte_eal_get_lcore_state(thread_id);
164 return (thread_state == RUNNING) ? 1 : 0;
168 * Pipeline is running when:
169 * (A) Pipeline is mapped to a data plane thread AND
170 * (B) Its data plane thread is in RUNNING state.
173 pipeline_is_running(struct pipeline *p)
/* Condition (A) is checked on a line not visible here; (B) below. */
178 return thread_is_running(p->thread_id);
182 * Master thread & data plane threads: message passing
/* Thread-level request opcodes sent from master to data plane threads. */
184 enum thread_req_type {
185 THREAD_REQ_PIPELINE_ENABLE = 0,
186 THREAD_REQ_PIPELINE_DISABLE,
/* Thread-level request message. The payload below belongs to the
 * pipeline_enable variant of an anonymous union (union/struct framing
 * lines are not visible here).
 */
190 struct thread_msg_req {
191 enum thread_req_type type;
195 struct rte_pipeline *p;
197 struct rte_table_action *a;
198 } table[RTE_PIPELINE_TABLE_MAX];
199 struct rte_ring *msgq_req;
200 struct rte_ring *msgq_rsp;
201 uint32_t timer_period_ms;
/* pipeline_disable variant: only the pipeline handle. */
206 struct rte_pipeline *p;
/* Thread-level response message (status only). */
211 struct thread_msg_rsp {
/* Allocate a zeroed buffer big enough to hold either a request or a
 * response, so the same buffer can be reused for the reply in place.
 */
218 static struct thread_msg_req *
219 thread_msg_alloc(void)
221 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
222 sizeof(struct thread_msg_rsp));
/* Caller owns the buffer; it is freed via thread_msg_free(). */
224 return calloc(1, size);
/* Release a message buffer obtained from thread_msg_alloc(). */
228 thread_msg_free(struct thread_msg_rsp *rsp)
/* Post a request on the thread's REQ ring and busy-wait for the reply
 * on the RSP ring. Both loops spin (master thread only); ownership of
 * the message buffer passes to the data plane thread and back.
 */
233 static struct thread_msg_rsp *
234 thread_msg_send_recv(uint32_t thread_id,
235 struct thread_msg_req *req)
237 struct thread *t = &thread[thread_id];
238 struct rte_ring *msgq_req = t->msgq_req;
239 struct rte_ring *msgq_rsp = t->msgq_rsp;
240 struct thread_msg_rsp *rsp;
/* Retry until the ring has room. */
245 status = rte_ring_sp_enqueue(msgq_req, req);
246 } while (status == -ENOBUFS);
/* Block until the response arrives. */
250 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
251 } while (status != 0);
/* Map a pipeline onto a data plane thread. If the target thread is not
 * running yet, update its context directly; otherwise hand the work
 * over via a PIPELINE_ENABLE request message.
 */
257 thread_pipeline_enable(uint32_t thread_id,
258 const char *pipeline_name)
260 struct pipeline *p = pipeline_find(pipeline_name);
262 struct thread_msg_req *req;
263 struct thread_msg_rsp *rsp;
267 /* Check input params */
268 if ((thread_id >= RTE_MAX_LCORE) ||
270 (p->n_ports_in == 0) ||
271 (p->n_ports_out == 0) ||
275 t = &thread[thread_id];
276 if ((t->enabled == 0) ||
/* Direct path: the target thread is not running, so its context can
 * be written without synchronization.
 */
280 if (!thread_is_running(thread_id)) {
281 struct thread_data *td = &thread_data[thread_id];
282 struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
284 if (td->n_pipelines >= THREAD_PIPELINES_MAX)
287 /* Data plane thread */
288 td->p[td->n_pipelines] = p->p;
291 for (i = 0; i < p->n_tables; i++)
292 tdp->table_data[i].a = p->table[i].a;
294 tdp->n_tables = p->n_tables;
296 tdp->msgq_req = p->msgq_req;
297 tdp->msgq_rsp = p->msgq_rsp;
298 tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
299 tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
/* Record the owning thread on the master side. */
304 p->thread_id = thread_id;
310 /* Allocate request */
311 req = thread_msg_alloc();
/* Message path: fill in the full pipeline context for the worker. */
316 req->type = THREAD_REQ_PIPELINE_ENABLE;
317 req->pipeline_enable.p = p->p;
318 for (i = 0; i < p->n_tables; i++)
319 req->pipeline_enable.table[i].a =
321 req->pipeline_enable.msgq_req = p->msgq_req;
322 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
323 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
324 req->pipeline_enable.n_tables = p->n_tables;
326 /* Send request and wait for response */
327 rsp = thread_msg_send_recv(thread_id, req);
332 status = rsp->status;
335 thread_msg_free(rsp);
337 /* Request completion */
341 p->thread_id = thread_id;
/* Unmap a pipeline from its data plane thread. Direct context update
 * when the thread is stopped, PIPELINE_DISABLE message otherwise.
 */
348 thread_pipeline_disable(uint32_t thread_id,
349 const char *pipeline_name)
351 struct pipeline *p = pipeline_find(pipeline_name);
353 struct thread_msg_req *req;
354 struct thread_msg_rsp *rsp;
357 /* Check input params */
358 if ((thread_id >= RTE_MAX_LCORE) ||
362 t = &thread[thread_id];
/* The pipeline must actually be owned by the requested thread. */
369 if (p->thread_id != thread_id)
372 if (!thread_is_running(thread_id)) {
373 struct thread_data *td = &thread_data[thread_id];
/* Locate the pipeline slot in the thread context. */
376 for (i = 0; i < td->n_pipelines; i++) {
377 struct pipeline_data *tdp = &td->pipeline_data[i];
/* Remove by swapping the last slot into the vacated one, keeping
 * the arrays dense.
 */
382 /* Data plane thread */
383 if (i < td->n_pipelines - 1) {
384 struct rte_pipeline *pipeline_last =
385 td->p[td->n_pipelines - 1];
386 struct pipeline_data *tdp_last =
387 &td->pipeline_data[td->n_pipelines - 1];
389 td->p[i] = pipeline_last;
390 memcpy(tdp, tdp_last, sizeof(*tdp));
404 /* Allocate request */
405 req = thread_msg_alloc();
410 req->type = THREAD_REQ_PIPELINE_DISABLE;
411 req->pipeline_disable.p = p->p;
413 /* Send request and wait for response */
414 rsp = thread_msg_send_recv(thread_id, req);
419 status = rsp->status;
422 thread_msg_free(rsp);
424 /* Request completion */
434 * Data plane threads: message handling
/* Non-blocking dequeue of a request from the thread's REQ ring;
 * returns the request or (presumably) NULL when the ring is empty.
 */
436 static inline struct thread_msg_req *
437 thread_msg_recv(struct rte_ring *msgq_req)
439 struct thread_msg_req *req;
441 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Post a response on the RSP ring, spinning until there is room. */
450 thread_msg_send(struct rte_ring *msgq_rsp,
451 struct thread_msg_rsp *rsp)
456 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
457 } while (status == -ENOBUFS);
/* Worker-side handler: install the pipeline described by the request
 * into this thread's context. The response reuses the request buffer.
 */
460 static struct thread_msg_rsp *
461 thread_msg_handle_pipeline_enable(struct thread_data *t,
462 struct thread_msg_req *req)
464 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
465 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Reject when the thread's pipeline table is full. */
469 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
474 t->p[t->n_pipelines] = req->pipeline_enable.p;
476 p->p = req->pipeline_enable.p;
477 for (i = 0; i < req->pipeline_enable.n_tables; i++)
479 req->pipeline_enable.table[i].a;
481 p->n_tables = req->pipeline_enable.n_tables;
483 p->msgq_req = req->pipeline_enable.msgq_req;
484 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
/* Convert ms period to TSC cycles and arm the first deadline. */
486 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
487 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/* Worker-side handler: find the pipeline in this thread's context and
 * remove it, compacting the arrays with a swap-with-last.
 */
496 static struct thread_msg_rsp *
497 thread_msg_handle_pipeline_disable(struct thread_data *t,
498 struct thread_msg_req *req)
500 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
501 uint32_t n_pipelines = t->n_pipelines;
502 struct rte_pipeline *pipeline = req->pipeline_disable.p;
506 for (i = 0; i < n_pipelines; i++) {
507 struct pipeline_data *p = &t->pipeline_data[i];
509 if (p->p != pipeline)
/* Swap the last entry into the freed slot to keep arrays dense. */
512 if (i < n_pipelines - 1) {
513 struct rte_pipeline *pipeline_last =
514 t->p[n_pipelines - 1];
515 struct pipeline_data *p_last =
516 &t->pipeline_data[n_pipelines - 1];
518 t->p[i] = pipeline_last;
519 memcpy(p, p_last, sizeof(*p));
528 /* should not get here */
/* Worker-side dispatch loop body: drain pending requests, route each
 * to its handler, and post the response back to the master.
 */
534 thread_msg_handle(struct thread_data *t)
537 struct thread_msg_req *req;
538 struct thread_msg_rsp *rsp;
540 req = thread_msg_recv(t->msgq_req);
545 case THREAD_REQ_PIPELINE_ENABLE:
546 rsp = thread_msg_handle_pipeline_enable(t, req);
549 case THREAD_REQ_PIPELINE_DISABLE:
550 rsp = thread_msg_handle_pipeline_disable(t, req);
/* Unknown type: reuse the request buffer as an (error) response. */
554 rsp = (struct thread_msg_rsp *) req;
558 thread_msg_send(t->msgq_rsp, rsp);
563 * Master thread & data plane threads: message passing
/* Pipeline-level request opcodes sent from the master thread to the
 * data plane thread that owns the pipeline.
 */
565 enum pipeline_req_type {
567 PIPELINE_REQ_PORT_IN_STATS_READ,
568 PIPELINE_REQ_PORT_IN_ENABLE,
569 PIPELINE_REQ_PORT_IN_DISABLE,
572 PIPELINE_REQ_PORT_OUT_STATS_READ,
575 PIPELINE_REQ_TABLE_STATS_READ,
576 PIPELINE_REQ_TABLE_RULE_ADD,
577 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
578 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
579 PIPELINE_REQ_TABLE_RULE_DELETE,
580 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
581 PIPELINE_REQ_TABLE_RULE_STATS_READ,
582 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
583 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
584 PIPELINE_REQ_TABLE_RULE_MTR_READ,
585 PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
586 PIPELINE_REQ_TABLE_RULE_TTL_READ,
/* Per-opcode request payloads. */
590 struct pipeline_msg_req_port_in_stats_read {
594 struct pipeline_msg_req_port_out_stats_read {
598 struct pipeline_msg_req_table_stats_read {
602 struct pipeline_msg_req_table_rule_add {
603 struct table_rule_match match;
604 struct table_rule_action action;
607 struct pipeline_msg_req_table_rule_add_default {
608 struct table_rule_action action;
/* Bulk add carries the rule list by pointer: ownership moves to the
 * data plane thread for the duration of the request.
 */
611 struct pipeline_msg_req_table_rule_add_bulk {
612 struct table_rule_list *list;
616 struct pipeline_msg_req_table_rule_delete {
617 struct table_rule_match match;
620 struct pipeline_msg_req_table_rule_stats_read {
625 struct pipeline_msg_req_table_mtr_profile_add {
626 uint32_t meter_profile_id;
627 struct rte_table_action_meter_profile profile;
630 struct pipeline_msg_req_table_mtr_profile_delete {
631 uint32_t meter_profile_id;
634 struct pipeline_msg_req_table_rule_mtr_read {
640 struct pipeline_msg_req_table_dscp_table_update {
642 struct rte_table_action_dscp_table dscp_table;
645 struct pipeline_msg_req_table_rule_ttl_read {
/* Request envelope: opcode, target object ID, and a payload union. */
650 struct pipeline_msg_req {
651 enum pipeline_req_type type;
652 uint32_t id; /* Port IN, port OUT or table ID */
656 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
657 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
658 struct pipeline_msg_req_table_stats_read table_stats_read;
659 struct pipeline_msg_req_table_rule_add table_rule_add;
660 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
661 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
662 struct pipeline_msg_req_table_rule_delete table_rule_delete;
663 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
664 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
665 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
666 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
667 struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
668 struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
/* Per-opcode response payloads. */
672 struct pipeline_msg_rsp_port_in_stats_read {
673 struct rte_pipeline_port_in_stats stats;
676 struct pipeline_msg_rsp_port_out_stats_read {
677 struct rte_pipeline_port_out_stats stats;
680 struct pipeline_msg_rsp_table_stats_read {
681 struct rte_pipeline_table_stats stats;
684 struct pipeline_msg_rsp_table_rule_add {
688 struct pipeline_msg_rsp_table_rule_add_default {
692 struct pipeline_msg_rsp_table_rule_add_bulk {
696 struct pipeline_msg_rsp_table_rule_stats_read {
697 struct rte_table_action_stats_counters stats;
700 struct pipeline_msg_rsp_table_rule_mtr_read {
701 struct rte_table_action_mtr_counters stats;
704 struct pipeline_msg_rsp_table_rule_ttl_read {
705 struct rte_table_action_ttl_counters stats;
/* Response envelope: status plus a payload union. */
708 struct pipeline_msg_rsp {
713 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
714 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
715 struct pipeline_msg_rsp_table_stats_read table_stats_read;
716 struct pipeline_msg_rsp_table_rule_add table_rule_add;
717 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
718 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
719 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
720 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
721 struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
/* Allocate one zeroed buffer big enough for either message kind, so a
 * request can be turned into a response in place.
 */
728 static struct pipeline_msg_req *
729 pipeline_msg_alloc(void)
731 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
732 sizeof(struct pipeline_msg_rsp));
734 return calloc(1, size);
/* Release a message buffer obtained from pipeline_msg_alloc(). */
738 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
/* Post a request on the pipeline's REQ ring and busy-wait for the
 * reply on the RSP ring (master thread only).
 */
743 static struct pipeline_msg_rsp *
744 pipeline_msg_send_recv(struct pipeline *p,
745 struct pipeline_msg_req *req)
747 struct rte_ring *msgq_req = p->msgq_req;
748 struct rte_ring *msgq_rsp = p->msgq_rsp;
749 struct pipeline_msg_rsp *rsp;
/* Retry until the ring has room. */
754 status = rte_ring_sp_enqueue(msgq_req, req);
755 } while (status == -ENOBUFS);
/* Block until the response arrives. */
759 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
760 } while (status != 0);
/* Read (and optionally clear) input port stats. Direct call when the
 * pipeline is not running; request message otherwise.
 */
766 pipeline_port_in_stats_read(const char *pipeline_name,
768 struct rte_pipeline_port_in_stats *stats,
772 struct pipeline_msg_req *req;
773 struct pipeline_msg_rsp *rsp;
776 /* Check input params */
777 if ((pipeline_name == NULL) ||
781 p = pipeline_find(pipeline_name);
783 (port_id >= p->n_ports_in))
786 if (!pipeline_is_running(p)) {
787 status = rte_pipeline_port_in_stats_read(p->p,
795 /* Allocate request */
796 req = pipeline_msg_alloc();
801 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
803 req->port_in_stats_read.clear = clear;
805 /* Send request and wait for response */
806 rsp = pipeline_msg_send_recv(p, req);
811 status = rsp->status;
/* Copy the counters out of the response before freeing it. */
813 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
816 pipeline_msg_free(rsp);
/* Enable an input port: direct call when the pipeline is not running,
 * PORT_IN_ENABLE request message otherwise.
 */
822 pipeline_port_in_enable(const char *pipeline_name,
826 struct pipeline_msg_req *req;
827 struct pipeline_msg_rsp *rsp;
830 /* Check input params */
831 if (pipeline_name == NULL)
834 p = pipeline_find(pipeline_name);
836 (port_id >= p->n_ports_in))
839 if (!pipeline_is_running(p)) {
840 status = rte_pipeline_port_in_enable(p->p, port_id);
844 /* Allocate request */
845 req = pipeline_msg_alloc();
850 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
853 /* Send request and wait for response */
854 rsp = pipeline_msg_send_recv(p, req);
859 status = rsp->status;
862 pipeline_msg_free(rsp);
/* Disable an input port: direct call when the pipeline is not running,
 * PORT_IN_DISABLE request message otherwise.
 */
868 pipeline_port_in_disable(const char *pipeline_name,
872 struct pipeline_msg_req *req;
873 struct pipeline_msg_rsp *rsp;
876 /* Check input params */
877 if (pipeline_name == NULL)
880 p = pipeline_find(pipeline_name);
882 (port_id >= p->n_ports_in))
885 if (!pipeline_is_running(p)) {
886 status = rte_pipeline_port_in_disable(p->p, port_id);
890 /* Allocate request */
891 req = pipeline_msg_alloc();
896 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
899 /* Send request and wait for response */
900 rsp = pipeline_msg_send_recv(p, req);
905 status = rsp->status;
908 pipeline_msg_free(rsp);
/* Read (and optionally clear) output port stats, using the same
 * direct-call vs request-message split as the port-in variant.
 */
914 pipeline_port_out_stats_read(const char *pipeline_name,
916 struct rte_pipeline_port_out_stats *stats,
920 struct pipeline_msg_req *req;
921 struct pipeline_msg_rsp *rsp;
924 /* Check input params */
925 if ((pipeline_name == NULL) ||
929 p = pipeline_find(pipeline_name);
931 (port_id >= p->n_ports_out))
934 if (!pipeline_is_running(p)) {
935 status = rte_pipeline_port_out_stats_read(p->p,
943 /* Allocate request */
944 req = pipeline_msg_alloc();
949 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
951 req->port_out_stats_read.clear = clear;
953 /* Send request and wait for response */
954 rsp = pipeline_msg_send_recv(p, req);
959 status = rsp->status;
961 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
964 pipeline_msg_free(rsp);
/* Read (and optionally clear) table stats; direct call vs message as
 * determined by pipeline_is_running().
 */
970 pipeline_table_stats_read(const char *pipeline_name,
972 struct rte_pipeline_table_stats *stats,
976 struct pipeline_msg_req *req;
977 struct pipeline_msg_rsp *rsp;
980 /* Check input params */
981 if ((pipeline_name == NULL) ||
985 p = pipeline_find(pipeline_name);
987 (table_id >= p->n_tables))
990 if (!pipeline_is_running(p)) {
991 status = rte_pipeline_table_stats_read(p->p,
999 /* Allocate request */
1000 req = pipeline_msg_alloc();
1005 req->type = PIPELINE_REQ_TABLE_STATS_READ;
1007 req->table_stats_read.clear = clear;
1009 /* Send request and wait for response */
1010 rsp = pipeline_msg_send_recv(p, req);
1015 status = rsp->status;
1017 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
1020 pipeline_msg_free(rsp);
/* Validate that a rule match spec is compatible with the target
 * table's configured match type (ACL / LPM / hash / array).
 */
1026 match_check(struct table_rule_match *match,
1030 struct table *table;
1032 if ((match == NULL) ||
1034 (table_id >= p->n_tables))
1037 table = &p->table[table_id];
/* The match type must agree with the table's configuration. */
1038 if (match->match_type != table->params.match_type)
1041 switch (match->match_type) {
1044 struct table_acl_params *t = &table->params.match.acl;
1045 struct table_rule_match_acl *r = &match->match.acl;
/* Rule and table must agree on IPv4 vs IPv6. */
1047 if ((r->ip_version && (t->ip_version == 0)) ||
1048 ((r->ip_version == 0) && t->ip_version))
/* Prefix depth limits: 32 bits for IPv4, 128 for IPv6. */
1051 if (r->ip_version) {
1052 if ((r->sa_depth > 32) ||
1056 if ((r->sa_depth > 128) ||
1057 (r->da_depth > 128))
1071 struct table_lpm_params *t = &table->params.match.lpm;
1072 struct table_rule_match_lpm *r = &match->match.lpm;
/* LPM key size 4 => IPv4, 16 => IPv6. */
1074 if ((r->ip_version && (t->key_size != 4)) ||
1075 ((r->ip_version == 0) && (t->key_size != 16)))
1078 if (r->ip_version) {
/* Validate a rule action against the table's action profile: the
 * action mask must match exactly, and each enabled action's fields
 * must be in range for this pipeline.
 */
1097 action_check(struct table_rule_action *action,
1101 struct table_action_profile *ap;
1103 if ((action == NULL) ||
1105 (table_id >= p->n_tables))
1108 ap = p->table[table_id].ap;
1109 if (action->action_mask != ap->params.action_mask)
/* FWD: target port/table ID must exist in this pipeline. */
1112 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1113 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1114 (action->fwd.id >= p->n_ports_out))
1117 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1118 (action->fwd.id >= p->n_tables))
/* MTR: the rule's TC mask must cover exactly the profile's TCs. */
1122 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1123 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
1124 uint32_t tc_mask1 = action->mtr.tc_mask;
1126 if (tc_mask1 != tc_mask0)
/* TM: subport/pipe IDs must be within the profile's hierarchy. */
1130 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1131 uint32_t n_subports_per_port =
1132 ap->params.tm.n_subports_per_port;
1133 uint32_t n_pipes_per_subport =
1134 ap->params.tm.n_pipes_per_subport;
1135 uint32_t subport_id = action->tm.subport_id;
1136 uint32_t pipe_id = action->tm.pipe_id;
1138 if ((subport_id >= n_subports_per_port) ||
1139 (pipe_id >= n_pipes_per_subport))
/* ENCAP: the requested encap type must be enabled in the profile. */
1143 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1144 uint64_t encap_mask = ap->params.encap.encap_mask;
1145 enum rte_table_action_encap_type type = action->encap.type;
1147 if ((encap_mask & (1LLU << type)) == 0)
/* NAT: IP version of the rule must match the profile's. */
1151 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1152 int ip_version0 = ap->params.common.ip_version;
1153 int ip_version1 = action->nat.ip_version;
1155 if ((ip_version1 && (ip_version0 == 0)) ||
1156 ((ip_version1 == 0) && ip_version0))
/* Validate a default-entry action: only FWD is allowed, and its
 * target port/table ID must exist in this pipeline.
 */
1164 action_default_check(struct table_rule_action *action,
1168 if ((action == NULL) ||
1169 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1171 (table_id >= p->n_tables))
1174 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1175 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1176 (action->fwd.id >= p->n_ports_out))
1179 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1180 (action->fwd.id >= p->n_tables))
/* Low-level (librte_table) representation of a rule match key; one
 * member per supported table type.
 */
1187 union table_rule_match_low_level {
1188 struct rte_table_acl_rule_add_params acl_add;
1189 struct rte_table_acl_rule_delete_params acl_delete;
1190 struct rte_table_array_key array;
1191 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1192 struct rte_table_lpm_key lpm_ipv4;
1193 struct rte_table_lpm_ipv6_key lpm_ipv6;
/* Forward declarations: high-level to low-level converters defined
 * later in the file.
 */
1197 match_convert(struct table_rule_match *mh,
1198 union table_rule_match_low_level *ml,
1202 action_convert(struct rte_table_action *a,
1203 struct table_rule_action *action,
1204 struct rte_pipeline_table_entry *data);
/* Minimal table handle used by the bulk-add helper; presumably part
 * of struct table_ll (its opening line is not visible here).
 */
1207 struct rte_pipeline *p;
1209 struct rte_table_action *a;
/* Convert a list of (match, action) rules to their low-level form and
 * add them to the table, in one bulk call when the table supports it
 * or one entry at a time otherwise. All scratch arrays are freed on
 * every exit path via the goto cleanup label.
 */
1214 table_rule_add_bulk_ll(struct table_ll *table,
1215 struct table_rule_list *list,
1218 union table_rule_match_low_level *match_ll = NULL;
1219 uint8_t *action_ll = NULL;
1220 void **match_ll_ptr = NULL;
1221 struct rte_pipeline_table_entry **action_ll_ptr = NULL;
1222 struct rte_pipeline_table_entry **entries_ptr = NULL;
1224 struct table_rule *rule;
/* Count the rules (n is computed over the list). */
1229 TAILQ_FOREACH(rule, list, node)
1232 /* Memory allocation */
1233 match_ll = calloc(n, sizeof(union table_rule_match_low_level))
1234 action_ll = calloc(n, TABLE_RULE_ACTION_SIZE_MAX);
1236 match_ll_ptr = calloc(n, sizeof(void *));
1237 action_ll_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1239 entries_ptr = calloc(n, sizeof(struct rte_pipeline_table_entry *));
1240 found = calloc(n, sizeof(int));
/* Bail out if any allocation failed; cleanup frees what succeeded. */
1242 if (match_ll == NULL ||
1243 action_ll == NULL ||
1244 match_ll_ptr == NULL ||
1245 action_ll_ptr == NULL ||
1246 entries_ptr == NULL ||
1249 goto table_rule_add_bulk_ll_free;
/* Build the per-rule pointer arrays into the flat buffers. */
1253 for (i = 0; i < n; i++) {
1254 match_ll_ptr[i] = (void *)&match_ll[i];
1255 action_ll_ptr[i] = (struct rte_pipeline_table_entry *)
1256 &action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
1259 /* Rule (match, action) conversion */
1261 TAILQ_FOREACH(rule, list, node) {
1262 status = match_convert(&rule->match, match_ll_ptr[i], 1);
1264 goto table_rule_add_bulk_ll_free;
1266 status = action_convert(table->a, &rule->action, action_ll_ptr[i]);
1268 goto table_rule_add_bulk_ll_free;
1273 /* Add rule (match, action) to table */
1274 if (table->bulk_supported) {
1275 status = rte_pipeline_table_entry_add_bulk(table->p,
1283 goto table_rule_add_bulk_ll_free;
/* Fallback: add entries one by one. */
1285 for (i = 0; i < n; i++) {
1286 status = rte_pipeline_table_entry_add(table->p,
1294 goto table_rule_add_bulk_ll_free;
1303 /* Write back to the rule list. */
1305 TAILQ_FOREACH(rule, list, node) {
1309 rule->data = entries_ptr[i];
1317 table_rule_add_bulk_ll_free:
1320 free(action_ll_ptr);
/* Add one (match, action) rule to a table. Direct add when the
 * pipeline is not running, TABLE_RULE_ADD request message otherwise.
 * The rule object is recorded in the master-side table shadow via
 * table_rule_add() on success.
 */
1329 pipeline_table_rule_add(const char *pipeline_name,
1331 struct table_rule_match *match,
1332 struct table_rule_action *action)
1335 struct table *table;
1336 struct pipeline_msg_req *req;
1337 struct pipeline_msg_rsp *rsp;
1338 struct table_rule *rule;
1341 /* Check input params */
1342 if ((pipeline_name == NULL) ||
1347 p = pipeline_find(pipeline_name);
1349 (table_id >= p->n_tables) ||
1350 match_check(match, p, table_id) ||
1351 action_check(action, p, table_id))
1354 table = &p->table[table_id];
/* Shadow copy of the rule, owned by the master-side table. */
1356 rule = calloc(1, sizeof(struct table_rule));
1360 memcpy(&rule->match, match, sizeof(*match));
1361 memcpy(&rule->action, action, sizeof(*action));
1363 if (!pipeline_is_running(p)) {
1364 union table_rule_match_low_level match_ll;
1365 struct rte_pipeline_table_entry *data_in, *data_out;
1369 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1370 if (buffer == NULL) {
1375 /* Table match-action rule conversion */
1376 data_in = (struct rte_pipeline_table_entry *)buffer;
1378 status = match_convert(match, &match_ll, 1);
1385 status = action_convert(table->a, action, data_in);
1392 /* Add rule (match, action) to table */
1393 status = rte_pipeline_table_entry_add(p->p,
1405 /* Write Response */
1406 rule->data = data_out;
1407 table_rule_add(table, rule);
1413 /* Allocate request */
1414 req = pipeline_msg_alloc();
1421 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1423 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1424 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1426 /* Send request and wait for response */
1427 rsp = pipeline_msg_send_recv(p, req);
1434 status = rsp->status;
/* On success, keep the data plane's entry pointer in the shadow. */
1436 rule->data = rsp->table_rule_add.data;
1437 table_rule_add(table, rule);
1442 pipeline_msg_free(rsp);
/* Set a table's default (miss) entry. Only FWD actions are accepted
 * (see action_default_check). Direct add vs request message as usual.
 */
1448 pipeline_table_rule_add_default(const char *pipeline_name,
1450 struct table_rule_action *action)
1453 struct table *table;
1454 struct pipeline_msg_req *req;
1455 struct pipeline_msg_rsp *rsp;
1456 struct table_rule *rule;
1459 /* Check input params */
1460 if ((pipeline_name == NULL) ||
1464 p = pipeline_find(pipeline_name);
1466 (table_id >= p->n_tables) ||
1467 action_default_check(action, p, table_id))
1470 table = &p->table[table_id];
1472 rule = calloc(1, sizeof(struct table_rule));
1476 memcpy(&rule->action, action, sizeof(*action));
1478 if (!pipeline_is_running(p)) {
1479 struct rte_pipeline_table_entry *data_in, *data_out;
1482 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1483 if (buffer == NULL) {
/* Build the low-level default entry directly (FWD only). */
1489 data_in = (struct rte_pipeline_table_entry *)buffer;
1491 data_in->action = action->fwd.action;
1492 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1493 data_in->port_id = action->fwd.id;
1494 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1495 data_in->table_id = action->fwd.id;
1497 /* Add default rule to table */
1498 status = rte_pipeline_table_default_entry_add(p->p,
1508 /* Write Response */
1509 rule->data = data_out;
1510 table_rule_default_add(table, rule);
1516 /* Allocate request */
1517 req = pipeline_msg_alloc();
1524 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1526 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1528 /* Send request and wait for response */
1529 rsp = pipeline_msg_send_recv(p, req);
1536 status = rsp->status;
1538 rule->data = rsp->table_rule_add_default.data;
1539 table_rule_default_add(table, rule);
1544 pipeline_msg_free(rsp);
/* Drain and free every rule left on the list; presumably returns the
 * number of rules freed (callers use it as "rules not added") — the
 * return statements are not visible here, TODO confirm.
 */
1550 table_rule_list_free(struct table_rule_list *list)
1558 struct table_rule *rule;
1560 rule = TAILQ_FIRST(list);
1564 TAILQ_REMOVE(list, rule, node);
/* Bulk rule add. Ownership of `list` transfers to this function: it is
 * always consumed (freed) on every path, success or failure. Reports
 * how many rules were added and how many were not.
 */
1574 pipeline_table_rule_add_bulk(const char *pipeline_name,
1576 struct table_rule_list *list,
1577 uint32_t *n_rules_added,
1578 uint32_t *n_rules_not_added)
1581 struct table *table;
1582 struct pipeline_msg_req *req;
1583 struct pipeline_msg_rsp *rsp;
1584 struct table_rule *rule;
1587 /* Check input params */
1588 if ((pipeline_name == NULL) ||
1590 TAILQ_EMPTY(list) ||
1591 (n_rules_added == NULL) ||
1592 (n_rules_not_added == NULL)) {
1593 table_rule_list_free(list)
1597 p = pipeline_find(pipeline_name);
1599 (table_id >= p->n_tables)) {
1600 table_rule_list_free(list);
1604 table = &p->table[table_id];
/* Validate every rule before touching the table. */
1606 TAILQ_FOREACH(rule, list, node)
1607 if (match_check(&rule->match, p, table_id) ||
1608 action_check(&rule->action, p, table_id)) {
1609 table_rule_list_free(list);
1613 if (!pipeline_is_running(p)) {
/* Direct path: drive the low-level bulk helper ourselves. */
1614 struct table_ll table_ll = {
1616 .table_id = table_id,
/* Only ACL tables take the true bulk-add fast path. */
1618 .bulk_supported = table->params.match_type == TABLE_ACL,
1621 status = table_rule_add_bulk_ll(&table_ll, list, n_rules_added);
1623 table_rule_list_free(list);
/* Record added rules in the shadow; leftovers are freed+counted. */
1627 table_rule_add_bulk(table, list, *n_rules_added);
1628 *n_rules_not_added = table_rule_list_free(list);
1632 /* Allocate request */
1633 req = pipeline_msg_alloc();
1635 table_rule_list_free(list);
1640 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
/* The list pointer itself is handed to the data plane thread. */
1642 req->table_rule_add_bulk.list = list;
1643 req->table_rule_add_bulk.bulk = table->params.match_type == TABLE_ACL;
1645 /* Send request and wait for response */
1646 rsp = pipeline_msg_send_recv(p, req);
1648 table_rule_list_free(list);
1653 status = rsp->status;
1655 *n_rules_added = rsp->table_rule_add_bulk.n_rules;
1657 table_rule_add_bulk(table, list, *n_rules_added);
1658 *n_rules_not_added = table_rule_list_free(list);
1660 table_rule_list_free(list);
1664 pipeline_msg_free(rsp);
/* Delete the rule matching `match`. Direct delete when the pipeline is
 * not running, TABLE_RULE_DELETE request otherwise; the master-side
 * shadow is updated via table_rule_delete() on success.
 */
1670 pipeline_table_rule_delete(const char *pipeline_name,
1672 struct table_rule_match *match)
1675 struct table *table;
1676 struct pipeline_msg_req *req;
1677 struct pipeline_msg_rsp *rsp;
1680 /* Check input params */
1681 if ((pipeline_name == NULL) ||
1685 p = pipeline_find(pipeline_name);
1687 (table_id >= p->n_tables) ||
1688 match_check(match, p, table_id))
1691 table = &p->table[table_id];
1693 if (!pipeline_is_running(p)) {
1694 union table_rule_match_low_level match_ll;
/* Key-only conversion (third arg 0: no ACL add params needed). */
1697 status = match_convert(match, &match_ll, 0);
1701 status = rte_pipeline_table_entry_delete(p->p,
1708 table_rule_delete(table, match);
1713 /* Allocate request */
1714 req = pipeline_msg_alloc();
1719 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1721 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1723 /* Send request and wait for response */
1724 rsp = pipeline_msg_send_recv(p, req);
1729 status = rsp->status;
1731 table_rule_delete(table, match);
1734 pipeline_msg_free(rsp);
/* Delete a table's default (miss) entry; direct call vs
 * TABLE_RULE_DELETE_DEFAULT request as determined by run state.
 */
1740 pipeline_table_rule_delete_default(const char *pipeline_name,
1744 struct table *table;
1745 struct pipeline_msg_req *req;
1746 struct pipeline_msg_rsp *rsp;
1749 /* Check input params */
1750 if (pipeline_name == NULL)
1753 p = pipeline_find(pipeline_name);
1755 (table_id >= p->n_tables))
1758 table = &p->table[table_id];
1760 if (!pipeline_is_running(p)) {
1761 status = rte_pipeline_table_default_entry_delete(p->p,
1766 table_rule_default_delete(table);
1771 /* Allocate request */
1772 req = pipeline_msg_alloc();
1777 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1780 /* Send request and wait for response */
1781 rsp = pipeline_msg_send_recv(p, req);
1786 status = rsp->status;
1788 table_rule_default_delete(table);
1791 pipeline_msg_free(rsp);
/* Read (and optionally clear) the action stats counters of one rule,
 * located through the master-side shadow (table_rule_find).
 */
1797 pipeline_table_rule_stats_read(const char *pipeline_name,
1799 struct table_rule_match *match,
1800 struct rte_table_action_stats_counters *stats,
1804 struct table *table;
1805 struct pipeline_msg_req *req;
1806 struct pipeline_msg_rsp *rsp;
1807 struct table_rule *rule;
1810 /* Check input params */
1811 if ((pipeline_name == NULL) ||
1816 p = pipeline_find(pipeline_name);
1818 (table_id >= p->n_tables) ||
1819 match_check(match, p, table_id))
1822 table = &p->table[table_id];
/* The rule must exist in the shadow to resolve its entry pointer. */
1823 rule = table_rule_find(table, match);
1827 if (!pipeline_is_running(p)) {
1828 status = rte_table_action_stats_read(table->a,
1836 /* Allocate request */
1837 req = pipeline_msg_alloc();
1842 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1844 req->table_rule_stats_read.data = rule->data;
1845 req->table_rule_stats_read.clear = clear;
1847 /* Send request and wait for response */
1848 rsp = pipeline_msg_send_recv(p, req);
1853 status = rsp->status;
1855 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1858 pipeline_msg_free(rsp);
/* Register a meter profile with a table's action handle; direct call
 * vs TABLE_MTR_PROFILE_ADD request as determined by run state.
 */
1864 pipeline_table_mtr_profile_add(const char *pipeline_name,
1866 uint32_t meter_profile_id,
1867 struct rte_table_action_meter_profile *profile)
1870 struct pipeline_msg_req *req;
1871 struct pipeline_msg_rsp *rsp;
1874 /* Check input params */
1875 if ((pipeline_name == NULL) ||
1879 p = pipeline_find(pipeline_name);
1881 (table_id >= p->n_tables))
1884 if (!pipeline_is_running(p)) {
1885 struct rte_table_action *a = p->table[table_id].a;
1887 status = rte_table_action_meter_profile_add(a,
1894 /* Allocate request */
1895 req = pipeline_msg_alloc();
1900 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1902 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1903 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1905 /* Send request and wait for response */
1906 rsp = pipeline_msg_send_recv(p, req);
1911 status = rsp->status;
1914 pipeline_msg_free(rsp);
/* Unregister a meter profile; direct call vs TABLE_MTR_PROFILE_DELETE
 * request as determined by run state.
 */
1920 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1922 uint32_t meter_profile_id)
1925 struct pipeline_msg_req *req;
1926 struct pipeline_msg_rsp *rsp;
1929 /* Check input params */
1930 if (pipeline_name == NULL)
1933 p = pipeline_find(pipeline_name);
1935 (table_id >= p->n_tables))
1938 if (!pipeline_is_running(p)) {
1939 struct rte_table_action *a = p->table[table_id].a;
1941 status = rte_table_action_meter_profile_delete(a,
1947 /* Allocate request */
1948 req = pipeline_msg_alloc();
1953 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1955 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1957 /* Send request and wait for response */
1958 rsp = pipeline_msg_send_recv(p, req);
1963 status = rsp->status;
1966 pipeline_msg_free(rsp);
/*
 * Control-plane API: read (and optionally clear) the meter counters of one
 * table rule; results are copied into the caller-supplied *stats.
 * NOTE(review): subsampled extract -- missing lines include the parameter
 * list entries for data/tc_mask/clear, which are used below; verify against
 * the full source.
 */
1972 pipeline_table_rule_mtr_read(const char *pipeline_name,
1976 struct rte_table_action_mtr_counters *stats,
1980 struct pipeline_msg_req *req;
1981 struct pipeline_msg_rsp *rsp;
1984 /* Check input params */
1985 if ((pipeline_name == NULL) ||
1990 p = pipeline_find(pipeline_name);
1992 (table_id >= p->n_tables))
/* Fast path: pipeline not running, read the counters directly. */
1995 if (!pipeline_is_running(p)) {
1996 struct rte_table_action *a = p->table[table_id].a;
1998 status = rte_table_action_meter_read(a,
/* Slow path: delegate the read to the data-plane thread via message. */
2007 /* Allocate request */
2008 req = pipeline_msg_alloc();
2013 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
2015 req->table_rule_mtr_read.data = data;
2016 req->table_rule_mtr_read.tc_mask = tc_mask;
2017 req->table_rule_mtr_read.clear = clear;
2019 /* Send request and wait for response */
2020 rsp = pipeline_msg_send_recv(p, req);
2025 status = rsp->status;
/* Copy the counters produced by the data-plane thread back to the caller. */
2027 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
2030 pipeline_msg_free(rsp);
/*
 * Control-plane API: update the DSCP translation table of one pipeline
 * table, restricted to the entries selected by dscp_mask.
 * NOTE(review): subsampled extract -- interior lines (returns, braces,
 * part of the parameter list) are missing from view.
 */
2036 pipeline_table_dscp_table_update(const char *pipeline_name,
2039 struct rte_table_action_dscp_table *dscp_table)
2042 struct pipeline_msg_req *req;
2043 struct pipeline_msg_rsp *rsp;
2046 /* Check input params */
2047 if ((pipeline_name == NULL) ||
2048 (dscp_table == NULL))
2051 p = pipeline_find(pipeline_name);
2053 (table_id >= p->n_tables))
/* Fast path: pipeline not running, update the table action directly. */
2056 if (!pipeline_is_running(p)) {
2057 struct rte_table_action *a = p->table[table_id].a;
2059 status = rte_table_action_dscp_table_update(a,
/* Slow path: copy the DSCP table into the request and message the
 * data-plane thread that owns the pipeline. */
2066 /* Allocate request */
2067 req = pipeline_msg_alloc();
2072 req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
2074 req->table_dscp_table_update.dscp_mask = dscp_mask;
2075 memcpy(&req->table_dscp_table_update.dscp_table,
2076 dscp_table, sizeof(*dscp_table));
2078 /* Send request and wait for response */
2079 rsp = pipeline_msg_send_recv(p, req);
2084 status = rsp->status;
2087 pipeline_msg_free(rsp);
/*
 * Control-plane API: read (and optionally clear) the TTL counters of one
 * table rule; results are copied into the caller-supplied *stats.
 * NOTE(review): subsampled extract -- the data/clear parameters used below
 * are declared on lines missing from this view.
 */
2093 pipeline_table_rule_ttl_read(const char *pipeline_name,
2096 struct rte_table_action_ttl_counters *stats,
2100 struct pipeline_msg_req *req;
2101 struct pipeline_msg_rsp *rsp;
2104 /* Check input params */
2105 if ((pipeline_name == NULL) ||
2110 p = pipeline_find(pipeline_name);
2112 (table_id >= p->n_tables))
/* Fast path: pipeline not running, read the counters directly. */
2115 if (!pipeline_is_running(p)) {
2116 struct rte_table_action *a = p->table[table_id].a;
2118 status = rte_table_action_ttl_read(a,
/* Slow path: delegate the read to the data-plane thread via message. */
2126 /* Allocate request */
2127 req = pipeline_msg_alloc();
2132 req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
2134 req->table_rule_ttl_read.data = data;
2135 req->table_rule_ttl_read.clear = clear;
2137 /* Send request and wait for response */
2138 rsp = pipeline_msg_send_recv(p, req);
2143 status = rsp->status;
/* Copy the counters produced by the data-plane thread back to the caller. */
2145 memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
2148 pipeline_msg_free(rsp);
2154 * Data plane threads: message handling
/*
 * Data-plane side: non-destructive single-consumer dequeue of one request
 * from the pipeline's request ring. Returns the request, or presumably NULL
 * when the ring is empty (the status check/return lines are missing from
 * this subsampled view -- confirm against the full source).
 */
2156 static inline struct pipeline_msg_req *
2157 pipeline_msg_recv(struct rte_ring *msgq_req)
2159 struct pipeline_msg_req *req;
2161 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/*
 * Data-plane side: enqueue one response on the response ring, busy-retrying
 * while the ring is full (-ENOBUFS). Single-producer enqueue: only the
 * owning data-plane thread writes this ring.
 */
2170 pipeline_msg_send(struct rte_ring *msgq_rsp,
2171 struct pipeline_msg_rsp *rsp)
2176 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
2177 } while (status == -ENOBUFS);
/*
 * Handler: read (and optionally clear) the stats of one pipeline input port.
 * The request buffer is reused in place as the response buffer (req and rsp
 * alias the same allocation), a pattern shared by all handlers below.
 */
2180 static struct pipeline_msg_rsp *
2181 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
2182 struct pipeline_msg_req *req)
2184 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2185 uint32_t port_id = req->id;
2186 int clear = req->port_in_stats_read.clear;
2188 rsp->status = rte_pipeline_port_in_stats_read(p->p,
2190 &rsp->port_in_stats_read.stats,
/*
 * Handler: enable one pipeline input port. Response aliases the request
 * buffer; only rsp->status is filled in.
 */
2196 static struct pipeline_msg_rsp *
2197 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
2198 struct pipeline_msg_req *req)
2200 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2201 uint32_t port_id = req->id;
2203 rsp->status = rte_pipeline_port_in_enable(p->p,
/*
 * Handler: disable one pipeline input port. Response aliases the request
 * buffer; only rsp->status is filled in.
 */
2209 static struct pipeline_msg_rsp *
2210 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
2211 struct pipeline_msg_req *req)
2213 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2214 uint32_t port_id = req->id;
2216 rsp->status = rte_pipeline_port_in_disable(p->p,
/*
 * Handler: read (and optionally clear) the stats of one pipeline output
 * port. Response aliases the request buffer.
 */
2222 static struct pipeline_msg_rsp *
2223 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
2224 struct pipeline_msg_req *req)
2226 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2227 uint32_t port_id = req->id;
2228 int clear = req->port_out_stats_read.clear;
2230 rsp->status = rte_pipeline_port_out_stats_read(p->p,
2232 &rsp->port_out_stats_read.stats,
/*
 * Handler: read (and optionally clear) the stats of one pipeline table.
 * NOTE(review): the local is named port_id but carries req->id used as a
 * table id here -- naming quirk inherited from the upstream source.
 */
2238 static struct pipeline_msg_rsp *
2239 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
2240 struct pipeline_msg_req *req)
2242 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2243 uint32_t port_id = req->id;
2244 int clear = req->table_stats_read.clear;
2246 rsp->status = rte_pipeline_table_stats_read(p->p,
2248 &rsp->table_stats_read.stats,
/*
 * Split an IPv6 prefix depth (0..128) into four per-32-bit-word depths in
 * depth32[0..3], e.g. depth 60 -> {32, 28, 0, 0}. Only the non-saturated
 * word assignments are visible here; the saturated ones (=32) and the
 * depth-validity checks are on lines missing from this subsampled view.
 */
2255 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
2260 switch (depth / 32) {
2270 depth32[1] = depth - 32;
2278 depth32[2] = depth - 64;
2286 depth32[3] = depth - 96;
/*
 * Convert a high-level table rule match (mh) into the low-level,
 * table-type-specific representation (ml) consumed by librte_table:
 * ACL field_value arrays, array position, hash key, or LPM key.
 * The third parameter (passed as literal 1/0 by callers) selects whether
 * the ACL conversion fills ml->acl_add or ml->acl_delete -- presumably
 * named add/ll_add; confirm in the full source, its declaration line is
 * missing from this subsampled view.
 */
2302 match_convert(struct table_rule_match *mh,
2303 union table_rule_match_low_level *ml,
2306 memset(ml, 0, sizeof(*ml));
2308 switch (mh->match_type) {
/* ACL + IPv4: fields are proto, src addr/depth, dst addr/depth, ports. */
2310 if (mh->match.acl.ip_version)
2312 ml->acl_add.field_value[0].value.u8 =
2313 mh->match.acl.proto;
2314 ml->acl_add.field_value[0].mask_range.u8 =
2315 mh->match.acl.proto_mask;
2317 ml->acl_add.field_value[1].value.u32 =
2318 mh->match.acl.ipv4.sa;
2319 ml->acl_add.field_value[1].mask_range.u32 =
2320 mh->match.acl.sa_depth;
2322 ml->acl_add.field_value[2].value.u32 =
2323 mh->match.acl.ipv4.da;
2324 ml->acl_add.field_value[2].mask_range.u32 =
2325 mh->match.acl.da_depth;
2327 ml->acl_add.field_value[3].value.u16 =
2329 ml->acl_add.field_value[3].mask_range.u16 =
2332 ml->acl_add.field_value[4].value.u16 =
2334 ml->acl_add.field_value[4].mask_range.u16 =
2337 ml->acl_add.priority =
2338 (int32_t) mh->match.acl.priority;
/* Same IPv4 field layout for the delete variant (no priority needed). */
2340 ml->acl_delete.field_value[0].value.u8 =
2341 mh->match.acl.proto;
2342 ml->acl_delete.field_value[0].mask_range.u8 =
2343 mh->match.acl.proto_mask;
2345 ml->acl_delete.field_value[1].value.u32 =
2346 mh->match.acl.ipv4.sa;
2347 ml->acl_delete.field_value[1].mask_range.u32 =
2348 mh->match.acl.sa_depth;
2350 ml->acl_delete.field_value[2].value.u32 =
2351 mh->match.acl.ipv4.da;
2352 ml->acl_delete.field_value[2].mask_range.u32 =
2353 mh->match.acl.da_depth;
2355 ml->acl_delete.field_value[3].value.u16 =
2357 ml->acl_delete.field_value[3].mask_range.u16 =
2360 ml->acl_delete.field_value[4].value.u16 =
2362 ml->acl_delete.field_value[4].mask_range.u16 =
/* ACL + IPv6 (add): the 128-bit addresses are viewed as four big-endian
 * 32-bit words; each word gets its own value + per-word depth computed by
 * match_convert_ipv6_depth(). */
2368 (uint32_t *) mh->match.acl.ipv6.sa;
2370 (uint32_t *) mh->match.acl.ipv6.da;
2371 uint32_t sa32_depth[4], da32_depth[4];
2374 status = match_convert_ipv6_depth(
2375 mh->match.acl.sa_depth,
2380 status = match_convert_ipv6_depth(
2381 mh->match.acl.da_depth,
2386 ml->acl_add.field_value[0].value.u8 =
2387 mh->match.acl.proto;
2388 ml->acl_add.field_value[0].mask_range.u8 =
2389 mh->match.acl.proto_mask;
2391 ml->acl_add.field_value[1].value.u32 =
2392 rte_be_to_cpu_32(sa32[0]);
2393 ml->acl_add.field_value[1].mask_range.u32 =
2395 ml->acl_add.field_value[2].value.u32 =
2396 rte_be_to_cpu_32(sa32[1]);
2397 ml->acl_add.field_value[2].mask_range.u32 =
2399 ml->acl_add.field_value[3].value.u32 =
2400 rte_be_to_cpu_32(sa32[2]);
2401 ml->acl_add.field_value[3].mask_range.u32 =
2403 ml->acl_add.field_value[4].value.u32 =
2404 rte_be_to_cpu_32(sa32[3]);
2405 ml->acl_add.field_value[4].mask_range.u32 =
2408 ml->acl_add.field_value[5].value.u32 =
2409 rte_be_to_cpu_32(da32[0]);
2410 ml->acl_add.field_value[5].mask_range.u32 =
2412 ml->acl_add.field_value[6].value.u32 =
2413 rte_be_to_cpu_32(da32[1]);
2414 ml->acl_add.field_value[6].mask_range.u32 =
2416 ml->acl_add.field_value[7].value.u32 =
2417 rte_be_to_cpu_32(da32[2]);
2418 ml->acl_add.field_value[7].mask_range.u32 =
2420 ml->acl_add.field_value[8].value.u32 =
2421 rte_be_to_cpu_32(da32[3]);
2422 ml->acl_add.field_value[8].mask_range.u32 =
2425 ml->acl_add.field_value[9].value.u16 =
2427 ml->acl_add.field_value[9].mask_range.u16 =
2430 ml->acl_add.field_value[10].value.u16 =
2432 ml->acl_add.field_value[10].mask_range.u16 =
2435 ml->acl_add.priority =
2436 (int32_t) mh->match.acl.priority;
/* ACL + IPv6 (delete): mirrors the add layout above. */
2439 (uint32_t *) mh->match.acl.ipv6.sa;
2441 (uint32_t *) mh->match.acl.ipv6.da;
2442 uint32_t sa32_depth[4], da32_depth[4];
2445 status = match_convert_ipv6_depth(
2446 mh->match.acl.sa_depth,
2451 status = match_convert_ipv6_depth(
2452 mh->match.acl.da_depth,
2457 ml->acl_delete.field_value[0].value.u8 =
2458 mh->match.acl.proto;
2459 ml->acl_delete.field_value[0].mask_range.u8 =
2460 mh->match.acl.proto_mask;
2462 ml->acl_delete.field_value[1].value.u32 =
2463 rte_be_to_cpu_32(sa32[0]);
2464 ml->acl_delete.field_value[1].mask_range.u32 =
2466 ml->acl_delete.field_value[2].value.u32 =
2467 rte_be_to_cpu_32(sa32[1]);
2468 ml->acl_delete.field_value[2].mask_range.u32 =
2470 ml->acl_delete.field_value[3].value.u32 =
2471 rte_be_to_cpu_32(sa32[2]);
2472 ml->acl_delete.field_value[3].mask_range.u32 =
2474 ml->acl_delete.field_value[4].value.u32 =
2475 rte_be_to_cpu_32(sa32[3]);
2476 ml->acl_delete.field_value[4].mask_range.u32 =
2479 ml->acl_delete.field_value[5].value.u32 =
2480 rte_be_to_cpu_32(da32[0]);
2481 ml->acl_delete.field_value[5].mask_range.u32 =
2483 ml->acl_delete.field_value[6].value.u32 =
2484 rte_be_to_cpu_32(da32[1]);
2485 ml->acl_delete.field_value[6].mask_range.u32 =
2487 ml->acl_delete.field_value[7].value.u32 =
2488 rte_be_to_cpu_32(da32[2]);
2489 ml->acl_delete.field_value[7].mask_range.u32 =
2491 ml->acl_delete.field_value[8].value.u32 =
2492 rte_be_to_cpu_32(da32[3]);
2493 ml->acl_delete.field_value[8].mask_range.u32 =
2496 ml->acl_delete.field_value[9].value.u16 =
2498 ml->acl_delete.field_value[9].mask_range.u16 =
2501 ml->acl_delete.field_value[10].value.u16 =
2503 ml->acl_delete.field_value[10].mask_range.u16 =
/* Array table: the match is simply a position. */
2509 ml->array.pos = mh->match.array.pos;
/* Hash table: copy the raw key bytes. */
2513 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
/* LPM table: IPv4 or IPv6 prefix depending on ip_version. */
2517 if (mh->match.lpm.ip_version) {
2518 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
2519 ml->lpm_ipv4.depth = mh->match.lpm.depth;
2521 memcpy(ml->lpm_ipv6.ip,
2522 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
2523 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/*
 * Apply every action enabled in action->action_mask to the table entry
 * being built in *data, one rte_table_action_apply() call per action type.
 * The per-call error checks and returns sit on lines missing from this
 * subsampled view; the visible shape is one guarded apply per action bit.
 */
2534 action_convert(struct rte_table_action *a,
2535 struct table_rule_action *action,
2536 struct rte_pipeline_table_entry *data)
2541 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2542 status = rte_table_action_apply(a,
2544 RTE_TABLE_ACTION_FWD,
2551 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2552 status = rte_table_action_apply(a,
2554 RTE_TABLE_ACTION_LB,
2561 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2562 status = rte_table_action_apply(a,
2564 RTE_TABLE_ACTION_MTR,
2571 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2572 status = rte_table_action_apply(a,
2574 RTE_TABLE_ACTION_TM,
2581 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2582 status = rte_table_action_apply(a,
2584 RTE_TABLE_ACTION_ENCAP,
2591 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2592 status = rte_table_action_apply(a,
2594 RTE_TABLE_ACTION_NAT,
2601 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2602 status = rte_table_action_apply(a,
2604 RTE_TABLE_ACTION_TTL,
2611 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2612 status = rte_table_action_apply(a,
2614 RTE_TABLE_ACTION_STATS,
2621 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2622 status = rte_table_action_apply(a,
2624 RTE_TABLE_ACTION_TIME,
2631 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2632 status = rte_table_action_apply(a,
2634 RTE_TABLE_ACTION_SYM_CRYPTO,
2635 &action->sym_crypto);
2641 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
2642 status = rte_table_action_apply(a,
2644 RTE_TABLE_ACTION_TAG,
2651 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2652 status = rte_table_action_apply(a,
2654 RTE_TABLE_ACTION_DECAP,
/*
 * Handler: add one rule to a pipeline table. Converts the high-level match
 * and action into low-level form, then inserts the entry; the resulting
 * entry pointer (data_out) is returned to the control thread in the
 * response so later per-rule reads can reference it.
 */
2664 static struct pipeline_msg_rsp *
2665 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
2666 struct pipeline_msg_req *req)
2668 union table_rule_match_low_level match_ll;
2669 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2670 struct table_rule_match *match = &req->table_rule_add.match;
2671 struct table_rule_action *action = &req->table_rule_add.action;
2672 struct rte_pipeline_table_entry *data_in, *data_out;
2673 uint32_t table_id = req->id;
2674 int key_found, status;
2675 struct rte_table_action *a = p->table_data[table_id].a;
/* Build the table entry in the thread's scratch buffer. */
2678 memset(p->buffer, 0, sizeof(p->buffer));
2679 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2681 status = match_convert(match, &match_ll, 1);
2687 status = action_convert(a, action, data_in);
2693 status = rte_pipeline_table_entry_add(p->p,
2704 /* Write response */
2706 rsp->table_rule_add.data = data_out;
/*
 * Handler: install the default (miss) rule of a pipeline table. Only the
 * forward action is supported for the default entry: the entry carries the
 * fwd action plus a port id or next-table id depending on the action type.
 */
2711 static struct pipeline_msg_rsp *
2712 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2713 struct pipeline_msg_req *req)
2715 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2716 struct table_rule_action *action = &req->table_rule_add_default.action;
2717 struct rte_pipeline_table_entry *data_in, *data_out;
2718 uint32_t table_id = req->id;
/* Build the default entry in the thread's scratch buffer. */
2722 memset(p->buffer, 0, sizeof(p->buffer));
2723 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2725 data_in->action = action->fwd.action;
2726 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2727 data_in->port_id = action->fwd.id;
2728 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2729 data_in->table_id = action->fwd.id;
2731 /* Add default rule to table */
2732 status = rte_pipeline_table_default_entry_add(p->p,
2741 /* Write response */
2743 rsp->table_rule_add_default.data = data_out;
/*
 * Handler: bulk-add a list of rules to a pipeline table via the shared
 * low-level helper table_rule_add_bulk_ll(). On failure the response
 * reports zero rules added; on success it reports how many were inserted.
 */
2748 static struct pipeline_msg_rsp *
2749 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2750 struct pipeline_msg_req *req)
2752 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2754 uint32_t table_id = req->id;
2755 struct table_rule_list *list = req->table_rule_add_bulk.list;
2756 uint32_t bulk = req->table_rule_add_bulk.bulk;
2758 uint32_t n_rules_added;
/* Package the low-level table handle for the shared bulk-add helper. */
2761 struct table_ll table_ll = {
2763 .table_id = table_id,
2764 .a = p->table_data[table_id].a,
2765 .bulk_supported = bulk,
2768 status = table_rule_add_bulk_ll(&table_ll, list, &n_rules_added);
2771 rsp->table_rule_add_bulk.n_rules = 0;
2775 /* Write response */
2777 rsp->table_rule_add_bulk.n_rules = n_rules_added;
/*
 * Handler: delete one rule from a pipeline table. The match is converted
 * with the delete variant of the low-level layout (third argument 0 to
 * match_convert) before the entry is removed.
 */
2781 static struct pipeline_msg_rsp *
2782 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2783 struct pipeline_msg_req *req)
2785 union table_rule_match_low_level match_ll;
2786 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2787 struct table_rule_match *match = &req->table_rule_delete.match;
2788 uint32_t table_id = req->id;
2789 int key_found, status;
2791 status = match_convert(match, &match_ll, 0);
2797 rsp->status = rte_pipeline_table_entry_delete(p->p,
/*
 * Handler: delete the default (miss) rule of a pipeline table. Response
 * aliases the request buffer; only rsp->status is filled in.
 */
2806 static struct pipeline_msg_rsp *
2807 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2808 struct pipeline_msg_req *req)
2810 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2811 uint32_t table_id = req->id;
2813 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/*
 * Handler: read (and optionally clear) per-rule stats. The rule is
 * identified by the opaque entry pointer (data) that was handed back to
 * the control thread when the rule was added.
 */
2820 static struct pipeline_msg_rsp *
2821 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2822 struct pipeline_msg_req *req)
2824 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2825 uint32_t table_id = req->id;
2826 void *data = req->table_rule_stats_read.data;
2827 int clear = req->table_rule_stats_read.clear;
2828 struct rte_table_action *a = p->table_data[table_id].a;
2830 rsp->status = rte_table_action_stats_read(a,
2832 &rsp->table_rule_stats_read.stats,
/*
 * Handler: add a meter profile to the table action profile of one table.
 * Data-plane counterpart of pipeline_table_mtr_profile_add()'s slow path.
 */
2838 static struct pipeline_msg_rsp *
2839 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2840 struct pipeline_msg_req *req)
2842 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2843 uint32_t table_id = req->id;
2844 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2845 struct rte_table_action_meter_profile *profile =
2846 &req->table_mtr_profile_add.profile;
2847 struct rte_table_action *a = p->table_data[table_id].a;
2849 rsp->status = rte_table_action_meter_profile_add(a,
/*
 * Handler: delete a meter profile from one table's action profile.
 * Data-plane counterpart of pipeline_table_mtr_profile_delete()'s slow path.
 */
2856 static struct pipeline_msg_rsp *
2857 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2858 struct pipeline_msg_req *req)
2860 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2861 uint32_t table_id = req->id;
2862 uint32_t meter_profile_id =
2863 req->table_mtr_profile_delete.meter_profile_id;
2864 struct rte_table_action *a = p->table_data[table_id].a;
2866 rsp->status = rte_table_action_meter_profile_delete(a,
/*
 * Handler: read (and optionally clear) the meter counters of one rule,
 * limited to the traffic classes selected by tc_mask. Counters are written
 * into the response, which aliases the request buffer.
 */
2872 static struct pipeline_msg_rsp *
2873 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2874 struct pipeline_msg_req *req)
2876 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2877 uint32_t table_id = req->id;
2878 void *data = req->table_rule_mtr_read.data;
2879 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2880 int clear = req->table_rule_mtr_read.clear;
2881 struct rte_table_action *a = p->table_data[table_id].a;
2883 rsp->status = rte_table_action_meter_read(a,
2886 &rsp->table_rule_mtr_read.stats,
/*
 * Handler: update the DSCP translation table of one table, restricted to
 * the entries selected by dscp_mask. Data-plane counterpart of
 * pipeline_table_dscp_table_update()'s slow path.
 */
2892 static struct pipeline_msg_rsp *
2893 pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
2894 struct pipeline_msg_req *req)
2896 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2897 uint32_t table_id = req->id;
2898 uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
2899 struct rte_table_action_dscp_table *dscp_table =
2900 &req->table_dscp_table_update.dscp_table;
2901 struct rte_table_action *a = p->table_data[table_id].a;
2903 rsp->status = rte_table_action_dscp_table_update(a,
/*
 * Handler: read (and optionally clear) the TTL counters of one rule.
 * Counters are written into the response, which aliases the request buffer.
 */
2910 static struct pipeline_msg_rsp *
2911 pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
2912 struct pipeline_msg_req *req)
2914 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2915 uint32_t table_id = req->id;
2916 void *data = req->table_rule_ttl_read.data;
2917 int clear = req->table_rule_ttl_read.clear;
2918 struct rte_table_action *a = p->table_data[table_id].a;
2920 rsp->status = rte_table_action_ttl_read(a,
2922 &rsp->table_rule_ttl_read.stats,
/*
 * Data-plane dispatcher: dequeue one request from this pipeline's request
 * ring, dispatch on req->type to the matching handler (each handler reuses
 * the request buffer as the response), then enqueue the response. The
 * default case answers unknown request types with just a status (the
 * status assignment line is missing from this subsampled view).
 */
2929 pipeline_msg_handle(struct pipeline_data *p)
2932 struct pipeline_msg_req *req;
2933 struct pipeline_msg_rsp *rsp;
2935 req = pipeline_msg_recv(p->msgq_req);
2939 switch (req->type) {
2940 case PIPELINE_REQ_PORT_IN_STATS_READ:
2941 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
2944 case PIPELINE_REQ_PORT_IN_ENABLE:
2945 rsp = pipeline_msg_handle_port_in_enable(p, req);
2948 case PIPELINE_REQ_PORT_IN_DISABLE:
2949 rsp = pipeline_msg_handle_port_in_disable(p, req);
2952 case PIPELINE_REQ_PORT_OUT_STATS_READ:
2953 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
2956 case PIPELINE_REQ_TABLE_STATS_READ:
2957 rsp = pipeline_msg_handle_table_stats_read(p, req);
2960 case PIPELINE_REQ_TABLE_RULE_ADD:
2961 rsp = pipeline_msg_handle_table_rule_add(p, req);
2964 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2965 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2968 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2969 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2972 case PIPELINE_REQ_TABLE_RULE_DELETE:
2973 rsp = pipeline_msg_handle_table_rule_delete(p, req);
2976 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2977 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
2980 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
2981 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
2984 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
2985 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
2988 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
2989 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
2992 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
2993 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
2996 case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
2997 rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
3000 case PIPELINE_REQ_TABLE_RULE_TTL_READ:
3001 rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
/* Unknown request type: echo the buffer back as the response. */
3005 rsp = (struct pipeline_msg_rsp *) req;
3009 pipeline_msg_send(p->msgq_rsp, rsp);
3014 * Data plane threads: main
3017 thread_main(void *arg __rte_unused)
3019 struct thread_data *t;
3020 uint32_t thread_id, i;
3022 thread_id = rte_lcore_id();
3023 t = &thread_data[thread_id];
3026 for (i = 0; ; i++) {
3030 for (j = 0; j < t->n_pipelines; j++)
3031 rte_pipeline_run(t->p[j]);
3034 if ((i & 0xF) == 0) {
3035 uint64_t time = rte_get_tsc_cycles();
3036 uint64_t time_next_min = UINT64_MAX;
3038 if (time < t->time_next_min)
3041 /* Pipeline message queues */
3042 for (j = 0; j < t->n_pipelines; j++) {
3043 struct pipeline_data *p =
3044 &t->pipeline_data[j];
3045 uint64_t time_next = p->time_next;
3047 if (time_next <= time) {
3048 pipeline_msg_handle(p);
3049 rte_pipeline_flush(p->p);
3050 time_next = time + p->timer_period;
3051 p->time_next = time_next;
3054 if (time_next < time_next_min)
3055 time_next_min = time_next;
3058 /* Thread message queues */
3060 uint64_t time_next = t->time_next;
3062 if (time_next <= time) {
3063 thread_msg_handle(t);
3064 time_next = time + t->timer_period;
3065 t->time_next = time_next;
3068 if (time_next < time_next_min)
3069 time_next_min = time_next;
3072 t->time_next_min = time_next_min;