1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
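/*
 * Note: the three limits above are guarded by #ifndef, so they can be
 * overridden at build time (e.g. -DTHREAD_PIPELINES_MAX=<n> in the
 * compiler flags) without editing this file.
 */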
35 * Master thread: data plane thread context
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
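/*
 * Per-lcore contexts: thread[] is the master thread's per-lcore handle on
 * each data plane thread (its request/response rings), while thread_data[]
 * is the context used by the data plane thread itself. struct thread_data
 * is cache aligned so that each lcore's hot state lives on its own cache
 * lines and false sharing between lcores is avoided.
 */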
81 * Master thread: data plane thread init
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
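/* NUMA socket of lcore i, presumably used below when creating its rings. */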
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
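/*
 * Usage sketch (not part of this file): after EAL init, the application's
 * main() is expected to call thread_init() once and then launch
 * thread_main() on every worker lcore, roughly as follows:
 *
 *	if (thread_init() < 0)
 *		rte_exit(EXIT_FAILURE, "Thread init failed\n");
 *
 *	rte_eal_mp_remote_launch(thread_main, NULL, SKIP_MASTER);
 */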
159 * Master thread & data plane threads: thread message passing
161 enum thread_req_type {
162 THREAD_REQ_PIPELINE_ENABLE = 0,
163 THREAD_REQ_PIPELINE_DISABLE,
167 struct thread_msg_req {
168 enum thread_req_type type;
172 struct rte_pipeline *p;
174 struct rte_table_action *a;
175 } table[RTE_PIPELINE_TABLE_MAX];
176 struct rte_ring *msgq_req;
177 struct rte_ring *msgq_rsp;
178 uint32_t timer_period_ms;
183 struct rte_pipeline *p;
188 struct thread_msg_rsp {
195 static struct thread_msg_req *
196 thread_msg_alloc(void)
198 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
199 sizeof(struct thread_msg_rsp));
201 return calloc(1, size);
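/*
 * A single buffer sized for the larger of the request and the response is
 * allocated here, because the data plane thread writes its response in
 * place over the request (see the casts in the message handlers below).
 */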
205 thread_msg_free(struct thread_msg_rsp *rsp)
210 static struct thread_msg_rsp *
211 thread_msg_send_recv(uint32_t thread_id,
212 struct thread_msg_req *req)
214 struct thread *t = &thread[thread_id];
215 struct rte_ring *msgq_req = t->msgq_req;
216 struct rte_ring *msgq_rsp = t->msgq_rsp;
217 struct thread_msg_rsp *rsp;
222 status = rte_ring_sp_enqueue(msgq_req, req);
223 } while (status == -ENOBUFS);
227 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
228 } while (status != 0);
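/*
 * Both loops above busy-wait: first until the request fits into the ring,
 * then until the data plane thread posts its response. With the default
 * THREAD_TIMER_PERIOD_MS of 100 ms, servicing a request can take up to
 * roughly one timer period.
 */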
234 thread_pipeline_enable(uint32_t thread_id,
235 const char *pipeline_name)
237 struct pipeline *p = pipeline_find(pipeline_name);
239 struct thread_msg_req *req;
240 struct thread_msg_rsp *rsp;
244 /* Check input params */
245 if ((thread_id >= RTE_MAX_LCORE) ||
247 (p->n_ports_in == 0) ||
248 (p->n_ports_out == 0) ||
252 t = &thread[thread_id];
253 if ((t->enabled == 0) ||
257 /* Allocate request */
258 req = thread_msg_alloc();
263 req->type = THREAD_REQ_PIPELINE_ENABLE;
264 req->pipeline_enable.p = p->p;
265 for (i = 0; i < p->n_tables; i++)
266 req->pipeline_enable.table[i].a =
268 req->pipeline_enable.msgq_req = p->msgq_req;
269 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
270 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
271 req->pipeline_enable.n_tables = p->n_tables;
273 /* Send request and wait for response */
274 rsp = thread_msg_send_recv(thread_id, req);
279 status = rsp->status;
282 thread_msg_free(rsp);
284 /* Request completion */
288 p->thread_id = thread_id;
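/*
 * Usage sketch (hypothetical, e.g. from the handler of a CLI command such
 * as "thread 1 pipeline PIPELINE0 enable"):
 *
 *	status = thread_pipeline_enable(1, "PIPELINE0");
 *	if (status)
 *		printf("Cannot enable pipeline\n");
 */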
295 thread_pipeline_disable(uint32_t thread_id,
296 const char *pipeline_name)
298 struct pipeline *p = pipeline_find(pipeline_name);
300 struct thread_msg_req *req;
301 struct thread_msg_rsp *rsp;
304 /* Check input params */
305 if ((thread_id >= RTE_MAX_LCORE) ||
309 t = &thread[thread_id];
316 if (p->thread_id != thread_id)
319 /* Allocate request */
320 req = thread_msg_alloc();
325 req->type = THREAD_REQ_PIPELINE_DISABLE;
326 req->pipeline_disable.p = p->p;
328 /* Send request and wait for response */
329 rsp = thread_msg_send_recv(thread_id, req);
334 status = rsp->status;
337 thread_msg_free(rsp);
339 /* Request completion */
349 * Data plane threads: thread message handling
351 static inline struct thread_msg_req *
352 thread_msg_recv(struct rte_ring *msgq_req)
354 struct thread_msg_req *req;
356 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
365 thread_msg_send(struct rte_ring *msgq_rsp,
366 struct thread_msg_rsp *rsp)
371 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
372 } while (status == -ENOBUFS);
375 static struct thread_msg_rsp *
376 thread_msg_handle_pipeline_enable(struct thread_data *t,
377 struct thread_msg_req *req)
379 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
380 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
384 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
389 t->p[t->n_pipelines] = req->pipeline_enable.p;
391 p->p = req->pipeline_enable.p;
392 for (i = 0; i < req->pipeline_enable.n_tables; i++)
394 req->pipeline_enable.table[i].a;
396 p->n_tables = req->pipeline_enable.n_tables;
398 p->msgq_req = req->pipeline_enable.msgq_req;
399 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
401 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
402 p->time_next = rte_get_tsc_cycles() + p->timer_period;
411 static struct thread_msg_rsp *
412 thread_msg_handle_pipeline_disable(struct thread_data *t,
413 struct thread_msg_req *req)
415 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
416 uint32_t n_pipelines = t->n_pipelines;
417 struct rte_pipeline *pipeline = req->pipeline_disable.p;
421 for (i = 0; i < n_pipelines; i++) {
422 struct pipeline_data *p = &t->pipeline_data[i];
424 if (p->p != pipeline)
427 if (i < n_pipelines - 1) {
428 struct rte_pipeline *pipeline_last =
429 t->p[n_pipelines - 1];
430 struct pipeline_data *p_last =
431 &t->pipeline_data[n_pipelines - 1];
433 t->p[i] = pipeline_last;
434 memcpy(p, p_last, sizeof(*p));
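/*
 * The disabled pipeline is removed by moving the last enabled pipeline
 * into its slot, keeping t->p[] and t->pipeline_data[] dense: O(1)
 * removal, but the ordering of the remaining pipelines is not preserved.
 */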
443 /* should not get here */
449 thread_msg_handle(struct thread_data *t)
452 struct thread_msg_req *req;
453 struct thread_msg_rsp *rsp;
455 req = thread_msg_recv(t->msgq_req);
460 case THREAD_REQ_PIPELINE_ENABLE:
461 rsp = thread_msg_handle_pipeline_enable(t, req);
464 case THREAD_REQ_PIPELINE_DISABLE:
465 rsp = thread_msg_handle_pipeline_disable(t, req);
469 rsp = (struct thread_msg_rsp *) req;
473 thread_msg_send(t->msgq_rsp, rsp);
478 * Master thread & data plane threads: pipeline message passing
480 enum pipeline_req_type {
482 PIPELINE_REQ_PORT_IN_STATS_READ,
483 PIPELINE_REQ_PORT_IN_ENABLE,
484 PIPELINE_REQ_PORT_IN_DISABLE,
487 PIPELINE_REQ_PORT_OUT_STATS_READ,
490 PIPELINE_REQ_TABLE_STATS_READ,
491 PIPELINE_REQ_TABLE_RULE_ADD,
492 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
493 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
494 PIPELINE_REQ_TABLE_RULE_DELETE,
495 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
496 PIPELINE_REQ_TABLE_RULE_STATS_READ,
497 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
498 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
499 PIPELINE_REQ_TABLE_RULE_MTR_READ,
500 PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
501 PIPELINE_REQ_TABLE_RULE_TTL_READ,
505 struct pipeline_msg_req_port_in_stats_read {
509 struct pipeline_msg_req_port_out_stats_read {
513 struct pipeline_msg_req_table_stats_read {
517 struct pipeline_msg_req_table_rule_add {
518 struct table_rule_match match;
519 struct table_rule_action action;
522 struct pipeline_msg_req_table_rule_add_default {
523 struct table_rule_action action;
526 struct pipeline_msg_req_table_rule_add_bulk {
527 struct table_rule_match *match;
528 struct table_rule_action *action;
534 struct pipeline_msg_req_table_rule_delete {
535 struct table_rule_match match;
538 struct pipeline_msg_req_table_rule_stats_read {
543 struct pipeline_msg_req_table_mtr_profile_add {
544 uint32_t meter_profile_id;
545 struct rte_table_action_meter_profile profile;
548 struct pipeline_msg_req_table_mtr_profile_delete {
549 uint32_t meter_profile_id;
552 struct pipeline_msg_req_table_rule_mtr_read {
558 struct pipeline_msg_req_table_dscp_table_update {
560 struct rte_table_action_dscp_table dscp_table;
563 struct pipeline_msg_req_table_rule_ttl_read {
568 struct pipeline_msg_req {
569 enum pipeline_req_type type;
570 uint32_t id; /* Port IN, port OUT or table ID */
574 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
575 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
576 struct pipeline_msg_req_table_stats_read table_stats_read;
577 struct pipeline_msg_req_table_rule_add table_rule_add;
578 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
579 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
580 struct pipeline_msg_req_table_rule_delete table_rule_delete;
581 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
582 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
583 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
584 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
585 struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
586 struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
590 struct pipeline_msg_rsp_port_in_stats_read {
591 struct rte_pipeline_port_in_stats stats;
594 struct pipeline_msg_rsp_port_out_stats_read {
595 struct rte_pipeline_port_out_stats stats;
598 struct pipeline_msg_rsp_table_stats_read {
599 struct rte_pipeline_table_stats stats;
602 struct pipeline_msg_rsp_table_rule_add {
606 struct pipeline_msg_rsp_table_rule_add_default {
610 struct pipeline_msg_rsp_table_rule_add_bulk {
614 struct pipeline_msg_rsp_table_rule_stats_read {
615 struct rte_table_action_stats_counters stats;
618 struct pipeline_msg_rsp_table_rule_mtr_read {
619 struct rte_table_action_mtr_counters stats;
622 struct pipeline_msg_rsp_table_rule_ttl_read {
623 struct rte_table_action_ttl_counters stats;
626 struct pipeline_msg_rsp {
631 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
632 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
633 struct pipeline_msg_rsp_table_stats_read table_stats_read;
634 struct pipeline_msg_rsp_table_rule_add table_rule_add;
635 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
636 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
637 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
638 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
639 struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
646 static struct pipeline_msg_req *
647 pipeline_msg_alloc(void)
649 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
650 sizeof(struct pipeline_msg_rsp));
652 return calloc(1, size);
656 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
661 static struct pipeline_msg_rsp *
662 pipeline_msg_send_recv(struct pipeline *p,
663 struct pipeline_msg_req *req)
665 struct rte_ring *msgq_req = p->msgq_req;
666 struct rte_ring *msgq_rsp = p->msgq_rsp;
667 struct pipeline_msg_rsp *rsp;
672 status = rte_ring_sp_enqueue(msgq_req, req);
673 } while (status == -ENOBUFS);
677 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
678 } while (status != 0);
684 pipeline_port_in_stats_read(const char *pipeline_name,
686 struct rte_pipeline_port_in_stats *stats,
690 struct pipeline_msg_req *req;
691 struct pipeline_msg_rsp *rsp;
694 /* Check input params */
695 if ((pipeline_name == NULL) ||
699 p = pipeline_find(pipeline_name);
702 (port_id >= p->n_ports_in))
705 /* Allocate request */
706 req = pipeline_msg_alloc();
711 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
713 req->port_in_stats_read.clear = clear;
715 /* Send request and wait for response */
716 rsp = pipeline_msg_send_recv(p, req);
721 status = rsp->status;
723 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
726 pipeline_msg_free(rsp);
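/*
 * Usage sketch (hypothetical caller, assuming a pipeline named "PIPELINE0"
 * with at least one input port has already been created and enabled):
 *
 *	struct rte_pipeline_port_in_stats stats;
 *	int status;
 *
 *	status = pipeline_port_in_stats_read("PIPELINE0", 0, &stats, 1);
 *	if (status)
 *		printf("Cannot read stats for input port 0\n");
 */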
732 pipeline_port_in_enable(const char *pipeline_name,
736 struct pipeline_msg_req *req;
737 struct pipeline_msg_rsp *rsp;
740 /* Check input params */
741 if (pipeline_name == NULL)
744 p = pipeline_find(pipeline_name);
747 (port_id >= p->n_ports_in))
750 /* Allocate request */
751 req = pipeline_msg_alloc();
756 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
759 /* Send request and wait for response */
760 rsp = pipeline_msg_send_recv(p, req);
765 status = rsp->status;
768 pipeline_msg_free(rsp);
774 pipeline_port_in_disable(const char *pipeline_name,
778 struct pipeline_msg_req *req;
779 struct pipeline_msg_rsp *rsp;
782 /* Check input params */
783 if (pipeline_name == NULL)
786 p = pipeline_find(pipeline_name);
789 (port_id >= p->n_ports_in))
792 /* Allocate request */
793 req = pipeline_msg_alloc();
798 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
801 /* Send request and wait for response */
802 rsp = pipeline_msg_send_recv(p, req);
807 status = rsp->status;
810 pipeline_msg_free(rsp);
816 pipeline_port_out_stats_read(const char *pipeline_name,
818 struct rte_pipeline_port_out_stats *stats,
822 struct pipeline_msg_req *req;
823 struct pipeline_msg_rsp *rsp;
826 /* Check input params */
827 if ((pipeline_name == NULL) ||
831 p = pipeline_find(pipeline_name);
834 (port_id >= p->n_ports_out))
837 /* Allocate request */
838 req = pipeline_msg_alloc();
843 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
845 req->port_out_stats_read.clear = clear;
847 /* Send request and wait for response */
848 rsp = pipeline_msg_send_recv(p, req);
853 status = rsp->status;
855 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
858 pipeline_msg_free(rsp);
864 pipeline_table_stats_read(const char *pipeline_name,
866 struct rte_pipeline_table_stats *stats,
870 struct pipeline_msg_req *req;
871 struct pipeline_msg_rsp *rsp;
874 /* Check input params */
875 if ((pipeline_name == NULL) ||
879 p = pipeline_find(pipeline_name);
882 (table_id >= p->n_tables))
885 /* Allocate request */
886 req = pipeline_msg_alloc();
891 req->type = PIPELINE_REQ_TABLE_STATS_READ;
893 req->table_stats_read.clear = clear;
895 /* Send request and wait for response */
896 rsp = pipeline_msg_send_recv(p, req);
901 status = rsp->status;
903 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
906 pipeline_msg_free(rsp);
912 match_check(struct table_rule_match *match,
918 if ((match == NULL) ||
920 (table_id >= p->n_tables))
923 table = &p->table[table_id];
924 if (match->match_type != table->params.match_type)
927 switch (match->match_type) {
930 struct table_acl_params *t = &table->params.match.acl;
931 struct table_rule_match_acl *r = &match->match.acl;
933 if ((r->ip_version && (t->ip_version == 0)) ||
934 ((r->ip_version == 0) && t->ip_version))
938 if ((r->sa_depth > 32) ||
942 if ((r->sa_depth > 128) ||
957 struct table_lpm_params *t = &table->params.match.lpm;
958 struct table_rule_match_lpm *r = &match->match.lpm;
960 if ((r->ip_version && (t->key_size != 4)) ||
961 ((r->ip_version == 0) && (t->key_size != 16)))
983 action_check(struct table_rule_action *action,
987 struct table_action_profile *ap;
989 if ((action == NULL) ||
991 (table_id >= p->n_tables))
994 ap = p->table[table_id].ap;
995 if (action->action_mask != ap->params.action_mask)
998 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
999 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1000 (action->fwd.id >= p->n_ports_out))
1003 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1004 (action->fwd.id >= p->n_tables))
1008 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1009 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
1010 uint32_t tc_mask1 = action->mtr.tc_mask;
1012 if (tc_mask1 != tc_mask0)
1016 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1017 uint32_t n_subports_per_port =
1018 ap->params.tm.n_subports_per_port;
1019 uint32_t n_pipes_per_subport =
1020 ap->params.tm.n_pipes_per_subport;
1021 uint32_t subport_id = action->tm.subport_id;
1022 uint32_t pipe_id = action->tm.pipe_id;
1024 if ((subport_id >= n_subports_per_port) ||
1025 (pipe_id >= n_pipes_per_subport))
1029 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1030 uint64_t encap_mask = ap->params.encap.encap_mask;
1031 enum rte_table_action_encap_type type = action->encap.type;
1033 if ((encap_mask & (1LLU << type)) == 0)
1037 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1038 int ip_version0 = ap->params.common.ip_version;
1039 int ip_version1 = action->nat.ip_version;
1041 if ((ip_version1 && (ip_version0 == 0)) ||
1042 ((ip_version1 == 0) && ip_version0))
1050 action_default_check(struct table_rule_action *action,
1054 if ((action == NULL) ||
1055 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1057 (table_id >= p->n_tables))
1060 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1061 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1062 (action->fwd.id >= p->n_ports_out))
1065 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1066 (action->fwd.id >= p->n_tables))
1074 pipeline_table_rule_add(const char *pipeline_name,
1076 struct table_rule_match *match,
1077 struct table_rule_action *action,
1081 struct pipeline_msg_req *req;
1082 struct pipeline_msg_rsp *rsp;
1085 /* Check input params */
1086 if ((pipeline_name == NULL) ||
1092 p = pipeline_find(pipeline_name);
1094 (p->enabled == 0) ||
1095 (table_id >= p->n_tables) ||
1096 match_check(match, p, table_id) ||
1097 action_check(action, p, table_id))
1100 /* Allocate request */
1101 req = pipeline_msg_alloc();
1106 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1108 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1109 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1111 /* Send request and wait for response */
1112 rsp = pipeline_msg_send_recv(p, req);
1117 status = rsp->status;
1119 *data = rsp->table_rule_add.data;
1122 pipeline_msg_free(rsp);
1128 pipeline_table_rule_add_default(const char *pipeline_name,
1130 struct table_rule_action *action,
1134 struct pipeline_msg_req *req;
1135 struct pipeline_msg_rsp *rsp;
1138 /* Check input params */
1139 if ((pipeline_name == NULL) ||
1144 p = pipeline_find(pipeline_name);
1146 (p->enabled == 0) ||
1147 (table_id >= p->n_tables) ||
1148 action_default_check(action, p, table_id))
1151 /* Allocate request */
1152 req = pipeline_msg_alloc();
1157 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1159 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1161 /* Send request and wait for response */
1162 rsp = pipeline_msg_send_recv(p, req);
1167 status = rsp->status;
1169 *data = rsp->table_rule_add_default.data;
1172 pipeline_msg_free(rsp);
1178 pipeline_table_rule_add_bulk(const char *pipeline_name,
1180 struct table_rule_match *match,
1181 struct table_rule_action *action,
1186 struct pipeline_msg_req *req;
1187 struct pipeline_msg_rsp *rsp;
1191 /* Check input params */
1192 if ((pipeline_name == NULL) ||
1196 (n_rules == NULL) ||
1200 p = pipeline_find(pipeline_name);
1202 (p->enabled == 0) ||
1203 (table_id >= p->n_tables))
1206 for (i = 0; i < *n_rules; i++)
1207 if (match_check(match, p, table_id) ||
1208 action_check(action, p, table_id))
1211 /* Allocate request */
1212 req = pipeline_msg_alloc();
1217 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1219 req->table_rule_add_bulk.match = match;
1220 req->table_rule_add_bulk.action = action;
1221 req->table_rule_add_bulk.data = data;
1222 req->table_rule_add_bulk.n_rules = *n_rules;
1223 req->table_rule_add_bulk.bulk =
1224 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
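/*
 * Only ACL tables take the true bulk add path; for any other table type
 * the data plane handler falls back to adding the rules one by one (see
 * pipeline_msg_handle_table_rule_add_bulk() below).
 */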
1226 /* Send request and wait for response */
1227 rsp = pipeline_msg_send_recv(p, req);
1232 status = rsp->status;
1234 *n_rules = rsp->table_rule_add_bulk.n_rules;
1237 pipeline_msg_free(rsp);
1243 pipeline_table_rule_delete(const char *pipeline_name,
1245 struct table_rule_match *match)
1248 struct pipeline_msg_req *req;
1249 struct pipeline_msg_rsp *rsp;
1252 /* Check input params */
1253 if ((pipeline_name == NULL) ||
1257 p = pipeline_find(pipeline_name);
1259 (p->enabled == 0) ||
1260 (table_id >= p->n_tables) ||
1261 match_check(match, p, table_id))
1264 /* Allocate request */
1265 req = pipeline_msg_alloc();
1270 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1272 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1274 /* Send request and wait for response */
1275 rsp = pipeline_msg_send_recv(p, req);
1280 status = rsp->status;
1283 pipeline_msg_free(rsp);
1289 pipeline_table_rule_delete_default(const char *pipeline_name,
1293 struct pipeline_msg_req *req;
1294 struct pipeline_msg_rsp *rsp;
1297 /* Check input params */
1298 if (pipeline_name == NULL)
1301 p = pipeline_find(pipeline_name);
1303 (p->enabled == 0) ||
1304 (table_id >= p->n_tables))
1307 /* Allocate request */
1308 req = pipeline_msg_alloc();
1313 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1316 /* Send request and wait for response */
1317 rsp = pipeline_msg_send_recv(p, req);
1322 status = rsp->status;
1325 pipeline_msg_free(rsp);
1331 pipeline_table_rule_stats_read(const char *pipeline_name,
1334 struct rte_table_action_stats_counters *stats,
1338 struct pipeline_msg_req *req;
1339 struct pipeline_msg_rsp *rsp;
1342 /* Check input params */
1343 if ((pipeline_name == NULL) ||
1348 p = pipeline_find(pipeline_name);
1350 (p->enabled == 0) ||
1351 (table_id >= p->n_tables))
1354 /* Allocate request */
1355 req = pipeline_msg_alloc();
1360 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1362 req->table_rule_stats_read.data = data;
1363 req->table_rule_stats_read.clear = clear;
1365 /* Send request and wait for response */
1366 rsp = pipeline_msg_send_recv(p, req);
1371 status = rsp->status;
1373 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1376 pipeline_msg_free(rsp);
1382 pipeline_table_mtr_profile_add(const char *pipeline_name,
1384 uint32_t meter_profile_id,
1385 struct rte_table_action_meter_profile *profile)
1388 struct pipeline_msg_req *req;
1389 struct pipeline_msg_rsp *rsp;
1392 /* Check input params */
1393 if ((pipeline_name == NULL) ||
1397 p = pipeline_find(pipeline_name);
1399 (p->enabled == 0) ||
1400 (table_id >= p->n_tables))
1403 /* Allocate request */
1404 req = pipeline_msg_alloc();
1409 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1411 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1412 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1414 /* Send request and wait for response */
1415 rsp = pipeline_msg_send_recv(p, req);
1420 status = rsp->status;
1423 pipeline_msg_free(rsp);
1429 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1431 uint32_t meter_profile_id)
1434 struct pipeline_msg_req *req;
1435 struct pipeline_msg_rsp *rsp;
1438 /* Check input params */
1439 if (pipeline_name == NULL)
1442 p = pipeline_find(pipeline_name);
1444 (p->enabled == 0) ||
1445 (table_id >= p->n_tables))
1448 /* Allocate request */
1449 req = pipeline_msg_alloc();
1454 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1456 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1458 /* Send request and wait for response */
1459 rsp = pipeline_msg_send_recv(p, req);
1464 status = rsp->status;
1467 pipeline_msg_free(rsp);
1473 pipeline_table_rule_mtr_read(const char *pipeline_name,
1477 struct rte_table_action_mtr_counters *stats,
1481 struct pipeline_msg_req *req;
1482 struct pipeline_msg_rsp *rsp;
1485 /* Check input params */
1486 if ((pipeline_name == NULL) ||
1491 p = pipeline_find(pipeline_name);
1493 (p->enabled == 0) ||
1494 (table_id >= p->n_tables))
1497 /* Allocate request */
1498 req = pipeline_msg_alloc();
1503 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
1505 req->table_rule_mtr_read.data = data;
1506 req->table_rule_mtr_read.tc_mask = tc_mask;
1507 req->table_rule_mtr_read.clear = clear;
1509 /* Send request and wait for response */
1510 rsp = pipeline_msg_send_recv(p, req);
1515 status = rsp->status;
1517 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
1520 pipeline_msg_free(rsp);
1526 pipeline_table_dscp_table_update(const char *pipeline_name,
1529 struct rte_table_action_dscp_table *dscp_table)
1532 struct pipeline_msg_req *req;
1533 struct pipeline_msg_rsp *rsp;
1536 /* Check input params */
1537 if ((pipeline_name == NULL) ||
1538 (dscp_table == NULL))
1541 p = pipeline_find(pipeline_name);
1543 (p->enabled == 0) ||
1544 (table_id >= p->n_tables))
1547 /* Allocate request */
1548 req = pipeline_msg_alloc();
1553 req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
1555 req->table_dscp_table_update.dscp_mask = dscp_mask;
1556 memcpy(&req->table_dscp_table_update.dscp_table,
1557 dscp_table, sizeof(*dscp_table));
1559 /* Send request and wait for response */
1560 rsp = pipeline_msg_send_recv(p, req);
1565 status = rsp->status;
1568 pipeline_msg_free(rsp);
1574 pipeline_table_rule_ttl_read(const char *pipeline_name,
1577 struct rte_table_action_ttl_counters *stats,
1581 struct pipeline_msg_req *req;
1582 struct pipeline_msg_rsp *rsp;
1585 /* Check input params */
1586 if ((pipeline_name == NULL) ||
1591 p = pipeline_find(pipeline_name);
1593 (p->enabled == 0) ||
1594 (table_id >= p->n_tables))
1597 /* Allocate request */
1598 req = pipeline_msg_alloc();
1603 req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
1605 req->table_rule_ttl_read.data = data;
1606 req->table_rule_ttl_read.clear = clear;
1608 /* Send request and wait for response */
1609 rsp = pipeline_msg_send_recv(p, req);
1614 status = rsp->status;
1616 memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
1619 pipeline_msg_free(rsp);
1625 * Data plane threads: pipeline message handling
1627 static inline struct pipeline_msg_req *
1628 pipeline_msg_recv(struct rte_ring *msgq_req)
1630 struct pipeline_msg_req *req;
1632 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
1641 pipeline_msg_send(struct rte_ring *msgq_rsp,
1642 struct pipeline_msg_rsp *rsp)
1647 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
1648 } while (status == -ENOBUFS);
1651 static struct pipeline_msg_rsp *
1652 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
1653 struct pipeline_msg_req *req)
1655 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1656 uint32_t port_id = req->id;
1657 int clear = req->port_in_stats_read.clear;
1659 rsp->status = rte_pipeline_port_in_stats_read(p->p,
1661 &rsp->port_in_stats_read.stats,
1667 static struct pipeline_msg_rsp *
1668 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
1669 struct pipeline_msg_req *req)
1671 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1672 uint32_t port_id = req->id;
1674 rsp->status = rte_pipeline_port_in_enable(p->p,
1680 static struct pipeline_msg_rsp *
1681 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
1682 struct pipeline_msg_req *req)
1684 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1685 uint32_t port_id = req->id;
1687 rsp->status = rte_pipeline_port_in_disable(p->p,
1693 static struct pipeline_msg_rsp *
1694 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
1695 struct pipeline_msg_req *req)
1697 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1698 uint32_t port_id = req->id;
1699 int clear = req->port_out_stats_read.clear;
1701 rsp->status = rte_pipeline_port_out_stats_read(p->p,
1703 &rsp->port_out_stats_read.stats,
1709 static struct pipeline_msg_rsp *
1710 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
1711 struct pipeline_msg_req *req)
1713 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1714 uint32_t port_id = req->id;
1715 int clear = req->table_stats_read.clear;
1717 rsp->status = rte_pipeline_table_stats_read(p->p,
1719 &rsp->table_stats_read.stats,
1725 union table_rule_match_low_level {
1726 struct rte_table_acl_rule_add_params acl_add;
1727 struct rte_table_acl_rule_delete_params acl_delete;
1728 struct rte_table_array_key array;
1729 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1730 struct rte_table_lpm_key lpm_ipv4;
1731 struct rte_table_lpm_ipv6_key lpm_ipv6;
1735 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
1740 switch (depth / 32) {
1750 depth32[1] = depth - 32;
1758 depth32[2] = depth - 64;
1766 depth32[3] = depth - 96;
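/*
 * Example: an IPv6 prefix depth of 68 is split into the per-field 32-bit
 * depths {32, 32, 4, 0}, i.e. the first two ACL fields are fully masked,
 * the third matches only its top 4 bits and the fourth is a wildcard.
 */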
1782 match_convert(struct table_rule_match *mh,
1783 union table_rule_match_low_level *ml,
1786 memset(ml, 0, sizeof(*ml));
1788 switch (mh->match_type) {
1790 if (mh->match.acl.ip_version)
1792 ml->acl_add.field_value[0].value.u8 =
1793 mh->match.acl.proto;
1794 ml->acl_add.field_value[0].mask_range.u8 =
1795 mh->match.acl.proto_mask;
1797 ml->acl_add.field_value[1].value.u32 =
1798 mh->match.acl.ipv4.sa;
1799 ml->acl_add.field_value[1].mask_range.u32 =
1800 mh->match.acl.sa_depth;
1802 ml->acl_add.field_value[2].value.u32 =
1803 mh->match.acl.ipv4.da;
1804 ml->acl_add.field_value[2].mask_range.u32 =
1805 mh->match.acl.da_depth;
1807 ml->acl_add.field_value[3].value.u16 =
1809 ml->acl_add.field_value[3].mask_range.u16 =
1812 ml->acl_add.field_value[4].value.u16 =
1814 ml->acl_add.field_value[4].mask_range.u16 =
1817 ml->acl_add.priority =
1818 (int32_t) mh->match.acl.priority;
1820 ml->acl_delete.field_value[0].value.u8 =
1821 mh->match.acl.proto;
1822 ml->acl_delete.field_value[0].mask_range.u8 =
1823 mh->match.acl.proto_mask;
1825 ml->acl_delete.field_value[1].value.u32 =
1826 mh->match.acl.ipv4.sa;
1827 ml->acl_delete.field_value[1].mask_range.u32 =
1828 mh->match.acl.sa_depth;
1830 ml->acl_delete.field_value[2].value.u32 =
1831 mh->match.acl.ipv4.da;
1832 ml->acl_delete.field_value[2].mask_range.u32 =
1833 mh->match.acl.da_depth;
1835 ml->acl_delete.field_value[3].value.u16 =
1837 ml->acl_delete.field_value[3].mask_range.u16 =
1840 ml->acl_delete.field_value[4].value.u16 =
1842 ml->acl_delete.field_value[4].mask_range.u16 =
1848 (uint32_t *) mh->match.acl.ipv6.sa;
1850 (uint32_t *) mh->match.acl.ipv6.da;
1851 uint32_t sa32_depth[4], da32_depth[4];
1854 status = match_convert_ipv6_depth(
1855 mh->match.acl.sa_depth,
1860 status = match_convert_ipv6_depth(
1861 mh->match.acl.da_depth,
1866 ml->acl_add.field_value[0].value.u8 =
1867 mh->match.acl.proto;
1868 ml->acl_add.field_value[0].mask_range.u8 =
1869 mh->match.acl.proto_mask;
1871 ml->acl_add.field_value[1].value.u32 = sa32[0];
1872 ml->acl_add.field_value[1].mask_range.u32 =
1874 ml->acl_add.field_value[2].value.u32 = sa32[1];
1875 ml->acl_add.field_value[2].mask_range.u32 =
1877 ml->acl_add.field_value[3].value.u32 = sa32[2];
1878 ml->acl_add.field_value[3].mask_range.u32 =
1880 ml->acl_add.field_value[4].value.u32 = sa32[3];
1881 ml->acl_add.field_value[4].mask_range.u32 =
1884 ml->acl_add.field_value[5].value.u32 = da32[0];
1885 ml->acl_add.field_value[5].mask_range.u32 =
1887 ml->acl_add.field_value[6].value.u32 = da32[1];
1888 ml->acl_add.field_value[6].mask_range.u32 =
1890 ml->acl_add.field_value[7].value.u32 = da32[2];
1891 ml->acl_add.field_value[7].mask_range.u32 =
1893 ml->acl_add.field_value[8].value.u32 = da32[3];
1894 ml->acl_add.field_value[8].mask_range.u32 =
1897 ml->acl_add.field_value[9].value.u16 =
1899 ml->acl_add.field_value[9].mask_range.u16 =
1902 ml->acl_add.field_value[10].value.u16 =
1904 ml->acl_add.field_value[10].mask_range.u16 =
1907 ml->acl_add.priority =
1908 (int32_t) mh->match.acl.priority;
1911 (uint32_t *) mh->match.acl.ipv6.sa;
1913 (uint32_t *) mh->match.acl.ipv6.da;
1914 uint32_t sa32_depth[4], da32_depth[4];
1917 status = match_convert_ipv6_depth(
1918 mh->match.acl.sa_depth,
1923 status = match_convert_ipv6_depth(
1924 mh->match.acl.da_depth,
1929 ml->acl_delete.field_value[0].value.u8 =
1930 mh->match.acl.proto;
1931 ml->acl_delete.field_value[0].mask_range.u8 =
1932 mh->match.acl.proto_mask;
1934 ml->acl_delete.field_value[1].value.u32 =
1936 ml->acl_delete.field_value[1].mask_range.u32 =
1938 ml->acl_delete.field_value[2].value.u32 =
1940 ml->acl_delete.field_value[2].mask_range.u32 =
1942 ml->acl_delete.field_value[3].value.u32 =
1944 ml->acl_delete.field_value[3].mask_range.u32 =
1946 ml->acl_delete.field_value[4].value.u32 =
1948 ml->acl_delete.field_value[4].mask_range.u32 =
1951 ml->acl_delete.field_value[5].value.u32 =
1953 ml->acl_delete.field_value[5].mask_range.u32 =
1955 ml->acl_delete.field_value[6].value.u32 =
1957 ml->acl_delete.field_value[6].mask_range.u32 =
1959 ml->acl_delete.field_value[7].value.u32 =
1961 ml->acl_delete.field_value[7].mask_range.u32 =
1963 ml->acl_delete.field_value[8].value.u32 =
1965 ml->acl_delete.field_value[8].mask_range.u32 =
1968 ml->acl_delete.field_value[9].value.u16 =
1970 ml->acl_delete.field_value[9].mask_range.u16 =
1973 ml->acl_delete.field_value[10].value.u16 =
1975 ml->acl_delete.field_value[10].mask_range.u16 =
1981 ml->array.pos = mh->match.array.pos;
1985 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
1989 if (mh->match.lpm.ip_version) {
1990 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
1991 ml->lpm_ipv4.depth = mh->match.lpm.depth;
1993 memcpy(ml->lpm_ipv6.ip,
1994 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
1995 ml->lpm_ipv6.depth = mh->match.lpm.depth;
2005 static struct pipeline_msg_rsp *
2006 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
2007 struct pipeline_msg_req *req)
2009 union table_rule_match_low_level match_ll;
2010 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2011 struct table_rule_match *match = &req->table_rule_add.match;
2012 struct table_rule_action *action = &req->table_rule_add.action;
2013 struct rte_pipeline_table_entry *data_in, *data_out;
2014 uint32_t table_id = req->id;
2015 int key_found, status;
2016 struct rte_table_action *a = p->table_data[table_id].a;
2019 memset(p->buffer, 0, sizeof(p->buffer));
2020 data_in = (struct rte_pipeline_table_entry *) p->buffer;
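/*
 * p->buffer is scratch space for one rte_pipeline_table_entry plus its
 * action data; the rte_table_action_apply() calls below fill in the entry
 * field by field before it is added to the table.
 */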
2022 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2023 status = rte_table_action_apply(a,
2025 RTE_TABLE_ACTION_FWD,
2034 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2035 status = rte_table_action_apply(a,
2037 RTE_TABLE_ACTION_LB,
2046 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2047 status = rte_table_action_apply(a,
2049 RTE_TABLE_ACTION_MTR,
2058 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2059 status = rte_table_action_apply(a,
2061 RTE_TABLE_ACTION_TM,
2070 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2071 status = rte_table_action_apply(a,
2073 RTE_TABLE_ACTION_ENCAP,
2082 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2083 status = rte_table_action_apply(a,
2085 RTE_TABLE_ACTION_NAT,
2094 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2095 status = rte_table_action_apply(a,
2097 RTE_TABLE_ACTION_TTL,
2106 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2107 status = rte_table_action_apply(a,
2109 RTE_TABLE_ACTION_STATS,
2118 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2119 status = rte_table_action_apply(a,
2121 RTE_TABLE_ACTION_TIME,
2130 /* Add rule (match, action) to table */
2131 status = match_convert(match, &match_ll, 1);
2137 status = rte_pipeline_table_entry_add(p->p,
2148 /* Write response */
2150 rsp->table_rule_add.data = data_out;
2155 static struct pipeline_msg_rsp *
2156 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2157 struct pipeline_msg_req *req)
2159 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2160 struct table_rule_action *action = &req->table_rule_add_default.action;
2161 struct rte_pipeline_table_entry *data_in, *data_out;
2162 uint32_t table_id = req->id;
2166 memset(p->buffer, 0, sizeof(p->buffer));
2167 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2169 data_in->action = action->fwd.action;
2170 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2171 data_in->port_id = action->fwd.id;
2172 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2173 data_in->table_id = action->fwd.id;
2175 /* Add default rule to table */
2176 status = rte_pipeline_table_default_entry_add(p->p,
2185 /* Write response */
2187 rsp->table_rule_add_default.data = data_out;
2192 static struct pipeline_msg_rsp *
2193 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2194 struct pipeline_msg_req *req)
2197 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2199 uint32_t table_id = req->id;
2200 struct table_rule_match *match = req->table_rule_add_bulk.match;
2201 struct table_rule_action *action = req->table_rule_add_bulk.action;
2202 struct rte_pipeline_table_entry **data =
2203 (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
2204 uint32_t n_rules = req->table_rule_add_bulk.n_rules;
2205 uint32_t bulk = req->table_rule_add_bulk.bulk;
2207 struct rte_table_action *a = p->table_data[table_id].a;
2208 union table_rule_match_low_level *match_ll;
2210 void **match_ll_ptr;
2211 struct rte_pipeline_table_entry **action_ll_ptr;
2215 /* Memory allocation */
2216 match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
2217 action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
2218 match_ll_ptr = calloc(n_rules, sizeof(void *));
2220 calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
2221 found = calloc(n_rules, sizeof(int));
2223 if ((match_ll == NULL) ||
2224 (action_ll == NULL) ||
2225 (match_ll_ptr == NULL) ||
2226 (action_ll_ptr == NULL) ||
2230 for (i = 0; i < n_rules; i++) {
2231 match_ll_ptr[i] = (void *)&match_ll[i];
2233 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
2236 /* Rule match conversion */
2237 for (i = 0; i < n_rules; i++) {
2238 status = match_convert(&match[i], match_ll_ptr[i], 1);
2243 /* Rule action conversion */
2244 for (i = 0; i < n_rules; i++) {
2245 void *data_in = action_ll_ptr[i];
2246 struct table_rule_action *act = &action[i];
2248 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2249 status = rte_table_action_apply(a,
2251 RTE_TABLE_ACTION_FWD,
2258 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2259 status = rte_table_action_apply(a,
2261 RTE_TABLE_ACTION_LB,
2268 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2269 status = rte_table_action_apply(a,
2271 RTE_TABLE_ACTION_MTR,
2278 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2279 status = rte_table_action_apply(a,
2281 RTE_TABLE_ACTION_TM,
2288 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2289 status = rte_table_action_apply(a,
2291 RTE_TABLE_ACTION_ENCAP,
2298 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2299 status = rte_table_action_apply(a,
2301 RTE_TABLE_ACTION_NAT,
2308 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2309 status = rte_table_action_apply(a,
2311 RTE_TABLE_ACTION_TTL,
2318 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2319 status = rte_table_action_apply(a,
2321 RTE_TABLE_ACTION_STATS,
2328 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2329 status = rte_table_action_apply(a,
2331 RTE_TABLE_ACTION_TIME,
2339 /* Add rule (match, action) to table */
2341 status = rte_pipeline_table_entry_add_bulk(p->p,
2351 for (i = 0; i < n_rules; i++) {
2352 status = rte_pipeline_table_entry_add(p->p,
2364 /* Write response */
2366 rsp->table_rule_add_bulk.n_rules = n_rules;
2370 free(action_ll_ptr);
2379 free(action_ll_ptr);
2385 rsp->table_rule_add_bulk.n_rules = 0;
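/*
 * Error path: the temporary arrays allocated above are freed and the
 * response reports zero rules added, so the caller can tell the bulk
 * operation was not applied.
 */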
2389 static struct pipeline_msg_rsp *
2390 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2391 struct pipeline_msg_req *req)
2393 union table_rule_match_low_level match_ll;
2394 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2395 struct table_rule_match *match = &req->table_rule_delete.match;
2396 uint32_t table_id = req->id;
2397 int key_found, status;
2399 status = match_convert(match, &match_ll, 0);
2405 rsp->status = rte_pipeline_table_entry_delete(p->p,
2414 static struct pipeline_msg_rsp *
2415 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2416 struct pipeline_msg_req *req)
2418 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2419 uint32_t table_id = req->id;
2421 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
2428 static struct pipeline_msg_rsp *
2429 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2430 struct pipeline_msg_req *req)
2432 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2433 uint32_t table_id = req->id;
2434 void *data = req->table_rule_stats_read.data;
2435 int clear = req->table_rule_stats_read.clear;
2436 struct rte_table_action *a = p->table_data[table_id].a;
2438 rsp->status = rte_table_action_stats_read(a,
2440 &rsp->table_rule_stats_read.stats,
2446 static struct pipeline_msg_rsp *
2447 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2448 struct pipeline_msg_req *req)
2450 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2451 uint32_t table_id = req->id;
2452 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2453 struct rte_table_action_meter_profile *profile =
2454 &req->table_mtr_profile_add.profile;
2455 struct rte_table_action *a = p->table_data[table_id].a;
2457 rsp->status = rte_table_action_meter_profile_add(a,
2464 static struct pipeline_msg_rsp *
2465 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2466 struct pipeline_msg_req *req)
2468 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2469 uint32_t table_id = req->id;
2470 uint32_t meter_profile_id =
2471 req->table_mtr_profile_delete.meter_profile_id;
2472 struct rte_table_action *a = p->table_data[table_id].a;
2474 rsp->status = rte_table_action_meter_profile_delete(a,
2480 static struct pipeline_msg_rsp *
2481 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2482 struct pipeline_msg_req *req)
2484 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2485 uint32_t table_id = req->id;
2486 void *data = req->table_rule_mtr_read.data;
2487 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2488 int clear = req->table_rule_mtr_read.clear;
2489 struct rte_table_action *a = p->table_data[table_id].a;
2491 rsp->status = rte_table_action_meter_read(a,
2494 &rsp->table_rule_mtr_read.stats,
2500 static struct pipeline_msg_rsp *
2501 pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
2502 struct pipeline_msg_req *req)
2504 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2505 uint32_t table_id = req->id;
2506 uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
2507 struct rte_table_action_dscp_table *dscp_table =
2508 &req->table_dscp_table_update.dscp_table;
2509 struct rte_table_action *a = p->table_data[table_id].a;
2511 rsp->status = rte_table_action_dscp_table_update(a,
2518 static struct pipeline_msg_rsp *
2519 pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
2520 struct pipeline_msg_req *req)
2522 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2523 uint32_t table_id = req->id;
2524 void *data = req->table_rule_ttl_read.data;
2525 int clear = req->table_rule_ttl_read.clear;
2526 struct rte_table_action *a = p->table_data[table_id].a;
2528 rsp->status = rte_table_action_ttl_read(a,
2530 &rsp->table_rule_ttl_read.stats,
2537 pipeline_msg_handle(struct pipeline_data *p)
2540 struct pipeline_msg_req *req;
2541 struct pipeline_msg_rsp *rsp;
2543 req = pipeline_msg_recv(p->msgq_req);
2547 switch (req->type) {
2548 case PIPELINE_REQ_PORT_IN_STATS_READ:
2549 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
2552 case PIPELINE_REQ_PORT_IN_ENABLE:
2553 rsp = pipeline_msg_handle_port_in_enable(p, req);
2556 case PIPELINE_REQ_PORT_IN_DISABLE:
2557 rsp = pipeline_msg_handle_port_in_disable(p, req);
2560 case PIPELINE_REQ_PORT_OUT_STATS_READ:
2561 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
2564 case PIPELINE_REQ_TABLE_STATS_READ:
2565 rsp = pipeline_msg_handle_table_stats_read(p, req);
2568 case PIPELINE_REQ_TABLE_RULE_ADD:
2569 rsp = pipeline_msg_handle_table_rule_add(p, req);
2572 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2573 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2576 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2577 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2580 case PIPELINE_REQ_TABLE_RULE_DELETE:
2581 rsp = pipeline_msg_handle_table_rule_delete(p, req);
2584 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2585 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
2588 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
2589 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
2592 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
2593 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
2596 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
2597 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
2600 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
2601 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
2604 case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
2605 rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
2608 case PIPELINE_REQ_TABLE_RULE_TTL_READ:
2609 rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
2613 rsp = (struct pipeline_msg_rsp *) req;
2617 pipeline_msg_send(p->msgq_rsp, rsp);
2622 * Data plane threads: main
2625 thread_main(void *arg __rte_unused)
2627 struct thread_data *t;
2628 uint32_t thread_id, i;
2630 thread_id = rte_lcore_id();
2631 t = &thread_data[thread_id];
2634 for (i = 0; ; i++) {
2638 for (j = 0; j < t->n_pipelines; j++)
2639 rte_pipeline_run(t->p[j]);
2642 if ((i & 0xF) == 0) {
2643 uint64_t time = rte_get_tsc_cycles();
2644 uint64_t time_next_min = UINT64_MAX;
2646 if (time < t->time_next_min)
2649 /* Pipeline message queues */
2650 for (j = 0; j < t->n_pipelines; j++) {
2651 struct pipeline_data *p =
2652 &t->pipeline_data[j];
2653 uint64_t time_next = p->time_next;
2655 if (time_next <= time) {
2656 pipeline_msg_handle(p);
2657 rte_pipeline_flush(p->p);
2658 time_next = time + p->timer_period;
2659 p->time_next = time_next;
2662 if (time_next < time_next_min)
2663 time_next_min = time_next;
2666 /* Thread message queues */
2668 uint64_t time_next = t->time_next;
2670 if (time_next <= time) {
2671 thread_msg_handle(t);
2672 time_next = time + t->timer_period;
2673 t->time_next = time_next;
2676 if (time_next < time_next_min)
2677 time_next_min = time_next;
2680 t->time_next_min = time_next_min;
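/*
 * Control plane work stays off the fast path: the pipelines run on every
 * loop iteration, while the message queues are only polled every 16th
 * iteration ((i & 0xF) == 0) and only once the corresponding timer period
 * has expired.
 */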