1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Master thread: data plane thread context
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
159 * Master thread & data plane threads: message passing
161 enum thread_req_type {
162 THREAD_REQ_PIPELINE_ENABLE = 0,
163 THREAD_REQ_PIPELINE_DISABLE,
167 struct thread_msg_req {
168 enum thread_req_type type;
172 struct rte_pipeline *p;
174 struct rte_table_action *a;
175 } table[RTE_PIPELINE_TABLE_MAX];
176 struct rte_ring *msgq_req;
177 struct rte_ring *msgq_rsp;
178 uint32_t timer_period_ms;
183 struct rte_pipeline *p;
188 struct thread_msg_rsp {
/*
 * Allocate one zero-filled message buffer sized to hold either a request
 * or a response, so the receiver can reuse the same block in-place for
 * its reply (handlers cast struct thread_msg_req * to thread_msg_rsp *).
 */
195 static struct thread_msg_req *
196 thread_msg_alloc(void)
198 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
199 sizeof(struct thread_msg_rsp));
201 return calloc(1, size);
205 thread_msg_free(struct thread_msg_rsp *rsp)
/*
 * Master thread: post a request on the target thread's request ring and
 * busy-wait for the response.  Both loops spin without a timeout, so the
 * caller blocks until the data plane thread answers.
 */
210 static struct thread_msg_rsp *
211 thread_msg_send_recv(uint32_t thread_id,
212 struct thread_msg_req *req)
214 struct thread *t = &thread[thread_id];
215 struct rte_ring *msgq_req = t->msgq_req;
216 struct rte_ring *msgq_rsp = t->msgq_rsp;
217 struct thread_msg_rsp *rsp;
/* Retry while the request ring is full (-ENOBUFS). */
222 status = rte_ring_sp_enqueue(msgq_req, req);
223 } while (status == -ENOBUFS);
/* Poll until a response is dequeued (0 = success). */
227 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
228 } while (status != 0);
/*
 * Master thread API: ask data plane thread @thread_id to start running
 * the pipeline named @pipeline_name.  Validates inputs, builds a
 * PIPELINE_ENABLE request carrying the pipeline handle, per-table action
 * handles and message queues, sends it over the thread's request ring
 * and waits for the response.  On success the pipeline record is marked
 * as owned by @thread_id.
 */
234 thread_pipeline_enable(uint32_t thread_id,
235 const char *pipeline_name)
237 struct pipeline *p = pipeline_find(pipeline_name);
239 struct thread_msg_req *req;
240 struct thread_msg_rsp *rsp;
244 /* Check input params */
245 if ((thread_id >= RTE_MAX_LCORE) ||
247 (p->n_ports_in == 0) ||
248 (p->n_ports_out == 0) ||
252 t = &thread[thread_id];
253 if ((t->enabled == 0) ||
257 /* Allocate request */
258 req = thread_msg_alloc();
/* Fill in the request; table[] carries one action handle per table. */
263 req->type = THREAD_REQ_PIPELINE_ENABLE;
264 req->pipeline_enable.p = p->p;
265 for (i = 0; i < p->n_tables; i++)
266 req->pipeline_enable.table[i].a =
268 req->pipeline_enable.msgq_req = p->msgq_req;
269 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
270 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
271 req->pipeline_enable.n_tables = p->n_tables;
273 /* Send request and wait for response */
274 rsp = thread_msg_send_recv(thread_id, req);
279 status = rsp->status;
282 thread_msg_free(rsp);
284 /* Request completion */
/* Record which data plane thread now runs this pipeline. */
288 p->thread_id = thread_id;
/*
 * Master thread API: ask data plane thread @thread_id to stop running
 * the pipeline named @pipeline_name.  The pipeline must currently be
 * owned by @thread_id (p->thread_id check); otherwise the call is
 * rejected before any message is sent.
 */
295 thread_pipeline_disable(uint32_t thread_id,
296 const char *pipeline_name)
298 struct pipeline *p = pipeline_find(pipeline_name);
300 struct thread_msg_req *req;
301 struct thread_msg_rsp *rsp;
304 /* Check input params */
305 if ((thread_id >= RTE_MAX_LCORE) ||
309 t = &thread[thread_id];
/* Only the owning thread may disable the pipeline. */
316 if (p->thread_id != thread_id)
319 /* Allocate request */
320 req = thread_msg_alloc();
325 req->type = THREAD_REQ_PIPELINE_DISABLE;
326 req->pipeline_disable.p = p->p;
328 /* Send request and wait for response */
329 rsp = thread_msg_send_recv(thread_id, req);
334 status = rsp->status;
337 thread_msg_free(rsp);
339 /* Request completion */
349 * Data plane threads: message handling
/*
 * Data plane thread: try to dequeue one request from the thread's
 * request ring.  NOTE(review): the empty-ring return path is outside
 * this excerpt — presumably returns NULL; confirm against full source.
 */
351 static inline struct thread_msg_req *
352 thread_msg_recv(struct rte_ring *msgq_req)
354 struct thread_msg_req *req;
356 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/*
 * Data plane thread: post @rsp on the response ring, spinning while the
 * ring is full (-ENOBUFS) so the response is never dropped.
 */
365 thread_msg_send(struct rte_ring *msgq_rsp,
366 struct thread_msg_rsp *rsp)
371 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
372 } while (status == -ENOBUFS);
/*
 * Data plane thread: handle PIPELINE_ENABLE.  The request buffer is
 * reused in-place as the response.  Registers the pipeline and its
 * per-table action handles at slot t->n_pipelines and arms the
 * pipeline's timer (period converted from ms to TSC cycles).
 */
375 static struct thread_msg_rsp *
376 thread_msg_handle_pipeline_enable(struct thread_data *t,
377 struct thread_msg_req *req)
379 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
380 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Thread already runs the maximum number of pipelines — reject
 * (the error-response path is outside this excerpt). */
384 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
389 t->p[t->n_pipelines] = req->pipeline_enable.p;
391 p->p = req->pipeline_enable.p;
392 for (i = 0; i < req->pipeline_enable.n_tables; i++)
394 req->pipeline_enable.table[i].a;
396 p->n_tables = req->pipeline_enable.n_tables;
398 p->msgq_req = req->pipeline_enable.msgq_req;
399 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
401 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
402 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/*
 * Data plane thread: handle PIPELINE_DISABLE.  Finds the pipeline in
 * the thread's array and removes it by moving the last entry into its
 * slot (swap-with-last; pipeline order is not preserved).  The request
 * buffer is reused in-place as the response.
 */
411 static struct thread_msg_rsp *
412 thread_msg_handle_pipeline_disable(struct thread_data *t,
413 struct thread_msg_req *req)
415 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
416 uint32_t n_pipelines = t->n_pipelines;
417 struct rte_pipeline *pipeline = req->pipeline_disable.p;
421 for (i = 0; i < n_pipelines; i++) {
422 struct pipeline_data *p = &t->pipeline_data[i];
424 if (p->p != pipeline)
/* Not the last slot: compact by copying the last entry here. */
427 if (i < n_pipelines - 1) {
428 struct rte_pipeline *pipeline_last =
429 t->p[n_pipelines - 1];
430 struct pipeline_data *p_last =
431 &t->pipeline_data[n_pipelines - 1];
433 t->p[i] = pipeline_last;
434 memcpy(p, p_last, sizeof(*p));
/* Pipeline not found on this thread. */
443 /* should not get here */
/*
 * Data plane thread: dequeue request(s) from t->msgq_req and dispatch
 * by type.  Unknown types recycle the request buffer as the response
 * (status presumably set to an error outside this excerpt — confirm).
 * Every request is answered on t->msgq_rsp.
 */
449 thread_msg_handle(struct thread_data *t)
452 struct thread_msg_req *req;
453 struct thread_msg_rsp *rsp;
455 req = thread_msg_recv(t->msgq_req);
460 case THREAD_REQ_PIPELINE_ENABLE:
461 rsp = thread_msg_handle_pipeline_enable(t, req);
464 case THREAD_REQ_PIPELINE_DISABLE:
465 rsp = thread_msg_handle_pipeline_disable(t, req);
469 rsp = (struct thread_msg_rsp *) req;
473 thread_msg_send(t->msgq_rsp, rsp);
478 * Master thread & data plane threads: message passing
480 enum pipeline_req_type {
482 PIPELINE_REQ_PORT_IN_STATS_READ,
483 PIPELINE_REQ_PORT_IN_ENABLE,
484 PIPELINE_REQ_PORT_IN_DISABLE,
487 PIPELINE_REQ_PORT_OUT_STATS_READ,
490 PIPELINE_REQ_TABLE_STATS_READ,
491 PIPELINE_REQ_TABLE_RULE_ADD,
492 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
493 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
494 PIPELINE_REQ_TABLE_RULE_DELETE,
495 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
496 PIPELINE_REQ_TABLE_RULE_STATS_READ,
497 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
498 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
499 PIPELINE_REQ_TABLE_RULE_MTR_READ,
500 PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
504 struct pipeline_msg_req_port_in_stats_read {
508 struct pipeline_msg_req_port_out_stats_read {
512 struct pipeline_msg_req_table_stats_read {
516 struct pipeline_msg_req_table_rule_add {
517 struct table_rule_match match;
518 struct table_rule_action action;
521 struct pipeline_msg_req_table_rule_add_default {
522 struct table_rule_action action;
525 struct pipeline_msg_req_table_rule_add_bulk {
526 struct table_rule_match *match;
527 struct table_rule_action *action;
533 struct pipeline_msg_req_table_rule_delete {
534 struct table_rule_match match;
537 struct pipeline_msg_req_table_rule_stats_read {
542 struct pipeline_msg_req_table_mtr_profile_add {
543 uint32_t meter_profile_id;
544 struct rte_table_action_meter_profile profile;
547 struct pipeline_msg_req_table_mtr_profile_delete {
548 uint32_t meter_profile_id;
551 struct pipeline_msg_req_table_rule_mtr_read {
557 struct pipeline_msg_req_table_dscp_table_update {
559 struct rte_table_action_dscp_table dscp_table;
562 struct pipeline_msg_req {
563 enum pipeline_req_type type;
564 uint32_t id; /* Port IN, port OUT or table ID */
568 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
569 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
570 struct pipeline_msg_req_table_stats_read table_stats_read;
571 struct pipeline_msg_req_table_rule_add table_rule_add;
572 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
573 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
574 struct pipeline_msg_req_table_rule_delete table_rule_delete;
575 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
576 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
577 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
578 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
579 struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
583 struct pipeline_msg_rsp_port_in_stats_read {
584 struct rte_pipeline_port_in_stats stats;
587 struct pipeline_msg_rsp_port_out_stats_read {
588 struct rte_pipeline_port_out_stats stats;
591 struct pipeline_msg_rsp_table_stats_read {
592 struct rte_pipeline_table_stats stats;
595 struct pipeline_msg_rsp_table_rule_add {
599 struct pipeline_msg_rsp_table_rule_add_default {
603 struct pipeline_msg_rsp_table_rule_add_bulk {
607 struct pipeline_msg_rsp_table_rule_stats_read {
608 struct rte_table_action_stats_counters stats;
611 struct pipeline_msg_rsp_table_rule_mtr_read {
612 struct rte_table_action_mtr_counters stats;
615 struct pipeline_msg_rsp {
620 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
621 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
622 struct pipeline_msg_rsp_table_stats_read table_stats_read;
623 struct pipeline_msg_rsp_table_rule_add table_rule_add;
624 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
625 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
626 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
627 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
/*
 * Allocate one zero-filled pipeline message buffer sized for the larger
 * of request/response, so the data plane side can turn the request into
 * the response in-place (same pattern as thread_msg_alloc()).
 */
634 static struct pipeline_msg_req *
635 pipeline_msg_alloc(void)
637 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
638 sizeof(struct pipeline_msg_rsp));
640 return calloc(1, size);
644 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
/*
 * Master thread: post @req on pipeline @p's request ring and busy-wait
 * for the response.  Spins without timeout on both the full-ring and
 * empty-ring conditions, so the caller blocks until answered.
 */
649 static struct pipeline_msg_rsp *
650 pipeline_msg_send_recv(struct pipeline *p,
651 struct pipeline_msg_req *req)
653 struct rte_ring *msgq_req = p->msgq_req;
654 struct rte_ring *msgq_rsp = p->msgq_rsp;
655 struct pipeline_msg_rsp *rsp;
/* Retry while the request ring is full (-ENOBUFS). */
660 status = rte_ring_sp_enqueue(msgq_req, req);
661 } while (status == -ENOBUFS);
/* Poll until a response is dequeued (0 = success). */
665 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
666 } while (status != 0);
672 pipeline_port_in_stats_read(const char *pipeline_name,
674 struct rte_pipeline_port_in_stats *stats,
678 struct pipeline_msg_req *req;
679 struct pipeline_msg_rsp *rsp;
682 /* Check input params */
683 if ((pipeline_name == NULL) ||
687 p = pipeline_find(pipeline_name);
690 (port_id >= p->n_ports_in))
693 /* Allocate request */
694 req = pipeline_msg_alloc();
699 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
701 req->port_in_stats_read.clear = clear;
703 /* Send request and wait for response */
704 rsp = pipeline_msg_send_recv(p, req);
709 status = rsp->status;
711 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
714 pipeline_msg_free(rsp);
720 pipeline_port_in_enable(const char *pipeline_name,
724 struct pipeline_msg_req *req;
725 struct pipeline_msg_rsp *rsp;
728 /* Check input params */
729 if (pipeline_name == NULL)
732 p = pipeline_find(pipeline_name);
735 (port_id >= p->n_ports_in))
738 /* Allocate request */
739 req = pipeline_msg_alloc();
744 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
747 /* Send request and wait for response */
748 rsp = pipeline_msg_send_recv(p, req);
753 status = rsp->status;
756 pipeline_msg_free(rsp);
762 pipeline_port_in_disable(const char *pipeline_name,
766 struct pipeline_msg_req *req;
767 struct pipeline_msg_rsp *rsp;
770 /* Check input params */
771 if (pipeline_name == NULL)
774 p = pipeline_find(pipeline_name);
777 (port_id >= p->n_ports_in))
780 /* Allocate request */
781 req = pipeline_msg_alloc();
786 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
789 /* Send request and wait for response */
790 rsp = pipeline_msg_send_recv(p, req);
795 status = rsp->status;
798 pipeline_msg_free(rsp);
804 pipeline_port_out_stats_read(const char *pipeline_name,
806 struct rte_pipeline_port_out_stats *stats,
810 struct pipeline_msg_req *req;
811 struct pipeline_msg_rsp *rsp;
814 /* Check input params */
815 if ((pipeline_name == NULL) ||
819 p = pipeline_find(pipeline_name);
822 (port_id >= p->n_ports_out))
825 /* Allocate request */
826 req = pipeline_msg_alloc();
831 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
833 req->port_out_stats_read.clear = clear;
835 /* Send request and wait for response */
836 rsp = pipeline_msg_send_recv(p, req);
841 status = rsp->status;
843 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
846 pipeline_msg_free(rsp);
852 pipeline_table_stats_read(const char *pipeline_name,
854 struct rte_pipeline_table_stats *stats,
858 struct pipeline_msg_req *req;
859 struct pipeline_msg_rsp *rsp;
862 /* Check input params */
863 if ((pipeline_name == NULL) ||
867 p = pipeline_find(pipeline_name);
870 (table_id >= p->n_tables))
873 /* Allocate request */
874 req = pipeline_msg_alloc();
879 req->type = PIPELINE_REQ_TABLE_STATS_READ;
881 req->table_stats_read.clear = clear;
883 /* Send request and wait for response */
884 rsp = pipeline_msg_send_recv(p, req);
889 status = rsp->status;
891 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
894 pipeline_msg_free(rsp);
900 match_check(struct table_rule_match *match,
906 if ((match == NULL) ||
908 (table_id >= p->n_tables))
911 table = &p->table[table_id];
912 if (match->match_type != table->params.match_type)
915 switch (match->match_type) {
918 struct table_acl_params *t = &table->params.match.acl;
919 struct table_rule_match_acl *r = &match->match.acl;
921 if ((r->ip_version && (t->ip_version == 0)) ||
922 ((r->ip_version == 0) && t->ip_version))
926 if ((r->sa_depth > 32) ||
930 if ((r->sa_depth > 128) ||
945 struct table_lpm_params *t = &table->params.match.lpm;
946 struct table_rule_match_lpm *r = &match->match.lpm;
948 if ((r->ip_version && (t->key_size != 4)) ||
949 ((r->ip_version == 0) && (t->key_size != 16)))
971 action_check(struct table_rule_action *action,
975 struct table_action_profile *ap;
977 if ((action == NULL) ||
979 (table_id >= p->n_tables))
982 ap = p->table[table_id].ap;
983 if (action->action_mask != ap->params.action_mask)
986 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
987 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
988 (action->fwd.id >= p->n_ports_out))
991 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
992 (action->fwd.id >= p->n_tables))
996 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
997 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
998 uint32_t tc_mask1 = action->mtr.tc_mask;
1000 if (tc_mask1 != tc_mask0)
1004 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1005 uint32_t n_subports_per_port =
1006 ap->params.tm.n_subports_per_port;
1007 uint32_t n_pipes_per_subport =
1008 ap->params.tm.n_pipes_per_subport;
1009 uint32_t subport_id = action->tm.subport_id;
1010 uint32_t pipe_id = action->tm.pipe_id;
1012 if ((subport_id >= n_subports_per_port) ||
1013 (pipe_id >= n_pipes_per_subport))
1017 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1018 uint64_t encap_mask = ap->params.encap.encap_mask;
1019 enum rte_table_action_encap_type type = action->encap.type;
1021 if ((encap_mask & (1LLU << type)) == 0)
1025 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1026 int ip_version0 = ap->params.common.ip_version;
1027 int ip_version1 = action->nat.ip_version;
1029 if ((ip_version1 && (ip_version0 == 0)) ||
1030 ((ip_version1 == 0) && ip_version0))
1038 action_default_check(struct table_rule_action *action,
1042 if ((action == NULL) ||
1043 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1045 (table_id >= p->n_tables))
1048 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1049 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1050 (action->fwd.id >= p->n_ports_out))
1053 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1054 (action->fwd.id >= p->n_tables))
1062 pipeline_table_rule_add(const char *pipeline_name,
1064 struct table_rule_match *match,
1065 struct table_rule_action *action,
1069 struct pipeline_msg_req *req;
1070 struct pipeline_msg_rsp *rsp;
1073 /* Check input params */
1074 if ((pipeline_name == NULL) ||
1080 p = pipeline_find(pipeline_name);
1082 (p->enabled == 0) ||
1083 (table_id >= p->n_tables) ||
1084 match_check(match, p, table_id) ||
1085 action_check(action, p, table_id))
1088 /* Allocate request */
1089 req = pipeline_msg_alloc();
1094 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1096 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1097 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1099 /* Send request and wait for response */
1100 rsp = pipeline_msg_send_recv(p, req);
1105 status = rsp->status;
1107 *data = rsp->table_rule_add.data;
1110 pipeline_msg_free(rsp);
1116 pipeline_table_rule_add_default(const char *pipeline_name,
1118 struct table_rule_action *action,
1122 struct pipeline_msg_req *req;
1123 struct pipeline_msg_rsp *rsp;
1126 /* Check input params */
1127 if ((pipeline_name == NULL) ||
1132 p = pipeline_find(pipeline_name);
1134 (p->enabled == 0) ||
1135 (table_id >= p->n_tables) ||
1136 action_default_check(action, p, table_id))
1139 /* Allocate request */
1140 req = pipeline_msg_alloc();
1145 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1147 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1149 /* Send request and wait for response */
1150 rsp = pipeline_msg_send_recv(p, req);
1155 status = rsp->status;
1157 *data = rsp->table_rule_add_default.data;
1160 pipeline_msg_free(rsp);
1166 pipeline_table_rule_add_bulk(const char *pipeline_name,
1168 struct table_rule_match *match,
1169 struct table_rule_action *action,
1174 struct pipeline_msg_req *req;
1175 struct pipeline_msg_rsp *rsp;
1179 /* Check input params */
1180 if ((pipeline_name == NULL) ||
1184 (n_rules == NULL) ||
1188 p = pipeline_find(pipeline_name);
1190 (p->enabled == 0) ||
1191 (table_id >= p->n_tables))
1194 for (i = 0; i < *n_rules; i++)
1195 if (match_check(match, p, table_id) ||
1196 action_check(action, p, table_id))
1199 /* Allocate request */
1200 req = pipeline_msg_alloc();
1205 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1207 req->table_rule_add_bulk.match = match;
1208 req->table_rule_add_bulk.action = action;
1209 req->table_rule_add_bulk.data = data;
1210 req->table_rule_add_bulk.n_rules = *n_rules;
1211 req->table_rule_add_bulk.bulk =
1212 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1214 /* Send request and wait for response */
1215 rsp = pipeline_msg_send_recv(p, req);
1220 status = rsp->status;
1222 *n_rules = rsp->table_rule_add_bulk.n_rules;
1225 pipeline_msg_free(rsp);
1231 pipeline_table_rule_delete(const char *pipeline_name,
1233 struct table_rule_match *match)
1236 struct pipeline_msg_req *req;
1237 struct pipeline_msg_rsp *rsp;
1240 /* Check input params */
1241 if ((pipeline_name == NULL) ||
1245 p = pipeline_find(pipeline_name);
1247 (p->enabled == 0) ||
1248 (table_id >= p->n_tables) ||
1249 match_check(match, p, table_id))
1252 /* Allocate request */
1253 req = pipeline_msg_alloc();
1258 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1260 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1262 /* Send request and wait for response */
1263 rsp = pipeline_msg_send_recv(p, req);
1268 status = rsp->status;
1271 pipeline_msg_free(rsp);
1277 pipeline_table_rule_delete_default(const char *pipeline_name,
1281 struct pipeline_msg_req *req;
1282 struct pipeline_msg_rsp *rsp;
1285 /* Check input params */
1286 if (pipeline_name == NULL)
1289 p = pipeline_find(pipeline_name);
1291 (p->enabled == 0) ||
1292 (table_id >= p->n_tables))
1295 /* Allocate request */
1296 req = pipeline_msg_alloc();
1301 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1304 /* Send request and wait for response */
1305 rsp = pipeline_msg_send_recv(p, req);
1310 status = rsp->status;
1313 pipeline_msg_free(rsp);
1319 pipeline_table_rule_stats_read(const char *pipeline_name,
1322 struct rte_table_action_stats_counters *stats,
1326 struct pipeline_msg_req *req;
1327 struct pipeline_msg_rsp *rsp;
1330 /* Check input params */
1331 if ((pipeline_name == NULL) ||
1336 p = pipeline_find(pipeline_name);
1338 (p->enabled == 0) ||
1339 (table_id >= p->n_tables))
1342 /* Allocate request */
1343 req = pipeline_msg_alloc();
1348 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1350 req->table_rule_stats_read.data = data;
1351 req->table_rule_stats_read.clear = clear;
1353 /* Send request and wait for response */
1354 rsp = pipeline_msg_send_recv(p, req);
1359 status = rsp->status;
1361 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1364 pipeline_msg_free(rsp);
1370 pipeline_table_mtr_profile_add(const char *pipeline_name,
1372 uint32_t meter_profile_id,
1373 struct rte_table_action_meter_profile *profile)
1376 struct pipeline_msg_req *req;
1377 struct pipeline_msg_rsp *rsp;
1380 /* Check input params */
1381 if ((pipeline_name == NULL) ||
1385 p = pipeline_find(pipeline_name);
1387 (p->enabled == 0) ||
1388 (table_id >= p->n_tables))
1391 /* Allocate request */
1392 req = pipeline_msg_alloc();
1397 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1399 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1400 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1402 /* Send request and wait for response */
1403 rsp = pipeline_msg_send_recv(p, req);
1408 status = rsp->status;
1411 pipeline_msg_free(rsp);
1417 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1419 uint32_t meter_profile_id)
1422 struct pipeline_msg_req *req;
1423 struct pipeline_msg_rsp *rsp;
1426 /* Check input params */
1427 if (pipeline_name == NULL)
1430 p = pipeline_find(pipeline_name);
1432 (p->enabled == 0) ||
1433 (table_id >= p->n_tables))
1436 /* Allocate request */
1437 req = pipeline_msg_alloc();
1442 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1444 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1446 /* Send request and wait for response */
1447 rsp = pipeline_msg_send_recv(p, req);
1452 status = rsp->status;
1455 pipeline_msg_free(rsp);
1461 pipeline_table_rule_mtr_read(const char *pipeline_name,
1465 struct rte_table_action_mtr_counters *stats,
1469 struct pipeline_msg_req *req;
1470 struct pipeline_msg_rsp *rsp;
1473 /* Check input params */
1474 if ((pipeline_name == NULL) ||
1479 p = pipeline_find(pipeline_name);
1481 (p->enabled == 0) ||
1482 (table_id >= p->n_tables))
1485 /* Allocate request */
1486 req = pipeline_msg_alloc();
1491 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
1493 req->table_rule_mtr_read.data = data;
1494 req->table_rule_mtr_read.tc_mask = tc_mask;
1495 req->table_rule_mtr_read.clear = clear;
1497 /* Send request and wait for response */
1498 rsp = pipeline_msg_send_recv(p, req);
1503 status = rsp->status;
1505 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
1508 pipeline_msg_free(rsp);
1514 pipeline_table_dscp_table_update(const char *pipeline_name,
1517 struct rte_table_action_dscp_table *dscp_table)
1520 struct pipeline_msg_req *req;
1521 struct pipeline_msg_rsp *rsp;
1524 /* Check input params */
1525 if ((pipeline_name == NULL) ||
1526 (dscp_table == NULL))
1529 p = pipeline_find(pipeline_name);
1531 (p->enabled == 0) ||
1532 (table_id >= p->n_tables))
1535 /* Allocate request */
1536 req = pipeline_msg_alloc();
1541 req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
1543 req->table_dscp_table_update.dscp_mask = dscp_mask;
1544 memcpy(&req->table_dscp_table_update.dscp_table,
1545 dscp_table, sizeof(*dscp_table));
1547 /* Send request and wait for response */
1548 rsp = pipeline_msg_send_recv(p, req);
1553 status = rsp->status;
1556 pipeline_msg_free(rsp);
1562 * Data plane threads: message handling
1564 static inline struct pipeline_msg_req *
1565 pipeline_msg_recv(struct rte_ring *msgq_req)
1567 struct pipeline_msg_req *req;
1569 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
1578 pipeline_msg_send(struct rte_ring *msgq_rsp,
1579 struct pipeline_msg_rsp *rsp)
1584 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
1585 } while (status == -ENOBUFS);
1588 static struct pipeline_msg_rsp *
1589 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
1590 struct pipeline_msg_req *req)
1592 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1593 uint32_t port_id = req->id;
1594 int clear = req->port_in_stats_read.clear;
1596 rsp->status = rte_pipeline_port_in_stats_read(p->p,
1598 &rsp->port_in_stats_read.stats,
1604 static struct pipeline_msg_rsp *
1605 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
1606 struct pipeline_msg_req *req)
1608 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1609 uint32_t port_id = req->id;
1611 rsp->status = rte_pipeline_port_in_enable(p->p,
1617 static struct pipeline_msg_rsp *
1618 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
1619 struct pipeline_msg_req *req)
1621 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1622 uint32_t port_id = req->id;
1624 rsp->status = rte_pipeline_port_in_disable(p->p,
1630 static struct pipeline_msg_rsp *
1631 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
1632 struct pipeline_msg_req *req)
1634 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1635 uint32_t port_id = req->id;
1636 int clear = req->port_out_stats_read.clear;
1638 rsp->status = rte_pipeline_port_out_stats_read(p->p,
1640 &rsp->port_out_stats_read.stats,
1646 static struct pipeline_msg_rsp *
1647 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
1648 struct pipeline_msg_req *req)
1650 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1651 uint32_t port_id = req->id;
1652 int clear = req->table_stats_read.clear;
1654 rsp->status = rte_pipeline_table_stats_read(p->p,
1656 &rsp->table_stats_read.stats,
1662 union table_rule_match_low_level {
1663 struct rte_table_acl_rule_add_params acl_add;
1664 struct rte_table_acl_rule_delete_params acl_delete;
1665 struct rte_table_array_key array;
1666 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1667 struct rte_table_lpm_key lpm_ipv4;
1668 struct rte_table_lpm_ipv6_key lpm_ipv6;
1672 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
1677 switch (depth / 32) {
1687 depth32[1] = depth - 32;
1695 depth32[2] = depth - 64;
1703 depth32[3] = depth - 96;
1719 match_convert(struct table_rule_match *mh,
1720 union table_rule_match_low_level *ml,
1723 memset(ml, 0, sizeof(*ml));
1725 switch (mh->match_type) {
1727 if (mh->match.acl.ip_version)
1729 ml->acl_add.field_value[0].value.u8 =
1730 mh->match.acl.proto;
1731 ml->acl_add.field_value[0].mask_range.u8 =
1732 mh->match.acl.proto_mask;
1734 ml->acl_add.field_value[1].value.u32 =
1735 mh->match.acl.ipv4.sa;
1736 ml->acl_add.field_value[1].mask_range.u32 =
1737 mh->match.acl.sa_depth;
1739 ml->acl_add.field_value[2].value.u32 =
1740 mh->match.acl.ipv4.da;
1741 ml->acl_add.field_value[2].mask_range.u32 =
1742 mh->match.acl.da_depth;
1744 ml->acl_add.field_value[3].value.u16 =
1746 ml->acl_add.field_value[3].mask_range.u16 =
1749 ml->acl_add.field_value[4].value.u16 =
1751 ml->acl_add.field_value[4].mask_range.u16 =
1754 ml->acl_add.priority =
1755 (int32_t) mh->match.acl.priority;
1757 ml->acl_delete.field_value[0].value.u8 =
1758 mh->match.acl.proto;
1759 ml->acl_delete.field_value[0].mask_range.u8 =
1760 mh->match.acl.proto_mask;
1762 ml->acl_delete.field_value[1].value.u32 =
1763 mh->match.acl.ipv4.sa;
1764 ml->acl_delete.field_value[1].mask_range.u32 =
1765 mh->match.acl.sa_depth;
1767 ml->acl_delete.field_value[2].value.u32 =
1768 mh->match.acl.ipv4.da;
1769 ml->acl_delete.field_value[2].mask_range.u32 =
1770 mh->match.acl.da_depth;
1772 ml->acl_delete.field_value[3].value.u16 =
1774 ml->acl_delete.field_value[3].mask_range.u16 =
1777 ml->acl_delete.field_value[4].value.u16 =
1779 ml->acl_delete.field_value[4].mask_range.u16 =
1785 (uint32_t *) mh->match.acl.ipv6.sa;
1787 (uint32_t *) mh->match.acl.ipv6.da;
1788 uint32_t sa32_depth[4], da32_depth[4];
1791 status = match_convert_ipv6_depth(
1792 mh->match.acl.sa_depth,
1797 status = match_convert_ipv6_depth(
1798 mh->match.acl.da_depth,
1803 ml->acl_add.field_value[0].value.u8 =
1804 mh->match.acl.proto;
1805 ml->acl_add.field_value[0].mask_range.u8 =
1806 mh->match.acl.proto_mask;
1808 ml->acl_add.field_value[1].value.u32 = sa32[0];
1809 ml->acl_add.field_value[1].mask_range.u32 =
1811 ml->acl_add.field_value[2].value.u32 = sa32[1];
1812 ml->acl_add.field_value[2].mask_range.u32 =
1814 ml->acl_add.field_value[3].value.u32 = sa32[2];
1815 ml->acl_add.field_value[3].mask_range.u32 =
1817 ml->acl_add.field_value[4].value.u32 = sa32[3];
1818 ml->acl_add.field_value[4].mask_range.u32 =
1821 ml->acl_add.field_value[5].value.u32 = da32[0];
1822 ml->acl_add.field_value[5].mask_range.u32 =
1824 ml->acl_add.field_value[6].value.u32 = da32[1];
1825 ml->acl_add.field_value[6].mask_range.u32 =
1827 ml->acl_add.field_value[7].value.u32 = da32[2];
1828 ml->acl_add.field_value[7].mask_range.u32 =
1830 ml->acl_add.field_value[8].value.u32 = da32[3];
1831 ml->acl_add.field_value[8].mask_range.u32 =
1834 ml->acl_add.field_value[9].value.u16 =
1836 ml->acl_add.field_value[9].mask_range.u16 =
1839 ml->acl_add.field_value[10].value.u16 =
1841 ml->acl_add.field_value[10].mask_range.u16 =
1844 ml->acl_add.priority =
1845 (int32_t) mh->match.acl.priority;
1848 (uint32_t *) mh->match.acl.ipv6.sa;
1850 (uint32_t *) mh->match.acl.ipv6.da;
1851 uint32_t sa32_depth[4], da32_depth[4];
1854 status = match_convert_ipv6_depth(
1855 mh->match.acl.sa_depth,
1860 status = match_convert_ipv6_depth(
1861 mh->match.acl.da_depth,
1866 ml->acl_delete.field_value[0].value.u8 =
1867 mh->match.acl.proto;
1868 ml->acl_delete.field_value[0].mask_range.u8 =
1869 mh->match.acl.proto_mask;
1871 ml->acl_delete.field_value[1].value.u32 =
1873 ml->acl_delete.field_value[1].mask_range.u32 =
1875 ml->acl_delete.field_value[2].value.u32 =
1877 ml->acl_delete.field_value[2].mask_range.u32 =
1879 ml->acl_delete.field_value[3].value.u32 =
1881 ml->acl_delete.field_value[3].mask_range.u32 =
1883 ml->acl_delete.field_value[4].value.u32 =
1885 ml->acl_delete.field_value[4].mask_range.u32 =
1888 ml->acl_delete.field_value[5].value.u32 =
1890 ml->acl_delete.field_value[5].mask_range.u32 =
1892 ml->acl_delete.field_value[6].value.u32 =
1894 ml->acl_delete.field_value[6].mask_range.u32 =
1896 ml->acl_delete.field_value[7].value.u32 =
1898 ml->acl_delete.field_value[7].mask_range.u32 =
1900 ml->acl_delete.field_value[8].value.u32 =
1902 ml->acl_delete.field_value[8].mask_range.u32 =
1905 ml->acl_delete.field_value[9].value.u16 =
1907 ml->acl_delete.field_value[9].mask_range.u16 =
1910 ml->acl_delete.field_value[10].value.u16 =
1912 ml->acl_delete.field_value[10].mask_range.u16 =
1918 ml->array.pos = mh->match.array.pos;
1922 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
1926 if (mh->match.lpm.ip_version) {
1927 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
1928 ml->lpm_ipv4.depth = mh->match.lpm.depth;
1930 memcpy(ml->lpm_ipv6.ip,
1931 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
1932 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/*
 * Handle a TABLE_RULE_ADD request: build a table entry from the request's
 * (match, action) pair and install it into pipeline table req->id.
 * The response is written in place over the request message (rsp aliases req).
 * NOTE(review): several interior lines (error paths, apply arguments,
 * closing braces) are not visible in this chunk — verify against full file.
 */
1942 static struct pipeline_msg_rsp *
1943 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
1944 struct pipeline_msg_req *req)
1946 union table_rule_match_low_level match_ll;
/* rsp reuses the request buffer; req fields must be fully read before any
 * rsp field is written. */
1947 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1948 struct table_rule_match *match = &req->table_rule_add.match;
1949 struct table_rule_action *action = &req->table_rule_add.action;
1950 struct rte_pipeline_table_entry *data_in, *data_out;
1951 uint32_t table_id = req->id;
1952 int key_found, status;
1953 struct rte_table_action *a = p->table_data[table_id].a;
/* The entry under construction lives in the per-pipeline scratch buffer. */
1956 memset(p->buffer, 0, sizeof(p->buffer));
1957 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* Apply each action type enabled in action_mask to the entry being built. */
1959 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1960 status = rte_table_action_apply(a,
1962 RTE_TABLE_ACTION_FWD,
1971 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1972 status = rte_table_action_apply(a,
1974 RTE_TABLE_ACTION_MTR,
1983 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1984 status = rte_table_action_apply(a,
1986 RTE_TABLE_ACTION_TM,
1995 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1996 status = rte_table_action_apply(a,
1998 RTE_TABLE_ACTION_ENCAP,
2007 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2008 status = rte_table_action_apply(a,
2010 RTE_TABLE_ACTION_NAT,
2019 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2020 status = rte_table_action_apply(a,
2022 RTE_TABLE_ACTION_TTL,
2031 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2032 status = rte_table_action_apply(a,
2034 RTE_TABLE_ACTION_STATS,
2043 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2044 status = rte_table_action_apply(a,
2046 RTE_TABLE_ACTION_TIME,
2055 /* Add rule (match, action) to table */
/* Third argument 1 selects the "add" conversion variant of match_convert. */
2056 status = match_convert(match, &match_ll, 1);
2062 status = rte_pipeline_table_entry_add(p->p,
2073 /* Write response */
/* data_out is the table-internal entry handle returned by the add call. */
2075 rsp->table_rule_add.data = data_out;
/*
 * Handle a TABLE_RULE_ADD_DEFAULT request: install the default (miss) entry
 * of pipeline table req->id. Only the forward action is consulted; the
 * response overwrites the request message in place.
 */
2080 static struct pipeline_msg_rsp *
2081 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2082 struct pipeline_msg_req *req)
2084 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2085 struct table_rule_action *action = &req->table_rule_add_default.action;
2086 struct rte_pipeline_table_entry *data_in, *data_out;
2087 uint32_t table_id = req->id;
/* Build the entry in the per-pipeline scratch buffer. */
2091 memset(p->buffer, 0, sizeof(p->buffer));
2092 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2094 data_in->action = action->fwd.action;
/* fwd.id is either an output port or a next-table index depending on the
 * forward action type. */
2095 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2096 data_in->port_id = action->fwd.id;
2097 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2098 data_in->table_id = action->fwd.id;
2100 /* Add default rule to table */
2101 status = rte_pipeline_table_default_entry_add(p->p,
2110 /* Write response */
2112 rsp->table_rule_add_default.data = data_out;
/*
 * Handle a TABLE_RULE_ADD_BULK request: convert n_rules (match, action)
 * pairs to their low-level forms in heap-allocated temporary arrays, then
 * install them either with one bulk call or rule-by-rule (the "bulk" flag
 * selects the strategy). On failure rsp->...n_rules is set to 0.
 * NOTE(review): error-path labels and several closing braces are outside
 * this chunk — confirm all temporaries are freed on every path.
 */
2117 static struct pipeline_msg_rsp *
2118 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2119 struct pipeline_msg_req *req)
2122 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2124 uint32_t table_id = req->id;
2125 struct table_rule_match *match = req->table_rule_add_bulk.match;
2126 struct table_rule_action *action = req->table_rule_add_bulk.action;
2127 struct rte_pipeline_table_entry **data =
2128 (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
2129 uint32_t n_rules = req->table_rule_add_bulk.n_rules;
2130 uint32_t bulk = req->table_rule_add_bulk.bulk;
2132 struct rte_table_action *a = p->table_data[table_id].a;
2133 union table_rule_match_low_level *match_ll;
2135 void **match_ll_ptr;
2136 struct rte_pipeline_table_entry **action_ll_ptr;
2140 /* Memory allocation */
/* calloc both zero-initializes and guards the n_rules * size product
 * against overflow. */
2141 match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
2142 action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
2143 match_ll_ptr = calloc(n_rules, sizeof(void *));
2145 calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
2146 found = calloc(n_rules, sizeof(int));
2148 if ((match_ll == NULL) ||
2149 (action_ll == NULL) ||
2150 (match_ll_ptr == NULL) ||
2151 (action_ll_ptr == NULL) ||
/* Build per-rule pointer tables into the flat match/action arrays. */
2155 for (i = 0; i < n_rules; i++) {
2156 match_ll_ptr[i] = (void *)&match_ll[i];
2158 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
2161 /* Rule match conversion */
2162 for (i = 0; i < n_rules; i++) {
2163 status = match_convert(&match[i], match_ll_ptr[i], 1);
2168 /* Rule action conversion */
2169 for (i = 0; i < n_rules; i++) {
2170 void *data_in = action_ll_ptr[i];
2171 struct table_rule_action *act = &action[i];
/* Apply each action type enabled in act->action_mask to this rule's entry. */
2173 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2174 status = rte_table_action_apply(a,
2176 RTE_TABLE_ACTION_FWD,
2183 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2184 status = rte_table_action_apply(a,
2186 RTE_TABLE_ACTION_MTR,
2193 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2194 status = rte_table_action_apply(a,
2196 RTE_TABLE_ACTION_TM,
2203 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2204 status = rte_table_action_apply(a,
2206 RTE_TABLE_ACTION_ENCAP,
2213 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2214 status = rte_table_action_apply(a,
2216 RTE_TABLE_ACTION_NAT,
2223 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2224 status = rte_table_action_apply(a,
2226 RTE_TABLE_ACTION_TTL,
2233 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2234 status = rte_table_action_apply(a,
2236 RTE_TABLE_ACTION_STATS,
2243 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2244 status = rte_table_action_apply(a,
2246 RTE_TABLE_ACTION_TIME,
2254 /* Add rule (match, action) to table */
/* Fast path: single bulk insert of all converted rules. */
2256 status = rte_pipeline_table_entry_add_bulk(p->p,
/* Fallback path: insert rules one at a time. */
2266 for (i = 0; i < n_rules; i++) {
2267 status = rte_pipeline_table_entry_add(p->p,
2279 /* Write response */
2281 rsp->table_rule_add_bulk.n_rules = n_rules;
/* Cleanup of the temporary arrays (success path). */
2285 free(action_ll_ptr);
/* Cleanup on the error path; the response reports zero rules added. */
2294 free(action_ll_ptr);
2300 rsp->table_rule_add_bulk.n_rules = 0;
/*
 * Handle a TABLE_RULE_DELETE request: convert the high-level match to its
 * low-level form and remove the matching rule from pipeline table req->id.
 * The response overwrites the request message in place.
 */
2304 static struct pipeline_msg_rsp *
2305 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2306 struct pipeline_msg_req *req)
2308 union table_rule_match_low_level match_ll;
2309 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2310 struct table_rule_match *match = &req->table_rule_delete.match;
2311 uint32_t table_id = req->id;
2312 int key_found, status;
/* Third argument 0 selects the "delete" conversion variant of match_convert. */
2314 status = match_convert(match, &match_ll, 0);
2320 rsp->status = rte_pipeline_table_entry_delete(p->p,
/*
 * Handle a TABLE_RULE_DELETE_DEFAULT request: remove the default (miss)
 * entry of pipeline table req->id. The response overwrites the request
 * message in place.
 */
2329 static struct pipeline_msg_rsp *
2330 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2331 struct pipeline_msg_req *req)
2333 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2334 uint32_t table_id = req->id;
2336 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/*
 * Handle a TABLE_RULE_STATS_READ request: read (and optionally clear) the
 * per-rule statistics of the rule identified by the opaque table-entry
 * handle in req->table_rule_stats_read.data. Stats are returned in the
 * response, which overwrites the request message in place.
 */
2343 static struct pipeline_msg_rsp *
2344 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2345 struct pipeline_msg_req *req)
2347 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2348 uint32_t table_id = req->id;
/* data is the table-entry handle previously returned by a rule-add response. */
2349 void *data = req->table_rule_stats_read.data;
2350 int clear = req->table_rule_stats_read.clear;
2351 struct rte_table_action *a = p->table_data[table_id].a;
2353 rsp->status = rte_table_action_stats_read(a,
2355 &rsp->table_rule_stats_read.stats,
/*
 * Handle a TABLE_MTR_PROFILE_ADD request: register a meter profile with the
 * table-action handle of pipeline table req->id. The response overwrites
 * the request message in place.
 */
2361 static struct pipeline_msg_rsp *
2362 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2363 struct pipeline_msg_req *req)
2365 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2366 uint32_t table_id = req->id;
2367 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2368 struct rte_table_action_meter_profile *profile =
2369 &req->table_mtr_profile_add.profile;
2370 struct rte_table_action *a = p->table_data[table_id].a;
2372 rsp->status = rte_table_action_meter_profile_add(a,
/*
 * Handle a TABLE_MTR_PROFILE_DELETE request: unregister a meter profile
 * from the table-action handle of pipeline table req->id. The response
 * overwrites the request message in place.
 */
2379 static struct pipeline_msg_rsp *
2380 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2381 struct pipeline_msg_req *req)
2383 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2384 uint32_t table_id = req->id;
2385 uint32_t meter_profile_id =
2386 req->table_mtr_profile_delete.meter_profile_id;
2387 struct rte_table_action *a = p->table_data[table_id].a;
2389 rsp->status = rte_table_action_meter_profile_delete(a,
/*
 * Handle a TABLE_RULE_MTR_READ request: read (and optionally clear) the
 * meter stats of the rule identified by the opaque table-entry handle, for
 * the traffic classes selected by tc_mask. Stats are returned in the
 * response, which overwrites the request message in place.
 */
2395 static struct pipeline_msg_rsp *
2396 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2397 struct pipeline_msg_req *req)
2399 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2400 uint32_t table_id = req->id;
/* data is the table-entry handle previously returned by a rule-add response. */
2401 void *data = req->table_rule_mtr_read.data;
2402 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2403 int clear = req->table_rule_mtr_read.clear;
2404 struct rte_table_action *a = p->table_data[table_id].a;
2406 rsp->status = rte_table_action_meter_read(a,
2409 &rsp->table_rule_mtr_read.stats,
/*
 * Handle a TABLE_DSCP_TABLE_UPDATE request: update the DSCP translation
 * entries selected by dscp_mask on the table-action handle of pipeline
 * table req->id. The response overwrites the request message in place.
 */
2415 static struct pipeline_msg_rsp *
2416 pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
2417 struct pipeline_msg_req *req)
2419 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2420 uint32_t table_id = req->id;
/* Bit i of dscp_mask selects DSCP value i for update. */
2421 uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
2422 struct rte_table_action_dscp_table *dscp_table =
2423 &req->table_dscp_table_update.dscp_table;
2424 struct rte_table_action *a = p->table_data[table_id].a;
2426 rsp->status = rte_table_action_dscp_table_update(a,
/*
 * Drain the pipeline's request queue: receive each pending request, dispatch
 * it to the matching handler by request type, and send the handler's
 * response back on the response queue. Handlers build the response in the
 * request's own buffer, so every request produces exactly one response.
 */
2434 pipeline_msg_handle(struct pipeline_data *p)
2437 struct pipeline_msg_req *req;
2438 struct pipeline_msg_rsp *rsp;
2440 req = pipeline_msg_recv(p->msgq_req);
2444 switch (req->type) {
2445 case PIPELINE_REQ_PORT_IN_STATS_READ:
2446 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
2449 case PIPELINE_REQ_PORT_IN_ENABLE:
2450 rsp = pipeline_msg_handle_port_in_enable(p, req);
2453 case PIPELINE_REQ_PORT_IN_DISABLE:
2454 rsp = pipeline_msg_handle_port_in_disable(p, req);
2457 case PIPELINE_REQ_PORT_OUT_STATS_READ:
2458 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
2461 case PIPELINE_REQ_TABLE_STATS_READ:
2462 rsp = pipeline_msg_handle_table_stats_read(p, req);
2465 case PIPELINE_REQ_TABLE_RULE_ADD:
2466 rsp = pipeline_msg_handle_table_rule_add(p, req);
2469 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2470 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2473 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2474 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2477 case PIPELINE_REQ_TABLE_RULE_DELETE:
2478 rsp = pipeline_msg_handle_table_rule_delete(p, req);
2481 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2482 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
2485 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
2486 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
2489 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
2490 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
2493 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
2494 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
2497 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
2498 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
2501 case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
2502 rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
/* Unknown request type: echo the request back as the response. */
2506 rsp = (struct pipeline_msg_rsp *) req;
2510 pipeline_msg_send(p->msgq_rsp, rsp);
2515 * Data plane threads: main
2518 thread_main(void *arg __rte_unused)
2520 struct thread_data *t;
2521 uint32_t thread_id, i;
2523 thread_id = rte_lcore_id();
2524 t = &thread_data[thread_id];
2527 for (i = 0; ; i++) {
2531 for (j = 0; j < t->n_pipelines; j++)
2532 rte_pipeline_run(t->p[j]);
2535 if ((i & 0xF) == 0) {
2536 uint64_t time = rte_get_tsc_cycles();
2537 uint64_t time_next_min = UINT64_MAX;
2539 if (time < t->time_next_min)
2542 /* Pipeline message queues */
2543 for (j = 0; j < t->n_pipelines; j++) {
2544 struct pipeline_data *p =
2545 &t->pipeline_data[j];
2546 uint64_t time_next = p->time_next;
2548 if (time_next <= time) {
2549 pipeline_msg_handle(p);
2550 rte_pipeline_flush(p->p);
2551 time_next = time + p->timer_period;
2552 p->time_next = time_next;
2555 if (time_next < time_next_min)
2556 time_next_min = time_next;
2559 /* Thread message queues */
2561 uint64_t time_next = t->time_next;
2563 if (time_next <= time) {
2564 thread_msg_handle(t);
2565 time_next = time + t->timer_period;
2566 t->time_next = time_next;
2569 if (time_next < time_next_min)
2570 time_next_min = time_next;
2573 t->time_next_min = time_next_min;