1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
/*
 * Build-time tunables; each may be overridden via compiler flags.
 * NOTE(review): this listing has gaps — matching #endif lines and several
 * struct/function lines are missing; restore from the complete file before
 * building.
 */
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
/* Master thread's per-lcore handle: the two message rings to that thread. */
35  * Master thread: data plane thread context
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
/* Data plane side: per-table and per-pipeline runtime state. */
47  * Data plane threads: context
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
/* Scratch area used to stage table entries before insertion. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
/* Per-lcore data plane context: pipelines it runs plus its msg rings. */
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
/*
 * Master thread: create SP/SC request and response rings for every slave
 * lcore and seed each data plane thread context. On failure, the first
 * loop below acts as the cleanup path releasing rings created so far.
 */
81  * Master thread: data plane thread init
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
/* Single-producer/single-consumer rings: master <-> data plane thread. */
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
/* Timer period converted from ms to TSC cycles once, at init time. */
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
/* Request/response message types exchanged with data plane threads. */
159  * Master thread & data plane threads: message passing
161 enum thread_req_type {
162 THREAD_REQ_PIPELINE_ENABLE = 0,
163 THREAD_REQ_PIPELINE_DISABLE,
167 struct thread_msg_req {
168 enum thread_req_type type;
172 struct rte_pipeline *p;
174 struct rte_table_action *a;
175 } table[RTE_PIPELINE_TABLE_MAX];
176 struct rte_ring *msgq_req;
177 struct rte_ring *msgq_rsp;
178 uint32_t timer_period_ms;
183 struct rte_pipeline *p;
188 struct thread_msg_rsp {
195 static struct thread_msg_req *
196 thread_msg_alloc(void)
198 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
199 sizeof(struct thread_msg_rsp));
201 return calloc(1, size);
/**
 * Free a message buffer obtained from thread_msg_alloc().
 * free(NULL) is a safe no-op, so no guard is needed.
 */
static void
thread_msg_free(struct thread_msg_rsp *rsp)
{
	free(rsp);
}
210 static struct thread_msg_rsp *
211 thread_msg_send_recv(uint32_t thread_id,
212 struct thread_msg_req *req)
214 struct thread *t = &thread[thread_id];
215 struct rte_ring *msgq_req = t->msgq_req;
216 struct rte_ring *msgq_rsp = t->msgq_rsp;
217 struct thread_msg_rsp *rsp;
222 status = rte_ring_sp_enqueue(msgq_req, req);
223 } while (status == -ENOBUFS);
227 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
228 } while (status != 0);
/*
 * Master thread API: assign pipeline <pipeline_name> to data plane thread
 * <thread_id>. Validates both sides, ships a PIPELINE_ENABLE request and
 * waits for the response; on success records the owning thread id.
 * NOTE(review): listing has gaps (returns/braces dropped) — restore from
 * the complete file before building.
 */
234 thread_pipeline_enable(uint32_t thread_id,
235 const char *pipeline_name)
237 struct pipeline *p = pipeline_find(pipeline_name);
239 struct thread_msg_req *req;
240 struct thread_msg_rsp *rsp;
244 /* Check input params */
245 if ((thread_id >= RTE_MAX_LCORE) ||
247 (p->n_ports_in == 0) ||
248 (p->n_ports_out == 0) ||
252 t = &thread[thread_id];
253 if ((t->enabled == 0) ||
257 /* Allocate request */
258 req = thread_msg_alloc();
/* Write request: hand over pipeline handle, per-table actions, rings. */
263 req->type = THREAD_REQ_PIPELINE_ENABLE;
264 req->pipeline_enable.p = p->p;
265 for (i = 0; i < p->n_tables; i++)
266 req->pipeline_enable.table[i].a =
268 req->pipeline_enable.msgq_req = p->msgq_req;
269 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
270 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
271 req->pipeline_enable.n_tables = p->n_tables;
273 /* Send request and wait for response */
274 rsp = thread_msg_send_recv(thread_id, req);
279 status = rsp->status;
282 thread_msg_free(rsp);
284 /* Request completion */
288 p->thread_id = thread_id;
/*
 * Master thread API: detach the named pipeline from the data plane thread
 * currently running it.
 */
295 thread_pipeline_disable(uint32_t thread_id,
296 const char *pipeline_name)
298 struct pipeline *p = pipeline_find(pipeline_name);
300 struct thread_msg_req *req;
301 struct thread_msg_rsp *rsp;
304 /* Check input params */
305 if ((thread_id >= RTE_MAX_LCORE) ||
309 t = &thread[thread_id];
/* The pipeline must actually be owned by this thread. */
316 if (p->thread_id != thread_id)
319 /* Allocate request */
320 req = thread_msg_alloc();
325 req->type = THREAD_REQ_PIPELINE_DISABLE;
326 req->pipeline_disable.p = p->p;
328 /* Send request and wait for response */
329 rsp = thread_msg_send_recv(thread_id, req);
334 status = rsp->status;
337 thread_msg_free(rsp);
339 /* Request completion */
349 * Data plane threads: message handling
/**
 * Data plane thread: non-blocking poll of the request ring.
 *
 * @return next pending request, or NULL when the ring is empty.
 */
static inline struct thread_msg_req *
thread_msg_recv(struct rte_ring *msgq_req)
{
	struct thread_msg_req *req;

	int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);

	if (status != 0)
		return NULL;

	return req;
}
/**
 * Data plane thread: post *rsp* back to the master thread, retrying while
 * the response ring is full.
 */
static void
thread_msg_send(struct rte_ring *msgq_rsp,
	struct thread_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
/*
 * Data plane thread: handle PIPELINE_ENABLE. The response is written in
 * place over the request buffer (same allocation, see thread_msg_alloc).
 * NOTE(review): listing has gaps — restore missing lines from the
 * complete file before building.
 */
375 static struct thread_msg_rsp *
376 thread_msg_handle_pipeline_enable(struct thread_data *t,
377 struct thread_msg_req *req)
379 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
380 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Reject when this thread already runs the max number of pipelines. */
384 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
/* Read request: copy pipeline handle, per-table actions, rings, timer. */
389 t->p[t->n_pipelines] = req->pipeline_enable.p;
391 p->p = req->pipeline_enable.p;
392 for (i = 0; i < req->pipeline_enable.n_tables; i++)
394 req->pipeline_enable.table[i].a;
396 p->n_tables = req->pipeline_enable.n_tables;
398 p->msgq_req = req->pipeline_enable.msgq_req;
399 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
401 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
402 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/*
 * Data plane thread: handle PIPELINE_DISABLE. Removes the pipeline by
 * swapping the last slot into its place (order is not preserved).
 */
411 static struct thread_msg_rsp *
412 thread_msg_handle_pipeline_disable(struct thread_data *t,
413 struct thread_msg_req *req)
415 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
416 uint32_t n_pipelines = t->n_pipelines;
417 struct rte_pipeline *pipeline = req->pipeline_disable.p;
421 for (i = 0; i < n_pipelines; i++) {
422 struct pipeline_data *p = &t->pipeline_data[i];
424 if (p->p != pipeline)
/* Not the last slot: move the last pipeline into the freed slot. */
427 if (i < n_pipelines - 1) {
428 struct rte_pipeline *pipeline_last =
429 t->p[n_pipelines - 1];
430 struct pipeline_data *p_last =
431 &t->pipeline_data[n_pipelines - 1];
433 t->p[i] = pipeline_last;
434 memcpy(p, p_last, sizeof(*p));
443 /* should not get here */
/*
 * Data plane thread: poll the request ring once and dispatch by request
 * type; unknown types are bounced back (presumably with an error status —
 * the status assignment line is missing from this listing).
 */
449 thread_msg_handle(struct thread_data *t)
452 struct thread_msg_req *req;
453 struct thread_msg_rsp *rsp;
455 req = thread_msg_recv(t->msgq_req);
460 case THREAD_REQ_PIPELINE_ENABLE:
461 rsp = thread_msg_handle_pipeline_enable(t, req);
464 case THREAD_REQ_PIPELINE_DISABLE:
465 rsp = thread_msg_handle_pipeline_disable(t, req);
469 rsp = (struct thread_msg_rsp *) req;
473 thread_msg_send(t->msgq_rsp, rsp);
/*
 * Pipeline-level message passing: request/response types exchanged between
 * the master thread and the data plane thread that runs a given pipeline.
 * NOTE(review): listing has gaps — several field and closing-brace lines
 * are missing; restore from the complete file before building.
 */
478  * Master thread & data plane threads: message passing
480 enum pipeline_req_type {
482 PIPELINE_REQ_PORT_IN_STATS_READ,
483 PIPELINE_REQ_PORT_IN_ENABLE,
484 PIPELINE_REQ_PORT_IN_DISABLE,
487 PIPELINE_REQ_PORT_OUT_STATS_READ,
490 PIPELINE_REQ_TABLE_STATS_READ,
491 PIPELINE_REQ_TABLE_RULE_ADD,
492 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
493 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
494 PIPELINE_REQ_TABLE_RULE_DELETE,
495 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
496 PIPELINE_REQ_TABLE_RULE_STATS_READ,
497 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
498 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
499 PIPELINE_REQ_TABLE_RULE_MTR_READ,
/* Per-request payloads; one struct per request type. */
503 struct pipeline_msg_req_port_in_stats_read {
507 struct pipeline_msg_req_port_out_stats_read {
511 struct pipeline_msg_req_table_stats_read {
515 struct pipeline_msg_req_table_rule_add {
516 struct table_rule_match match;
517 struct table_rule_action action;
520 struct pipeline_msg_req_table_rule_add_default {
521 struct table_rule_action action;
/* Bulk add carries pointers to caller-owned arrays, not copies. */
524 struct pipeline_msg_req_table_rule_add_bulk {
525 struct table_rule_match *match;
526 struct table_rule_action *action;
532 struct pipeline_msg_req_table_rule_delete {
533 struct table_rule_match match;
536 struct pipeline_msg_req_table_rule_stats_read {
541 struct pipeline_msg_req_table_mtr_profile_add {
542 uint32_t meter_profile_id;
543 struct rte_table_action_meter_profile profile;
546 struct pipeline_msg_req_table_mtr_profile_delete {
547 uint32_t meter_profile_id;
550 struct pipeline_msg_req_table_rule_mtr_read {
/* Request envelope: type tag, object id, then a union of the payloads. */
555 struct pipeline_msg_req {
556 enum pipeline_req_type type;
557 uint32_t id; /* Port IN, port OUT or table ID */
561 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
562 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
563 struct pipeline_msg_req_table_stats_read table_stats_read;
564 struct pipeline_msg_req_table_rule_add table_rule_add;
565 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
566 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
567 struct pipeline_msg_req_table_rule_delete table_rule_delete;
568 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
569 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
570 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
571 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
/* Per-response payloads. */
575 struct pipeline_msg_rsp_port_in_stats_read {
576 struct rte_pipeline_port_in_stats stats;
579 struct pipeline_msg_rsp_port_out_stats_read {
580 struct rte_pipeline_port_out_stats stats;
583 struct pipeline_msg_rsp_table_stats_read {
584 struct rte_pipeline_table_stats stats;
587 struct pipeline_msg_rsp_table_rule_add {
591 struct pipeline_msg_rsp_table_rule_add_default {
595 struct pipeline_msg_rsp_table_rule_add_bulk {
599 struct pipeline_msg_rsp_table_rule_stats_read {
600 struct rte_table_action_stats_counters stats;
603 struct pipeline_msg_rsp_table_rule_mtr_read {
604 struct rte_table_action_mtr_counters stats;
/* Response envelope: status plus a union of the payloads. */
607 struct pipeline_msg_rsp {
612 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
613 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
614 struct pipeline_msg_rsp_table_stats_read table_stats_read;
615 struct pipeline_msg_rsp_table_rule_add table_rule_add;
616 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
617 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
618 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
619 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
626 static struct pipeline_msg_req *
627 pipeline_msg_alloc(void)
629 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
630 sizeof(struct pipeline_msg_rsp));
632 return calloc(1, size);
/**
 * Free a pipeline message buffer obtained from pipeline_msg_alloc().
 * free(NULL) is a safe no-op, so no guard is needed.
 */
static void
pipeline_msg_free(struct pipeline_msg_rsp *rsp)
{
	free(rsp);
}
641 static struct pipeline_msg_rsp *
642 pipeline_msg_send_recv(struct pipeline *p,
643 struct pipeline_msg_req *req)
645 struct rte_ring *msgq_req = p->msgq_req;
646 struct rte_ring *msgq_rsp = p->msgq_rsp;
647 struct pipeline_msg_rsp *rsp;
652 status = rte_ring_sp_enqueue(msgq_req, req);
653 } while (status == -ENOBUFS);
657 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
658 } while (status != 0);
/*
 * Master thread API: read (and optionally clear) input port statistics.
 * Pattern shared by all wrappers below: validate params, build a request,
 * send it to the owning data plane thread, copy out the response payload.
 * NOTE(review): listing has gaps (returns/braces dropped) — restore from
 * the complete file before building.
 */
664 pipeline_port_in_stats_read(const char *pipeline_name,
666 struct rte_pipeline_port_in_stats *stats,
670 struct pipeline_msg_req *req;
671 struct pipeline_msg_rsp *rsp;
674 /* Check input params */
675 if ((pipeline_name == NULL) ||
679 p = pipeline_find(pipeline_name);
682 (port_id >= p->n_ports_in))
685 /* Allocate request */
686 req = pipeline_msg_alloc();
691 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
693 req->port_in_stats_read.clear = clear;
695 /* Send request and wait for response */
696 rsp = pipeline_msg_send_recv(p, req);
701 status = rsp->status;
703 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
706 pipeline_msg_free(rsp);
/* Master thread API: enable an input port of a running pipeline. */
712 pipeline_port_in_enable(const char *pipeline_name,
716 struct pipeline_msg_req *req;
717 struct pipeline_msg_rsp *rsp;
720 /* Check input params */
721 if (pipeline_name == NULL)
724 p = pipeline_find(pipeline_name);
727 (port_id >= p->n_ports_in))
730 /* Allocate request */
731 req = pipeline_msg_alloc();
736 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
739 /* Send request and wait for response */
740 rsp = pipeline_msg_send_recv(p, req);
745 status = rsp->status;
748 pipeline_msg_free(rsp);
/* Master thread API: disable an input port of a running pipeline. */
754 pipeline_port_in_disable(const char *pipeline_name,
758 struct pipeline_msg_req *req;
759 struct pipeline_msg_rsp *rsp;
762 /* Check input params */
763 if (pipeline_name == NULL)
766 p = pipeline_find(pipeline_name);
769 (port_id >= p->n_ports_in))
772 /* Allocate request */
773 req = pipeline_msg_alloc();
778 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
781 /* Send request and wait for response */
782 rsp = pipeline_msg_send_recv(p, req);
787 status = rsp->status;
790 pipeline_msg_free(rsp);
/* Master thread API: read (and optionally clear) output port stats. */
796 pipeline_port_out_stats_read(const char *pipeline_name,
798 struct rte_pipeline_port_out_stats *stats,
802 struct pipeline_msg_req *req;
803 struct pipeline_msg_rsp *rsp;
806 /* Check input params */
807 if ((pipeline_name == NULL) ||
811 p = pipeline_find(pipeline_name);
814 (port_id >= p->n_ports_out))
817 /* Allocate request */
818 req = pipeline_msg_alloc();
823 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
825 req->port_out_stats_read.clear = clear;
827 /* Send request and wait for response */
828 rsp = pipeline_msg_send_recv(p, req);
833 status = rsp->status;
835 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
838 pipeline_msg_free(rsp);
/* Master thread API: read (and optionally clear) table statistics. */
844 pipeline_table_stats_read(const char *pipeline_name,
846 struct rte_pipeline_table_stats *stats,
850 struct pipeline_msg_req *req;
851 struct pipeline_msg_rsp *rsp;
854 /* Check input params */
855 if ((pipeline_name == NULL) ||
859 p = pipeline_find(pipeline_name);
862 (table_id >= p->n_tables))
865 /* Allocate request */
866 req = pipeline_msg_alloc();
871 req->type = PIPELINE_REQ_TABLE_STATS_READ;
873 req->table_stats_read.clear = clear;
875 /* Send request and wait for response */
876 rsp = pipeline_msg_send_recv(p, req);
881 status = rsp->status;
883 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
886 pipeline_msg_free(rsp);
/*
 * Validate a table rule match against the target table's configuration:
 * the match type must agree with the table's, and type-specific fields
 * (IP version, prefix depths, key size) must be in range.
 * NOTE(review): listing has gaps — restore missing lines from the
 * complete file before building.
 */
892 match_check(struct table_rule_match *match,
898 if ((match == NULL) ||
900 (table_id >= p->n_tables))
903 table = &p->table[table_id];
904 if (match->match_type != table->params.match_type)
907 switch (match->match_type) {
910 struct table_acl_params *t = &table->params.match.acl;
911 struct table_rule_match_acl *r = &match->match.acl;
/* Rule and table must agree on IPv4 vs IPv6. */
913 if ((r->ip_version && (t->ip_version == 0)) ||
914 ((r->ip_version == 0) && t->ip_version))
/* IPv4 prefixes: depth at most 32 bits. */
918 if ((r->sa_depth > 32) ||
/* IPv6 prefixes: depth at most 128 bits. */
922 if ((r->sa_depth > 128) ||
937 struct table_lpm_params *t = &table->params.match.lpm;
938 struct table_rule_match_lpm *r = &match->match.lpm;
/* LPM key size must match the rule's IP version: 4 or 16 bytes. */
940 if ((r->ip_version && (t->key_size != 4)) ||
941 ((r->ip_version == 0) && (t->key_size != 16)))
/*
 * Validate a table rule action against the table's action profile: the
 * action mask must match the profile exactly, and each enabled action's
 * parameters must be within the profile's configured limits.
 */
963 action_check(struct table_rule_action *action,
967 struct table_action_profile *ap;
969 if ((action == NULL) ||
971 (table_id >= p->n_tables))
974 ap = p->table[table_id].ap;
975 if (action->action_mask != ap->params.action_mask)
978 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
979 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
980 (action->fwd.id >= p->n_ports_out))
983 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
984 (action->fwd.id >= p->n_tables))
988 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
989 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
990 uint32_t tc_mask1 = action->mtr.tc_mask;
992 if (tc_mask1 != tc_mask0)
996 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
997 uint32_t n_subports_per_port =
998 ap->params.tm.n_subports_per_port;
999 uint32_t n_pipes_per_subport =
1000 ap->params.tm.n_pipes_per_subport;
1001 uint32_t subport_id = action->tm.subport_id;
1002 uint32_t pipe_id = action->tm.pipe_id;
1004 if ((subport_id >= n_subports_per_port) ||
1005 (pipe_id >= n_pipes_per_subport))
1009 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1010 uint64_t encap_mask = ap->params.encap.encap_mask;
1011 enum rte_table_action_encap_type type = action->encap.type;
1013 if ((encap_mask & (1LLU << type)) == 0)
1017 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1018 int ip_version0 = ap->params.common.ip_version;
1019 int ip_version1 = action->nat.ip_version;
1021 if ((ip_version1 && (ip_version0 == 0)) ||
1022 ((ip_version1 == 0) && ip_version0))
/*
 * Validate the default-entry action: only the FWD action is allowed for a
 * table's default rule, and its target port/table id must be in range.
 */
1030 action_default_check(struct table_rule_action *action,
1034 if ((action == NULL) ||
1035 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1037 (table_id >= p->n_tables))
1040 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1041 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1042 (action->fwd.id >= p->n_ports_out))
1045 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1046 (action->fwd.id >= p->n_tables))
/*
 * Master thread API: add a (match, action) rule to a pipeline table.
 * Validates the rule, then ships it by value inside the request; *data
 * receives the opaque table entry handle from the data plane thread.
 * NOTE(review): listing has gaps (returns/braces dropped) — restore from
 * the complete file before building.
 */
1054 pipeline_table_rule_add(const char *pipeline_name,
1056 struct table_rule_match *match,
1057 struct table_rule_action *action,
1061 struct pipeline_msg_req *req;
1062 struct pipeline_msg_rsp *rsp;
1065 /* Check input params */
1066 if ((pipeline_name == NULL) ||
1072 p = pipeline_find(pipeline_name);
1074 (p->enabled == 0) ||
1075 (table_id >= p->n_tables) ||
1076 match_check(match, p, table_id) ||
1077 action_check(action, p, table_id))
1080 /* Allocate request */
1081 req = pipeline_msg_alloc();
1086 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1088 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1089 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1091 /* Send request and wait for response */
1092 rsp = pipeline_msg_send_recv(p, req);
1097 status = rsp->status;
1099 *data = rsp->table_rule_add.data;
1102 pipeline_msg_free(rsp);
/* Master thread API: set the table's default (miss) rule. */
1108 pipeline_table_rule_add_default(const char *pipeline_name,
1110 struct table_rule_action *action,
1114 struct pipeline_msg_req *req;
1115 struct pipeline_msg_rsp *rsp;
1118 /* Check input params */
1119 if ((pipeline_name == NULL) ||
1124 p = pipeline_find(pipeline_name);
1126 (p->enabled == 0) ||
1127 (table_id >= p->n_tables) ||
1128 action_default_check(action, p, table_id))
1131 /* Allocate request */
1132 req = pipeline_msg_alloc();
1137 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1139 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1141 /* Send request and wait for response */
1142 rsp = pipeline_msg_send_recv(p, req);
1147 status = rsp->status;
1149 *data = rsp->table_rule_add_default.data;
1152 pipeline_msg_free(rsp);
/*
 * Master thread API: add many rules at once. The match/action/data arrays
 * are passed by pointer (caller-owned); on return *n_rules holds how many
 * rules the data plane thread actually added.
 */
1158 pipeline_table_rule_add_bulk(const char *pipeline_name,
1160 struct table_rule_match *match,
1161 struct table_rule_action *action,
1166 struct pipeline_msg_req *req;
1167 struct pipeline_msg_rsp *rsp;
1171 /* Check input params */
1172 if ((pipeline_name == NULL) ||
1176 (n_rules == NULL) ||
1180 p = pipeline_find(pipeline_name);
1182 (p->enabled == 0) ||
1183 (table_id >= p->n_tables))
/* Validate every rule up front before sending anything. */
1186 for (i = 0; i < *n_rules; i++)
1187 if (match_check(match, p, table_id) ||
1188 action_check(action, p, table_id))
1191 /* Allocate request */
1192 req = pipeline_msg_alloc();
1197 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1199 req->table_rule_add_bulk.match = match;
1200 req->table_rule_add_bulk.action = action;
1201 req->table_rule_add_bulk.data = data;
1202 req->table_rule_add_bulk.n_rules = *n_rules;
/* Only ACL tables support a true bulk insert on the data plane side. */
1203 req->table_rule_add_bulk.bulk =
1204 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1206 /* Send request and wait for response */
1207 rsp = pipeline_msg_send_recv(p, req);
1212 status = rsp->status;
1214 *n_rules = rsp->table_rule_add_bulk.n_rules;
1217 pipeline_msg_free(rsp);
/* Master thread API: delete the rule matching *match* from the table. */
1223 pipeline_table_rule_delete(const char *pipeline_name,
1225 struct table_rule_match *match)
1228 struct pipeline_msg_req *req;
1229 struct pipeline_msg_rsp *rsp;
1232 /* Check input params */
1233 if ((pipeline_name == NULL) ||
1237 p = pipeline_find(pipeline_name);
1239 (p->enabled == 0) ||
1240 (table_id >= p->n_tables) ||
1241 match_check(match, p, table_id))
1244 /* Allocate request */
1245 req = pipeline_msg_alloc();
1250 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1252 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1254 /* Send request and wait for response */
1255 rsp = pipeline_msg_send_recv(p, req);
1260 status = rsp->status;
1263 pipeline_msg_free(rsp);
/* Master thread API: delete the table's default (miss) rule. */
1269 pipeline_table_rule_delete_default(const char *pipeline_name,
1273 struct pipeline_msg_req *req;
1274 struct pipeline_msg_rsp *rsp;
1277 /* Check input params */
1278 if (pipeline_name == NULL)
1281 p = pipeline_find(pipeline_name);
1283 (p->enabled == 0) ||
1284 (table_id >= p->n_tables))
1287 /* Allocate request */
1288 req = pipeline_msg_alloc();
1293 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1296 /* Send request and wait for response */
1297 rsp = pipeline_msg_send_recv(p, req);
1302 status = rsp->status;
1305 pipeline_msg_free(rsp);
/* Master thread API: read (and optionally clear) a rule's counters. */
1311 pipeline_table_rule_stats_read(const char *pipeline_name,
1314 struct rte_table_action_stats_counters *stats,
1318 struct pipeline_msg_req *req;
1319 struct pipeline_msg_rsp *rsp;
1322 /* Check input params */
1323 if ((pipeline_name == NULL) ||
1328 p = pipeline_find(pipeline_name);
1330 (p->enabled == 0) ||
1331 (table_id >= p->n_tables))
1334 /* Allocate request */
1335 req = pipeline_msg_alloc();
1340 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1342 req->table_rule_stats_read.data = data;
1343 req->table_rule_stats_read.clear = clear;
1345 /* Send request and wait for response */
1346 rsp = pipeline_msg_send_recv(p, req);
1351 status = rsp->status;
1353 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1356 pipeline_msg_free(rsp);
/* Master thread API: register a meter profile with the table's action. */
1362 pipeline_table_mtr_profile_add(const char *pipeline_name,
1364 uint32_t meter_profile_id,
1365 struct rte_table_action_meter_profile *profile)
1368 struct pipeline_msg_req *req;
1369 struct pipeline_msg_rsp *rsp;
1372 /* Check input params */
1373 if ((pipeline_name == NULL) ||
1377 p = pipeline_find(pipeline_name);
1379 (p->enabled == 0) ||
1380 (table_id >= p->n_tables))
1383 /* Allocate request */
1384 req = pipeline_msg_alloc();
1389 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1391 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1392 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1394 /* Send request and wait for response */
1395 rsp = pipeline_msg_send_recv(p, req);
1400 status = rsp->status;
1403 pipeline_msg_free(rsp);
/* Master thread API: remove a previously registered meter profile. */
1409 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1411 uint32_t meter_profile_id)
1414 struct pipeline_msg_req *req;
1415 struct pipeline_msg_rsp *rsp;
1418 /* Check input params */
1419 if (pipeline_name == NULL)
1422 p = pipeline_find(pipeline_name);
1424 (p->enabled == 0) ||
1425 (table_id >= p->n_tables))
1428 /* Allocate request */
1429 req = pipeline_msg_alloc();
1434 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1436 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1438 /* Send request and wait for response */
1439 rsp = pipeline_msg_send_recv(p, req);
1444 status = rsp->status;
1447 pipeline_msg_free(rsp);
/* Master thread API: read (optionally clear) a rule's meter counters. */
1453 pipeline_table_rule_mtr_read(const char *pipeline_name,
1457 struct rte_table_action_mtr_counters *stats,
1461 struct pipeline_msg_req *req;
1462 struct pipeline_msg_rsp *rsp;
1465 /* Check input params */
1466 if ((pipeline_name == NULL) ||
1471 p = pipeline_find(pipeline_name);
1473 (p->enabled == 0) ||
1474 (table_id >= p->n_tables))
1477 /* Allocate request */
1478 req = pipeline_msg_alloc();
1483 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
1485 req->table_rule_mtr_read.data = data;
1486 req->table_rule_mtr_read.tc_mask = tc_mask;
1487 req->table_rule_mtr_read.clear = clear;
1489 /* Send request and wait for response */
1490 rsp = pipeline_msg_send_recv(p, req);
1495 status = rsp->status;
1497 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
1500 pipeline_msg_free(rsp);
1506 * Data plane threads: message handling
/**
 * Data plane thread: non-blocking poll of the pipeline request ring.
 *
 * @return next pending request, or NULL when the ring is empty.
 */
static inline struct pipeline_msg_req *
pipeline_msg_recv(struct rte_ring *msgq_req)
{
	struct pipeline_msg_req *req;

	int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);

	if (status != 0)
		return NULL;

	return req;
}
/**
 * Data plane thread: post *rsp* back to the master thread, retrying while
 * the response ring is full.
 */
static void
pipeline_msg_send(struct rte_ring *msgq_rsp,
	struct pipeline_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
1532 static struct pipeline_msg_rsp *
1533 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
1534 struct pipeline_msg_req *req)
1536 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1537 uint32_t port_id = req->id;
1538 int clear = req->port_in_stats_read.clear;
1540 rsp->status = rte_pipeline_port_in_stats_read(p->p,
1542 &rsp->port_in_stats_read.stats,
1548 static struct pipeline_msg_rsp *
1549 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
1550 struct pipeline_msg_req *req)
1552 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1553 uint32_t port_id = req->id;
1555 rsp->status = rte_pipeline_port_in_enable(p->p,
1561 static struct pipeline_msg_rsp *
1562 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
1563 struct pipeline_msg_req *req)
1565 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1566 uint32_t port_id = req->id;
1568 rsp->status = rte_pipeline_port_in_disable(p->p,
1574 static struct pipeline_msg_rsp *
1575 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
1576 struct pipeline_msg_req *req)
1578 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1579 uint32_t port_id = req->id;
1580 int clear = req->port_out_stats_read.clear;
1582 rsp->status = rte_pipeline_port_out_stats_read(p->p,
1584 &rsp->port_out_stats_read.stats,
1590 static struct pipeline_msg_rsp *
1591 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
1592 struct pipeline_msg_req *req)
1594 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1595 uint32_t port_id = req->id;
1596 int clear = req->table_stats_read.clear;
1598 rsp->status = rte_pipeline_table_stats_read(p->p,
1600 &rsp->table_stats_read.stats,
/*
 * Scratch union large enough to hold any low-level (driver-facing) table
 * key representation produced by match_convert() below.
 */
1606 union table_rule_match_low_level {
1607 struct rte_table_acl_rule_add_params acl_add;
1608 struct rte_table_acl_rule_delete_params acl_delete;
1609 struct rte_table_array_key array;
1610 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1611 struct rte_table_lpm_key lpm_ipv4;
1612 struct rte_table_lpm_ipv6_key lpm_ipv6;
/**
 * Split an IPv6 prefix depth (0..128 bits) into four per-32-bit-word
 * depths, as required by the ACL field descriptors (one mask per u32).
 * E.g. depth 40 -> {32, 8, 0, 0}.
 *
 * @param depth prefix length in bits, must be <= 128.
 * @param depth32 output array of 4 per-word depths.
 * @return 0 on success, -1 when depth > 128.
 */
static int
match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
{
	if (depth > 128)
		return -1;

	switch (depth / 32) {
	case 0:
		depth32[0] = depth;
		depth32[1] = 0;
		depth32[2] = 0;
		depth32[3] = 0;
		return 0;

	case 1:
		depth32[0] = 32;
		depth32[1] = depth - 32;
		depth32[2] = 0;
		depth32[3] = 0;
		return 0;

	case 2:
		depth32[0] = 32;
		depth32[1] = 32;
		depth32[2] = depth - 64;
		depth32[3] = 0;
		return 0;

	case 3:
		depth32[0] = 32;
		depth32[1] = 32;
		depth32[2] = 32;
		depth32[3] = depth - 96;
		return 0;

	case 4:
		depth32[0] = 32;
		depth32[1] = 32;
		depth32[2] = 32;
		depth32[3] = 32;
		return 0;

	default:
		return -1;
	}
}
/*
 * Convert a high-level table rule match (*mh*) into the low-level,
 * driver-facing representation (*ml*). The third parameter (name missing
 * from this listing — presumably an add/delete flag; confirm against the
 * complete file) selects between the acl_add and acl_delete layouts.
 * NOTE(review): listing has gaps — case labels, returns and several field
 * assignments are missing; restore from the complete file before building.
 */
1663 match_convert(struct table_rule_match *mh,
1664 union table_rule_match_low_level *ml,
1667 memset(ml, 0, sizeof(*ml));
1669 switch (mh->match_type) {
/* ACL, IPv4: fields are proto, src addr/depth, dst addr/depth, ports. */
1671 if (mh->match.acl.ip_version)
1673 ml->acl_add.field_value[0].value.u8 =
1674 mh->match.acl.proto;
1675 ml->acl_add.field_value[0].mask_range.u8 =
1676 mh->match.acl.proto_mask;
1678 ml->acl_add.field_value[1].value.u32 =
1679 mh->match.acl.ipv4.sa;
1680 ml->acl_add.field_value[1].mask_range.u32 =
1681 mh->match.acl.sa_depth;
1683 ml->acl_add.field_value[2].value.u32 =
1684 mh->match.acl.ipv4.da;
1685 ml->acl_add.field_value[2].mask_range.u32 =
1686 mh->match.acl.da_depth;
1688 ml->acl_add.field_value[3].value.u16 =
1690 ml->acl_add.field_value[3].mask_range.u16 =
1693 ml->acl_add.field_value[4].value.u16 =
1695 ml->acl_add.field_value[4].mask_range.u16 =
1698 ml->acl_add.priority =
1699 (int32_t) mh->match.acl.priority;
/* Same IPv4 field layout for the delete variant. */
1701 ml->acl_delete.field_value[0].value.u8 =
1702 mh->match.acl.proto;
1703 ml->acl_delete.field_value[0].mask_range.u8 =
1704 mh->match.acl.proto_mask;
1706 ml->acl_delete.field_value[1].value.u32 =
1707 mh->match.acl.ipv4.sa;
1708 ml->acl_delete.field_value[1].mask_range.u32 =
1709 mh->match.acl.sa_depth;
1711 ml->acl_delete.field_value[2].value.u32 =
1712 mh->match.acl.ipv4.da;
1713 ml->acl_delete.field_value[2].mask_range.u32 =
1714 mh->match.acl.da_depth;
1716 ml->acl_delete.field_value[3].value.u16 =
1718 ml->acl_delete.field_value[3].mask_range.u16 =
1721 ml->acl_delete.field_value[4].value.u16 =
1723 ml->acl_delete.field_value[4].mask_range.u16 =
/*
 * ACL, IPv6 (add): addresses are viewed as four u32 words each, with the
 * prefix depth split per word by match_convert_ipv6_depth().
 */
1729 (uint32_t *) mh->match.acl.ipv6.sa;
1731 (uint32_t *) mh->match.acl.ipv6.da;
1732 uint32_t sa32_depth[4], da32_depth[4];
1735 status = match_convert_ipv6_depth(
1736 mh->match.acl.sa_depth,
1741 status = match_convert_ipv6_depth(
1742 mh->match.acl.da_depth,
1747 ml->acl_add.field_value[0].value.u8 =
1748 mh->match.acl.proto;
1749 ml->acl_add.field_value[0].mask_range.u8 =
1750 mh->match.acl.proto_mask;
1752 ml->acl_add.field_value[1].value.u32 = sa32[0];
1753 ml->acl_add.field_value[1].mask_range.u32 =
1755 ml->acl_add.field_value[2].value.u32 = sa32[1];
1756 ml->acl_add.field_value[2].mask_range.u32 =
1758 ml->acl_add.field_value[3].value.u32 = sa32[2];
1759 ml->acl_add.field_value[3].mask_range.u32 =
1761 ml->acl_add.field_value[4].value.u32 = sa32[3];
1762 ml->acl_add.field_value[4].mask_range.u32 =
1765 ml->acl_add.field_value[5].value.u32 = da32[0];
1766 ml->acl_add.field_value[5].mask_range.u32 =
1768 ml->acl_add.field_value[6].value.u32 = da32[1];
1769 ml->acl_add.field_value[6].mask_range.u32 =
1771 ml->acl_add.field_value[7].value.u32 = da32[2];
1772 ml->acl_add.field_value[7].mask_range.u32 =
1774 ml->acl_add.field_value[8].value.u32 = da32[3];
1775 ml->acl_add.field_value[8].mask_range.u32 =
1778 ml->acl_add.field_value[9].value.u16 =
1780 ml->acl_add.field_value[9].mask_range.u16 =
1783 ml->acl_add.field_value[10].value.u16 =
1785 ml->acl_add.field_value[10].mask_range.u16 =
1788 ml->acl_add.priority =
1789 (int32_t) mh->match.acl.priority;
/* ACL, IPv6 (delete): same per-word layout as the add variant. */
1792 (uint32_t *) mh->match.acl.ipv6.sa;
1794 (uint32_t *) mh->match.acl.ipv6.da;
1795 uint32_t sa32_depth[4], da32_depth[4];
1798 status = match_convert_ipv6_depth(
1799 mh->match.acl.sa_depth,
1804 status = match_convert_ipv6_depth(
1805 mh->match.acl.da_depth,
1810 ml->acl_delete.field_value[0].value.u8 =
1811 mh->match.acl.proto;
1812 ml->acl_delete.field_value[0].mask_range.u8 =
1813 mh->match.acl.proto_mask;
1815 ml->acl_delete.field_value[1].value.u32 =
1817 ml->acl_delete.field_value[1].mask_range.u32 =
1819 ml->acl_delete.field_value[2].value.u32 =
1821 ml->acl_delete.field_value[2].mask_range.u32 =
1823 ml->acl_delete.field_value[3].value.u32 =
1825 ml->acl_delete.field_value[3].mask_range.u32 =
1827 ml->acl_delete.field_value[4].value.u32 =
1829 ml->acl_delete.field_value[4].mask_range.u32 =
1832 ml->acl_delete.field_value[5].value.u32 =
1834 ml->acl_delete.field_value[5].mask_range.u32 =
1836 ml->acl_delete.field_value[6].value.u32 =
1838 ml->acl_delete.field_value[6].mask_range.u32 =
1840 ml->acl_delete.field_value[7].value.u32 =
1842 ml->acl_delete.field_value[7].mask_range.u32 =
1844 ml->acl_delete.field_value[8].value.u32 =
1846 ml->acl_delete.field_value[8].mask_range.u32 =
1849 ml->acl_delete.field_value[9].value.u16 =
1851 ml->acl_delete.field_value[9].mask_range.u16 =
1854 ml->acl_delete.field_value[10].value.u16 =
1856 ml->acl_delete.field_value[10].mask_range.u16 =
/* ARRAY match: plain index. */
1862 ml->array.pos = mh->match.array.pos;
/* HASH match: raw key bytes. */
1866 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
/* LPM match: IPv4 or IPv6 key depending on the rule's IP version. */
1870 if (mh->match.lpm.ip_version) {
1871 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
1872 ml->lpm_ipv4.depth = mh->match.lpm.depth;
1874 memcpy(ml->lpm_ipv6.ip,
1875 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
1876 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/*
 * Data plane thread: TABLE_RULE_ADD handler. Stages the table entry in
 * the per-pipeline scratch buffer, applies each enabled action to it via
 * rte_table_action_apply(), converts the match to its low-level form and
 * inserts the entry into the pipeline table. Response reuses the request
 * buffer.
 * NOTE(review): listing has gaps (error paths, apply arguments, closing
 * braces) — restore from the complete file before building.
 */
1886 static struct pipeline_msg_rsp *
1887 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
1888 struct pipeline_msg_req *req)
1890 union table_rule_match_low_level match_ll;
1891 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1892 struct table_rule_match *match = &req->table_rule_add.match;
1893 struct table_rule_action *action = &req->table_rule_add.action;
1894 struct rte_pipeline_table_entry *data_in, *data_out;
1895 uint32_t table_id = req->id;
1896 int key_found, status;
1897 struct rte_table_action *a = p->table_data[table_id].a;
/* Stage the entry in the scratch buffer before insertion. */
1900 memset(p->buffer, 0, sizeof(p->buffer));
1901 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* Apply each action enabled in the rule's action mask, in order. */
1903 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1904 status = rte_table_action_apply(a,
1906 RTE_TABLE_ACTION_FWD,
1915 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1916 status = rte_table_action_apply(a,
1918 RTE_TABLE_ACTION_MTR,
1927 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1928 status = rte_table_action_apply(a,
1930 RTE_TABLE_ACTION_TM,
1939 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1940 status = rte_table_action_apply(a,
1942 RTE_TABLE_ACTION_ENCAP,
1951 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1952 status = rte_table_action_apply(a,
1954 RTE_TABLE_ACTION_NAT,
1963 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
1964 status = rte_table_action_apply(a,
1966 RTE_TABLE_ACTION_TTL,
1975 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
1976 status = rte_table_action_apply(a,
1978 RTE_TABLE_ACTION_STATS,
1987 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
1988 status = rte_table_action_apply(a,
1990 RTE_TABLE_ACTION_TIME,
1999 /* Add rule (match, action) to table */
2000 status = match_convert(match, &match_ll, 1);
2006 status = rte_pipeline_table_entry_add(p->p,
2017 /* Write response */
2019 rsp->table_rule_add.data = data_out;
2024 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_RULE_ADD_DEFAULT: installs the table's
 * default (miss) entry.  Only the forward action is meaningful for a
 * default entry, so the entry is built directly instead of going
 * through rte_table_action_apply().  Response aliases the request.
 */
2025 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2026 struct pipeline_msg_req *req)
2028 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2029 struct table_rule_action *action = &req->table_rule_add_default.action;
2030 struct rte_pipeline_table_entry *data_in, *data_out;
2031 uint32_t table_id = req->id;
/* Build the default entry in the thread-private scratch buffer. */
2035 memset(p->buffer, 0, sizeof(p->buffer));
2036 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* The fwd id is a port id or a table id depending on the action type. */
2038 data_in->action = action->fwd.action;
2039 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2040 data_in->port_id = action->fwd.id;
2041 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2042 data_in->table_id = action->fwd.id;
2044 /* Add default rule to table */
2045 status = rte_pipeline_table_default_entry_add(p->p,
/* Return the internal default-entry pointer to the master thread. */
2054 /* Write response */
2056 rsp->table_rule_add_default.data = data_out;
2061 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_RULE_ADD_BULK: adds n_rules (match,
 * action) pairs to pipeline table req->id in one request.  Heap
 * buffers hold the per-rule low-level matches and encoded actions;
 * the "bulk" flag selects between the single bulk-add library call
 * and a per-rule add loop.  On success the internal entry pointers
 * are returned via req->table_rule_add_bulk.data; on failure the
 * response reports 0 rules added.  Response aliases the request.
 */
2062 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2063 struct pipeline_msg_req *req)
2066 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2068 uint32_t table_id = req->id;
2069 struct table_rule_match *match = req->table_rule_add_bulk.match;
2070 struct table_rule_action *action = req->table_rule_add_bulk.action;
2071 struct rte_pipeline_table_entry **data =
2072 (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
2073 uint32_t n_rules = req->table_rule_add_bulk.n_rules;
2074 uint32_t bulk = req->table_rule_add_bulk.bulk;
2076 struct rte_table_action *a = p->table_data[table_id].a;
2077 union table_rule_match_low_level *match_ll;
2079 void **match_ll_ptr;
2080 struct rte_pipeline_table_entry **action_ll_ptr;
2084 /* Memory allocation */
/*
 * Five parallel arrays, one element per rule: low-level matches, the
 * encoded action blobs (TABLE_RULE_ACTION_SIZE_MAX bytes each), two
 * pointer arrays addressing them, and the per-rule key_found flags.
 */
2085 match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
2086 action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
2087 match_ll_ptr = calloc(n_rules, sizeof(void *));
2089 calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
2090 found = calloc(n_rules, sizeof(int));
/* Any allocation failure aborts the whole bulk operation. */
2092 if ((match_ll == NULL) ||
2093 (action_ll == NULL) ||
2094 (match_ll_ptr == NULL) ||
2095 (action_ll_ptr == NULL) ||
/* Point each pointer-array slot at its rule's match/action storage. */
2099 for (i = 0; i < n_rules; i++) {
2100 match_ll_ptr[i] = (void *)&match_ll[i];
2102 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
2105 /* Rule match conversion */
2106 for (i = 0; i < n_rules; i++) {
2107 status = match_convert(&match[i], match_ll_ptr[i], 1);
2112 /* Rule action conversion */
/*
 * Same per-action-type encoding as the single-rule add, repeated for
 * every rule.  NOTE(review): error checks after each apply call are
 * elided in this view — presumably they jump to the failure cleanup.
 */
2113 for (i = 0; i < n_rules; i++) {
2114 void *data_in = action_ll_ptr[i];
2115 struct table_rule_action *act = &action[i];
2117 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2118 status = rte_table_action_apply(a,
2120 RTE_TABLE_ACTION_FWD,
2127 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2128 status = rte_table_action_apply(a,
2130 RTE_TABLE_ACTION_MTR,
2137 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2138 status = rte_table_action_apply(a,
2140 RTE_TABLE_ACTION_TM,
2147 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2148 status = rte_table_action_apply(a,
2150 RTE_TABLE_ACTION_ENCAP,
2157 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2158 status = rte_table_action_apply(a,
2160 RTE_TABLE_ACTION_NAT,
2167 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2168 status = rte_table_action_apply(a,
2170 RTE_TABLE_ACTION_TTL,
2177 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2178 status = rte_table_action_apply(a,
2180 RTE_TABLE_ACTION_STATS,
2187 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2188 status = rte_table_action_apply(a,
2190 RTE_TABLE_ACTION_TIME,
2198 /* Add rule (match, action) to table */
/* Bulk path: one library call inserts all rules at once. */
2200 status = rte_pipeline_table_entry_add_bulk(p->p,
/* Non-bulk path: insert the rules one at a time. */
2210 for (i = 0; i < n_rules; i++) {
2211 status = rte_pipeline_table_entry_add(p->p,
2223 /* Write response */
2225 rsp->table_rule_add_bulk.n_rules = n_rules;
/* Success cleanup: release all temporary arrays. */
2229 free(action_ll_ptr);
/* Failure cleanup: same frees, then report zero rules added. */
2238 free(action_ll_ptr);
2244 rsp->table_rule_add_bulk.n_rules = 0;
2248 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_RULE_DELETE: converts the high-level
 * match to the low-level key format (delete flavor, third argument 0)
 * and removes the matching entry from pipeline table req->id.
 * Response aliases the request; rsp->status carries the result.
 */
2249 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2250 struct pipeline_msg_req *req)
2252 union table_rule_match_low_level match_ll;
2253 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2254 struct table_rule_match *match = &req->table_rule_delete.match;
2255 uint32_t table_id = req->id;
2256 int key_found, status;
2258 status = match_convert(match, &match_ll, 0);
2264 rsp->status = rte_pipeline_table_entry_delete(p->p,
2273 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_RULE_DELETE_DEFAULT: removes the
 * default (miss) entry of pipeline table req->id.  Response aliases
 * the request; rsp->status carries the library return code.
 */
2274 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2275 struct pipeline_msg_req *req)
2277 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2278 uint32_t table_id = req->id;
2280 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
2287 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_RULE_STATS_READ: reads (and optionally
 * clears) the per-rule statistics of the rule identified by the
 * opaque entry pointer in req->table_rule_stats_read.data.  The stats
 * are written straight into the response, which aliases the request.
 */
2288 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2289 struct pipeline_msg_req *req)
2291 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2292 uint32_t table_id = req->id;
2293 void *data = req->table_rule_stats_read.data;
2294 int clear = req->table_rule_stats_read.clear;
2295 struct rte_table_action *a = p->table_data[table_id].a;
2297 rsp->status = rte_table_action_stats_read(a,
2299 &rsp->table_rule_stats_read.stats,
2305 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_MTR_PROFILE_ADD: registers a meter
 * profile with the table action profile of pipeline table req->id so
 * subsequent MTR rule actions can reference it by id.  Response
 * aliases the request; rsp->status carries the result.
 */
2306 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2307 struct pipeline_msg_req *req)
2309 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2310 uint32_t table_id = req->id;
2311 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2312 struct rte_table_action_meter_profile *profile =
2313 &req->table_mtr_profile_add.profile;
2314 struct rte_table_action *a = p->table_data[table_id].a;
2316 rsp->status = rte_table_action_meter_profile_add(a,
2323 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_MTR_PROFILE_DELETE: unregisters the
 * given meter profile from the table action profile of pipeline table
 * req->id.  Response aliases the request; rsp->status carries the
 * result.
 */
2324 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2325 struct pipeline_msg_req *req)
2327 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2328 uint32_t table_id = req->id;
2329 uint32_t meter_profile_id =
2330 req->table_mtr_profile_delete.meter_profile_id;
2331 struct rte_table_action *a = p->table_data[table_id].a;
2333 rsp->status = rte_table_action_meter_profile_delete(a,
2339 static struct pipeline_msg_rsp *
/*
 * Data plane handler for TABLE_RULE_MTR_READ: reads (and optionally
 * clears) the meter stats of the rule identified by the opaque entry
 * pointer, restricted to the traffic classes selected by tc_mask.
 * The stats land directly in the response, which aliases the request.
 */
2340 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2341 struct pipeline_msg_req *req)
2343 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2344 uint32_t table_id = req->id;
2345 void *data = req->table_rule_mtr_read.data;
2346 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2347 int clear = req->table_rule_mtr_read.clear;
2348 struct rte_table_action *a = p->table_data[table_id].a;
2350 rsp->status = rte_table_action_meter_read(a,
2353 &rsp->table_rule_mtr_read.stats,
/*
 * Per-pipeline message pump, called periodically from the data plane
 * main loop: drain the request ring, dispatch each request to its
 * handler by type, and post the handler's response on the response
 * ring.  Handlers reuse the request buffer as the response, so every
 * received message is sent back exactly once.
 */
2360 pipeline_msg_handle(struct pipeline_data *p)
2363 struct pipeline_msg_req *req;
2364 struct pipeline_msg_rsp *rsp;
/* Non-blocking receive; NULL means the request ring is empty. */
2366 req = pipeline_msg_recv(p->msgq_req);
2370 switch (req->type) {
2371 case PIPELINE_REQ_PORT_IN_STATS_READ:
2372 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
2375 case PIPELINE_REQ_PORT_IN_ENABLE:
2376 rsp = pipeline_msg_handle_port_in_enable(p, req);
2379 case PIPELINE_REQ_PORT_IN_DISABLE:
2380 rsp = pipeline_msg_handle_port_in_disable(p, req);
2383 case PIPELINE_REQ_PORT_OUT_STATS_READ:
2384 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
2387 case PIPELINE_REQ_TABLE_STATS_READ:
2388 rsp = pipeline_msg_handle_table_stats_read(p, req);
2391 case PIPELINE_REQ_TABLE_RULE_ADD:
2392 rsp = pipeline_msg_handle_table_rule_add(p, req);
2395 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2396 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2399 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2400 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2403 case PIPELINE_REQ_TABLE_RULE_DELETE:
2404 rsp = pipeline_msg_handle_table_rule_delete(p, req);
2407 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2408 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
2411 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
2412 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
2415 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
2416 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
2419 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
2420 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
2423 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
2424 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
/*
 * Unknown request type: echo the request back as the response.
 * NOTE(review): the status assignment for this path is elided in
 * this view — presumably an error code is set; confirm in full file.
 */
2428 rsp = (struct pipeline_msg_rsp *) req;
2432 pipeline_msg_send(p->msgq_rsp, rsp);
2437 * Data plane threads: main
2440 thread_main(void *arg __rte_unused)
2442 struct thread_data *t;
2443 uint32_t thread_id, i;
2445 thread_id = rte_lcore_id();
2446 t = &thread_data[thread_id];
2449 for (i = 0; ; i++) {
2453 for (j = 0; j < t->n_pipelines; j++)
2454 rte_pipeline_run(t->p[j]);
2457 if ((i & 0xF) == 0) {
2458 uint64_t time = rte_get_tsc_cycles();
2459 uint64_t time_next_min = UINT64_MAX;
2461 if (time < t->time_next_min)
2464 /* Pipeline message queues */
2465 for (j = 0; j < t->n_pipelines; j++) {
2466 struct pipeline_data *p =
2467 &t->pipeline_data[j];
2468 uint64_t time_next = p->time_next;
2470 if (time_next <= time) {
2471 pipeline_msg_handle(p);
2472 rte_pipeline_flush(p->p);
2473 time_next = time + p->timer_period;
2474 p->time_next = time_next;
2477 if (time_next < time_next_min)
2478 time_next_min = time_next;
2481 /* Thread message queues */
2483 uint64_t time_next = t->time_next;
2485 if (time_next <= time) {
2486 thread_msg_handle(t);
2487 time_next = time + t->timer_period;
2488 t->time_next = time_next;
2491 if (time_next < time_next_min)
2492 time_next_min = time_next;
2495 t->time_next_min = time_next_min;