1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
/* Max number of pipelines that a single data plane thread can run. */
#ifndef THREAD_PIPELINES_MAX
#define THREAD_PIPELINES_MAX                               256
#endif

/* Size (in elements) of each thread request/response message queue. */
#ifndef THREAD_MSGQ_SIZE
#define THREAD_MSGQ_SIZE                                   64
#endif

/* Period (in milliseconds) for data plane thread housekeeping work. */
#ifndef THREAD_TIMER_PERIOD_MS
#define THREAD_TIMER_PERIOD_MS                             100
#endif
35 * Master thead: data plane thread context
38 struct rte_ring *msgq_req;
39 struct rte_ring *msgq_rsp;
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
50 struct rte_table_action *a;
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
75 uint64_t time_next_min;
76 } __rte_cache_aligned;
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
159 * Master thread & data plane threads: message passing
161 enum thread_req_type {
162 THREAD_REQ_PIPELINE_ENABLE = 0,
163 THREAD_REQ_PIPELINE_DISABLE,
167 struct thread_msg_req {
168 enum thread_req_type type;
172 struct rte_pipeline *p;
174 struct rte_table_action *a;
175 } table[RTE_PIPELINE_TABLE_MAX];
176 struct rte_ring *msgq_req;
177 struct rte_ring *msgq_rsp;
178 uint32_t timer_period_ms;
183 struct rte_pipeline *p;
188 struct thread_msg_rsp {
195 static struct thread_msg_req *
196 thread_msg_alloc(void)
198 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
199 sizeof(struct thread_msg_rsp));
201 return calloc(1, size);
205 thread_msg_free(struct thread_msg_rsp *rsp)
210 static struct thread_msg_rsp *
211 thread_msg_send_recv(uint32_t thread_id,
212 struct thread_msg_req *req)
214 struct thread *t = &thread[thread_id];
215 struct rte_ring *msgq_req = t->msgq_req;
216 struct rte_ring *msgq_rsp = t->msgq_rsp;
217 struct thread_msg_rsp *rsp;
222 status = rte_ring_sp_enqueue(msgq_req, req);
223 } while (status == -ENOBUFS);
227 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
228 } while (status != 0);
234 thread_pipeline_enable(uint32_t thread_id,
235 const char *pipeline_name)
237 struct pipeline *p = pipeline_find(pipeline_name);
239 struct thread_msg_req *req;
240 struct thread_msg_rsp *rsp;
244 /* Check input params */
245 if ((thread_id >= RTE_MAX_LCORE) ||
247 (p->n_ports_in == 0) ||
248 (p->n_ports_out == 0) ||
252 t = &thread[thread_id];
253 if ((t->enabled == 0) ||
257 /* Allocate request */
258 req = thread_msg_alloc();
263 req->type = THREAD_REQ_PIPELINE_ENABLE;
264 req->pipeline_enable.p = p->p;
265 for (i = 0; i < p->n_tables; i++)
266 req->pipeline_enable.table[i].a =
268 req->pipeline_enable.msgq_req = p->msgq_req;
269 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
270 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
271 req->pipeline_enable.n_tables = p->n_tables;
273 /* Send request and wait for response */
274 rsp = thread_msg_send_recv(thread_id, req);
279 status = rsp->status;
282 thread_msg_free(rsp);
284 /* Request completion */
288 p->thread_id = thread_id;
295 thread_pipeline_disable(uint32_t thread_id,
296 const char *pipeline_name)
298 struct pipeline *p = pipeline_find(pipeline_name);
300 struct thread_msg_req *req;
301 struct thread_msg_rsp *rsp;
304 /* Check input params */
305 if ((thread_id >= RTE_MAX_LCORE) ||
309 t = &thread[thread_id];
316 if (p->thread_id != thread_id)
319 /* Allocate request */
320 req = thread_msg_alloc();
325 req->type = THREAD_REQ_PIPELINE_DISABLE;
326 req->pipeline_disable.p = p->p;
328 /* Send request and wait for response */
329 rsp = thread_msg_send_recv(thread_id, req);
334 status = rsp->status;
337 thread_msg_free(rsp);
339 /* Request completion */
349 * Data plane threads: message handling
351 static inline struct thread_msg_req *
352 thread_msg_recv(struct rte_ring *msgq_req)
354 struct thread_msg_req *req;
356 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Data plane side: post a response, retrying while the ring is full. */
static void
thread_msg_send(struct rte_ring *msgq_rsp,
	struct thread_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
375 static struct thread_msg_rsp *
376 thread_msg_handle_pipeline_enable(struct thread_data *t,
377 struct thread_msg_req *req)
379 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
380 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
384 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
389 t->p[t->n_pipelines] = req->pipeline_enable.p;
391 p->p = req->pipeline_enable.p;
392 for (i = 0; i < req->pipeline_enable.n_tables; i++)
394 req->pipeline_enable.table[i].a;
396 p->n_tables = req->pipeline_enable.n_tables;
398 p->msgq_req = req->pipeline_enable.msgq_req;
399 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
401 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
402 p->time_next = rte_get_tsc_cycles() + p->timer_period;
411 static struct thread_msg_rsp *
412 thread_msg_handle_pipeline_disable(struct thread_data *t,
413 struct thread_msg_req *req)
415 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
416 uint32_t n_pipelines = t->n_pipelines;
417 struct rte_pipeline *pipeline = req->pipeline_disable.p;
421 for (i = 0; i < n_pipelines; i++) {
422 struct pipeline_data *p = &t->pipeline_data[i];
424 if (p->p != pipeline)
427 if (i < n_pipelines - 1) {
428 struct rte_pipeline *pipeline_last =
429 t->p[n_pipelines - 1];
430 struct pipeline_data *p_last =
431 &t->pipeline_data[n_pipelines - 1];
433 t->p[i] = pipeline_last;
434 memcpy(p, p_last, sizeof(*p));
443 /* should not get here */
449 thread_msg_handle(struct thread_data *t)
452 struct thread_msg_req *req;
453 struct thread_msg_rsp *rsp;
455 req = thread_msg_recv(t->msgq_req);
460 case THREAD_REQ_PIPELINE_ENABLE:
461 rsp = thread_msg_handle_pipeline_enable(t, req);
464 case THREAD_REQ_PIPELINE_DISABLE:
465 rsp = thread_msg_handle_pipeline_disable(t, req);
469 rsp = (struct thread_msg_rsp *) req;
473 thread_msg_send(t->msgq_rsp, rsp);
478 * Master thread & data plane threads: message passing
480 enum pipeline_req_type {
482 PIPELINE_REQ_PORT_IN_STATS_READ,
483 PIPELINE_REQ_PORT_IN_ENABLE,
484 PIPELINE_REQ_PORT_IN_DISABLE,
487 PIPELINE_REQ_PORT_OUT_STATS_READ,
490 PIPELINE_REQ_TABLE_STATS_READ,
491 PIPELINE_REQ_TABLE_RULE_ADD,
492 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
493 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
494 PIPELINE_REQ_TABLE_RULE_DELETE,
495 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
496 PIPELINE_REQ_TABLE_RULE_STATS_READ,
497 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
498 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
502 struct pipeline_msg_req_port_in_stats_read {
506 struct pipeline_msg_req_port_out_stats_read {
510 struct pipeline_msg_req_table_stats_read {
514 struct pipeline_msg_req_table_rule_add {
515 struct table_rule_match match;
516 struct table_rule_action action;
519 struct pipeline_msg_req_table_rule_add_default {
520 struct table_rule_action action;
523 struct pipeline_msg_req_table_rule_add_bulk {
524 struct table_rule_match *match;
525 struct table_rule_action *action;
531 struct pipeline_msg_req_table_rule_delete {
532 struct table_rule_match match;
535 struct pipeline_msg_req_table_rule_stats_read {
540 struct pipeline_msg_req_table_mtr_profile_add {
541 uint32_t meter_profile_id;
542 struct rte_table_action_meter_profile profile;
545 struct pipeline_msg_req_table_mtr_profile_delete {
546 uint32_t meter_profile_id;
549 struct pipeline_msg_req {
550 enum pipeline_req_type type;
551 uint32_t id; /* Port IN, port OUT or table ID */
555 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
556 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
557 struct pipeline_msg_req_table_stats_read table_stats_read;
558 struct pipeline_msg_req_table_rule_add table_rule_add;
559 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
560 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
561 struct pipeline_msg_req_table_rule_delete table_rule_delete;
562 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
563 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
564 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
568 struct pipeline_msg_rsp_port_in_stats_read {
569 struct rte_pipeline_port_in_stats stats;
572 struct pipeline_msg_rsp_port_out_stats_read {
573 struct rte_pipeline_port_out_stats stats;
576 struct pipeline_msg_rsp_table_stats_read {
577 struct rte_pipeline_table_stats stats;
580 struct pipeline_msg_rsp_table_rule_add {
584 struct pipeline_msg_rsp_table_rule_add_default {
588 struct pipeline_msg_rsp_table_rule_add_bulk {
592 struct pipeline_msg_rsp_table_rule_stats_read {
593 struct rte_table_action_stats_counters stats;
596 struct pipeline_msg_rsp {
601 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
602 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
603 struct pipeline_msg_rsp_table_stats_read table_stats_read;
604 struct pipeline_msg_rsp_table_rule_add table_rule_add;
605 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
606 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
607 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
614 static struct pipeline_msg_req *
615 pipeline_msg_alloc(void)
617 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
618 sizeof(struct pipeline_msg_rsp));
620 return calloc(1, size);
624 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
629 static struct pipeline_msg_rsp *
630 pipeline_msg_send_recv(struct pipeline *p,
631 struct pipeline_msg_req *req)
633 struct rte_ring *msgq_req = p->msgq_req;
634 struct rte_ring *msgq_rsp = p->msgq_rsp;
635 struct pipeline_msg_rsp *rsp;
640 status = rte_ring_sp_enqueue(msgq_req, req);
641 } while (status == -ENOBUFS);
645 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
646 } while (status != 0);
652 pipeline_port_in_stats_read(const char *pipeline_name,
654 struct rte_pipeline_port_in_stats *stats,
658 struct pipeline_msg_req *req;
659 struct pipeline_msg_rsp *rsp;
662 /* Check input params */
663 if ((pipeline_name == NULL) ||
667 p = pipeline_find(pipeline_name);
670 (port_id >= p->n_ports_in))
673 /* Allocate request */
674 req = pipeline_msg_alloc();
679 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
681 req->port_in_stats_read.clear = clear;
683 /* Send request and wait for response */
684 rsp = pipeline_msg_send_recv(p, req);
689 status = rsp->status;
691 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
694 pipeline_msg_free(rsp);
700 pipeline_port_in_enable(const char *pipeline_name,
704 struct pipeline_msg_req *req;
705 struct pipeline_msg_rsp *rsp;
708 /* Check input params */
709 if (pipeline_name == NULL)
712 p = pipeline_find(pipeline_name);
715 (port_id >= p->n_ports_in))
718 /* Allocate request */
719 req = pipeline_msg_alloc();
724 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
727 /* Send request and wait for response */
728 rsp = pipeline_msg_send_recv(p, req);
733 status = rsp->status;
736 pipeline_msg_free(rsp);
742 pipeline_port_in_disable(const char *pipeline_name,
746 struct pipeline_msg_req *req;
747 struct pipeline_msg_rsp *rsp;
750 /* Check input params */
751 if (pipeline_name == NULL)
754 p = pipeline_find(pipeline_name);
757 (port_id >= p->n_ports_in))
760 /* Allocate request */
761 req = pipeline_msg_alloc();
766 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
769 /* Send request and wait for response */
770 rsp = pipeline_msg_send_recv(p, req);
775 status = rsp->status;
778 pipeline_msg_free(rsp);
784 pipeline_port_out_stats_read(const char *pipeline_name,
786 struct rte_pipeline_port_out_stats *stats,
790 struct pipeline_msg_req *req;
791 struct pipeline_msg_rsp *rsp;
794 /* Check input params */
795 if ((pipeline_name == NULL) ||
799 p = pipeline_find(pipeline_name);
802 (port_id >= p->n_ports_out))
805 /* Allocate request */
806 req = pipeline_msg_alloc();
811 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
813 req->port_out_stats_read.clear = clear;
815 /* Send request and wait for response */
816 rsp = pipeline_msg_send_recv(p, req);
821 status = rsp->status;
823 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
826 pipeline_msg_free(rsp);
832 pipeline_table_stats_read(const char *pipeline_name,
834 struct rte_pipeline_table_stats *stats,
838 struct pipeline_msg_req *req;
839 struct pipeline_msg_rsp *rsp;
842 /* Check input params */
843 if ((pipeline_name == NULL) ||
847 p = pipeline_find(pipeline_name);
850 (table_id >= p->n_tables))
853 /* Allocate request */
854 req = pipeline_msg_alloc();
859 req->type = PIPELINE_REQ_TABLE_STATS_READ;
861 req->table_stats_read.clear = clear;
863 /* Send request and wait for response */
864 rsp = pipeline_msg_send_recv(p, req);
869 status = rsp->status;
871 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
874 pipeline_msg_free(rsp);
880 match_check(struct table_rule_match *match,
886 if ((match == NULL) ||
888 (table_id >= p->n_tables))
891 table = &p->table[table_id];
892 if (match->match_type != table->params.match_type)
895 switch (match->match_type) {
898 struct table_acl_params *t = &table->params.match.acl;
899 struct table_rule_match_acl *r = &match->match.acl;
901 if ((r->ip_version && (t->ip_version == 0)) ||
902 ((r->ip_version == 0) && t->ip_version))
906 if ((r->sa_depth > 32) ||
910 if ((r->sa_depth > 128) ||
925 struct table_lpm_params *t = &table->params.match.lpm;
926 struct table_rule_match_lpm *r = &match->match.lpm;
928 if ((r->ip_version && (t->key_size != 4)) ||
929 ((r->ip_version == 0) && (t->key_size != 16)))
951 action_check(struct table_rule_action *action,
955 struct table_action_profile *ap;
957 if ((action == NULL) ||
959 (table_id >= p->n_tables))
962 ap = p->table[table_id].ap;
963 if (action->action_mask != ap->params.action_mask)
966 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
967 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
968 (action->fwd.id >= p->n_ports_out))
971 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
972 (action->fwd.id >= p->n_tables))
976 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
977 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
978 uint32_t tc_mask1 = action->mtr.tc_mask;
980 if (tc_mask1 != tc_mask0)
984 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
985 uint32_t n_subports_per_port =
986 ap->params.tm.n_subports_per_port;
987 uint32_t n_pipes_per_subport =
988 ap->params.tm.n_pipes_per_subport;
989 uint32_t subport_id = action->tm.subport_id;
990 uint32_t pipe_id = action->tm.pipe_id;
992 if ((subport_id >= n_subports_per_port) ||
993 (pipe_id >= n_pipes_per_subport))
997 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
998 uint64_t encap_mask = ap->params.encap.encap_mask;
999 enum rte_table_action_encap_type type = action->encap.type;
1001 if ((encap_mask & (1LLU << type)) == 0)
1005 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1006 int ip_version0 = ap->params.common.ip_version;
1007 int ip_version1 = action->nat.ip_version;
1009 if ((ip_version1 && (ip_version0 == 0)) ||
1010 ((ip_version1 == 0) && ip_version0))
1018 action_default_check(struct table_rule_action *action,
1022 if ((action == NULL) ||
1023 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1025 (table_id >= p->n_tables))
1028 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1029 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1030 (action->fwd.id >= p->n_ports_out))
1033 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1034 (action->fwd.id >= p->n_tables))
1042 pipeline_table_rule_add(const char *pipeline_name,
1044 struct table_rule_match *match,
1045 struct table_rule_action *action,
1049 struct pipeline_msg_req *req;
1050 struct pipeline_msg_rsp *rsp;
1053 /* Check input params */
1054 if ((pipeline_name == NULL) ||
1060 p = pipeline_find(pipeline_name);
1062 (p->enabled == 0) ||
1063 (table_id >= p->n_tables) ||
1064 match_check(match, p, table_id) ||
1065 action_check(action, p, table_id))
1068 /* Allocate request */
1069 req = pipeline_msg_alloc();
1074 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1076 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1077 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1079 /* Send request and wait for response */
1080 rsp = pipeline_msg_send_recv(p, req);
1085 status = rsp->status;
1087 *data = rsp->table_rule_add.data;
1090 pipeline_msg_free(rsp);
1096 pipeline_table_rule_add_default(const char *pipeline_name,
1098 struct table_rule_action *action,
1102 struct pipeline_msg_req *req;
1103 struct pipeline_msg_rsp *rsp;
1106 /* Check input params */
1107 if ((pipeline_name == NULL) ||
1112 p = pipeline_find(pipeline_name);
1114 (p->enabled == 0) ||
1115 (table_id >= p->n_tables) ||
1116 action_default_check(action, p, table_id))
1119 /* Allocate request */
1120 req = pipeline_msg_alloc();
1125 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1127 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1129 /* Send request and wait for response */
1130 rsp = pipeline_msg_send_recv(p, req);
1135 status = rsp->status;
1137 *data = rsp->table_rule_add_default.data;
1140 pipeline_msg_free(rsp);
1146 pipeline_table_rule_add_bulk(const char *pipeline_name,
1148 struct table_rule_match *match,
1149 struct table_rule_action *action,
1154 struct pipeline_msg_req *req;
1155 struct pipeline_msg_rsp *rsp;
1159 /* Check input params */
1160 if ((pipeline_name == NULL) ||
1164 (n_rules == NULL) ||
1168 p = pipeline_find(pipeline_name);
1170 (p->enabled == 0) ||
1171 (table_id >= p->n_tables))
1174 for (i = 0; i < *n_rules; i++)
1175 if (match_check(match, p, table_id) ||
1176 action_check(action, p, table_id))
1179 /* Allocate request */
1180 req = pipeline_msg_alloc();
1185 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1187 req->table_rule_add_bulk.match = match;
1188 req->table_rule_add_bulk.action = action;
1189 req->table_rule_add_bulk.data = data;
1190 req->table_rule_add_bulk.n_rules = *n_rules;
1191 req->table_rule_add_bulk.bulk =
1192 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1194 /* Send request and wait for response */
1195 rsp = pipeline_msg_send_recv(p, req);
1200 status = rsp->status;
1202 *n_rules = rsp->table_rule_add_bulk.n_rules;
1205 pipeline_msg_free(rsp);
1211 pipeline_table_rule_delete(const char *pipeline_name,
1213 struct table_rule_match *match)
1216 struct pipeline_msg_req *req;
1217 struct pipeline_msg_rsp *rsp;
1220 /* Check input params */
1221 if ((pipeline_name == NULL) ||
1225 p = pipeline_find(pipeline_name);
1227 (p->enabled == 0) ||
1228 (table_id >= p->n_tables) ||
1229 match_check(match, p, table_id))
1232 /* Allocate request */
1233 req = pipeline_msg_alloc();
1238 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1240 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1242 /* Send request and wait for response */
1243 rsp = pipeline_msg_send_recv(p, req);
1248 status = rsp->status;
1251 pipeline_msg_free(rsp);
1257 pipeline_table_rule_delete_default(const char *pipeline_name,
1261 struct pipeline_msg_req *req;
1262 struct pipeline_msg_rsp *rsp;
1265 /* Check input params */
1266 if (pipeline_name == NULL)
1269 p = pipeline_find(pipeline_name);
1271 (p->enabled == 0) ||
1272 (table_id >= p->n_tables))
1275 /* Allocate request */
1276 req = pipeline_msg_alloc();
1281 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1284 /* Send request and wait for response */
1285 rsp = pipeline_msg_send_recv(p, req);
1290 status = rsp->status;
1293 pipeline_msg_free(rsp);
1299 pipeline_table_rule_stats_read(const char *pipeline_name,
1302 struct rte_table_action_stats_counters *stats,
1306 struct pipeline_msg_req *req;
1307 struct pipeline_msg_rsp *rsp;
1310 /* Check input params */
1311 if ((pipeline_name == NULL) ||
1316 p = pipeline_find(pipeline_name);
1318 (p->enabled == 0) ||
1319 (table_id >= p->n_tables))
1322 /* Allocate request */
1323 req = pipeline_msg_alloc();
1328 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1330 req->table_rule_stats_read.data = data;
1331 req->table_rule_stats_read.clear = clear;
1333 /* Send request and wait for response */
1334 rsp = pipeline_msg_send_recv(p, req);
1339 status = rsp->status;
1341 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1344 pipeline_msg_free(rsp);
1350 pipeline_table_mtr_profile_add(const char *pipeline_name,
1352 uint32_t meter_profile_id,
1353 struct rte_table_action_meter_profile *profile)
1356 struct pipeline_msg_req *req;
1357 struct pipeline_msg_rsp *rsp;
1360 /* Check input params */
1361 if ((pipeline_name == NULL) ||
1365 p = pipeline_find(pipeline_name);
1367 (p->enabled == 0) ||
1368 (table_id >= p->n_tables))
1371 /* Allocate request */
1372 req = pipeline_msg_alloc();
1377 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1379 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1380 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1382 /* Send request and wait for response */
1383 rsp = pipeline_msg_send_recv(p, req);
1388 status = rsp->status;
1391 pipeline_msg_free(rsp);
1397 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1399 uint32_t meter_profile_id)
1402 struct pipeline_msg_req *req;
1403 struct pipeline_msg_rsp *rsp;
1406 /* Check input params */
1407 if (pipeline_name == NULL)
1410 p = pipeline_find(pipeline_name);
1412 (p->enabled == 0) ||
1413 (table_id >= p->n_tables))
1416 /* Allocate request */
1417 req = pipeline_msg_alloc();
1422 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1424 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1426 /* Send request and wait for response */
1427 rsp = pipeline_msg_send_recv(p, req);
1432 status = rsp->status;
1435 pipeline_msg_free(rsp);
1441 * Data plane threads: message handling
1443 static inline struct pipeline_msg_req *
1444 pipeline_msg_recv(struct rte_ring *msgq_req)
1446 struct pipeline_msg_req *req;
1448 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/* Data plane side: post a response, retrying while the ring is full. */
static void
pipeline_msg_send(struct rte_ring *msgq_rsp,
	struct pipeline_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
1467 static struct pipeline_msg_rsp *
1468 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
1469 struct pipeline_msg_req *req)
1471 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1472 uint32_t port_id = req->id;
1473 int clear = req->port_in_stats_read.clear;
1475 rsp->status = rte_pipeline_port_in_stats_read(p->p,
1477 &rsp->port_in_stats_read.stats,
1483 static struct pipeline_msg_rsp *
1484 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
1485 struct pipeline_msg_req *req)
1487 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1488 uint32_t port_id = req->id;
1490 rsp->status = rte_pipeline_port_in_enable(p->p,
1496 static struct pipeline_msg_rsp *
1497 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
1498 struct pipeline_msg_req *req)
1500 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1501 uint32_t port_id = req->id;
1503 rsp->status = rte_pipeline_port_in_disable(p->p,
1509 static struct pipeline_msg_rsp *
1510 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
1511 struct pipeline_msg_req *req)
1513 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1514 uint32_t port_id = req->id;
1515 int clear = req->port_out_stats_read.clear;
1517 rsp->status = rte_pipeline_port_out_stats_read(p->p,
1519 &rsp->port_out_stats_read.stats,
1525 static struct pipeline_msg_rsp *
1526 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
1527 struct pipeline_msg_req *req)
1529 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
1530 uint32_t port_id = req->id;
1531 int clear = req->table_stats_read.clear;
1533 rsp->status = rte_pipeline_table_stats_read(p->p,
1535 &rsp->table_stats_read.stats,
1541 union table_rule_match_low_level {
1542 struct rte_table_acl_rule_add_params acl_add;
1543 struct rte_table_acl_rule_delete_params acl_delete;
1544 struct rte_table_array_key array;
1545 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1546 struct rte_table_lpm_key lpm_ipv4;
1547 struct rte_table_lpm_ipv6_key lpm_ipv6;
/* Split an IPv6 prefix depth (0..128 bits) into four per-32-bit-word
 * depths, as required by the ACL library's 4 x u32 address fields.
 * Returns 0 on success, -1 if depth > 128. */
static int
match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
{
	if (depth > 128)
		return -1;

	switch (depth / 32) {
	case 0:
		depth32[0] = depth;
		depth32[1] = 0;
		depth32[2] = 0;
		depth32[3] = 0;
		return 0;

	case 1:
		depth32[0] = 32;
		depth32[1] = depth - 32;
		depth32[2] = 0;
		depth32[3] = 0;
		return 0;

	case 2:
		depth32[0] = 32;
		depth32[1] = 32;
		depth32[2] = depth - 64;
		depth32[3] = 0;
		return 0;

	case 3:
		depth32[0] = 32;
		depth32[1] = 32;
		depth32[2] = 32;
		depth32[3] = depth - 96;
		return 0;

	case 4:
		/* depth == 128 exactly */
		depth32[0] = 32;
		depth32[1] = 32;
		depth32[2] = 32;
		depth32[3] = 32;
		return 0;

	default:
		return -1;
	}
}
1598 match_convert(struct table_rule_match *mh,
1599 union table_rule_match_low_level *ml,
1602 memset(ml, 0, sizeof(*ml));
1604 switch (mh->match_type) {
1606 if (mh->match.acl.ip_version)
1608 ml->acl_add.field_value[0].value.u8 =
1609 mh->match.acl.proto;
1610 ml->acl_add.field_value[0].mask_range.u8 =
1611 mh->match.acl.proto_mask;
1613 ml->acl_add.field_value[1].value.u32 =
1614 mh->match.acl.ipv4.sa;
1615 ml->acl_add.field_value[1].mask_range.u32 =
1616 mh->match.acl.sa_depth;
1618 ml->acl_add.field_value[2].value.u32 =
1619 mh->match.acl.ipv4.da;
1620 ml->acl_add.field_value[2].mask_range.u32 =
1621 mh->match.acl.da_depth;
1623 ml->acl_add.field_value[3].value.u16 =
1625 ml->acl_add.field_value[3].mask_range.u16 =
1628 ml->acl_add.field_value[4].value.u16 =
1630 ml->acl_add.field_value[4].mask_range.u16 =
1633 ml->acl_add.priority =
1634 (int32_t) mh->match.acl.priority;
1636 ml->acl_delete.field_value[0].value.u8 =
1637 mh->match.acl.proto;
1638 ml->acl_delete.field_value[0].mask_range.u8 =
1639 mh->match.acl.proto_mask;
1641 ml->acl_delete.field_value[1].value.u32 =
1642 mh->match.acl.ipv4.sa;
1643 ml->acl_delete.field_value[1].mask_range.u32 =
1644 mh->match.acl.sa_depth;
1646 ml->acl_delete.field_value[2].value.u32 =
1647 mh->match.acl.ipv4.da;
1648 ml->acl_delete.field_value[2].mask_range.u32 =
1649 mh->match.acl.da_depth;
1651 ml->acl_delete.field_value[3].value.u16 =
1653 ml->acl_delete.field_value[3].mask_range.u16 =
1656 ml->acl_delete.field_value[4].value.u16 =
1658 ml->acl_delete.field_value[4].mask_range.u16 =
1664 (uint32_t *) mh->match.acl.ipv6.sa;
1666 (uint32_t *) mh->match.acl.ipv6.da;
1667 uint32_t sa32_depth[4], da32_depth[4];
1670 status = match_convert_ipv6_depth(
1671 mh->match.acl.sa_depth,
1676 status = match_convert_ipv6_depth(
1677 mh->match.acl.da_depth,
1682 ml->acl_add.field_value[0].value.u8 =
1683 mh->match.acl.proto;
1684 ml->acl_add.field_value[0].mask_range.u8 =
1685 mh->match.acl.proto_mask;
1687 ml->acl_add.field_value[1].value.u32 = sa32[0];
1688 ml->acl_add.field_value[1].mask_range.u32 =
1690 ml->acl_add.field_value[2].value.u32 = sa32[1];
1691 ml->acl_add.field_value[2].mask_range.u32 =
1693 ml->acl_add.field_value[3].value.u32 = sa32[2];
1694 ml->acl_add.field_value[3].mask_range.u32 =
1696 ml->acl_add.field_value[4].value.u32 = sa32[3];
1697 ml->acl_add.field_value[4].mask_range.u32 =
1700 ml->acl_add.field_value[5].value.u32 = da32[0];
1701 ml->acl_add.field_value[5].mask_range.u32 =
1703 ml->acl_add.field_value[6].value.u32 = da32[1];
1704 ml->acl_add.field_value[6].mask_range.u32 =
1706 ml->acl_add.field_value[7].value.u32 = da32[2];
1707 ml->acl_add.field_value[7].mask_range.u32 =
1709 ml->acl_add.field_value[8].value.u32 = da32[3];
1710 ml->acl_add.field_value[8].mask_range.u32 =
1713 ml->acl_add.field_value[9].value.u16 =
1715 ml->acl_add.field_value[9].mask_range.u16 =
1718 ml->acl_add.field_value[10].value.u16 =
1720 ml->acl_add.field_value[10].mask_range.u16 =
1723 ml->acl_add.priority =
1724 (int32_t) mh->match.acl.priority;
1727 (uint32_t *) mh->match.acl.ipv6.sa;
1729 (uint32_t *) mh->match.acl.ipv6.da;
1730 uint32_t sa32_depth[4], da32_depth[4];
1733 status = match_convert_ipv6_depth(
1734 mh->match.acl.sa_depth,
1739 status = match_convert_ipv6_depth(
1740 mh->match.acl.da_depth,
1745 ml->acl_delete.field_value[0].value.u8 =
1746 mh->match.acl.proto;
1747 ml->acl_delete.field_value[0].mask_range.u8 =
1748 mh->match.acl.proto_mask;
1750 ml->acl_delete.field_value[1].value.u32 =
1752 ml->acl_delete.field_value[1].mask_range.u32 =
1754 ml->acl_delete.field_value[2].value.u32 =
1756 ml->acl_delete.field_value[2].mask_range.u32 =
1758 ml->acl_delete.field_value[3].value.u32 =
1760 ml->acl_delete.field_value[3].mask_range.u32 =
1762 ml->acl_delete.field_value[4].value.u32 =
1764 ml->acl_delete.field_value[4].mask_range.u32 =
1767 ml->acl_delete.field_value[5].value.u32 =
1769 ml->acl_delete.field_value[5].mask_range.u32 =
1771 ml->acl_delete.field_value[6].value.u32 =
1773 ml->acl_delete.field_value[6].mask_range.u32 =
1775 ml->acl_delete.field_value[7].value.u32 =
1777 ml->acl_delete.field_value[7].mask_range.u32 =
1779 ml->acl_delete.field_value[8].value.u32 =
1781 ml->acl_delete.field_value[8].mask_range.u32 =
1784 ml->acl_delete.field_value[9].value.u16 =
1786 ml->acl_delete.field_value[9].mask_range.u16 =
1789 ml->acl_delete.field_value[10].value.u16 =
1791 ml->acl_delete.field_value[10].mask_range.u16 =
1797 ml->array.pos = mh->match.array.pos;
1801 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
1805 if (mh->match.lpm.ip_version) {
1806 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
1807 ml->lpm_ipv4.depth = mh->match.lpm.depth;
1809 memcpy(ml->lpm_ipv6.ip,
1810 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
1811 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/*
 * Data plane thread: handle a TABLE_RULE_ADD request.
 *
 * Builds a low-level table entry in the per-pipeline scratch buffer
 * (p->buffer) by applying each table action enabled in the request's
 * action_mask, converts the high-level match to its low-level form via
 * match_convert(), then installs the (match, action) pair into pipeline
 * table req->id. The response is written in place over the request
 * message (rsp aliases req).
 *
 * NOTE(review): this listing has gaps; the per-action error paths and
 * trailing argument lists between the visible statements are not shown.
 */
1821 static struct pipeline_msg_rsp *
1822 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
1823 struct pipeline_msg_req *req)
1825 union table_rule_match_low_level match_ll;
1826 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req; /* response overlays request */
1827 struct table_rule_match *match = &req->table_rule_add.match;
1828 struct table_rule_action *action = &req->table_rule_add.action;
1829 struct rte_pipeline_table_entry *data_in, *data_out;
1830 uint32_t table_id = req->id;
1831 int key_found, status;
1832 struct rte_table_action *a = p->table_data[table_id].a;
/* Stage the entry in the thread-local scratch buffer. */
1835 memset(p->buffer, 0, sizeof(p->buffer));
1836 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* Apply each action selected in the request's action_mask. */
1838 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1839 status = rte_table_action_apply(a,
1841 RTE_TABLE_ACTION_FWD,
1850 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1851 status = rte_table_action_apply(a,
1853 RTE_TABLE_ACTION_MTR,
1862 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1863 status = rte_table_action_apply(a,
1865 RTE_TABLE_ACTION_TM,
1874 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1875 status = rte_table_action_apply(a,
1877 RTE_TABLE_ACTION_ENCAP,
1886 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1887 status = rte_table_action_apply(a,
1889 RTE_TABLE_ACTION_NAT,
1898 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
1899 status = rte_table_action_apply(a,
1901 RTE_TABLE_ACTION_TTL,
1910 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
1911 status = rte_table_action_apply(a,
1913 RTE_TABLE_ACTION_STATS,
1922 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
1923 status = rte_table_action_apply(a,
1925 RTE_TABLE_ACTION_TIME,
1934 /* Add rule (match, action) to table */
1935 status = match_convert(match, &match_ll, 1);
1941 status = rte_pipeline_table_entry_add(p->p,
1952 /* Write response */
1954 rsp->table_rule_add.data = data_out; /* handle to the installed entry */
/*
 * Data plane thread: handle a TABLE_RULE_ADD_DEFAULT request.
 *
 * Stages a default (miss) entry in p->buffer carrying only the forward
 * action from the request, installs it as the default entry of pipeline
 * table req->id, and writes the resulting entry handle back into the
 * in-place response (rsp aliases req).
 *
 * NOTE(review): numbered listing with gaps; status checks between the
 * visible statements are not shown.
 */
1959 static struct pipeline_msg_rsp *
1960 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
1961 struct pipeline_msg_req *req)
1963 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req; /* response overlays request */
1964 struct table_rule_action *action = &req->table_rule_add_default.action;
1965 struct rte_pipeline_table_entry *data_in, *data_out;
1966 uint32_t table_id = req->id;
1970 memset(p->buffer, 0, sizeof(p->buffer));
1971 data_in = (struct rte_pipeline_table_entry *) p->buffer;
/* Only the FWD action is meaningful for a default entry; the entry's
 * port_id or table_id field is filled depending on the fwd target. */
1973 data_in->action = action->fwd.action;
1974 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1975 data_in->port_id = action->fwd.id;
1976 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1977 data_in->table_id = action->fwd.id;
1979 /* Add default rule to table */
1980 status = rte_pipeline_table_default_entry_add(p->p,
1989 /* Write response */
1991 rsp->table_rule_add_default.data = data_out; /* handle to the installed default entry */
/*
 * Data plane thread: handle a TABLE_RULE_ADD_BULK request.
 *
 * Adds n_rules (match, action) pairs to pipeline table req->id. Heap
 * scratch arrays hold the low-level match structures, the low-level
 * action entries (one TABLE_RULE_ACTION_SIZE_MAX slot per rule), the
 * pointer arrays the bulk API expects, and a per-rule key_found array.
 * Depending on the request's 'bulk' flag, rules are installed either
 * with a single rte_pipeline_table_entry_add_bulk() call or one
 * rte_pipeline_table_entry_add() per rule. The response is written in
 * place (rsp aliases req); on the error path n_rules is reported as 0.
 *
 * Ownership: all five calloc'd scratch arrays are freed before return
 * (both the success and the visible error epilogues free them).
 *
 * NOTE(review): numbered listing with gaps; allocation-failure handling,
 * per-action error paths and some free() lines fall in the gaps.
 */
1996 static struct pipeline_msg_rsp *
1997 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
1998 struct pipeline_msg_req *req)
2001 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req; /* response overlays request */
2003 uint32_t table_id = req->id;
2004 struct table_rule_match *match = req->table_rule_add_bulk.match;
2005 struct table_rule_action *action = req->table_rule_add_bulk.action;
2006 struct rte_pipeline_table_entry **data =
2007 (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
2008 uint32_t n_rules = req->table_rule_add_bulk.n_rules;
2009 uint32_t bulk = req->table_rule_add_bulk.bulk; /* non-zero: use the bulk add API */
2011 struct rte_table_action *a = p->table_data[table_id].a;
2012 union table_rule_match_low_level *match_ll;
2014 void **match_ll_ptr;
2015 struct rte_pipeline_table_entry **action_ll_ptr;
2019 /* Memory allocation */
2020 match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
2021 action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
2022 match_ll_ptr = calloc(n_rules, sizeof(void *));
2024 calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
2025 found = calloc(n_rules, sizeof(int));
2027 if ((match_ll == NULL) ||
2028 (action_ll == NULL) ||
2029 (match_ll_ptr == NULL) ||
2030 (action_ll_ptr == NULL) ||
/* Point each pointer-array slot at its rule's scratch slot. */
2034 for (i = 0; i < n_rules; i++) {
2035 match_ll_ptr[i] = (void *)&match_ll[i];
2037 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
2040 /* Rule match conversion */
2041 for (i = 0; i < n_rules; i++) {
2042 status = match_convert(&match[i], match_ll_ptr[i], 1);
2047 /* Rule action conversion */
2048 for (i = 0; i < n_rules; i++) {
2049 void *data_in = action_ll_ptr[i];
2050 struct table_rule_action *act = &action[i];
/* Apply each action selected in this rule's action_mask. */
2052 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2053 status = rte_table_action_apply(a,
2055 RTE_TABLE_ACTION_FWD,
2062 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2063 status = rte_table_action_apply(a,
2065 RTE_TABLE_ACTION_MTR,
2072 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2073 status = rte_table_action_apply(a,
2075 RTE_TABLE_ACTION_TM,
2082 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2083 status = rte_table_action_apply(a,
2085 RTE_TABLE_ACTION_ENCAP,
2092 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2093 status = rte_table_action_apply(a,
2095 RTE_TABLE_ACTION_NAT,
2102 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2103 status = rte_table_action_apply(a,
2105 RTE_TABLE_ACTION_TTL,
2112 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2113 status = rte_table_action_apply(a,
2115 RTE_TABLE_ACTION_STATS,
2122 if (act->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2123 status = rte_table_action_apply(a,
2125 RTE_TABLE_ACTION_TIME,
2133 /* Add rule (match, action) to table */
2135 status = rte_pipeline_table_entry_add_bulk(p->p,
/* Fallback path: install rules one at a time when bulk is not requested. */
2145 for (i = 0; i < n_rules; i++) {
2146 status = rte_pipeline_table_entry_add(p->p,
2158 /* Write response */
2160 rsp->table_rule_add_bulk.n_rules = n_rules;
/* Success epilogue: release scratch arrays. */
2164 free(action_ll_ptr);
/* Error epilogue: release scratch arrays and report zero rules added. */
2173 free(action_ll_ptr);
2179 rsp->table_rule_add_bulk.n_rules = 0;
2183 static struct pipeline_msg_rsp *
2184 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2185 struct pipeline_msg_req *req)
2187 union table_rule_match_low_level match_ll;
2188 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2189 struct table_rule_match *match = &req->table_rule_delete.match;
2190 uint32_t table_id = req->id;
2191 int key_found, status;
2193 status = match_convert(match, &match_ll, 0);
2199 rsp->status = rte_pipeline_table_entry_delete(p->p,
/*
 * Data plane thread: handle a TABLE_RULE_DELETE_DEFAULT request.
 *
 * Removes the default (miss) entry of pipeline table req->id and stores
 * the result in the in-place response (rsp aliases req).
 */
2208 static struct pipeline_msg_rsp *
2209 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2210 struct pipeline_msg_req *req)
2212 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req; /* response overlays request */
2213 uint32_t table_id = req->id;
2215 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/*
 * Data plane thread: handle a TABLE_RULE_STATS_READ request.
 *
 * Reads (and optionally clears, per the request's 'clear' flag) the
 * per-rule statistics for the table entry identified by the request's
 * opaque 'data' handle, using the table action profile of table
 * req->id. Stats and status go into the in-place response.
 */
2222 static struct pipeline_msg_rsp *
2223 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2224 struct pipeline_msg_req *req)
2226 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req; /* response overlays request */
2227 uint32_t table_id = req->id;
2228 void *data = req->table_rule_stats_read.data; /* entry handle from a previous add */
2229 int clear = req->table_rule_stats_read.clear; /* non-zero: reset counters after read */
2230 struct rte_table_action *a = p->table_data[table_id].a;
2232 rsp->status = rte_table_action_stats_read(a,
2234 &rsp->table_rule_stats_read.stats,
/*
 * Data plane thread: handle a TABLE_MTR_PROFILE_ADD request.
 *
 * Registers a meter profile (id + parameters from the request) with the
 * table action profile of pipeline table req->id; status goes into the
 * in-place response (rsp aliases req).
 */
2240 static struct pipeline_msg_rsp *
2241 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2242 struct pipeline_msg_req *req)
2244 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req; /* response overlays request */
2245 uint32_t table_id = req->id;
2246 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2247 struct rte_table_action_meter_profile *profile =
2248 &req->table_mtr_profile_add.profile;
2249 struct rte_table_action *a = p->table_data[table_id].a;
2251 rsp->status = rte_table_action_meter_profile_add(a,
/*
 * Data plane thread: handle a TABLE_MTR_PROFILE_DELETE request.
 *
 * Unregisters the given meter profile id from the table action profile
 * of pipeline table req->id; status goes into the in-place response
 * (rsp aliases req).
 */
2258 static struct pipeline_msg_rsp *
2259 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2260 struct pipeline_msg_req *req)
2262 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req; /* response overlays request */
2263 uint32_t table_id = req->id;
2264 uint32_t meter_profile_id =
2265 req->table_mtr_profile_delete.meter_profile_id;
2266 struct rte_table_action *a = p->table_data[table_id].a;
2268 rsp->status = rte_table_action_meter_profile_delete(a,
/*
 * Data plane thread: drain and dispatch one pipeline's request queue.
 *
 * Receives requests from p->msgq_req, dispatches each by req->type to
 * the matching handler (handlers reuse the request message as the
 * response), and sends the response on p->msgq_rsp. Unknown request
 * types fall through to the default arm, which echoes the message back
 * as the response.
 *
 * NOTE(review): numbered listing with gaps; the enclosing receive loop,
 * the NULL-request exit and the 'break' after each case fall in the
 * gaps — presumably this runs until msgq_req is empty (confirm against
 * the full source).
 */
2275 pipeline_msg_handle(struct pipeline_data *p)
2278 struct pipeline_msg_req *req;
2279 struct pipeline_msg_rsp *rsp;
2281 req = pipeline_msg_recv(p->msgq_req);
2285 switch (req->type) {
2286 case PIPELINE_REQ_PORT_IN_STATS_READ:
2287 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
2290 case PIPELINE_REQ_PORT_IN_ENABLE:
2291 rsp = pipeline_msg_handle_port_in_enable(p, req);
2294 case PIPELINE_REQ_PORT_IN_DISABLE:
2295 rsp = pipeline_msg_handle_port_in_disable(p, req);
2298 case PIPELINE_REQ_PORT_OUT_STATS_READ:
2299 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
2302 case PIPELINE_REQ_TABLE_STATS_READ:
2303 rsp = pipeline_msg_handle_table_stats_read(p, req);
2306 case PIPELINE_REQ_TABLE_RULE_ADD:
2307 rsp = pipeline_msg_handle_table_rule_add(p, req);
2310 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2311 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2314 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2315 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2318 case PIPELINE_REQ_TABLE_RULE_DELETE:
2319 rsp = pipeline_msg_handle_table_rule_delete(p, req);
2322 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2323 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
2326 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
2327 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
2330 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
2331 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
2334 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
2335 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
/* Default arm: unrecognized type — echo request back as response. */
2339 rsp = (struct pipeline_msg_rsp *) req;
2343 pipeline_msg_send(p->msgq_rsp, rsp);
2348 * Data plane threads: main
2351 thread_main(void *arg __rte_unused)
2353 struct thread_data *t;
2354 uint32_t thread_id, i;
2356 thread_id = rte_lcore_id();
2357 t = &thread_data[thread_id];
2360 for (i = 0; ; i++) {
2364 for (j = 0; j < t->n_pipelines; j++)
2365 rte_pipeline_run(t->p[j]);
2368 if ((i & 0xF) == 0) {
2369 uint64_t time = rte_get_tsc_cycles();
2370 uint64_t time_next_min = UINT64_MAX;
2372 if (time < t->time_next_min)
2375 /* Pipeline message queues */
2376 for (j = 0; j < t->n_pipelines; j++) {
2377 struct pipeline_data *p =
2378 &t->pipeline_data[j];
2379 uint64_t time_next = p->time_next;
2381 if (time_next <= time) {
2382 pipeline_msg_handle(p);
2383 rte_pipeline_flush(p->p);
2384 time_next = time + p->timer_period;
2385 p->time_next = time_next;
2388 if (time_next < time_next_min)
2389 time_next_min = time_next;
2392 /* Thread message queues */
2394 uint64_t time_next = t->time_next;
2396 if (time_next <= time) {
2397 thread_msg_handle(t);
2398 time_next = time + t->timer_period;
2399 t->time_next = time_next;
2402 if (time_next < time_next_min)
2403 time_next_min = time_next;
2406 t->time_next_min = time_next_min;