1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
/* NOTE(review): this file is a mangled paste — interior lines (e.g. the
 * matching #endif lines) are missing and each surviving line carries a stray
 * original line number token; code kept byte-identical below.
 */
/* Compile-time knobs; each may be overridden from the build system. */
/* Max number of pipelines a single data plane thread can run. */
22 #ifndef THREAD_PIPELINES_MAX
23 #define THREAD_PIPELINES_MAX 256
/* Number of slots in each master<->thread message ring. */
26 #ifndef THREAD_MSGQ_SIZE
27 #define THREAD_MSGQ_SIZE 64
/* Per-thread timer period, in milliseconds. */
30 #ifndef THREAD_TIMER_PERIOD_MS
31 #define THREAD_TIMER_PERIOD_MS 100
35 * Master thread: data plane thread context
/* Request ring: master thread -> data plane thread. */
38 struct rte_ring *msgq_req;
/* Response ring: data plane thread -> master thread. */
39 struct rte_ring *msgq_rsp;
/* One context entry per lcore, indexed by lcore id (master-side view).
 * NOTE(review): struct braces are missing from this paste fragment.
 */
44 static struct thread thread[RTE_MAX_LCORE];
47 * Data plane threads: context
/* Per-table handle used by the data plane thread (fragment of table_data). */
50 struct rte_table_action *a;
/* Per-pipeline state owned by the data plane thread that runs it. */
53 struct pipeline_data {
54 struct rte_pipeline *p;
55 struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
/* Rings for pipeline-scoped requests/responses from the master thread. */
58 struct rte_ring *msgq_req;
59 struct rte_ring *msgq_rsp;
60 uint64_t timer_period; /* Measured in CPU cycles. */
/* Scratch space for building a table rule action in place. */
63 uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
/* Per-thread state: the set of pipelines this thread runs. */
67 struct rte_pipeline *p[THREAD_PIPELINES_MAX];
70 struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
/* Rings for thread-scoped requests/responses from the master thread. */
71 struct rte_ring *msgq_req;
72 struct rte_ring *msgq_rsp;
73 uint64_t timer_period; /* Measured in CPU cycles. */
74 uint64_t time_next_min;
76 } __rte_cache_aligned;
/* One context entry per lcore, indexed by lcore id (data-plane-side view). */
78 static struct thread_data thread_data[RTE_MAX_LCORE];
81 * Master thread: data plane thread init
/* First pass: free any rings left over from a previous init attempt. */
88 for (i = 0; i < RTE_MAX_LCORE; i++) {
89 struct thread *t = &thread[i];
91 if (!rte_lcore_is_enabled(i))
96 rte_ring_free(t->msgq_req);
99 rte_ring_free(t->msgq_rsp);
/* Second pass: create the request/response rings for every worker lcore. */
108 RTE_LCORE_FOREACH_SLAVE(i) {
110 struct rte_ring *msgq_req, *msgq_rsp;
111 struct thread *t = &thread[i];
112 struct thread_data *t_data = &thread_data[i];
/* NOTE(review): variable is named cpu_id but holds the lcore's SOCKET id
 * (rte_lcore_to_socket_id) — presumably used as the NUMA node for
 * rte_ring_create; confirm against the missing call arguments.
 */
113 uint32_t cpu_id = rte_lcore_to_socket_id(i);
116 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
118 msgq_req = rte_ring_create(name,
121 RING_F_SP_ENQ | RING_F_SC_DEQ);
123 if (msgq_req == NULL) {
128 snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
130 msgq_rsp = rte_ring_create(name,
133 RING_F_SP_ENQ | RING_F_SC_DEQ);
135 if (msgq_rsp == NULL) {
140 /* Master thread records */
141 t->msgq_req = msgq_req;
142 t->msgq_rsp = msgq_rsp;
145 /* Data plane thread records */
146 t_data->n_pipelines = 0;
147 t_data->msgq_req = msgq_req;
148 t_data->msgq_rsp = msgq_rsp;
/* Convert the ms period to TSC cycles and arm the first timer deadline. */
149 t_data->timer_period =
150 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
151 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
152 t_data->time_next_min = t_data->time_next;
159 thread_is_running(uint32_t thread_id)
161 enum rte_lcore_state_t thread_state;
163 thread_state = rte_eal_get_lcore_state(thread_id);
164 return (thread_state == RUNNING) ? 1 : 0;
168 * Pipeline is running when:
169 * (A) Pipeline is mapped to a data plane thread AND
170 * (B) Its data plane thread is in RUNNING state.
173 pipeline_is_running(struct pipeline *p)
178 return thread_is_running(p->thread_id);
182 * Master thread & data plane threads: message passing
/* Request types the master thread can send to a data plane thread. */
184 enum thread_req_type {
185 THREAD_REQ_PIPELINE_ENABLE = 0,
186 THREAD_REQ_PIPELINE_DISABLE,
/* Request message; payload fields below belong to the pipeline_enable
 * variant (fragment — the union/struct wrappers are missing from this paste).
 */
190 struct thread_msg_req {
191 enum thread_req_type type;
195 struct rte_pipeline *p;
197 struct rte_table_action *a;
198 } table[RTE_PIPELINE_TABLE_MAX];
199 struct rte_ring *msgq_req;
200 struct rte_ring *msgq_rsp;
201 uint32_t timer_period_ms;
/* pipeline_disable variant payload. */
206 struct rte_pipeline *p;
/* Response message (body lines missing; presumably an int status). */
211 struct thread_msg_rsp {
218 static struct thread_msg_req *
219 thread_msg_alloc(void)
221 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
222 sizeof(struct thread_msg_rsp));
224 return calloc(1, size);
/* Release a message buffer obtained from thread_msg_alloc().
 * (Fix: stray line-number token removed; missing return type and body
 * restored.)
 */
static void
thread_msg_free(struct thread_msg_rsp *rsp)
{
	free(rsp);
}
233 static struct thread_msg_rsp *
234 thread_msg_send_recv(uint32_t thread_id,
235 struct thread_msg_req *req)
237 struct thread *t = &thread[thread_id];
238 struct rte_ring *msgq_req = t->msgq_req;
239 struct rte_ring *msgq_rsp = t->msgq_rsp;
240 struct thread_msg_rsp *rsp;
245 status = rte_ring_sp_enqueue(msgq_req, req);
246 } while (status == -ENOBUFS);
250 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
251 } while (status != 0);
/* Map a pipeline onto a data plane thread. Two paths are visible below:
 * if the target thread is not RUNNING, the master thread updates the
 * thread_data tables directly; otherwise it sends a PIPELINE_ENABLE request
 * over the message rings and waits for the response.
 * NOTE(review): paste fragment — error-handling lines and braces are missing;
 * code kept byte-identical.
 */
257 thread_pipeline_enable(uint32_t thread_id,
258 const char *pipeline_name)
260 struct pipeline *p = pipeline_find(pipeline_name);
262 struct thread_msg_req *req;
263 struct thread_msg_rsp *rsp;
267 /* Check input params */
268 if ((thread_id >= RTE_MAX_LCORE) ||
270 (p->n_ports_in == 0) ||
271 (p->n_ports_out == 0) ||
275 t = &thread[thread_id];
276 if ((t->enabled == 0) ||
/* Thread not started yet: update its context directly (no ring round-trip). */
280 if (!thread_is_running(thread_id)) {
281 struct thread_data *td = &thread_data[thread_id];
282 struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
284 if (td->n_pipelines >= THREAD_PIPELINES_MAX)
287 /* Data plane thread */
288 td->p[td->n_pipelines] = p->p;
291 for (i = 0; i < p->n_tables; i++)
292 tdp->table_data[i].a = p->table[i].a;
294 tdp->n_tables = p->n_tables;
296 tdp->msgq_req = p->msgq_req;
297 tdp->msgq_rsp = p->msgq_rsp;
298 tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
299 tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
/* Record the mapping on the master-side pipeline object. */
304 p->thread_id = thread_id;
/* Thread already running: go through the message rings instead. */
310 /* Allocate request */
311 req = thread_msg_alloc();
316 req->type = THREAD_REQ_PIPELINE_ENABLE;
317 req->pipeline_enable.p = p->p;
318 for (i = 0; i < p->n_tables; i++)
319 req->pipeline_enable.table[i].a =
321 req->pipeline_enable.msgq_req = p->msgq_req;
322 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
323 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
324 req->pipeline_enable.n_tables = p->n_tables;
326 /* Send request and wait for response */
327 rsp = thread_msg_send_recv(thread_id, req);
/* Read status before freeing: rsp and req share one buffer. */
332 status = rsp->status;
335 thread_msg_free(rsp);
337 /* Request completion */
341 p->thread_id = thread_id;
/* Unmap a pipeline from its data plane thread. Mirror of
 * thread_pipeline_enable(): direct context update when the thread is not
 * RUNNING, message round-trip otherwise.
 * NOTE(review): paste fragment — braces and error paths missing; code kept
 * byte-identical.
 */
348 thread_pipeline_disable(uint32_t thread_id,
349 const char *pipeline_name)
351 struct pipeline *p = pipeline_find(pipeline_name);
353 struct thread_msg_req *req;
354 struct thread_msg_rsp *rsp;
357 /* Check input params */
358 if ((thread_id >= RTE_MAX_LCORE) ||
362 t = &thread[thread_id];
/* Pipeline must currently be mapped to this exact thread. */
369 if (p->thread_id != thread_id)
372 if (!thread_is_running(thread_id)) {
373 struct thread_data *td = &thread_data[thread_id];
/* Linear scan for the pipeline's slot in the thread context. */
376 for (i = 0; i < td->n_pipelines; i++) {
377 struct pipeline_data *tdp = &td->pipeline_data[i];
382 /* Data plane thread */
/* Remove by swapping the last slot into the vacated one (order not kept). */
383 if (i < td->n_pipelines - 1) {
384 struct rte_pipeline *pipeline_last =
385 td->p[td->n_pipelines - 1];
386 struct pipeline_data *tdp_last =
387 &td->pipeline_data[td->n_pipelines - 1];
389 td->p[i] = pipeline_last;
390 memcpy(tdp, tdp_last, sizeof(*tdp));
404 /* Allocate request */
405 req = thread_msg_alloc();
410 req->type = THREAD_REQ_PIPELINE_DISABLE;
411 req->pipeline_disable.p = p->p;
413 /* Send request and wait for response */
414 rsp = thread_msg_send_recv(thread_id, req);
419 status = rsp->status;
422 thread_msg_free(rsp);
424 /* Request completion */
434 * Data plane threads: message handling
/*
 * Data plane thread: non-blocking poll of the request ring.
 * Returns the pending request, or NULL when the ring is empty.
 * (Fix: stray line-number tokens removed; missing braces and NULL/return
 * path restored.)
 */
static inline struct thread_msg_req *
thread_msg_recv(struct rte_ring *msgq_req)
{
	struct thread_msg_req *req;

	int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);

	if (status != 0)
		return NULL;

	return req;
}
/*
 * Data plane thread: post a response back to the master thread, retrying
 * while the ring is full (-ENOBUFS). Ownership of rsp transfers to the
 * master thread, which frees it.
 * (Fix: stray line-number tokens removed; missing signature line, braces and
 * status local restored.)
 */
static inline void
thread_msg_send(struct rte_ring *msgq_rsp,
	struct thread_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}
/* Data plane side of PIPELINE_ENABLE: install the pipeline described by the
 * request into this thread's context. The response is built in place over
 * the request buffer (same allocation, see thread_msg_alloc()).
 * NOTE(review): paste fragment — braces, error status assignment and return
 * lines are missing; code kept byte-identical.
 */
460 static struct thread_msg_rsp *
461 thread_msg_handle_pipeline_enable(struct thread_data *t,
462 struct thread_msg_req *req)
464 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
465 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Capacity check: refuse when the pipeline table is full. */
469 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
474 t->p[t->n_pipelines] = req->pipeline_enable.p;
476 p->p = req->pipeline_enable.p;
477 for (i = 0; i < req->pipeline_enable.n_tables; i++)
479 req->pipeline_enable.table[i].a;
481 p->n_tables = req->pipeline_enable.n_tables;
483 p->msgq_req = req->pipeline_enable.msgq_req;
484 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
/* Convert the requested ms period to TSC cycles and arm the timer. */
486 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
487 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/* Data plane side of PIPELINE_DISABLE: find the pipeline in this thread's
 * context and remove it by swapping the last slot into its place.
 * The response reuses the request buffer.
 * NOTE(review): paste fragment — braces, continue/return lines missing; code
 * kept byte-identical.
 */
496 static struct thread_msg_rsp *
497 thread_msg_handle_pipeline_disable(struct thread_data *t,
498 struct thread_msg_req *req)
500 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
501 uint32_t n_pipelines = t->n_pipelines;
502 struct rte_pipeline *pipeline = req->pipeline_disable.p;
/* Linear search by rte_pipeline pointer. */
506 for (i = 0; i < n_pipelines; i++) {
507 struct pipeline_data *p = &t->pipeline_data[i];
509 if (p->p != pipeline)
/* Swap-remove: copy the last entry over the removed slot. */
512 if (i < n_pipelines - 1) {
513 struct rte_pipeline *pipeline_last =
514 t->p[n_pipelines - 1];
515 struct pipeline_data *p_last =
516 &t->pipeline_data[n_pipelines - 1];
518 t->p[i] = pipeline_last;
519 memcpy(p, p_last, sizeof(*p));
528 /* should not get here */
/* Data plane thread message pump: poll the request ring and dispatch by
 * request type; unknown types fall through to the default arm which echoes
 * the request buffer back as an error response.
 * NOTE(review): paste fragment — switch/loop scaffolding lines are missing;
 * code kept byte-identical.
 */
534 thread_msg_handle(struct thread_data *t)
537 struct thread_msg_req *req;
538 struct thread_msg_rsp *rsp;
540 req = thread_msg_recv(t->msgq_req);
545 case THREAD_REQ_PIPELINE_ENABLE:
546 rsp = thread_msg_handle_pipeline_enable(t, req);
549 case THREAD_REQ_PIPELINE_DISABLE:
550 rsp = thread_msg_handle_pipeline_disable(t, req);
/* Default arm: reuse the request buffer as the (error) response. */
554 rsp = (struct thread_msg_rsp *) req;
558 thread_msg_send(t->msgq_rsp, rsp);
563 * Master thread & data plane threads: message passing
/* Pipeline-scoped request types (port stats/enable/disable, table stats and
 * rule/meter/DSCP/TTL management).
 */
565 enum pipeline_req_type {
567 PIPELINE_REQ_PORT_IN_STATS_READ,
568 PIPELINE_REQ_PORT_IN_ENABLE,
569 PIPELINE_REQ_PORT_IN_DISABLE,
572 PIPELINE_REQ_PORT_OUT_STATS_READ,
575 PIPELINE_REQ_TABLE_STATS_READ,
576 PIPELINE_REQ_TABLE_RULE_ADD,
577 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
578 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
579 PIPELINE_REQ_TABLE_RULE_DELETE,
580 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
581 PIPELINE_REQ_TABLE_RULE_STATS_READ,
582 PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
583 PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
584 PIPELINE_REQ_TABLE_RULE_MTR_READ,
585 PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
586 PIPELINE_REQ_TABLE_RULE_TTL_READ,
/* Per-request payload structs (fragments — some members/braces missing). */
590 struct pipeline_msg_req_port_in_stats_read {
594 struct pipeline_msg_req_port_out_stats_read {
598 struct pipeline_msg_req_table_stats_read {
602 struct pipeline_msg_req_table_rule_add {
603 struct table_rule_match match;
604 struct table_rule_action action;
607 struct pipeline_msg_req_table_rule_add_default {
608 struct table_rule_action action;
/* Bulk add carries pointers into master-owned arrays, not copies. */
611 struct pipeline_msg_req_table_rule_add_bulk {
612 struct table_rule_match *match;
613 struct table_rule_action *action;
619 struct pipeline_msg_req_table_rule_delete {
620 struct table_rule_match match;
623 struct pipeline_msg_req_table_rule_stats_read {
628 struct pipeline_msg_req_table_mtr_profile_add {
629 uint32_t meter_profile_id;
630 struct rte_table_action_meter_profile profile;
633 struct pipeline_msg_req_table_mtr_profile_delete {
634 uint32_t meter_profile_id;
637 struct pipeline_msg_req_table_rule_mtr_read {
643 struct pipeline_msg_req_table_dscp_table_update {
645 struct rte_table_action_dscp_table dscp_table;
648 struct pipeline_msg_req_table_rule_ttl_read {
/* Request envelope: type + target id + per-type payload (union wrapper line
 * missing from this paste).
 */
653 struct pipeline_msg_req {
654 enum pipeline_req_type type;
655 uint32_t id; /* Port IN, port OUT or table ID */
659 struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
660 struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
661 struct pipeline_msg_req_table_stats_read table_stats_read;
662 struct pipeline_msg_req_table_rule_add table_rule_add;
663 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
664 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
665 struct pipeline_msg_req_table_rule_delete table_rule_delete;
666 struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
667 struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
668 struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
669 struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
670 struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
671 struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
/* Per-request response payloads. */
675 struct pipeline_msg_rsp_port_in_stats_read {
676 struct rte_pipeline_port_in_stats stats;
679 struct pipeline_msg_rsp_port_out_stats_read {
680 struct rte_pipeline_port_out_stats stats;
683 struct pipeline_msg_rsp_table_stats_read {
684 struct rte_pipeline_table_stats stats;
687 struct pipeline_msg_rsp_table_rule_add {
691 struct pipeline_msg_rsp_table_rule_add_default {
695 struct pipeline_msg_rsp_table_rule_add_bulk {
699 struct pipeline_msg_rsp_table_rule_stats_read {
700 struct rte_table_action_stats_counters stats;
703 struct pipeline_msg_rsp_table_rule_mtr_read {
704 struct rte_table_action_mtr_counters stats;
707 struct pipeline_msg_rsp_table_rule_ttl_read {
708 struct rte_table_action_ttl_counters stats;
/* Response envelope (status member line missing from this paste). */
711 struct pipeline_msg_rsp {
716 struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
717 struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
718 struct pipeline_msg_rsp_table_stats_read table_stats_read;
719 struct pipeline_msg_rsp_table_rule_add table_rule_add;
720 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
721 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
722 struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
723 struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
724 struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
731 static struct pipeline_msg_req *
732 pipeline_msg_alloc(void)
734 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
735 sizeof(struct pipeline_msg_rsp));
737 return calloc(1, size);
/* Release a message buffer obtained from pipeline_msg_alloc().
 * (Fix: stray line-number token removed; missing return type and body
 * restored.)
 */
static void
pipeline_msg_free(struct pipeline_msg_rsp *rsp)
{
	free(rsp);
}
746 static struct pipeline_msg_rsp *
747 pipeline_msg_send_recv(struct pipeline *p,
748 struct pipeline_msg_req *req)
750 struct rte_ring *msgq_req = p->msgq_req;
751 struct rte_ring *msgq_rsp = p->msgq_rsp;
752 struct pipeline_msg_rsp *rsp;
757 status = rte_ring_sp_enqueue(msgq_req, req);
758 } while (status == -ENOBUFS);
762 status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
763 } while (status != 0);
/* Master-thread API group: port-in stats/enable/disable, port-out stats and
 * table stats. All five follow the same shape: validate inputs, take the
 * direct path when the pipeline's thread is not running, otherwise allocate
 * a request, round-trip it via pipeline_msg_send_recv() and copy the result
 * out of the shared req/rsp buffer before freeing it.
 * NOTE(review): paste fragment — braces, returns and error paths missing;
 * code kept byte-identical.
 */
769 pipeline_port_in_stats_read(const char *pipeline_name,
771 struct rte_pipeline_port_in_stats *stats,
775 struct pipeline_msg_req *req;
776 struct pipeline_msg_rsp *rsp;
779 /* Check input params */
780 if ((pipeline_name == NULL) ||
784 p = pipeline_find(pipeline_name);
786 (port_id >= p->n_ports_in))
/* Direct path: pipeline not yet running, call librte_pipeline in place. */
789 if (!pipeline_is_running(p)) {
790 status = rte_pipeline_port_in_stats_read(p->p,
798 /* Allocate request */
799 req = pipeline_msg_alloc();
804 req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
806 req->port_in_stats_read.clear = clear;
808 /* Send request and wait for response */
809 rsp = pipeline_msg_send_recv(p, req);
/* Copy stats out before freeing: rsp shares the request buffer. */
814 status = rsp->status;
816 memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
819 pipeline_msg_free(rsp);
/* Enable one input port of the pipeline. */
825 pipeline_port_in_enable(const char *pipeline_name,
829 struct pipeline_msg_req *req;
830 struct pipeline_msg_rsp *rsp;
833 /* Check input params */
834 if (pipeline_name == NULL)
837 p = pipeline_find(pipeline_name);
839 (port_id >= p->n_ports_in))
842 if (!pipeline_is_running(p)) {
843 status = rte_pipeline_port_in_enable(p->p, port_id);
847 /* Allocate request */
848 req = pipeline_msg_alloc();
853 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
856 /* Send request and wait for response */
857 rsp = pipeline_msg_send_recv(p, req);
862 status = rsp->status;
865 pipeline_msg_free(rsp);
/* Disable one input port of the pipeline. */
871 pipeline_port_in_disable(const char *pipeline_name,
875 struct pipeline_msg_req *req;
876 struct pipeline_msg_rsp *rsp;
879 /* Check input params */
880 if (pipeline_name == NULL)
883 p = pipeline_find(pipeline_name);
885 (port_id >= p->n_ports_in))
888 if (!pipeline_is_running(p)) {
889 status = rte_pipeline_port_in_disable(p->p, port_id);
893 /* Allocate request */
894 req = pipeline_msg_alloc();
899 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
902 /* Send request and wait for response */
903 rsp = pipeline_msg_send_recv(p, req);
908 status = rsp->status;
911 pipeline_msg_free(rsp);
/* Read (and optionally clear) stats of one output port. */
917 pipeline_port_out_stats_read(const char *pipeline_name,
919 struct rte_pipeline_port_out_stats *stats,
923 struct pipeline_msg_req *req;
924 struct pipeline_msg_rsp *rsp;
927 /* Check input params */
928 if ((pipeline_name == NULL) ||
932 p = pipeline_find(pipeline_name);
934 (port_id >= p->n_ports_out))
937 if (!pipeline_is_running(p)) {
938 status = rte_pipeline_port_out_stats_read(p->p,
946 /* Allocate request */
947 req = pipeline_msg_alloc();
952 req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
954 req->port_out_stats_read.clear = clear;
956 /* Send request and wait for response */
957 rsp = pipeline_msg_send_recv(p, req);
962 status = rsp->status;
964 memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
967 pipeline_msg_free(rsp);
/* Read (and optionally clear) stats of one table. */
973 pipeline_table_stats_read(const char *pipeline_name,
975 struct rte_pipeline_table_stats *stats,
979 struct pipeline_msg_req *req;
980 struct pipeline_msg_rsp *rsp;
983 /* Check input params */
984 if ((pipeline_name == NULL) ||
988 p = pipeline_find(pipeline_name);
990 (table_id >= p->n_tables))
993 if (!pipeline_is_running(p)) {
994 status = rte_pipeline_table_stats_read(p->p,
1002 /* Allocate request */
1003 req = pipeline_msg_alloc();
1008 req->type = PIPELINE_REQ_TABLE_STATS_READ;
1010 req->table_stats_read.clear = clear;
1012 /* Send request and wait for response */
1013 rsp = pipeline_msg_send_recv(p, req);
1018 status = rsp->status;
1020 memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
1023 pipeline_msg_free(rsp);
/* Validation helpers used by the table-rule API below.
 * NOTE(review): paste fragment — braces, returns and several case labels are
 * missing; code kept byte-identical.
 */
/* match_check(): reject a rule match that does not agree with the target
 * table's configured match type and parameters.
 */
1029 match_check(struct table_rule_match *match,
1033 struct table *table;
1035 if ((match == NULL) ||
1037 (table_id >= p->n_tables))
1040 table = &p->table[table_id];
/* Match type of the rule must equal the table's match type. */
1041 if (match->match_type != table->params.match_type)
1044 switch (match->match_type) {
/* ACL case: IP version must agree; prefix depths bounded by 32 (IPv4)
 * or 128 (IPv6).
 */
1047 struct table_acl_params *t = &table->params.match.acl;
1048 struct table_rule_match_acl *r = &match->match.acl;
1050 if ((r->ip_version && (t->ip_version == 0)) ||
1051 ((r->ip_version == 0) && t->ip_version))
1054 if (r->ip_version) {
1055 if ((r->sa_depth > 32) ||
1059 if ((r->sa_depth > 128) ||
1060 (r->da_depth > 128))
/* LPM case: IPv4 requires 4-byte keys, IPv6 requires 16-byte keys. */
1074 struct table_lpm_params *t = &table->params.match.lpm;
1075 struct table_rule_match_lpm *r = &match->match.lpm;
1077 if ((r->ip_version && (t->key_size != 4)) ||
1078 ((r->ip_version == 0) && (t->key_size != 16)))
1081 if (r->ip_version) {
/* action_check(): reject a rule action that does not agree with the table's
 * action profile (action mask and per-action parameter ranges).
 */
1100 action_check(struct table_rule_action *action,
1104 struct table_action_profile *ap;
1106 if ((action == NULL) ||
1108 (table_id >= p->n_tables))
1111 ap = p->table[table_id].ap;
1112 if (action->action_mask != ap->params.action_mask)
/* FWD: target port/table id must be in range. */
1115 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1116 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1117 (action->fwd.id >= p->n_ports_out))
1120 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1121 (action->fwd.id >= p->n_tables))
/* MTR: traffic-class mask must cover exactly the profile's n_tc classes. */
1125 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1126 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
1127 uint32_t tc_mask1 = action->mtr.tc_mask;
1129 if (tc_mask1 != tc_mask0)
/* TM: subport/pipe ids bounded by the profile's hierarchy sizes. */
1133 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1134 uint32_t n_subports_per_port =
1135 ap->params.tm.n_subports_per_port;
1136 uint32_t n_pipes_per_subport =
1137 ap->params.tm.n_pipes_per_subport;
1138 uint32_t subport_id = action->tm.subport_id;
1139 uint32_t pipe_id = action->tm.pipe_id;
1141 if ((subport_id >= n_subports_per_port) ||
1142 (pipe_id >= n_pipes_per_subport))
/* ENCAP: requested encapsulation type must be enabled in the profile. */
1146 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1147 uint64_t encap_mask = ap->params.encap.encap_mask;
1148 enum rte_table_action_encap_type type = action->encap.type;
1150 if ((encap_mask & (1LLU << type)) == 0)
/* NAT: rule IP version must agree with the profile's IP version. */
1154 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1155 int ip_version0 = ap->params.common.ip_version;
1156 int ip_version1 = action->nat.ip_version;
1158 if ((ip_version1 && (ip_version0 == 0)) ||
1159 ((ip_version1 == 0) && ip_version0))
/* action_default_check(): default rules may only carry the FWD action. */
1167 action_default_check(struct table_rule_action *action,
1171 if ((action == NULL) ||
1172 (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
1174 (table_id >= p->n_tables))
1177 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1178 if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
1179 (action->fwd.id >= p->n_ports_out))
1182 if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
1183 (action->fwd.id >= p->n_tables))
/* Low-level (librte_table) representation a high-level match converts to;
 * one variant per supported table type.
 */
1190 union table_rule_match_low_level {
1191 struct rte_table_acl_rule_add_params acl_add;
1192 struct rte_table_acl_rule_delete_params acl_delete;
1193 struct rte_table_array_key array;
1194 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
1195 struct rte_table_lpm_key lpm_ipv4;
1196 struct rte_table_lpm_ipv6_key lpm_ipv6;
/* Forward declarations: conversion helpers defined later in the file. */
1200 match_convert(struct table_rule_match *mh,
1201 union table_rule_match_low_level *ml,
1205 action_convert(struct rte_table_action *a,
1206 struct table_rule_action *action,
1207 struct rte_pipeline_table_entry *data);
/* Add one rule (match -> action) to a pipeline table; the rule is also
 * recorded in the master-side rule list via table_rule_add(). Direct path
 * converts match/action and calls librte_pipeline in place; otherwise the
 * request is round-tripped to the data plane thread.
 * NOTE(review): paste fragment — braces, frees and error paths missing; code
 * kept byte-identical.
 */
1210 pipeline_table_rule_add(const char *pipeline_name,
1212 struct table_rule_match *match,
1213 struct table_rule_action *action)
1216 struct table *table;
1217 struct pipeline_msg_req *req;
1218 struct pipeline_msg_rsp *rsp;
1219 struct table_rule *rule;
1222 /* Check input params */
1223 if ((pipeline_name == NULL) ||
1228 p = pipeline_find(pipeline_name);
1230 (table_id >= p->n_tables) ||
1231 match_check(match, p, table_id) ||
1232 action_check(action, p, table_id))
1235 table = &p->table[table_id];
/* Master-side shadow copy of the rule (owned by the rule list). */
1237 rule = calloc(1, sizeof(struct table_rule));
1241 memcpy(&rule->match, match, sizeof(*match));
1242 memcpy(&rule->action, action, sizeof(*action));
1244 if (!pipeline_is_running(p)) {
1245 union table_rule_match_low_level match_ll;
1246 struct rte_pipeline_table_entry *data_in, *data_out;
/* Scratch buffer that holds the converted table entry (action data). */
1250 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1251 if (buffer == NULL) {
1256 /* Table match-action rule conversion */
1257 data_in = (struct rte_pipeline_table_entry *)buffer;
1259 status = match_convert(match, &match_ll, 1);
1266 status = action_convert(table->a, action, data_in);
1273 /* Add rule (match, action) to table */
1274 status = rte_pipeline_table_entry_add(p->p,
1286 /* Write Response */
1287 rule->data = data_out;
1288 table_rule_add(table, rule);
1294 /* Allocate request */
1295 req = pipeline_msg_alloc();
1302 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
1304 memcpy(&req->table_rule_add.match, match, sizeof(*match));
1305 memcpy(&req->table_rule_add.action, action, sizeof(*action));
1307 /* Send request and wait for response */
1308 rsp = pipeline_msg_send_recv(p, req);
1315 status = rsp->status;
1317 rule->data = rsp->table_rule_add.data;
1318 table_rule_add(table, rule);
1323 pipeline_msg_free(rsp);
/* Add/replace the table's default rule; only FWD actions are accepted
 * (see action_default_check()). On success *data receives the entry handle.
 */
1329 pipeline_table_rule_add_default(const char *pipeline_name,
1331 struct table_rule_action *action,
1335 struct pipeline_msg_req *req;
1336 struct pipeline_msg_rsp *rsp;
1339 /* Check input params */
1340 if ((pipeline_name == NULL) ||
1345 p = pipeline_find(pipeline_name);
1347 (table_id >= p->n_tables) ||
1348 action_default_check(action, p, table_id))
1351 if (!pipeline_is_running(p)) {
1352 struct rte_pipeline_table_entry *data_in, *data_out;
1355 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1360 data_in = (struct rte_pipeline_table_entry *)buffer;
/* Default entries bypass action_convert(): only the FWD target is set. */
1362 data_in->action = action->fwd.action;
1363 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1364 data_in->port_id = action->fwd.id;
1365 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1366 data_in->table_id = action->fwd.id;
1368 /* Add default rule to table */
1369 status = rte_pipeline_table_default_entry_add(p->p,
1378 /* Write Response */
1385 /* Allocate request */
1386 req = pipeline_msg_alloc();
1391 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1393 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1395 /* Send request and wait for response */
1396 rsp = pipeline_msg_send_recv(p, req);
1401 status = rsp->status;
1403 *data = rsp->table_rule_add_default.data;
1406 pipeline_msg_free(rsp);
/* Add *n_rules rules in one operation; on return *n_rules holds how many
 * were actually added. Direct path converts all matches/actions into
 * low-level arrays and uses the bulk entry-add API (ACL tables) or a
 * per-rule loop (other table types).
 * NOTE(review): paste fragment — braces, gotos and cleanup paths missing;
 * code kept byte-identical.
 */
1412 pipeline_table_rule_add_bulk(const char *pipeline_name,
1414 struct table_rule_match *match,
1415 struct table_rule_action *action,
1420 struct pipeline_msg_req *req;
1421 struct pipeline_msg_rsp *rsp;
1425 /* Check input params */
1426 if ((pipeline_name == NULL) ||
1430 (n_rules == NULL) ||
1434 p = pipeline_find(pipeline_name);
1436 (table_id >= p->n_tables))
/* NOTE(review): loop body validates `match`/`action` (element 0) on every
 * iteration instead of `&match[i]`/`&action[i]` — looks like a per-rule
 * validation bug; confirm against upstream history before relying on it.
 */
1439 for (i = 0; i < *n_rules; i++)
1440 if (match_check(match, p, table_id) ||
1441 action_check(action, p, table_id))
1444 if (!pipeline_is_running(p)) {
1445 struct rte_table_action *a = p->table[table_id].a;
1446 union table_rule_match_low_level *match_ll;
1448 void **match_ll_ptr;
1449 struct rte_pipeline_table_entry **action_ll_ptr;
1450 struct rte_pipeline_table_entry **entries_ptr =
1451 (struct rte_pipeline_table_entry **)data;
/* ACL tables support true bulk add; others fall back to a loop below. */
1453 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1456 /* Memory allocation */
1457 match_ll = calloc(*n_rules, sizeof(union table_rule_match_low_level));
1458 action_ll = calloc(*n_rules, TABLE_RULE_ACTION_SIZE_MAX);
1459 match_ll_ptr = calloc(*n_rules, sizeof(void *));
1461 calloc(*n_rules, sizeof(struct rte_pipeline_table_entry *));
1462 found = calloc(*n_rules, sizeof(int));
1464 if (match_ll == NULL ||
1465 action_ll == NULL ||
1466 match_ll_ptr == NULL ||
1467 action_ll_ptr == NULL ||
/* Build per-rule pointer arrays into the two flat allocations. */
1471 for (i = 0; i < *n_rules; i++) {
1472 match_ll_ptr[i] = (void *)&match_ll[i];
1474 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
1477 /* Rule match conversion */
1478 for (i = 0; i < *n_rules; i++) {
1479 status = match_convert(&match[i], match_ll_ptr[i], 1);
1484 /* Rule action conversion */
1485 for (i = 0; i < *n_rules; i++) {
1486 status = action_convert(a, &action[i], action_ll_ptr[i]);
1491 /* Add rule (match, action) to table */
1493 status = rte_pipeline_table_entry_add_bulk(p->p,
/* Non-bulk fallback: add rules one at a time. */
1503 for (i = 0; i < *n_rules; i++) {
1504 status = rte_pipeline_table_entry_add(p->p,
1519 free(action_ll_ptr);
1528 free(action_ll_ptr);
1537 /* Allocate request */
1538 req = pipeline_msg_alloc();
1543 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
/* Bulk request passes pointers; master blocks until the reply, so the
 * arrays stay valid for the data plane thread.
 */
1545 req->table_rule_add_bulk.match = match;
1546 req->table_rule_add_bulk.action = action;
1547 req->table_rule_add_bulk.data = data;
1548 req->table_rule_add_bulk.n_rules = *n_rules;
1549 req->table_rule_add_bulk.bulk =
1550 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1552 /* Send request and wait for response */
1553 rsp = pipeline_msg_send_recv(p, req);
1558 status = rsp->status;
1560 *n_rules = rsp->table_rule_add_bulk.n_rules;
1563 pipeline_msg_free(rsp);
/* Rule removal and per-rule stats API group. Same direct-vs-message shape as
 * the other master-thread entry points.
 * NOTE(review): paste fragment — braces, returns and error paths missing;
 * code kept byte-identical.
 */
/* Delete the rule identified by its match from the table. */
1569 pipeline_table_rule_delete(const char *pipeline_name,
1571 struct table_rule_match *match)
1574 struct pipeline_msg_req *req;
1575 struct pipeline_msg_rsp *rsp;
1578 /* Check input params */
1579 if ((pipeline_name == NULL) ||
1583 p = pipeline_find(pipeline_name);
1585 (table_id >= p->n_tables) ||
1586 match_check(match, p, table_id))
1589 if (!pipeline_is_running(p)) {
1590 union table_rule_match_low_level match_ll;
/* Third arg 0: convert for delete (vs 1 for add elsewhere in this file). */
1593 status = match_convert(match, &match_ll, 0);
1597 status = rte_pipeline_table_entry_delete(p->p,
1606 /* Allocate request */
1607 req = pipeline_msg_alloc();
1612 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1614 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1616 /* Send request and wait for response */
1617 rsp = pipeline_msg_send_recv(p, req);
1622 status = rsp->status;
1625 pipeline_msg_free(rsp);
/* Delete the table's default rule. */
1631 pipeline_table_rule_delete_default(const char *pipeline_name,
1635 struct pipeline_msg_req *req;
1636 struct pipeline_msg_rsp *rsp;
1639 /* Check input params */
1640 if (pipeline_name == NULL)
1643 p = pipeline_find(pipeline_name);
1645 (table_id >= p->n_tables))
1648 if (!pipeline_is_running(p)) {
1649 status = rte_pipeline_table_default_entry_delete(p->p,
1656 /* Allocate request */
1657 req = pipeline_msg_alloc();
1662 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1665 /* Send request and wait for response */
1666 rsp = pipeline_msg_send_recv(p, req);
1671 status = rsp->status;
1674 pipeline_msg_free(rsp);
/* Read (and optionally clear) the stats counters of one rule, identified by
 * its table-entry data handle.
 */
1680 pipeline_table_rule_stats_read(const char *pipeline_name,
1683 struct rte_table_action_stats_counters *stats,
1687 struct pipeline_msg_req *req;
1688 struct pipeline_msg_rsp *rsp;
1691 /* Check input params */
1692 if ((pipeline_name == NULL) ||
1697 p = pipeline_find(pipeline_name);
1699 (table_id >= p->n_tables))
1702 if (!pipeline_is_running(p)) {
1703 struct rte_table_action *a = p->table[table_id].a;
1705 status = rte_table_action_stats_read(a,
1713 /* Allocate request */
1714 req = pipeline_msg_alloc();
1719 req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
1721 req->table_rule_stats_read.data = data;
1722 req->table_rule_stats_read.clear = clear;
1724 /* Send request and wait for response */
1725 rsp = pipeline_msg_send_recv(p, req);
1730 status = rsp->status;
1732 memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
1735 pipeline_msg_free(rsp);
/* Meter (MTR) API group: profile add/delete and per-rule meter counters.
 * Same direct-vs-message shape as the other master-thread entry points.
 * NOTE(review): paste fragment — braces, returns and error paths missing;
 * code kept byte-identical.
 */
/* Register a meter profile with the table's action handle. */
1741 pipeline_table_mtr_profile_add(const char *pipeline_name,
1743 uint32_t meter_profile_id,
1744 struct rte_table_action_meter_profile *profile)
1747 struct pipeline_msg_req *req;
1748 struct pipeline_msg_rsp *rsp;
1751 /* Check input params */
1752 if ((pipeline_name == NULL) ||
1756 p = pipeline_find(pipeline_name);
1758 (table_id >= p->n_tables))
1761 if (!pipeline_is_running(p)) {
1762 struct rte_table_action *a = p->table[table_id].a;
1764 status = rte_table_action_meter_profile_add(a,
1771 /* Allocate request */
1772 req = pipeline_msg_alloc();
1777 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
1779 req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
1780 memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
1782 /* Send request and wait for response */
1783 rsp = pipeline_msg_send_recv(p, req);
1788 status = rsp->status;
1791 pipeline_msg_free(rsp);
/* Unregister a meter profile. */
1797 pipeline_table_mtr_profile_delete(const char *pipeline_name,
1799 uint32_t meter_profile_id)
1802 struct pipeline_msg_req *req;
1803 struct pipeline_msg_rsp *rsp;
1806 /* Check input params */
1807 if (pipeline_name == NULL)
1810 p = pipeline_find(pipeline_name);
1812 (table_id >= p->n_tables))
1815 if (!pipeline_is_running(p)) {
1816 struct rte_table_action *a = p->table[table_id].a;
1818 status = rte_table_action_meter_profile_delete(a,
1824 /* Allocate request */
1825 req = pipeline_msg_alloc();
1830 req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
1832 req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
1834 /* Send request and wait for response */
1835 rsp = pipeline_msg_send_recv(p, req);
1840 status = rsp->status;
1843 pipeline_msg_free(rsp);
/* Read (and optionally clear) meter counters of one rule, restricted to the
 * traffic classes selected by tc_mask.
 */
1849 pipeline_table_rule_mtr_read(const char *pipeline_name,
1853 struct rte_table_action_mtr_counters *stats,
1857 struct pipeline_msg_req *req;
1858 struct pipeline_msg_rsp *rsp;
1861 /* Check input params */
1862 if ((pipeline_name == NULL) ||
1867 p = pipeline_find(pipeline_name);
1869 (table_id >= p->n_tables))
1872 if (!pipeline_is_running(p)) {
1873 struct rte_table_action *a = p->table[table_id].a;
1875 status = rte_table_action_meter_read(a,
1884 /* Allocate request */
1885 req = pipeline_msg_alloc();
1890 req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
1892 req->table_rule_mtr_read.data = data;
1893 req->table_rule_mtr_read.tc_mask = tc_mask;
1894 req->table_rule_mtr_read.clear = clear;
1896 /* Send request and wait for response */
1897 rsp = pipeline_msg_send_recv(p, req);
1902 status = rsp->status;
1904 memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
1907 pipeline_msg_free(rsp);
/*
 * Master thread API: update entries of a pipeline table's DSCP translation
 * table (entries selected by dscp_mask).
 * Fast path: direct table action call when the pipeline is not running;
 * otherwise send PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE to the owning data
 * plane thread.
 * NOTE(review): elided source — validation and return paths partially
 * visible.
 */
1913 pipeline_table_dscp_table_update(const char *pipeline_name,
1916 struct rte_table_action_dscp_table *dscp_table)
1919 struct pipeline_msg_req *req;
1920 struct pipeline_msg_rsp *rsp;
1923 /* Check input params */
1924 if ((pipeline_name == NULL) ||
1925 (dscp_table == NULL))
1928 p = pipeline_find(pipeline_name);
1930 (table_id >= p->n_tables))
/* Pipeline not running yet: safe to touch the table action directly. */
1933 if (!pipeline_is_running(p)) {
1934 struct rte_table_action *a = p->table[table_id].a;
1936 status = rte_table_action_dscp_table_update(a,
1943 /* Allocate request */
1944 req = pipeline_msg_alloc();
1949 req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
1951 req->table_dscp_table_update.dscp_mask = dscp_mask;
/* The DSCP table is copied by value into the request message. */
1952 memcpy(&req->table_dscp_table_update.dscp_table,
1953 dscp_table, sizeof(*dscp_table));
1955 /* Send request and wait for response */
1956 rsp = pipeline_msg_send_recv(p, req);
1961 status = rsp->status;
1964 pipeline_msg_free(rsp);
/*
 * Master thread API: read (and optionally clear) the per-rule TTL counters of
 * a pipeline table rule identified by its table-entry data pointer.
 * Fast path: direct table action call when the pipeline is not running;
 * otherwise send PIPELINE_REQ_TABLE_RULE_TTL_READ to the owning data plane
 * thread and copy the stats out of the response.
 * NOTE(review): elided source — validation and return paths partially
 * visible.
 */
1970 pipeline_table_rule_ttl_read(const char *pipeline_name,
1973 struct rte_table_action_ttl_counters *stats,
1977 struct pipeline_msg_req *req;
1978 struct pipeline_msg_rsp *rsp;
1981 /* Check input params */
1982 if ((pipeline_name == NULL) ||
1987 p = pipeline_find(pipeline_name);
1989 (table_id >= p->n_tables))
/* Pipeline not running yet: safe to touch the table action directly. */
1992 if (!pipeline_is_running(p)) {
1993 struct rte_table_action *a = p->table[table_id].a;
1995 status = rte_table_action_ttl_read(a,
2003 /* Allocate request */
2004 req = pipeline_msg_alloc();
2009 req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
2011 req->table_rule_ttl_read.data = data;
2012 req->table_rule_ttl_read.clear = clear;
2014 /* Send request and wait for response */
2015 rsp = pipeline_msg_send_recv(p, req);
2020 status = rsp->status;
/* Copy stats out of the response before freeing it. */
2022 memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
2025 pipeline_msg_free(rsp);
2031 * Data plane threads: message handling
/*
 * Data plane thread: non-blocking dequeue of one request from the pipeline's
 * request ring (single-consumer dequeue — only this thread reads the ring).
 * Presumably returns NULL when the ring is empty (return path elided — TODO
 * confirm against full file).
 */
2033 static inline struct pipeline_msg_req *
2034 pipeline_msg_recv(struct rte_ring *msgq_req)
2036 struct pipeline_msg_req *req;
2038 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
/*
 * Data plane thread: enqueue one response onto the pipeline's response ring.
 * Busy-retries the single-producer enqueue while the ring is full (-ENOBUFS);
 * this never drops a response, so the master thread's blocking recv cannot
 * deadlock on a lost message.
 */
2047 pipeline_msg_send(struct rte_ring *msgq_rsp,
2048 struct pipeline_msg_rsp *rsp)
2053 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
2054 } while (status == -ENOBUFS);
/*
 * Handler for PIPELINE_REQ_PORT_IN_STATS_READ: read (and optionally clear)
 * input port stats. The response reuses the request buffer in place (req and
 * rsp are overlapping views of the same message), a pattern shared by all
 * handlers below.
 */
2057 static struct pipeline_msg_rsp *
2058 pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
2059 struct pipeline_msg_req *req)
2061 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2062 uint32_t port_id = req->id;
2063 int clear = req->port_in_stats_read.clear;
2065 rsp->status = rte_pipeline_port_in_stats_read(p->p,
2067 &rsp->port_in_stats_read.stats,
/*
 * Handler for PIPELINE_REQ_PORT_IN_ENABLE: enable the given input port.
 * Response is built in place over the request buffer.
 */
2073 static struct pipeline_msg_rsp *
2074 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
2075 struct pipeline_msg_req *req)
2077 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2078 uint32_t port_id = req->id;
2080 rsp->status = rte_pipeline_port_in_enable(p->p,
/*
 * Handler for PIPELINE_REQ_PORT_IN_DISABLE: disable the given input port.
 * Response is built in place over the request buffer.
 */
2086 static struct pipeline_msg_rsp *
2087 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
2088 struct pipeline_msg_req *req)
2090 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2091 uint32_t port_id = req->id;
2093 rsp->status = rte_pipeline_port_in_disable(p->p,
/*
 * Handler for PIPELINE_REQ_PORT_OUT_STATS_READ: read (and optionally clear)
 * output port stats. Response is built in place over the request buffer.
 */
2099 static struct pipeline_msg_rsp *
2100 pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
2101 struct pipeline_msg_req *req)
2103 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2104 uint32_t port_id = req->id;
2105 int clear = req->port_out_stats_read.clear;
2107 rsp->status = rte_pipeline_port_out_stats_read(p->p,
2109 &rsp->port_out_stats_read.stats,
/*
 * Handler for PIPELINE_REQ_TABLE_STATS_READ: read (and optionally clear)
 * table stats. Response is built in place over the request buffer.
 * NOTE(review): the local is named port_id but req->id holds a TABLE id
 * here — misleading name, behavior is correct.
 */
2115 static struct pipeline_msg_rsp *
2116 pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
2117 struct pipeline_msg_req *req)
2119 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2120 uint32_t port_id = req->id;
2121 int clear = req->table_stats_read.clear;
2123 rsp->status = rte_pipeline_table_stats_read(p->p,
2125 &rsp->table_stats_read.stats,
/*
 * Split an IPv6 prefix depth (0..128) into four per-32-bit-word depths for
 * the ACL field-by-field representation: words fully covered by the prefix
 * get depth 32, the partially covered word gets the remainder, the rest get
 * 0 (fully-covered / zero cases elided here). Switches on depth / 32 to pick
 * which word is partial. Presumably returns non-zero for depth > 128 —
 * TODO confirm against full file.
 */
2132 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
2137 switch (depth / 32) {
2147 depth32[1] = depth - 32;
2155 depth32[2] = depth - 64;
2163 depth32[3] = depth - 96;
/*
 * Convert a high-level table rule match (mh) into the low-level per-table-
 * type representation (ml) consumed by the librte_table add/delete APIs.
 * The third parameter (elided signature) selects add vs delete conversion:
 * ACL rules populate either ml->acl_add (with priority) or ml->acl_delete.
 *
 * ACL/IPv4: field 0 = proto/proto_mask, fields 1-2 = src/dst address with
 * prefix depth as the mask range, fields 3-4 = src/dst port ranges.
 * ACL/IPv6: the 128-bit addresses are viewed as four big-endian 32-bit words
 * each (converted to CPU order), giving fields 1-4 (src), 5-8 (dst), with
 * per-word depths from match_convert_ipv6_depth(); fields 9-10 are the port
 * ranges.
 * Array:    position only. Hash: raw key copy. LPM: IPv4 or IPv6 prefix
 * depending on ip_version.
 *
 * NOTE(review): heavily elided — case labels, error returns and several
 * field assignments are not visible; do not assume the visible lines are
 * contiguous.
 */
2179 match_convert(struct table_rule_match *mh,
2180 union table_rule_match_low_level *ml,
/* Start from a clean low-level match; untouched fields stay zero. */
2183 memset(ml, 0, sizeof(*ml));
2185 switch (mh->match_type) {
2187 if (mh->match.acl.ip_version)
2189 ml->acl_add.field_value[0].value.u8 =
2190 mh->match.acl.proto;
2191 ml->acl_add.field_value[0].mask_range.u8 =
2192 mh->match.acl.proto_mask;
2194 ml->acl_add.field_value[1].value.u32 =
2195 mh->match.acl.ipv4.sa;
2196 ml->acl_add.field_value[1].mask_range.u32 =
2197 mh->match.acl.sa_depth;
2199 ml->acl_add.field_value[2].value.u32 =
2200 mh->match.acl.ipv4.da;
2201 ml->acl_add.field_value[2].mask_range.u32 =
2202 mh->match.acl.da_depth;
2204 ml->acl_add.field_value[3].value.u16 =
2206 ml->acl_add.field_value[3].mask_range.u16 =
2209 ml->acl_add.field_value[4].value.u16 =
2211 ml->acl_add.field_value[4].mask_range.u16 =
2214 ml->acl_add.priority =
2215 (int32_t) mh->match.acl.priority;
/* Delete variant: same IPv4 field layout, no priority. */
2217 ml->acl_delete.field_value[0].value.u8 =
2218 mh->match.acl.proto;
2219 ml->acl_delete.field_value[0].mask_range.u8 =
2220 mh->match.acl.proto_mask;
2222 ml->acl_delete.field_value[1].value.u32 =
2223 mh->match.acl.ipv4.sa;
2224 ml->acl_delete.field_value[1].mask_range.u32 =
2225 mh->match.acl.sa_depth;
2227 ml->acl_delete.field_value[2].value.u32 =
2228 mh->match.acl.ipv4.da;
2229 ml->acl_delete.field_value[2].mask_range.u32 =
2230 mh->match.acl.da_depth;
2232 ml->acl_delete.field_value[3].value.u16 =
2234 ml->acl_delete.field_value[3].mask_range.u16 =
2237 ml->acl_delete.field_value[4].value.u16 =
2239 ml->acl_delete.field_value[4].mask_range.u16 =
/* IPv6 add: view the 128-bit addresses as 4 big-endian 32-bit words. */
2245 (uint32_t *) mh->match.acl.ipv6.sa;
2247 (uint32_t *) mh->match.acl.ipv6.da;
2248 uint32_t sa32_depth[4], da32_depth[4];
2251 status = match_convert_ipv6_depth(
2252 mh->match.acl.sa_depth,
2257 status = match_convert_ipv6_depth(
2258 mh->match.acl.da_depth,
2263 ml->acl_add.field_value[0].value.u8 =
2264 mh->match.acl.proto;
2265 ml->acl_add.field_value[0].mask_range.u8 =
2266 mh->match.acl.proto_mask;
/* Source address words -> fields 1-4 (CPU byte order). */
2268 ml->acl_add.field_value[1].value.u32 =
2269 rte_be_to_cpu_32(sa32[0]);
2270 ml->acl_add.field_value[1].mask_range.u32 =
2272 ml->acl_add.field_value[2].value.u32 =
2273 rte_be_to_cpu_32(sa32[1]);
2274 ml->acl_add.field_value[2].mask_range.u32 =
2276 ml->acl_add.field_value[3].value.u32 =
2277 rte_be_to_cpu_32(sa32[2]);
2278 ml->acl_add.field_value[3].mask_range.u32 =
2280 ml->acl_add.field_value[4].value.u32 =
2281 rte_be_to_cpu_32(sa32[3]);
2282 ml->acl_add.field_value[4].mask_range.u32 =
/* Destination address words -> fields 5-8. */
2285 ml->acl_add.field_value[5].value.u32 =
2286 rte_be_to_cpu_32(da32[0]);
2287 ml->acl_add.field_value[5].mask_range.u32 =
2289 ml->acl_add.field_value[6].value.u32 =
2290 rte_be_to_cpu_32(da32[1]);
2291 ml->acl_add.field_value[6].mask_range.u32 =
2293 ml->acl_add.field_value[7].value.u32 =
2294 rte_be_to_cpu_32(da32[2]);
2295 ml->acl_add.field_value[7].mask_range.u32 =
2297 ml->acl_add.field_value[8].value.u32 =
2298 rte_be_to_cpu_32(da32[3]);
2299 ml->acl_add.field_value[8].mask_range.u32 =
/* Port ranges -> fields 9-10. */
2302 ml->acl_add.field_value[9].value.u16 =
2304 ml->acl_add.field_value[9].mask_range.u16 =
2307 ml->acl_add.field_value[10].value.u16 =
2309 ml->acl_add.field_value[10].mask_range.u16 =
2312 ml->acl_add.priority =
2313 (int32_t) mh->match.acl.priority;
/* IPv6 delete: identical layout to IPv6 add, no priority. */
2316 (uint32_t *) mh->match.acl.ipv6.sa;
2318 (uint32_t *) mh->match.acl.ipv6.da;
2319 uint32_t sa32_depth[4], da32_depth[4];
2322 status = match_convert_ipv6_depth(
2323 mh->match.acl.sa_depth,
2328 status = match_convert_ipv6_depth(
2329 mh->match.acl.da_depth,
2334 ml->acl_delete.field_value[0].value.u8 =
2335 mh->match.acl.proto;
2336 ml->acl_delete.field_value[0].mask_range.u8 =
2337 mh->match.acl.proto_mask;
2339 ml->acl_delete.field_value[1].value.u32 =
2340 rte_be_to_cpu_32(sa32[0]);
2341 ml->acl_delete.field_value[1].mask_range.u32 =
2343 ml->acl_delete.field_value[2].value.u32 =
2344 rte_be_to_cpu_32(sa32[1]);
2345 ml->acl_delete.field_value[2].mask_range.u32 =
2347 ml->acl_delete.field_value[3].value.u32 =
2348 rte_be_to_cpu_32(sa32[2]);
2349 ml->acl_delete.field_value[3].mask_range.u32 =
2351 ml->acl_delete.field_value[4].value.u32 =
2352 rte_be_to_cpu_32(sa32[3]);
2353 ml->acl_delete.field_value[4].mask_range.u32 =
2356 ml->acl_delete.field_value[5].value.u32 =
2357 rte_be_to_cpu_32(da32[0]);
2358 ml->acl_delete.field_value[5].mask_range.u32 =
2360 ml->acl_delete.field_value[6].value.u32 =
2361 rte_be_to_cpu_32(da32[1]);
2362 ml->acl_delete.field_value[6].mask_range.u32 =
2364 ml->acl_delete.field_value[7].value.u32 =
2365 rte_be_to_cpu_32(da32[2]);
2366 ml->acl_delete.field_value[7].mask_range.u32 =
2368 ml->acl_delete.field_value[8].value.u32 =
2369 rte_be_to_cpu_32(da32[3]);
2370 ml->acl_delete.field_value[8].mask_range.u32 =
2373 ml->acl_delete.field_value[9].value.u16 =
2375 ml->acl_delete.field_value[9].mask_range.u16 =
2378 ml->acl_delete.field_value[10].value.u16 =
2380 ml->acl_delete.field_value[10].mask_range.u16 =
/* Array table: match is just the entry position. */
2386 ml->array.pos = mh->match.array.pos;
/* Hash table: raw key bytes copied verbatim. */
2390 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
/* LPM table: IPv4 or IPv6 prefix depending on ip_version flag. */
2394 if (mh->match.lpm.ip_version) {
2395 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
2396 ml->lpm_ipv4.depth = mh->match.lpm.depth;
2398 memcpy(ml->lpm_ipv6.ip,
2399 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
2400 ml->lpm_ipv6.depth = mh->match.lpm.depth;
/*
 * Apply every action enabled in action->action_mask to the table entry being
 * built (data), via rte_table_action_apply() on table action profile a.
 * One bit per RTE_TABLE_ACTION_* type; actions are applied in a fixed order
 * (FWD, LB, MTR, TM, ENCAP, NAT, TTL, STATS, TIME, SYM_CRYPTO, TAG, DECAP).
 * Per-action error checks and the action parameter arguments are elided;
 * presumably each failing apply returns its status immediately — TODO
 * confirm against full file.
 */
2411 action_convert(struct rte_table_action *a,
2412 struct table_rule_action *action,
2413 struct rte_pipeline_table_entry *data)
2418 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
2419 status = rte_table_action_apply(a,
2421 RTE_TABLE_ACTION_FWD,
2428 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2429 status = rte_table_action_apply(a,
2431 RTE_TABLE_ACTION_LB,
2438 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2439 status = rte_table_action_apply(a,
2441 RTE_TABLE_ACTION_MTR,
2448 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2449 status = rte_table_action_apply(a,
2451 RTE_TABLE_ACTION_TM,
2458 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2459 status = rte_table_action_apply(a,
2461 RTE_TABLE_ACTION_ENCAP,
2468 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2469 status = rte_table_action_apply(a,
2471 RTE_TABLE_ACTION_NAT,
2478 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2479 status = rte_table_action_apply(a,
2481 RTE_TABLE_ACTION_TTL,
2488 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2489 status = rte_table_action_apply(a,
2491 RTE_TABLE_ACTION_STATS,
2498 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2499 status = rte_table_action_apply(a,
2501 RTE_TABLE_ACTION_TIME,
2508 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2509 status = rte_table_action_apply(a,
2511 RTE_TABLE_ACTION_SYM_CRYPTO,
2512 &action->sym_crypto);
2518 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
2519 status = rte_table_action_apply(a,
2521 RTE_TABLE_ACTION_TAG,
2528 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2529 status = rte_table_action_apply(a,
2531 RTE_TABLE_ACTION_DECAP,
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_ADD: convert the high-level match and
 * action from the request into low-level form, then add the rule to the
 * pipeline table. The entry is staged in p->buffer (zeroed first), and the
 * resulting table-entry data pointer is returned in the response.
 * Response reuses the request buffer in place. Error-exit branches after
 * each conversion step are elided.
 */
2541 static struct pipeline_msg_rsp *
2542 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
2543 struct pipeline_msg_req *req)
2545 union table_rule_match_low_level match_ll;
2546 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2547 struct table_rule_match *match = &req->table_rule_add.match;
2548 struct table_rule_action *action = &req->table_rule_add.action;
2549 struct rte_pipeline_table_entry *data_in, *data_out;
2550 uint32_t table_id = req->id;
2551 int key_found, status;
2552 struct rte_table_action *a = p->table_data[table_id].a;
/* Stage the new entry in the per-pipeline scratch buffer. */
2555 memset(p->buffer, 0, sizeof(p->buffer));
2556 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2558 status = match_convert(match, &match_ll, 1);
2564 status = action_convert(a, action, data_in);
2570 status = rte_pipeline_table_entry_add(p->p,
2581 /* Write response */
2583 rsp->table_rule_add.data = data_out;
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT: install the table's
 * default (miss) entry. Only the forward action is supported here: the
 * staged entry gets the fwd action plus either a port id or a table id
 * depending on the action kind. The resulting entry data pointer goes back
 * in the response. Response reuses the request buffer in place.
 */
2588 static struct pipeline_msg_rsp *
2589 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
2590 struct pipeline_msg_req *req)
2592 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2593 struct table_rule_action *action = &req->table_rule_add_default.action;
2594 struct rte_pipeline_table_entry *data_in, *data_out;
2595 uint32_t table_id = req->id;
/* Stage the default entry in the per-pipeline scratch buffer. */
2599 memset(p->buffer, 0, sizeof(p->buffer));
2600 data_in = (struct rte_pipeline_table_entry *) p->buffer;
2602 data_in->action = action->fwd.action;
2603 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
2604 data_in->port_id = action->fwd.id;
2605 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
2606 data_in->table_id = action->fwd.id;
2608 /* Add default rule to table */
2609 status = rte_pipeline_table_default_entry_add(p->p,
2618 /* Write response */
2620 rsp->table_rule_add_default.data = data_out;
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_ADD_BULK: add n_rules rules to a
 * table in one shot. Heap-allocates parallel low-level match/action arrays
 * plus pointer arrays and a key-found array, converts every rule, then adds
 * them either via the bulk API (when req->...bulk is set) or one-by-one.
 * On success the response reports n_rules; the error path (label elided)
 * frees the temporaries and reports 0 rules. Response reuses the request
 * buffer in place.
 * NOTE(review): heavily elided — the NULL-check exit, loop error breaks and
 * the copy of data[] back into the response are not visible.
 */
2625 static struct pipeline_msg_rsp *
2626 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
2627 struct pipeline_msg_req *req)
2630 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2632 uint32_t table_id = req->id;
2633 struct table_rule_match *match = req->table_rule_add_bulk.match;
2634 struct table_rule_action *action = req->table_rule_add_bulk.action;
2635 struct rte_pipeline_table_entry **data =
2636 (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
2637 uint32_t n_rules = req->table_rule_add_bulk.n_rules;
2638 uint32_t bulk = req->table_rule_add_bulk.bulk;
2640 struct rte_table_action *a = p->table_data[table_id].a;
2641 union table_rule_match_low_level *match_ll;
2643 void **match_ll_ptr;
2644 struct rte_pipeline_table_entry **action_ll_ptr;
2648 /* Memory allocation */
2649 match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
2650 action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
2651 match_ll_ptr = calloc(n_rules, sizeof(void *));
2653 calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
2654 found = calloc(n_rules, sizeof(int));
2656 if ((match_ll == NULL) ||
2657 (action_ll == NULL) ||
2658 (match_ll_ptr == NULL) ||
2659 (action_ll_ptr == NULL) ||
/* Build per-rule pointers into the flat match/action arrays. */
2663 for (i = 0; i < n_rules; i++) {
2664 match_ll_ptr[i] = (void *)&match_ll[i];
2666 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
2669 /* Rule match conversion */
2670 for (i = 0; i < n_rules; i++) {
2671 status = match_convert(&match[i], match_ll_ptr[i], 1);
2676 /* Rule action conversion */
2677 for (i = 0; i < n_rules; i++) {
2678 status = action_convert(a, &action[i], action_ll_ptr[i]);
2683 /* Add rule (match, action) to table */
2685 status = rte_pipeline_table_entry_add_bulk(p->p,
/* Non-bulk fallback: add the rules one at a time. */
2695 for (i = 0; i < n_rules; i++) {
2696 status = rte_pipeline_table_entry_add(p->p,
2708 /* Write response */
2710 rsp->table_rule_add_bulk.n_rules = n_rules;
2714 free(action_ll_ptr);
/* Error path: release all temporaries and report zero rules added. */
2723 free(action_ll_ptr);
2729 rsp->table_rule_add_bulk.n_rules = 0;
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_DELETE: convert the high-level match
 * to low-level form (delete variant: third match_convert arg is 0) and
 * delete the matching entry from the pipeline table. Response reuses the
 * request buffer in place; error-exit after conversion is elided.
 */
2733 static struct pipeline_msg_rsp *
2734 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
2735 struct pipeline_msg_req *req)
2737 union table_rule_match_low_level match_ll;
2738 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2739 struct table_rule_match *match = &req->table_rule_delete.match;
2740 uint32_t table_id = req->id;
2741 int key_found, status;
2743 status = match_convert(match, &match_ll, 0);
2749 rsp->status = rte_pipeline_table_entry_delete(p->p,
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT: remove the table's
 * default (miss) entry. Response reuses the request buffer in place.
 */
2758 static struct pipeline_msg_rsp *
2759 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
2760 struct pipeline_msg_req *req)
2762 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2763 uint32_t table_id = req->id;
2765 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_STATS_READ: read (and optionally
 * clear) a rule's stats via the table action profile, identified by the
 * rule's table-entry data pointer. Response reuses the request buffer.
 */
2772 static struct pipeline_msg_rsp *
2773 pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
2774 struct pipeline_msg_req *req)
2776 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2777 uint32_t table_id = req->id;
2778 void *data = req->table_rule_stats_read.data;
2779 int clear = req->table_rule_stats_read.clear;
2780 struct rte_table_action *a = p->table_data[table_id].a;
2782 rsp->status = rte_table_action_stats_read(a,
2784 &rsp->table_rule_stats_read.stats,
/*
 * Handler for PIPELINE_REQ_TABLE_MTR_PROFILE_ADD: register a meter profile
 * with the table's action profile. Response reuses the request buffer.
 */
2790 static struct pipeline_msg_rsp *
2791 pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
2792 struct pipeline_msg_req *req)
2794 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2795 uint32_t table_id = req->id;
2796 uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
2797 struct rte_table_action_meter_profile *profile =
2798 &req->table_mtr_profile_add.profile;
2799 struct rte_table_action *a = p->table_data[table_id].a;
2801 rsp->status = rte_table_action_meter_profile_add(a,
/*
 * Handler for PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE: unregister a meter
 * profile from the table's action profile. Response reuses the request
 * buffer in place.
 */
2808 static struct pipeline_msg_rsp *
2809 pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
2810 struct pipeline_msg_req *req)
2812 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2813 uint32_t table_id = req->id;
2814 uint32_t meter_profile_id =
2815 req->table_mtr_profile_delete.meter_profile_id;
2816 struct rte_table_action *a = p->table_data[table_id].a;
2818 rsp->status = rte_table_action_meter_profile_delete(a,
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_MTR_READ: read (and optionally clear)
 * a rule's meter counters for the traffic classes selected by tc_mask.
 * Response reuses the request buffer in place.
 */
2824 static struct pipeline_msg_rsp *
2825 pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
2826 struct pipeline_msg_req *req)
2828 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2829 uint32_t table_id = req->id;
2830 void *data = req->table_rule_mtr_read.data;
2831 uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
2832 int clear = req->table_rule_mtr_read.clear;
2833 struct rte_table_action *a = p->table_data[table_id].a;
2835 rsp->status = rte_table_action_meter_read(a,
2838 &rsp->table_rule_mtr_read.stats,
/*
 * Handler for PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE: update the entries of
 * the table's DSCP translation table selected by dscp_mask. Response reuses
 * the request buffer in place.
 */
2844 static struct pipeline_msg_rsp *
2845 pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
2846 struct pipeline_msg_req *req)
2848 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2849 uint32_t table_id = req->id;
2850 uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
2851 struct rte_table_action_dscp_table *dscp_table =
2852 &req->table_dscp_table_update.dscp_table;
2853 struct rte_table_action *a = p->table_data[table_id].a;
2855 rsp->status = rte_table_action_dscp_table_update(a,
/*
 * Handler for PIPELINE_REQ_TABLE_RULE_TTL_READ: read (and optionally clear)
 * a rule's TTL counters, identified by the rule's table-entry data pointer.
 * Response reuses the request buffer in place.
 */
2862 static struct pipeline_msg_rsp *
2863 pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
2864 struct pipeline_msg_req *req)
2866 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
2867 uint32_t table_id = req->id;
2868 void *data = req->table_rule_ttl_read.data;
2869 int clear = req->table_rule_ttl_read.clear;
2870 struct rte_table_action *a = p->table_data[table_id].a;
2872 rsp->status = rte_table_action_ttl_read(a,
2874 &rsp->table_rule_ttl_read.stats,
/*
 * Data plane thread: drain and dispatch the pipeline's pending control
 * messages. Dequeues one request at a time (loop framing elided), dispatches
 * on req->type to the matching handler (each handler builds the response in
 * place over the request buffer), then sends the response back on the
 * pipeline's response ring. The default case echoes the request back as the
 * response for unknown types (presumably with an error status — elided).
 */
2881 pipeline_msg_handle(struct pipeline_data *p)
2884 struct pipeline_msg_req *req;
2885 struct pipeline_msg_rsp *rsp;
2887 req = pipeline_msg_recv(p->msgq_req);
2891 switch (req->type) {
2892 case PIPELINE_REQ_PORT_IN_STATS_READ:
2893 rsp = pipeline_msg_handle_port_in_stats_read(p, req);
2896 case PIPELINE_REQ_PORT_IN_ENABLE:
2897 rsp = pipeline_msg_handle_port_in_enable(p, req);
2900 case PIPELINE_REQ_PORT_IN_DISABLE:
2901 rsp = pipeline_msg_handle_port_in_disable(p, req);
2904 case PIPELINE_REQ_PORT_OUT_STATS_READ:
2905 rsp = pipeline_msg_handle_port_out_stats_read(p, req);
2908 case PIPELINE_REQ_TABLE_STATS_READ:
2909 rsp = pipeline_msg_handle_table_stats_read(p, req);
2912 case PIPELINE_REQ_TABLE_RULE_ADD:
2913 rsp = pipeline_msg_handle_table_rule_add(p, req);
2916 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2917 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2920 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2921 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2924 case PIPELINE_REQ_TABLE_RULE_DELETE:
2925 rsp = pipeline_msg_handle_table_rule_delete(p, req);
2928 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2929 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
2932 case PIPELINE_REQ_TABLE_RULE_STATS_READ:
2933 rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
2936 case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
2937 rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
2940 case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
2941 rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
2944 case PIPELINE_REQ_TABLE_RULE_MTR_READ:
2945 rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
2948 case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
2949 rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
2952 case PIPELINE_REQ_TABLE_RULE_TTL_READ:
2953 rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
/* Unknown request type: reuse the request as the response. */
2957 rsp = (struct pipeline_msg_rsp *) req;
2961 pipeline_msg_send(p->msgq_rsp, rsp);
2966 * Data plane threads: main
2969 thread_main(void *arg __rte_unused)
2971 struct thread_data *t;
2972 uint32_t thread_id, i;
2974 thread_id = rte_lcore_id();
2975 t = &thread_data[thread_id];
2978 for (i = 0; ; i++) {
2982 for (j = 0; j < t->n_pipelines; j++)
2983 rte_pipeline_run(t->p[j]);
2986 if ((i & 0xF) == 0) {
2987 uint64_t time = rte_get_tsc_cycles();
2988 uint64_t time_next_min = UINT64_MAX;
2990 if (time < t->time_next_min)
2993 /* Pipeline message queues */
2994 for (j = 0; j < t->n_pipelines; j++) {
2995 struct pipeline_data *p =
2996 &t->pipeline_data[j];
2997 uint64_t time_next = p->time_next;
2999 if (time_next <= time) {
3000 pipeline_msg_handle(p);
3001 rte_pipeline_flush(p->p);
3002 time_next = time + p->timer_period;
3003 p->time_next = time_next;
3006 if (time_next < time_next_min)
3007 time_next_min = time_next;
3010 /* Thread message queues */
3012 uint64_t time_next = t->time_next;
3014 if (time_next <= time) {
3015 thread_msg_handle(t);
3016 time_next = time + t->timer_period;
3017 t->time_next = time_next;
3020 if (time_next < time_next_min)
3021 time_next_min = time_next;
3024 t->time_next_min = time_next_min;