1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_cycles.h>
12 #include <rte_table_acl.h>
13 #include <rte_table_array.h>
14 #include <rte_table_hash.h>
15 #include <rte_table_lpm.h>
16 #include <rte_table_lpm_ipv6.h>
17 #include "rte_eth_softnic_internals.h"
20 * Master thread: data plane thread init
/*
 * NOTE(review): excerpted listing — interior lines are missing (the embedded
 * line numbers jump). Frees the per-lcore request/response message rings
 * created by softnic_thread_init(). Safe on partially-initialized state:
 * rte_ring_free() accepts NULL (per DPDK ring API docs — confirm version).
 */
23 softnic_thread_free(struct pmd_internals *softnic)
27 RTE_LCORE_FOREACH_SLAVE(i) {
28 struct softnic_thread *t = &softnic->thread[i];
/* Master->thread request ring and thread->master response ring */
32 rte_ring_free(t->msgq_req);
35 rte_ring_free(t->msgq_rsp);
/*
 * NOTE(review): excerpted listing — interior lines are missing. For every
 * slave lcore, create a request ring and a response ring (single-producer /
 * single-consumer: master enqueues requests, data plane thread enqueues
 * responses), then initialize both the master-side and data-plane-side
 * per-thread records. On any ring creation failure, previously created
 * rings are released via softnic_thread_free().
 */
40 softnic_thread_init(struct pmd_internals *softnic)
44 RTE_LCORE_FOREACH_SLAVE(i) {
45 char ring_name[NAME_MAX];
46 struct rte_ring *msgq_req, *msgq_rsp;
47 struct softnic_thread *t = &softnic->thread[i];
48 struct softnic_thread_data *t_data = &softnic->thread_data[i];
/* NOTE(review): variable is named cpu_id but holds the NUMA socket id of
 * lcore i (rte_lcore_to_socket_id) — presumably used for ring allocation
 * locality; confirm against the elided rte_ring_create() arguments. */
49 uint32_t cpu_id = rte_lcore_to_socket_id(i);
52 snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
56 msgq_req = rte_ring_create(ring_name,
59 RING_F_SP_ENQ | RING_F_SC_DEQ);
61 if (msgq_req == NULL) {
62 softnic_thread_free(softnic);
66 snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
70 msgq_rsp = rte_ring_create(ring_name,
73 RING_F_SP_ENQ | RING_F_SC_DEQ);
75 if (msgq_rsp == NULL) {
76 softnic_thread_free(softnic);
80 /* Master thread records */
81 t->msgq_req = msgq_req;
82 t->msgq_rsp = msgq_rsp;
85 /* Data plane thread records */
86 t_data->n_pipelines = 0;
87 t_data->msgq_req = msgq_req;
88 t_data->msgq_rsp = msgq_rsp;
/* Convert the timer period from ms to TSC cycles */
89 t_data->timer_period =
90 (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
91 t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
92 t_data->time_next_min = t_data->time_next;
/*
 * NOTE(review): excerpted listing — interior lines are missing.
 * Return 1 when the EAL lcore identified by thread_id is in RUNNING
 * state, 0 otherwise.
 */
99 thread_is_running(uint32_t thread_id)
101 enum rte_lcore_state_t thread_state;
103 thread_state = rte_eal_get_lcore_state(thread_id);
104 return (thread_state == RUNNING)? 1 : 0;
108 * Pipeline is running when:
109 * (A) Pipeline is mapped to a data plane thread AND
110 * (B) Its data plane thread is in RUNNING state.
/* NOTE(review): fragment — the check that p is mapped to a thread (the
 * elided lines) presumably precedes this; only the RUNNING-state test of
 * the owning data plane thread is visible here. */
113 pipeline_is_running(struct pipeline *p)
118 return thread_is_running(p->thread_id);
122 * Master thread & data plane threads: message passing
124 enum thread_req_type {
125 THREAD_REQ_PIPELINE_ENABLE = 0,
126 THREAD_REQ_PIPELINE_DISABLE,
130 struct thread_msg_req {
131 enum thread_req_type type;
135 struct rte_pipeline *p;
137 struct rte_table_action *a;
138 } table[RTE_PIPELINE_TABLE_MAX];
139 struct rte_ring *msgq_req;
140 struct rte_ring *msgq_rsp;
141 uint32_t timer_period_ms;
146 struct rte_pipeline *p;
151 struct thread_msg_rsp {
/*
 * Allocate a zeroed message buffer large enough to hold either a request
 * or a response, so the same buffer can be reused in-place for the reply.
 * NOTE(review): caller must check for NULL (calloc may fail); the elided
 * call sites presumably do — confirm.
 */
158 static struct thread_msg_req *
159 thread_msg_alloc(void)
161 size_t size = RTE_MAX(sizeof(struct thread_msg_req),
162 sizeof(struct thread_msg_rsp));
164 return calloc(1, size);
168 thread_msg_free(struct thread_msg_rsp *rsp)
/*
 * NOTE(review): excerpted listing — interior lines are missing.
 * Synchronous master->data-plane round trip: busy-retry the enqueue while
 * the request ring is full (-ENOBUFS), then busy-poll the response ring
 * until a reply arrives. NOTE(review): both loops spin without a timeout —
 * if the data plane thread never services the rings this blocks forever;
 * presumably acceptable because callers only use it when the thread is
 * RUNNING (see thread_is_running checks) — confirm.
 */
173 static struct thread_msg_rsp *
174 thread_msg_send_recv(struct pmd_internals *softnic,
176 struct thread_msg_req *req)
178 struct softnic_thread *t = &softnic->thread[thread_id];
179 struct rte_ring *msgq_req = t->msgq_req;
180 struct rte_ring *msgq_rsp = t->msgq_rsp;
181 struct thread_msg_rsp *rsp;
186 status = rte_ring_sp_enqueue(msgq_req, req);
187 } while (status == -ENOBUFS);
191 status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
192 } while (status != 0);
/*
 * NOTE(review): excerpted listing — interior lines are missing.
 * Map pipeline <pipeline_name> onto data plane thread <thread_id>.
 * Two paths:
 *  - Thread not running: update the thread's data structures directly
 *    from the master thread (no messaging needed).
 *  - Thread running: marshal the pipeline state into a PIPELINE_ENABLE
 *    request and hand it over via the message rings.
 */
198 softnic_thread_pipeline_enable(struct pmd_internals *softnic,
200 const char *pipeline_name)
202 struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
203 struct softnic_thread *t;
204 struct thread_msg_req *req;
205 struct thread_msg_rsp *rsp;
209 /* Check input params */
210 if ((thread_id >= RTE_MAX_LCORE) ||
212 (p->n_ports_in == 0) ||
213 (p->n_ports_out == 0) ||
217 t = &softnic->thread[thread_id];
218 if ((t->enabled == 0) ||
/* Fast path: target thread not running, update its state directly */
222 if (!thread_is_running(thread_id)) {
223 struct softnic_thread_data *td = &softnic->thread_data[thread_id];
224 struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
226 if (td->n_pipelines >= THREAD_PIPELINES_MAX)
229 /* Data plane thread */
230 td->p[td->n_pipelines] = p->p;
233 for (i = 0; i < p->n_tables; i++)
234 tdp->table_data[i].a =
236 tdp->n_tables = p->n_tables;
238 tdp->msgq_req = p->msgq_req;
239 tdp->msgq_rsp = p->msgq_rsp;
/* Per-pipeline timer period: ms -> TSC cycles */
240 tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
241 tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
/* Record the owning thread on the pipeline (master-side bookkeeping) */
246 p->thread_id = thread_id;
252 /* Allocate request */
253 req = thread_msg_alloc();
/* Marshal pipeline state into the ENABLE request */
258 req->type = THREAD_REQ_PIPELINE_ENABLE;
259 req->pipeline_enable.p = p->p;
260 for (i = 0; i < p->n_tables; i++)
261 req->pipeline_enable.table[i].a =
263 req->pipeline_enable.msgq_req = p->msgq_req;
264 req->pipeline_enable.msgq_rsp = p->msgq_rsp;
265 req->pipeline_enable.timer_period_ms = p->timer_period_ms;
266 req->pipeline_enable.n_tables = p->n_tables;
268 /* Send request and wait for response */
269 rsp = thread_msg_send_recv(softnic, thread_id, req);
274 status = rsp->status;
277 thread_msg_free(rsp);
279 /* Request completion */
283 p->thread_id = thread_id;
/*
 * NOTE(review): excerpted listing — interior lines are missing.
 * Unmap pipeline <pipeline_name> from data plane thread <thread_id>.
 * Mirrors the enable path: direct state update when the thread is not
 * running, message passing otherwise. Removal uses swap-with-last to keep
 * the thread's pipeline array dense without shifting.
 */
290 softnic_thread_pipeline_disable(struct pmd_internals *softnic,
292 const char *pipeline_name)
294 struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
295 struct softnic_thread *t;
296 struct thread_msg_req *req;
297 struct thread_msg_rsp *rsp;
300 /* Check input params */
301 if ((thread_id >= RTE_MAX_LCORE) ||
305 t = &softnic->thread[thread_id];
/* Pipeline must currently be owned by the requested thread */
312 if (p->thread_id != thread_id)
315 if (!thread_is_running(thread_id)) {
316 struct softnic_thread_data *td = &softnic->thread_data[thread_id];
319 for (i = 0; i < td->n_pipelines; i++) {
320 struct pipeline_data *tdp = &td->pipeline_data[i];
325 /* Data plane thread */
/* Swap-with-last removal (order of pipelines is not significant) */
326 if (i < td->n_pipelines - 1) {
327 struct rte_pipeline *pipeline_last =
328 td->p[td->n_pipelines - 1];
329 struct pipeline_data *tdp_last =
330 &td->pipeline_data[td->n_pipelines - 1];
332 td->p[i] = pipeline_last;
333 memcpy(tdp, tdp_last, sizeof(*tdp));
347 /* Allocate request */
348 req = thread_msg_alloc();
353 req->type = THREAD_REQ_PIPELINE_DISABLE;
354 req->pipeline_disable.p = p->p;
356 /* Send request and wait for response */
357 rsp = thread_msg_send_recv(softnic, thread_id, req);
362 status = rsp->status;
365 thread_msg_free(rsp);
367 /* Request completion */
377 * Data plane threads: message handling
379 static inline struct thread_msg_req *
380 thread_msg_recv(struct rte_ring *msgq_req)
382 struct thread_msg_req *req;
384 int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
393 thread_msg_send(struct rte_ring *msgq_rsp,
394 struct thread_msg_rsp *rsp)
399 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
400 } while (status == -ENOBUFS);
/*
 * NOTE(review): excerpted listing — interior lines are missing.
 * Data-plane-side handler for THREAD_REQ_PIPELINE_ENABLE: install the
 * pipeline described by the request into this thread's run list. The
 * request buffer is reused in place as the response (same allocation,
 * see thread_msg_alloc sizing).
 */
403 static struct thread_msg_rsp *
404 thread_msg_handle_pipeline_enable(struct softnic_thread_data *t,
405 struct thread_msg_req *req)
407 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
408 struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
/* Reject when the per-thread pipeline table is full */
412 if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
417 t->p[t->n_pipelines] = req->pipeline_enable.p;
419 p->p = req->pipeline_enable.p;
420 for (i = 0; i < req->pipeline_enable.n_tables; i++)
422 req->pipeline_enable.table[i].a;
424 p->n_tables = req->pipeline_enable.n_tables;
426 p->msgq_req = req->pipeline_enable.msgq_req;
427 p->msgq_rsp = req->pipeline_enable.msgq_rsp;
/* Per-pipeline timer period: ms -> TSC cycles */
429 (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
430 p->time_next = rte_get_tsc_cycles() + p->timer_period;
/*
 * NOTE(review): excerpted listing — interior lines are missing.
 * Data-plane-side handler for THREAD_REQ_PIPELINE_DISABLE: locate the
 * pipeline in this thread's run list and remove it via swap-with-last
 * (matches the master-side removal in softnic_thread_pipeline_disable).
 * The request buffer doubles as the response buffer.
 */
439 static struct thread_msg_rsp *
440 thread_msg_handle_pipeline_disable(struct softnic_thread_data *t,
441 struct thread_msg_req *req)
443 struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
444 uint32_t n_pipelines = t->n_pipelines;
445 struct rte_pipeline *pipeline = req->pipeline_disable.p;
449 for (i = 0; i < n_pipelines; i++) {
450 struct pipeline_data *p = &t->pipeline_data[i];
452 if (p->p != pipeline)
/* Swap-with-last removal keeps the array dense */
455 if (i < n_pipelines - 1) {
456 struct rte_pipeline *pipeline_last =
457 t->p[n_pipelines - 1];
458 struct pipeline_data *p_last =
459 &t->pipeline_data[n_pipelines - 1];
461 t->p[i] = pipeline_last;
462 memcpy(p, p_last, sizeof(*p));
471 /* should not get here */
/*
 * NOTE(review): excerpted listing — interior lines are missing.
 * Data plane thread message pump: dequeue one request (if any), dispatch
 * by type to the matching handler, and send the response back. Unknown
 * request types are answered by reusing the request buffer as an error
 * response (the default case).
 */
477 thread_msg_handle(struct softnic_thread_data *t)
480 struct thread_msg_req *req;
481 struct thread_msg_rsp *rsp;
483 req = thread_msg_recv(t->msgq_req);
488 case THREAD_REQ_PIPELINE_ENABLE:
489 rsp = thread_msg_handle_pipeline_enable(t, req);
492 case THREAD_REQ_PIPELINE_DISABLE:
493 rsp = thread_msg_handle_pipeline_disable(t, req);
497 rsp = (struct thread_msg_rsp *)req;
501 thread_msg_send(t->msgq_rsp, rsp);
506 * Master thread & data plane threads: message passing
508 enum pipeline_req_type {
510 PIPELINE_REQ_PORT_IN_ENABLE,
511 PIPELINE_REQ_PORT_IN_DISABLE,
514 PIPELINE_REQ_TABLE_RULE_ADD,
515 PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
516 PIPELINE_REQ_TABLE_RULE_ADD_BULK,
517 PIPELINE_REQ_TABLE_RULE_DELETE,
518 PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
523 struct pipeline_msg_req_table_rule_add {
524 struct softnic_table_rule_match match;
525 struct softnic_table_rule_action action;
528 struct pipeline_msg_req_table_rule_add_default {
529 struct softnic_table_rule_action action;
532 struct pipeline_msg_req_table_rule_add_bulk {
533 struct softnic_table_rule_match *match;
534 struct softnic_table_rule_action *action;
540 struct pipeline_msg_req_table_rule_delete {
541 struct softnic_table_rule_match match;
544 struct pipeline_msg_req {
545 enum pipeline_req_type type;
546 uint32_t id; /* Port IN, port OUT or table ID */
550 struct pipeline_msg_req_table_rule_add table_rule_add;
551 struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
552 struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
553 struct pipeline_msg_req_table_rule_delete table_rule_delete;
557 struct pipeline_msg_rsp_table_rule_add {
561 struct pipeline_msg_rsp_table_rule_add_default {
565 struct pipeline_msg_rsp_table_rule_add_bulk {
569 struct pipeline_msg_rsp {
574 struct pipeline_msg_rsp_table_rule_add table_rule_add;
575 struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
576 struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
583 static struct pipeline_msg_req *
584 pipeline_msg_alloc(void)
586 size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
587 sizeof(struct pipeline_msg_rsp));
589 return calloc(1, size);
593 pipeline_msg_free(struct pipeline_msg_rsp *rsp)
598 static struct pipeline_msg_rsp *
599 pipeline_msg_send_recv(struct pipeline *p,
600 struct pipeline_msg_req *req)
602 struct rte_ring *msgq_req = p->msgq_req;
603 struct rte_ring *msgq_rsp = p->msgq_rsp;
604 struct pipeline_msg_rsp *rsp;
609 status = rte_ring_sp_enqueue(msgq_req, req);
610 } while (status == -ENOBUFS);
614 status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
615 } while (status != 0);
621 softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
622 const char *pipeline_name,
626 struct pipeline_msg_req *req;
627 struct pipeline_msg_rsp *rsp;
630 /* Check input params */
631 if (pipeline_name == NULL)
634 p = softnic_pipeline_find(softnic, pipeline_name);
636 port_id >= p->n_ports_in)
639 if (!pipeline_is_running(p)) {
640 status = rte_pipeline_port_in_enable(p->p, port_id);
644 /* Allocate request */
645 req = pipeline_msg_alloc();
650 req->type = PIPELINE_REQ_PORT_IN_ENABLE;
653 /* Send request and wait for response */
654 rsp = pipeline_msg_send_recv(p, req);
659 status = rsp->status;
662 pipeline_msg_free(rsp);
668 softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
669 const char *pipeline_name,
673 struct pipeline_msg_req *req;
674 struct pipeline_msg_rsp *rsp;
677 /* Check input params */
678 if (pipeline_name == NULL)
681 p = softnic_pipeline_find(softnic, pipeline_name);
683 port_id >= p->n_ports_in)
686 if (!pipeline_is_running(p)) {
687 status = rte_pipeline_port_in_disable(p->p, port_id);
691 /* Allocate request */
692 req = pipeline_msg_alloc();
697 req->type = PIPELINE_REQ_PORT_IN_DISABLE;
700 /* Send request and wait for response */
701 rsp = pipeline_msg_send_recv(p, req);
706 status = rsp->status;
709 pipeline_msg_free(rsp);
715 match_check(struct softnic_table_rule_match *match,
719 struct softnic_table *table;
723 table_id >= p->n_tables)
726 table = &p->table[table_id];
727 if (match->match_type != table->params.match_type)
730 switch (match->match_type) {
733 struct softnic_table_acl_params *t = &table->params.match.acl;
734 struct softnic_table_rule_match_acl *r = &match->match.acl;
736 if ((r->ip_version && (t->ip_version == 0)) ||
737 ((r->ip_version == 0) && t->ip_version))
741 if (r->sa_depth > 32 ||
745 if (r->sa_depth > 128 ||
760 struct softnic_table_lpm_params *t = &table->params.match.lpm;
761 struct softnic_table_rule_match_lpm *r = &match->match.lpm;
763 if ((r->ip_version && (t->key_size != 4)) ||
764 ((r->ip_version == 0) && (t->key_size != 16)))
786 action_check(struct softnic_table_rule_action *action,
790 struct softnic_table_action_profile *ap;
792 if (action == NULL ||
794 table_id >= p->n_tables)
797 ap = p->table[table_id].ap;
798 if (action->action_mask != ap->params.action_mask)
801 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
802 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
803 action->fwd.id >= p->n_ports_out)
806 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
807 action->fwd.id >= p->n_tables)
811 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
812 uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
813 uint32_t tc_mask1 = action->mtr.tc_mask;
815 if (tc_mask1 != tc_mask0)
819 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
820 uint32_t n_subports_per_port =
821 ap->params.tm.n_subports_per_port;
822 uint32_t n_pipes_per_subport =
823 ap->params.tm.n_pipes_per_subport;
824 uint32_t subport_id = action->tm.subport_id;
825 uint32_t pipe_id = action->tm.pipe_id;
827 if (subport_id >= n_subports_per_port ||
828 pipe_id >= n_pipes_per_subport)
832 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
833 uint64_t encap_mask = ap->params.encap.encap_mask;
834 enum rte_table_action_encap_type type = action->encap.type;
836 if ((encap_mask & (1LLU << type)) == 0)
840 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
841 int ip_version0 = ap->params.common.ip_version;
842 int ip_version1 = action->nat.ip_version;
844 if ((ip_version1 && (ip_version0 == 0)) ||
845 ((ip_version1 == 0) && ip_version0))
853 action_default_check(struct softnic_table_rule_action *action,
857 if (action == NULL ||
858 action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD) ||
860 table_id >= p->n_tables)
863 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
864 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
865 action->fwd.id >= p->n_ports_out)
868 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
869 action->fwd.id >= p->n_tables)
876 union table_rule_match_low_level {
877 struct rte_table_acl_rule_add_params acl_add;
878 struct rte_table_acl_rule_delete_params acl_delete;
879 struct rte_table_array_key array;
880 uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
881 struct rte_table_lpm_key lpm_ipv4;
882 struct rte_table_lpm_ipv6_key lpm_ipv6;
886 match_convert(struct softnic_table_rule_match *mh,
887 union table_rule_match_low_level *ml,
891 action_convert(struct rte_table_action *a,
892 struct softnic_table_rule_action *action,
893 struct rte_pipeline_table_entry *data);
896 softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
897 const char *pipeline_name,
899 struct softnic_table_rule_match *match,
900 struct softnic_table_rule_action *action,
904 struct pipeline_msg_req *req;
905 struct pipeline_msg_rsp *rsp;
908 /* Check input params */
909 if (pipeline_name == NULL ||
915 p = softnic_pipeline_find(softnic, pipeline_name);
917 table_id >= p->n_tables ||
918 match_check(match, p, table_id) ||
919 action_check(action, p, table_id))
922 if (!pipeline_is_running(p)) {
923 struct rte_table_action *a = p->table[table_id].a;
924 union table_rule_match_low_level match_ll;
925 struct rte_pipeline_table_entry *data_in, *data_out;
929 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
933 /* Table match-action rule conversion */
934 data_in = (struct rte_pipeline_table_entry *)buffer;
936 status = match_convert(match, &match_ll, 1);
942 status = action_convert(a, action, data_in);
948 /* Add rule (match, action) to table */
949 status = rte_pipeline_table_entry_add(p->p,
967 /* Allocate request */
968 req = pipeline_msg_alloc();
973 req->type = PIPELINE_REQ_TABLE_RULE_ADD;
975 memcpy(&req->table_rule_add.match, match, sizeof(*match));
976 memcpy(&req->table_rule_add.action, action, sizeof(*action));
978 /* Send request and wait for response */
979 rsp = pipeline_msg_send_recv(p, req);
984 status = rsp->status;
986 *data = rsp->table_rule_add.data;
989 pipeline_msg_free(rsp);
995 softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
996 const char *pipeline_name,
998 struct softnic_table_rule_action *action,
1002 struct pipeline_msg_req *req;
1003 struct pipeline_msg_rsp *rsp;
1006 /* Check input params */
1007 if (pipeline_name == NULL ||
1012 p = softnic_pipeline_find(softnic, pipeline_name);
1014 table_id >= p->n_tables ||
1015 action_default_check(action, p, table_id))
1018 if (!pipeline_is_running(p)) {
1019 struct rte_pipeline_table_entry *data_in, *data_out;
1022 buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
1027 data_in = (struct rte_pipeline_table_entry *)buffer;
1029 data_in->action = action->fwd.action;
1030 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1031 data_in->port_id = action->fwd.id;
1032 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1033 data_in->table_id = action->fwd.id;
1035 /* Add default rule to table */
1036 status = rte_pipeline_table_default_entry_add(p->p,
1045 /* Write Response */
1052 /* Allocate request */
1053 req = pipeline_msg_alloc();
1058 req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
1060 memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
1062 /* Send request and wait for response */
1063 rsp = pipeline_msg_send_recv(p, req);
1068 status = rsp->status;
1070 *data = rsp->table_rule_add_default.data;
1073 pipeline_msg_free(rsp);
1079 softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic,
1080 const char *pipeline_name,
1082 struct softnic_table_rule_match *match,
1083 struct softnic_table_rule_action *action,
1088 struct pipeline_msg_req *req;
1089 struct pipeline_msg_rsp *rsp;
1093 /* Check input params */
1094 if (pipeline_name == NULL ||
1102 p = softnic_pipeline_find(softnic, pipeline_name);
1104 table_id >= p->n_tables)
1107 for (i = 0; i < *n_rules; i++)
1108 if (match_check(match, p, table_id) ||
1109 action_check(action, p, table_id))
1112 if (!pipeline_is_running(p)) {
1113 struct rte_table_action *a = p->table[table_id].a;
1114 union table_rule_match_low_level *match_ll;
1116 void **match_ll_ptr;
1117 struct rte_pipeline_table_entry **action_ll_ptr;
1118 struct rte_pipeline_table_entry **entries_ptr =
1119 (struct rte_pipeline_table_entry **)data;
1121 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1124 /* Memory allocation */
1125 match_ll = calloc(*n_rules, sizeof(union table_rule_match_low_level));
1126 action_ll = calloc(*n_rules, TABLE_RULE_ACTION_SIZE_MAX);
1127 match_ll_ptr = calloc(*n_rules, sizeof(void *));
1129 calloc(*n_rules, sizeof(struct rte_pipeline_table_entry *));
1130 found = calloc(*n_rules, sizeof(int));
1132 if (match_ll == NULL ||
1133 action_ll == NULL ||
1134 match_ll_ptr == NULL ||
1135 action_ll_ptr == NULL ||
1139 for (i = 0; i < *n_rules; i++) {
1140 match_ll_ptr[i] = (void *)&match_ll[i];
1142 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
1145 /* Rule match conversion */
1146 for (i = 0; i < *n_rules; i++) {
1147 status = match_convert(&match[i], match_ll_ptr[i], 1);
1152 /* Rule action conversion */
1153 for (i = 0; i < *n_rules; i++) {
1154 status = action_convert(a, &action[i], action_ll_ptr[i]);
1159 /* Add rule (match, action) to table */
1161 status = rte_pipeline_table_entry_add_bulk(p->p,
1171 for (i = 0; i < *n_rules; i++) {
1172 status = rte_pipeline_table_entry_add(p->p,
1187 free(action_ll_ptr);
1196 free(action_ll_ptr);
1205 /* Allocate request */
1206 req = pipeline_msg_alloc();
1211 req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
1213 req->table_rule_add_bulk.match = match;
1214 req->table_rule_add_bulk.action = action;
1215 req->table_rule_add_bulk.data = data;
1216 req->table_rule_add_bulk.n_rules = *n_rules;
1217 req->table_rule_add_bulk.bulk =
1218 (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
1220 /* Send request and wait for response */
1221 rsp = pipeline_msg_send_recv(p, req);
1226 status = rsp->status;
1228 *n_rules = rsp->table_rule_add_bulk.n_rules;
1231 pipeline_msg_free(rsp);
1237 softnic_pipeline_table_rule_delete(struct pmd_internals *softnic,
1238 const char *pipeline_name,
1240 struct softnic_table_rule_match *match)
1243 struct pipeline_msg_req *req;
1244 struct pipeline_msg_rsp *rsp;
1247 /* Check input params */
1248 if (pipeline_name == NULL ||
1252 p = softnic_pipeline_find(softnic, pipeline_name);
1254 table_id >= p->n_tables ||
1255 match_check(match, p, table_id))
1258 if (!pipeline_is_running(p)) {
1259 union table_rule_match_low_level match_ll;
1262 status = match_convert(match, &match_ll, 0);
1266 status = rte_pipeline_table_entry_delete(p->p,
1275 /* Allocate request */
1276 req = pipeline_msg_alloc();
1281 req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
1283 memcpy(&req->table_rule_delete.match, match, sizeof(*match));
1285 /* Send request and wait for response */
1286 rsp = pipeline_msg_send_recv(p, req);
1291 status = rsp->status;
1294 pipeline_msg_free(rsp);
1300 softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic,
1301 const char *pipeline_name,
1305 struct pipeline_msg_req *req;
1306 struct pipeline_msg_rsp *rsp;
1309 /* Check input params */
1310 if (pipeline_name == NULL)
1313 p = softnic_pipeline_find(softnic, pipeline_name);
1315 table_id >= p->n_tables)
1318 if (!pipeline_is_running(p)) {
1319 status = rte_pipeline_table_default_entry_delete(p->p,
1326 /* Allocate request */
1327 req = pipeline_msg_alloc();
1332 req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
1335 /* Send request and wait for response */
1336 rsp = pipeline_msg_send_recv(p, req);
1341 status = rsp->status;
1344 pipeline_msg_free(rsp);
1350 * Data plane threads: message handling
1352 static inline struct pipeline_msg_req *
1353 pipeline_msg_recv(struct rte_ring *msgq_req)
1355 struct pipeline_msg_req *req;
1357 int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
1366 pipeline_msg_send(struct rte_ring *msgq_rsp,
1367 struct pipeline_msg_rsp *rsp)
1372 status = rte_ring_sp_enqueue(msgq_rsp, rsp);
1373 } while (status == -ENOBUFS);
1376 static struct pipeline_msg_rsp *
1377 pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
1378 struct pipeline_msg_req *req)
1380 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
1381 uint32_t port_id = req->id;
1383 rsp->status = rte_pipeline_port_in_enable(p->p,
1389 static struct pipeline_msg_rsp *
1390 pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
1391 struct pipeline_msg_req *req)
1393 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
1394 uint32_t port_id = req->id;
1396 rsp->status = rte_pipeline_port_in_disable(p->p,
1403 match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
1408 switch (depth / 32) {
1418 depth32[1] = depth - 32;
1426 depth32[2] = depth - 64;
1434 depth32[3] = depth - 96;
1450 match_convert(struct softnic_table_rule_match *mh,
1451 union table_rule_match_low_level *ml,
1454 memset(ml, 0, sizeof(*ml));
1456 switch (mh->match_type) {
1458 if (mh->match.acl.ip_version)
1460 ml->acl_add.field_value[0].value.u8 =
1461 mh->match.acl.proto;
1462 ml->acl_add.field_value[0].mask_range.u8 =
1463 mh->match.acl.proto_mask;
1465 ml->acl_add.field_value[1].value.u32 =
1466 mh->match.acl.ipv4.sa;
1467 ml->acl_add.field_value[1].mask_range.u32 =
1468 mh->match.acl.sa_depth;
1470 ml->acl_add.field_value[2].value.u32 =
1471 mh->match.acl.ipv4.da;
1472 ml->acl_add.field_value[2].mask_range.u32 =
1473 mh->match.acl.da_depth;
1475 ml->acl_add.field_value[3].value.u16 =
1477 ml->acl_add.field_value[3].mask_range.u16 =
1480 ml->acl_add.field_value[4].value.u16 =
1482 ml->acl_add.field_value[4].mask_range.u16 =
1485 ml->acl_add.priority =
1486 (int32_t)mh->match.acl.priority;
1488 ml->acl_delete.field_value[0].value.u8 =
1489 mh->match.acl.proto;
1490 ml->acl_delete.field_value[0].mask_range.u8 =
1491 mh->match.acl.proto_mask;
1493 ml->acl_delete.field_value[1].value.u32 =
1494 mh->match.acl.ipv4.sa;
1495 ml->acl_delete.field_value[1].mask_range.u32 =
1496 mh->match.acl.sa_depth;
1498 ml->acl_delete.field_value[2].value.u32 =
1499 mh->match.acl.ipv4.da;
1500 ml->acl_delete.field_value[2].mask_range.u32 =
1501 mh->match.acl.da_depth;
1503 ml->acl_delete.field_value[3].value.u16 =
1505 ml->acl_delete.field_value[3].mask_range.u16 =
1508 ml->acl_delete.field_value[4].value.u16 =
1510 ml->acl_delete.field_value[4].mask_range.u16 =
1516 (uint32_t *)mh->match.acl.ipv6.sa;
1518 (uint32_t *)mh->match.acl.ipv6.da;
1519 uint32_t sa32_depth[4], da32_depth[4];
1522 status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
1527 status = match_convert_ipv6_depth(
1528 mh->match.acl.da_depth,
1533 ml->acl_add.field_value[0].value.u8 =
1534 mh->match.acl.proto;
1535 ml->acl_add.field_value[0].mask_range.u8 =
1536 mh->match.acl.proto_mask;
1538 ml->acl_add.field_value[1].value.u32 = sa32[0];
1539 ml->acl_add.field_value[1].mask_range.u32 =
1541 ml->acl_add.field_value[2].value.u32 = sa32[1];
1542 ml->acl_add.field_value[2].mask_range.u32 =
1544 ml->acl_add.field_value[3].value.u32 = sa32[2];
1545 ml->acl_add.field_value[3].mask_range.u32 =
1547 ml->acl_add.field_value[4].value.u32 = sa32[3];
1548 ml->acl_add.field_value[4].mask_range.u32 =
1551 ml->acl_add.field_value[5].value.u32 = da32[0];
1552 ml->acl_add.field_value[5].mask_range.u32 =
1554 ml->acl_add.field_value[6].value.u32 = da32[1];
1555 ml->acl_add.field_value[6].mask_range.u32 =
1557 ml->acl_add.field_value[7].value.u32 = da32[2];
1558 ml->acl_add.field_value[7].mask_range.u32 =
1560 ml->acl_add.field_value[8].value.u32 = da32[3];
1561 ml->acl_add.field_value[8].mask_range.u32 =
1564 ml->acl_add.field_value[9].value.u16 =
1566 ml->acl_add.field_value[9].mask_range.u16 =
1569 ml->acl_add.field_value[10].value.u16 =
1571 ml->acl_add.field_value[10].mask_range.u16 =
1574 ml->acl_add.priority =
1575 (int32_t)mh->match.acl.priority;
1578 (uint32_t *)mh->match.acl.ipv6.sa;
1580 (uint32_t *)mh->match.acl.ipv6.da;
1581 uint32_t sa32_depth[4], da32_depth[4];
1584 status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
1589 status = match_convert_ipv6_depth(mh->match.acl.da_depth,
1594 ml->acl_delete.field_value[0].value.u8 =
1595 mh->match.acl.proto;
1596 ml->acl_delete.field_value[0].mask_range.u8 =
1597 mh->match.acl.proto_mask;
1599 ml->acl_delete.field_value[1].value.u32 =
1601 ml->acl_delete.field_value[1].mask_range.u32 =
1603 ml->acl_delete.field_value[2].value.u32 =
1605 ml->acl_delete.field_value[2].mask_range.u32 =
1607 ml->acl_delete.field_value[3].value.u32 =
1609 ml->acl_delete.field_value[3].mask_range.u32 =
1611 ml->acl_delete.field_value[4].value.u32 =
1613 ml->acl_delete.field_value[4].mask_range.u32 =
1616 ml->acl_delete.field_value[5].value.u32 =
1618 ml->acl_delete.field_value[5].mask_range.u32 =
1620 ml->acl_delete.field_value[6].value.u32 =
1622 ml->acl_delete.field_value[6].mask_range.u32 =
1624 ml->acl_delete.field_value[7].value.u32 =
1626 ml->acl_delete.field_value[7].mask_range.u32 =
1628 ml->acl_delete.field_value[8].value.u32 =
1630 ml->acl_delete.field_value[8].mask_range.u32 =
1633 ml->acl_delete.field_value[9].value.u16 =
1635 ml->acl_delete.field_value[9].mask_range.u16 =
1638 ml->acl_delete.field_value[10].value.u16 =
1640 ml->acl_delete.field_value[10].mask_range.u16 =
1646 ml->array.pos = mh->match.array.pos;
1650 memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
1654 if (mh->match.lpm.ip_version) {
1655 ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
1656 ml->lpm_ipv4.depth = mh->match.lpm.depth;
1658 memcpy(ml->lpm_ipv6.ip,
1659 mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
1660 ml->lpm_ipv6.depth = mh->match.lpm.depth;
1671 action_convert(struct rte_table_action *a,
1672 struct softnic_table_rule_action *action,
1673 struct rte_pipeline_table_entry *data)
1678 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
1679 status = rte_table_action_apply(a,
1681 RTE_TABLE_ACTION_FWD,
1688 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
1689 status = rte_table_action_apply(a,
1691 RTE_TABLE_ACTION_LB,
1698 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1699 status = rte_table_action_apply(a,
1701 RTE_TABLE_ACTION_MTR,
1708 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1709 status = rte_table_action_apply(a,
1711 RTE_TABLE_ACTION_TM,
1718 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1719 status = rte_table_action_apply(a,
1721 RTE_TABLE_ACTION_ENCAP,
1728 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1729 status = rte_table_action_apply(a,
1731 RTE_TABLE_ACTION_NAT,
1738 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
1739 status = rte_table_action_apply(a,
1741 RTE_TABLE_ACTION_TTL,
1748 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
1749 status = rte_table_action_apply(a,
1751 RTE_TABLE_ACTION_STATS,
1758 if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
1759 status = rte_table_action_apply(a,
1761 RTE_TABLE_ACTION_TIME,
1771 static struct pipeline_msg_rsp *
1772 pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
1773 struct pipeline_msg_req *req)
1775 union table_rule_match_low_level match_ll;
1776 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
1777 struct softnic_table_rule_match *match = &req->table_rule_add.match;
1778 struct softnic_table_rule_action *action = &req->table_rule_add.action;
1779 struct rte_pipeline_table_entry *data_in, *data_out;
1780 uint32_t table_id = req->id;
1781 int key_found, status;
1782 struct rte_table_action *a = p->table_data[table_id].a;
1785 memset(p->buffer, 0, sizeof(p->buffer));
1786 data_in = (struct rte_pipeline_table_entry *)p->buffer;
1788 status = match_convert(match, &match_ll, 1);
1794 status = action_convert(a, action, data_in);
1800 status = rte_pipeline_table_entry_add(p->p,
1811 /* Write response */
1813 rsp->table_rule_add.data = data_out;
1818 static struct pipeline_msg_rsp *
1819 pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
1820 struct pipeline_msg_req *req)
1822 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
1823 struct softnic_table_rule_action *action = &req->table_rule_add_default.action;
1824 struct rte_pipeline_table_entry *data_in, *data_out;
1825 uint32_t table_id = req->id;
1829 memset(p->buffer, 0, sizeof(p->buffer));
1830 data_in = (struct rte_pipeline_table_entry *)p->buffer;
1832 data_in->action = action->fwd.action;
1833 if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
1834 data_in->port_id = action->fwd.id;
1835 if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
1836 data_in->table_id = action->fwd.id;
1838 /* Add default rule to table */
1839 status = rte_pipeline_table_default_entry_add(p->p,
1848 /* Write response */
1850 rsp->table_rule_add_default.data = data_out;
/* Data plane thread handler for the TABLE_RULE_ADD_BULK request: converts
 * n_rules (match, action) pairs to their low-level representations and adds
 * them to table req->id, either with one bulk pipeline call or rule-by-rule
 * (selected by the request's "bulk" flag). On failure the allocated scratch
 * arrays are freed and n_rules in the response is set to 0.
 * NOTE(review): this extract is elided — error-branch statements, the bulk
 * argument lists, and some declarations (action_ll, found, i, status) are
 * not shown here.
 */
1855 static struct pipeline_msg_rsp *
1856 pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
1857 struct pipeline_msg_req *req)
1859 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; /* in-place reply */
1861 uint32_t table_id = req->id;
1862 struct softnic_table_rule_match *match = req->table_rule_add_bulk.match;
1863 struct softnic_table_rule_action *action = req->table_rule_add_bulk.action;
1864 struct rte_pipeline_table_entry **data =
1865 (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
1866 uint32_t n_rules = req->table_rule_add_bulk.n_rules;
1867 uint32_t bulk = req->table_rule_add_bulk.bulk;
1869 struct rte_table_action *a = p->table_data[table_id].a;
1870 union table_rule_match_low_level *match_ll;
1872 void **match_ll_ptr;
1873 struct rte_pipeline_table_entry **action_ll_ptr;
1877 /* Memory allocation */
/* Per-rule scratch: low-level match array, one fixed-size action slot per
 * rule, pointer arrays for the bulk API, and a per-rule key-found flag.
 * calloc zero-initializes and overflow-checks n_rules * size. */
1878 match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
1879 action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
1880 match_ll_ptr = calloc(n_rules, sizeof(void *));
1882 calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
1883 found = calloc(n_rules, sizeof(int));
/* Any allocation failure bails out to the error path below. */
1885 if (match_ll == NULL ||
1886 action_ll == NULL ||
1887 match_ll_ptr == NULL ||
1888 action_ll_ptr == NULL ||
/* Point the i-th pointer at the i-th fixed-size slot of each flat array. */
1892 for (i = 0; i < n_rules; i++) {
1893 match_ll_ptr[i] = (void *)&match_ll[i];
1895 (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
1898 /* Rule match conversion */
1899 for (i = 0; i < n_rules; i++) {
1900 status = match_convert(&match[i], match_ll_ptr[i], 1);
1905 /* Rule action conversion */
1906 for (i = 0; i < n_rules; i++) {
1907 status = action_convert(a, &action[i], action_ll_ptr[i]);
1912 /* Add rule (match, action) to table */
/* Fast path: single bulk insertion when the request asked for it. */
1914 status = rte_pipeline_table_entry_add_bulk(p->p,
/* Slow path: insert the rules one at a time. */
1924 for (i = 0; i < n_rules; i++) {
1925 status = rte_pipeline_table_entry_add(p->p,
1938 /* Write response */
1940 rsp->table_rule_add_bulk.n_rules = n_rules;
/* Success cleanup of the scratch arrays (elided lines free the others). */
1944 free(action_ll_ptr);
/* Error path cleanup mirrors the success path... */
1953 free(action_ll_ptr);
/* ...and reports zero rules added. */
1959 rsp->table_rule_add_bulk.n_rules = 0;
/* Data plane thread handler for the TABLE_RULE_DELETE request: converts the
 * rule match to its low-level form and deletes the matching entry from
 * table req->id. The response is written in place over the request.
 * NOTE(review): this extract is elided — the status check after
 * match_convert() and the tail of the delete call are not shown here.
 */
1963 static struct pipeline_msg_rsp *
1964 pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
1965 struct pipeline_msg_req *req)
1967 union table_rule_match_low_level match_ll;
1968 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; /* in-place reply */
1969 struct softnic_table_rule_match *match = &req->table_rule_delete.match;
1970 uint32_t table_id = req->id;
1971 int key_found, status;
/* Third argument 0: key-only conversion (no mask), unlike the add path
 * which passes 1 — presumably key_mask vs key; confirm in match_convert. */
1973 status = match_convert(match, &match_ll, 0);
1979 rsp->status = rte_pipeline_table_entry_delete(p->p,
/* Data plane thread handler for the TABLE_RULE_DELETE_DEFAULT request:
 * removes the default (lookup-miss) entry of table req->id. The response is
 * written in place over the request.
 * NOTE(review): this extract is elided — the remaining arguments of the
 * delete call and the return statement are not shown here.
 */
1988 static struct pipeline_msg_rsp *
1989 pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
1990 struct pipeline_msg_req *req)
1992 struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; /* in-place reply */
1993 uint32_t table_id = req->id;
1995 rsp->status = rte_pipeline_table_default_entry_delete(p->p,
/* Drain and dispatch control-plane messages for one pipeline: receive a
 * request from the pipeline's request ring, route it to the matching
 * handler by type, and send the handler's response on the response ring.
 * Handlers reuse the request buffer as the response, so req and rsp alias.
 * NOTE(review): this extract is elided — the surrounding loop/early-exit on
 * empty ring, the per-case break statements, and the default-case status
 * assignment are not shown here.
 */
2003 pipeline_msg_handle(struct pipeline_data *p)
2006 struct pipeline_msg_req *req;
2007 struct pipeline_msg_rsp *rsp;
2009 req = pipeline_msg_recv(p->msgq_req);
/* Dispatch on request type; every handler returns the response to send. */
2013 switch (req->type) {
2014 case PIPELINE_REQ_PORT_IN_ENABLE:
2015 rsp = pipeline_msg_handle_port_in_enable(p, req);
2018 case PIPELINE_REQ_PORT_IN_DISABLE:
2019 rsp = pipeline_msg_handle_port_in_disable(p, req);
2022 case PIPELINE_REQ_TABLE_RULE_ADD:
2023 rsp = pipeline_msg_handle_table_rule_add(p, req);
2026 case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
2027 rsp = pipeline_msg_handle_table_rule_add_default(p, req);
2030 case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
2031 rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
2034 case PIPELINE_REQ_TABLE_RULE_DELETE:
2035 rsp = pipeline_msg_handle_table_rule_delete(p, req);
2038 case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
2039 rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
/* Unknown request type: echo the request back as the response. */
2043 rsp = (struct pipeline_msg_rsp *)req;
2047 pipeline_msg_send(p->msgq_rsp, rsp);
2052 * Data plane threads: main
2055 rte_pmd_softnic_run(uint16_t port_id)
2057 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2058 struct pmd_internals *softnic;
2059 struct softnic_thread_data *t;
2060 uint32_t thread_id, j;
2062 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2063 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
2066 softnic = dev->data->dev_private;
2067 thread_id = rte_lcore_id();
2068 t = &softnic->thread_data[thread_id];
2072 for (j = 0; j < t->n_pipelines; j++)
2073 rte_pipeline_run(t->p[j]);
2076 if ((t->iter & 0xFLLU) == 0) {
2077 uint64_t time = rte_get_tsc_cycles();
2078 uint64_t time_next_min = UINT64_MAX;
2080 if (time < t->time_next_min)
2083 /* Pipeline message queues */
2084 for (j = 0; j < t->n_pipelines; j++) {
2085 struct pipeline_data *p =
2086 &t->pipeline_data[j];
2087 uint64_t time_next = p->time_next;
2089 if (time_next <= time) {
2090 pipeline_msg_handle(p);
2091 rte_pipeline_flush(p->p);
2092 time_next = time + p->timer_period;
2093 p->time_next = time_next;
2096 if (time_next < time_next_min)
2097 time_next_min = time_next;
2100 /* Thread message queues */
2102 uint64_t time_next = t->time_next;
2104 if (time_next <= time) {
2105 thread_msg_handle(t);
2106 time_next = time + t->timer_period;
2107 t->time_next = time_next;
2110 if (time_next < time_next_min)
2111 time_next_min = time_next;
2114 t->time_next_min = time_next_min;