1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
7 #include <rte_common.h>
12 #include "ipsec-secgw.h"
/* Upper bound on user-configured flow rules held in flow_rule_tbl. */
#define FLOW_RULES_MAX 128
/* One parsed "flow" config line: an IPv4 or IPv6 match (spec + mask)
 * plus the rte_flow handle created for it at init time.
 * NOTE(review): the two spec/mask pairs are normally wrapped in an
 * anonymous union of named structs (accessed as rule->ipv4.spec /
 * rule->ipv6.spec elsewhere in this file) and the struct also carries
 * port/queue fields -- those lines are elided from this view. */
struct flow_rule_entry {
	struct rte_flow_item_ipv4 spec; /* IPv4 match values */
	struct rte_flow_item_ipv4 mask; /* which bits of spec must match */
	struct rte_flow_item_ipv6 spec; /* IPv6 match values */
	struct rte_flow_item_ipv6 mask; /* which bits of spec must match */
	struct rte_flow *flow; /* rte_flow_create() handle; NULL if unsupported */
} flow_rule_tbl[FLOW_RULES_MAX]; /* static table of parsed rules */
/* Print the src/dst addresses of an IPv4 header in dotted-decimal
 * form, no trailing newline (caller controls line layout). */
ipv4_hdr_print(struct rte_ipv4_hdr *hdr)
	/* Addresses are big-endian on the wire; swap to host order so
	 * uint32_t_to_char() can peel off the four decimal bytes. */
	uint32_t_to_char(rte_bswap32(hdr->src_addr), &a, &b, &c, &d);
	printf("src: %3hhu.%3hhu.%3hhu.%3hhu \t", a, b, c, d);
	uint32_t_to_char(rte_bswap32(hdr->dst_addr), &a, &b, &c, &d);
	printf("dst: %3hhu.%3hhu.%3hhu.%3hhu", a, b, c, d);
/* Parse an "a.b.c.d/depth" token into a big-endian spec/mask pair for
 * an rte_flow IPv4 item. On parse failure APP_CHECK records the error
 * in status (status->status < 0) and the early-return path (elided
 * from this view) fires before spec/mask are written. */
ipv4_addr_cpy(rte_be32_t *spec, rte_be32_t *mask, char *token,
		struct parse_status *status)
	APP_CHECK(parse_ipv4_addr(token, &ip, &depth) == 0, status,
		"unrecognized input \"%s\", expect valid ipv4 addr", token);
	if (status->status < 0)
	/* Start from the rte_flow default all-ones IPv4 address mask. */
	memcpy(mask, &rte_flow_item_ipv4_mask.hdr.src_addr, sizeof(ip));
	/* Trim the mask to the CIDR prefix length.
	 * NOTE(review): *mask is rte_be32_t but this shift operates in
	 * host byte order, and depth == 0 would shift by 32 (UB in C).
	 * The all-ones starting mask makes the result correct only as a
	 * host-order value -- confirm a depth guard and any byte-order
	 * handling exist in the elided lines. */
	*mask = *mask << (32-depth);
/* Print the src/dst addresses of an IPv6 header as eight
 * colon-separated 16-bit hex groups (no "::" compression, no trailing
 * newline). Each group is built big-endian from two raw bytes.
 * NOTE(review): `addr` presumably points at hdr->src_addr before the
 * first printf and at hdr->dst_addr before the second -- those
 * assignments are elided from this view; confirm against the file. */
ipv6_hdr_print(struct rte_ipv6_hdr *hdr)
	printf("src: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx \t",
		(uint16_t)((addr[0] << 8) | addr[1]),
		(uint16_t)((addr[2] << 8) | addr[3]),
		(uint16_t)((addr[4] << 8) | addr[5]),
		(uint16_t)((addr[6] << 8) | addr[7]),
		(uint16_t)((addr[8] << 8) | addr[9]),
		(uint16_t)((addr[10] << 8) | addr[11]),
		(uint16_t)((addr[12] << 8) | addr[13]),
		(uint16_t)((addr[14] << 8) | addr[15]));
	printf("dst: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx",
		(uint16_t)((addr[0] << 8) | addr[1]),
		(uint16_t)((addr[2] << 8) | addr[3]),
		(uint16_t)((addr[4] << 8) | addr[5]),
		(uint16_t)((addr[6] << 8) | addr[7]),
		(uint16_t)((addr[8] << 8) | addr[9]),
		(uint16_t)((addr[10] << 8) | addr[11]),
		(uint16_t)((addr[12] << 8) | addr[13]),
		(uint16_t)((addr[14] << 8) | addr[15]));
102 ipv6_addr_cpy(uint8_t *spec, uint8_t *mask, char *token,
103 struct parse_status *status)
108 APP_CHECK(parse_ipv6_addr(token, &ip, &depth) == 0, status,
109 "unrecognized input \"%s\", expect valid ipv6 address", token);
110 if (status->status < 0)
113 memcpy(mask, &rte_flow_item_ipv6_mask.hdr.src_addr, sizeof(ip));
114 memcpy(spec, ip.s6_addr, sizeof(struct in6_addr));
116 for (i = 0; i < depth && (i%8 <= sizeof(struct in6_addr)); i++)
117 mask[i/8] &= ~(1 << (7-i%8));
/* Parse one tokenized "flow" configuration line of the form
 *   <ipv4|ipv6> src <addr/depth> dst <addr/depth> port <id> queue <id>
 * into the next free slot of flow_rule_tbl. Any error is reported
 * through status (status->status < 0); the early-return bodies after
 * each status check are elided from this view. */
parse_flow_tokens(char **tokens, uint32_t n_tokens,
		struct parse_status *status)
	struct flow_rule_entry *rule;
	/* Fixed-size table: refuse further rules once full. */
	if (nb_flow_rule >= FLOW_RULES_MAX) {
		printf("Too many flow rules\n");
	/* Claim and zero the next slot; nb_flow_rule is bumped on
	 * success (elided from this view). */
	rule = &flow_rule_tbl[nb_flow_rule];
	memset(rule, 0, sizeof(*rule));
	/* First token selects the rule's address family. */
	if (strcmp(tokens[0], "ipv4") == 0) {
	} else if (strcmp(tokens[0], "ipv6") == 0) {
		APP_CHECK(0, status, "unrecognized input \"%s\"", tokens[0]);
	/* Walk the remaining keyword/value token pairs. */
	for (ti = 1; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "src") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
			/* Family-specific copy into this rule's spec/mask
			 * (remaining call arguments elided from view). */
			if (ipv4_addr_cpy(&rule->ipv4.spec.hdr.src_addr,
					&rule->ipv4.mask.hdr.src_addr,
			if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr,
					rule->ipv6.mask.hdr.src_addr,
		if (strcmp(tokens[ti], "dst") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
			if (ipv4_addr_cpy(&rule->ipv4.spec.hdr.dst_addr,
					&rule->ipv4.mask.hdr.dst_addr,
			if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr,
					rule->ipv6.mask.hdr.dst_addr,
		if (strcmp(tokens[ti], "port") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
			APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
			if (status->status < 0)
			/* atoi() reports no errors, but the token was
			 * already checked to be numeric above. */
			rule->port = atoi(tokens[ti]);
			/* "queue" keyword branch (its strcmp line is
			 * elided from this view). */
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
			APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
			if (status->status < 0)
			rule->queue = atoi(tokens[ti]);
/* Pattern slots: ETH / IPV4-or-IPV6 / END. */
#define MAX_RTE_FLOW_PATTERN (3)
/* Action slots: QUEUE / END. */
#define MAX_RTE_FLOW_ACTIONS (2)
/* Build, validate and create the rte_flow for one parsed rule.
 * On any failure rule->flow stays NULL, which flow_init() later
 * reports as [UNSUPPORTED]. */
flow_init_single(struct flow_rule_entry *rule)
	struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN] = {};
	struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS] = {};
	struct rte_flow_attr attr = {};
	struct rte_flow_error err;
	/* Steer matching packets to the rule's RX queue. The queue conf
	 * is a compound literal: automatic storage in this function,
	 * which is fine because it is only read by validate/create
	 * below, before return. */
	action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	action[0].conf = &(struct rte_flow_action_queue) {
		.index = rule->queue,
	action[1].type = RTE_FLOW_ACTION_TYPE_END;
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	/* One branch per address family; the selector (rule's family
	 * flag) is elided from this view. */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].spec = &rule->ipv4.spec;
	pattern[1].mask = &rule->ipv4.mask;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
	pattern[1].spec = &rule->ipv6.spec;
	pattern[1].mask = &rule->ipv6.mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
	/* Ask the PMD first whether it can support this rule at all. */
	ret = rte_flow_validate(rule->port, &attr, pattern, action, &err);
	RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
	rule->flow = rte_flow_create(rule->port, &attr, pattern, action, &err);
	if (rule->flow == NULL)
		RTE_LOG(ERR, IPSEC, "Flow creation return %s\n", err.message);
	/* Body of flow_init() (signature is above this view): program
	 * every parsed rule, then print a one-line summary per rule. */
	struct flow_rule_entry *rule;
	/* First pass: hand each parsed rule to the PMD. */
	for (i = 0; i < nb_flow_rule; i++) {
		rule = &flow_rule_tbl[i];
		flow_init_single(rule);
	/* Second pass: report spec/mask plus port and queue; the
	 * ipv4-vs-ipv6 branching is elided from this view. */
	for (i = 0; i < nb_flow_rule; i++) {
		rule = &flow_rule_tbl[i];
		printf("Flow #%3d: spec ipv4 ", i);
		ipv4_hdr_print(&rule->ipv4.spec.hdr);
		printf(" mask ipv4 ");
		ipv4_hdr_print(&rule->ipv4.mask.hdr);
		printf("Flow #%3d: spec ipv6 ", i);
		ipv6_hdr_print(&rule->ipv6.spec.hdr);
		printf(" mask ipv6 ");
		ipv6_hdr_print(&rule->ipv6.mask.hdr);
		printf("\tPort: %d, Queue: %d", rule->port, rule->queue);
		/* A NULL handle means validate/create failed above. */
		if (rule->flow == NULL)
			printf(" [UNSUPPORTED]");