/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * This file contains the implementations of the item-related
 * methods. Each item has a method that prepares the item and
 * adds it into the items array at a given index.
 */
12 #include "items_gen.h"
/* Storage for additional parameters for items.
 * NOTE(review): field list reconstructed from the uses below
 * (RTE_BE32(para.src_ip) and uint8_t ti = para.core_idx) —
 * confirm against items_gen.h / the original definition.
 */
struct additional_para {
	uint32_t src_ip;  /* host-order outer source IPv4, varied per flow */
	uint8_t core_idx; /* lcore index selecting per-core spec/mask slots */
};
22 add_ether(struct rte_flow_item *items,
23 uint8_t items_counter,
24 __rte_unused struct additional_para para)
26 static struct rte_flow_item_eth eth_spec;
27 static struct rte_flow_item_eth eth_mask;
29 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
30 items[items_counter].spec = ð_spec;
31 items[items_counter].mask = ð_mask;
35 add_vlan(struct rte_flow_item *items,
36 uint8_t items_counter,
37 __rte_unused struct additional_para para)
39 static struct rte_flow_item_vlan vlan_spec = {
40 .tci = RTE_BE16(VLAN_VALUE),
42 static struct rte_flow_item_vlan vlan_mask = {
43 .tci = RTE_BE16(0xffff),
46 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
47 items[items_counter].spec = &vlan_spec;
48 items[items_counter].mask = &vlan_mask;
52 add_ipv4(struct rte_flow_item *items,
53 uint8_t items_counter, struct additional_para para)
55 static struct rte_flow_item_ipv4 ipv4_specs[RTE_MAX_LCORE] __rte_cache_aligned;
56 static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
57 uint8_t ti = para.core_idx;
59 ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
60 ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);
62 items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
63 items[items_counter].spec = &ipv4_specs[ti];
64 items[items_counter].mask = &ipv4_masks[ti];
69 add_ipv6(struct rte_flow_item *items,
70 uint8_t items_counter, struct additional_para para)
72 static struct rte_flow_item_ipv6 ipv6_specs[RTE_MAX_LCORE] __rte_cache_aligned;
73 static struct rte_flow_item_ipv6 ipv6_masks[RTE_MAX_LCORE] __rte_cache_aligned;
74 uint8_t ti = para.core_idx;
77 memset(&ipv6_specs[ti].hdr.src_addr, para.src_ip,
78 sizeof(ipv6_specs->hdr.src_addr) / 2);
81 memset(&ipv6_masks[ti].hdr.src_addr, 0xff,
82 sizeof(ipv6_specs->hdr.src_addr));
84 items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
85 items[items_counter].spec = &ipv6_specs[ti];
86 items[items_counter].mask = &ipv6_masks[ti];
90 add_tcp(struct rte_flow_item *items,
91 uint8_t items_counter,
92 __rte_unused struct additional_para para)
94 static struct rte_flow_item_tcp tcp_spec;
95 static struct rte_flow_item_tcp tcp_mask;
97 items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
98 items[items_counter].spec = &tcp_spec;
99 items[items_counter].mask = &tcp_mask;
103 add_udp(struct rte_flow_item *items,
104 uint8_t items_counter,
105 __rte_unused struct additional_para para)
107 static struct rte_flow_item_udp udp_spec;
108 static struct rte_flow_item_udp udp_mask;
110 items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
111 items[items_counter].spec = &udp_spec;
112 items[items_counter].mask = &udp_mask;
116 add_vxlan(struct rte_flow_item *items,
117 uint8_t items_counter,
118 struct additional_para para)
120 static struct rte_flow_item_vxlan vxlan_specs[RTE_MAX_LCORE] __rte_cache_aligned;
121 static struct rte_flow_item_vxlan vxlan_masks[RTE_MAX_LCORE] __rte_cache_aligned;
122 uint8_t ti = para.core_idx;
126 vni_value = VNI_VALUE;
128 /* Set standard vxlan vni */
129 for (i = 0; i < 3; i++) {
130 vxlan_specs[ti].vni[2 - i] = vni_value >> (i * 8);
131 vxlan_masks[ti].vni[2 - i] = 0xff;
134 /* Standard vxlan flags */
135 vxlan_specs[ti].flags = 0x8;
137 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
138 items[items_counter].spec = &vxlan_specs[ti];
139 items[items_counter].mask = &vxlan_masks[ti];
143 add_vxlan_gpe(struct rte_flow_item *items,
144 uint8_t items_counter,
145 __rte_unused struct additional_para para)
147 static struct rte_flow_item_vxlan_gpe vxlan_gpe_specs[RTE_MAX_LCORE] __rte_cache_aligned;
148 static struct rte_flow_item_vxlan_gpe vxlan_gpe_masks[RTE_MAX_LCORE] __rte_cache_aligned;
149 uint8_t ti = para.core_idx;
153 vni_value = VNI_VALUE;
155 /* Set vxlan-gpe vni */
156 for (i = 0; i < 3; i++) {
157 vxlan_gpe_specs[ti].vni[2 - i] = vni_value >> (i * 8);
158 vxlan_gpe_masks[ti].vni[2 - i] = 0xff;
161 /* vxlan-gpe flags */
162 vxlan_gpe_specs[ti].flags = 0x0c;
164 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
165 items[items_counter].spec = &vxlan_gpe_specs[ti];
166 items[items_counter].mask = &vxlan_gpe_masks[ti];
170 add_gre(struct rte_flow_item *items,
171 uint8_t items_counter,
172 __rte_unused struct additional_para para)
174 static struct rte_flow_item_gre gre_spec = {
175 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
177 static struct rte_flow_item_gre gre_mask = {
178 .protocol = RTE_BE16(0xffff),
181 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
182 items[items_counter].spec = &gre_spec;
183 items[items_counter].mask = &gre_mask;
187 add_geneve(struct rte_flow_item *items,
188 uint8_t items_counter,
189 __rte_unused struct additional_para para)
191 static struct rte_flow_item_geneve geneve_specs[RTE_MAX_LCORE] __rte_cache_aligned;
192 static struct rte_flow_item_geneve geneve_masks[RTE_MAX_LCORE] __rte_cache_aligned;
193 uint8_t ti = para.core_idx;
197 vni_value = VNI_VALUE;
199 for (i = 0; i < 3; i++) {
200 geneve_specs[ti].vni[2 - i] = vni_value >> (i * 8);
201 geneve_masks[ti].vni[2 - i] = 0xff;
204 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
205 items[items_counter].spec = &geneve_specs[ti];
206 items[items_counter].mask = &geneve_masks[ti];
210 add_gtp(struct rte_flow_item *items,
211 uint8_t items_counter,
212 __rte_unused struct additional_para para)
214 static struct rte_flow_item_gtp gtp_spec = {
215 .teid = RTE_BE32(TEID_VALUE),
217 static struct rte_flow_item_gtp gtp_mask = {
218 .teid = RTE_BE32(0xffffffff),
221 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
222 items[items_counter].spec = >p_spec;
223 items[items_counter].mask = >p_mask;
227 add_meta_data(struct rte_flow_item *items,
228 uint8_t items_counter,
229 __rte_unused struct additional_para para)
231 static struct rte_flow_item_meta meta_spec = {
232 .data = RTE_BE32(META_DATA),
234 static struct rte_flow_item_meta meta_mask = {
235 .data = RTE_BE32(0xffffffff),
238 items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
239 items[items_counter].spec = &meta_spec;
240 items[items_counter].mask = &meta_mask;
245 add_meta_tag(struct rte_flow_item *items,
246 uint8_t items_counter,
247 __rte_unused struct additional_para para)
249 static struct rte_flow_item_tag tag_spec = {
250 .data = RTE_BE32(META_DATA),
253 static struct rte_flow_item_tag tag_mask = {
254 .data = RTE_BE32(0xffffffff),
258 items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
259 items[items_counter].spec = &tag_spec;
260 items[items_counter].mask = &tag_mask;
264 add_icmpv4(struct rte_flow_item *items,
265 uint8_t items_counter,
266 __rte_unused struct additional_para para)
268 static struct rte_flow_item_icmp icmpv4_spec;
269 static struct rte_flow_item_icmp icmpv4_mask;
271 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP;
272 items[items_counter].spec = &icmpv4_spec;
273 items[items_counter].mask = &icmpv4_mask;
277 add_icmpv6(struct rte_flow_item *items,
278 uint8_t items_counter,
279 __rte_unused struct additional_para para)
281 static struct rte_flow_item_icmp6 icmpv6_spec;
282 static struct rte_flow_item_icmp6 icmpv6_mask;
284 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6;
285 items[items_counter].spec = &icmpv6_spec;
286 items[items_counter].mask = &icmpv6_mask;
290 fill_items(struct rte_flow_item *items,
291 uint64_t *flow_items, uint32_t outer_ip_src,
294 uint8_t items_counter = 0;
296 struct additional_para additional_para_data = {
297 .src_ip = outer_ip_src,
298 .core_idx = core_idx,
301 /* Support outer items up to tunnel layer only. */
302 static const struct items_dict {
305 struct rte_flow_item *items,
306 uint8_t items_counter,
307 struct additional_para para
311 .mask = RTE_FLOW_ITEM_TYPE_META,
312 .funct = add_meta_data,
315 .mask = RTE_FLOW_ITEM_TYPE_TAG,
316 .funct = add_meta_tag,
319 .mask = RTE_FLOW_ITEM_TYPE_ETH,
323 .mask = RTE_FLOW_ITEM_TYPE_VLAN,
327 .mask = RTE_FLOW_ITEM_TYPE_IPV4,
331 .mask = RTE_FLOW_ITEM_TYPE_IPV6,
335 .mask = RTE_FLOW_ITEM_TYPE_TCP,
339 .mask = RTE_FLOW_ITEM_TYPE_UDP,
343 .mask = RTE_FLOW_ITEM_TYPE_VXLAN,
347 .mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
348 .funct = add_vxlan_gpe,
351 .mask = RTE_FLOW_ITEM_TYPE_GRE,
355 .mask = RTE_FLOW_ITEM_TYPE_GENEVE,
359 .mask = RTE_FLOW_ITEM_TYPE_GTP,
363 .mask = RTE_FLOW_ITEM_TYPE_ICMP,
367 .mask = RTE_FLOW_ITEM_TYPE_ICMP6,
372 for (j = 0; j < MAX_ITEMS_NUM; j++) {
373 if (flow_items[j] == 0)
375 for (i = 0; i < RTE_DIM(items_list); i++) {
377 FLOW_ITEM_MASK(items_list[i].mask)) == 0)
380 items, items_counter++,
387 items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;