1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
 * This file contains the implementations of the item-related
 * methods. Each item has a method to prepare the item and add
 * it into the items array at a given index.
#include <stdint.h>
#include <string.h>

#include "items_gen.h"
/* Storage for additional parameters passed to the item generators. */
struct additional_para {
	uint32_t src_ip;  /* outer source IP used by IPv4/IPv6 items */
	uint8_t core_idx; /* lcore slot selecting per-core spec/mask storage */
};
22 add_ether(struct rte_flow_item *items,
23 uint8_t items_counter,
24 __rte_unused struct additional_para para)
26 static struct rte_flow_item_eth eth_spec;
27 static struct rte_flow_item_eth eth_mask;
29 memset(ð_spec, 0, sizeof(struct rte_flow_item_eth));
30 memset(ð_mask, 0, sizeof(struct rte_flow_item_eth));
32 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
33 items[items_counter].spec = ð_spec;
34 items[items_counter].mask = ð_mask;
38 add_vlan(struct rte_flow_item *items,
39 uint8_t items_counter,
40 __rte_unused struct additional_para para)
42 static struct rte_flow_item_vlan vlan_spec;
43 static struct rte_flow_item_vlan vlan_mask;
45 uint16_t vlan_value = VLAN_VALUE;
47 memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
48 memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
50 vlan_spec.tci = RTE_BE16(vlan_value);
51 vlan_mask.tci = RTE_BE16(0xffff);
53 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
54 items[items_counter].spec = &vlan_spec;
55 items[items_counter].mask = &vlan_mask;
59 add_ipv4(struct rte_flow_item *items,
60 uint8_t items_counter, struct additional_para para)
62 static struct rte_flow_item_ipv4 ipv4_specs[RTE_MAX_LCORE] __rte_cache_aligned;
63 static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
64 uint8_t ti = para.core_idx;
66 memset(&ipv4_specs[ti], 0, sizeof(struct rte_flow_item_ipv4));
67 memset(&ipv4_masks[ti], 0, sizeof(struct rte_flow_item_ipv4));
69 ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
70 ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);
72 items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
73 items[items_counter].spec = &ipv4_specs[ti];
74 items[items_counter].mask = &ipv4_masks[ti];
79 add_ipv6(struct rte_flow_item *items,
80 uint8_t items_counter, struct additional_para para)
82 static struct rte_flow_item_ipv6 ipv6_specs[RTE_MAX_LCORE] __rte_cache_aligned;
83 static struct rte_flow_item_ipv6 ipv6_masks[RTE_MAX_LCORE] __rte_cache_aligned;
84 uint8_t ti = para.core_idx;
86 memset(&ipv6_specs[ti], 0, sizeof(struct rte_flow_item_ipv6));
87 memset(&ipv6_masks[ti], 0, sizeof(struct rte_flow_item_ipv6));
90 memset(&ipv6_specs[ti].hdr.src_addr, para.src_ip,
91 sizeof(ipv6_specs->hdr.src_addr) / 2);
94 memset(&ipv6_masks[ti].hdr.src_addr, 0xff,
95 sizeof(ipv6_specs->hdr.src_addr));
97 items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
98 items[items_counter].spec = &ipv6_specs[ti];
99 items[items_counter].mask = &ipv6_masks[ti];
103 add_tcp(struct rte_flow_item *items,
104 uint8_t items_counter,
105 __rte_unused struct additional_para para)
107 static struct rte_flow_item_tcp tcp_spec;
108 static struct rte_flow_item_tcp tcp_mask;
110 memset(&tcp_spec, 0, sizeof(struct rte_flow_item_tcp));
111 memset(&tcp_mask, 0, sizeof(struct rte_flow_item_tcp));
113 items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
114 items[items_counter].spec = &tcp_spec;
115 items[items_counter].mask = &tcp_mask;
119 add_udp(struct rte_flow_item *items,
120 uint8_t items_counter,
121 __rte_unused struct additional_para para)
123 static struct rte_flow_item_udp udp_spec;
124 static struct rte_flow_item_udp udp_mask;
126 memset(&udp_spec, 0, sizeof(struct rte_flow_item_udp));
127 memset(&udp_mask, 0, sizeof(struct rte_flow_item_udp));
129 items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
130 items[items_counter].spec = &udp_spec;
131 items[items_counter].mask = &udp_mask;
135 add_vxlan(struct rte_flow_item *items,
136 uint8_t items_counter,
137 struct additional_para para)
139 static struct rte_flow_item_vxlan vxlan_specs[RTE_MAX_LCORE] __rte_cache_aligned;
140 static struct rte_flow_item_vxlan vxlan_masks[RTE_MAX_LCORE] __rte_cache_aligned;
141 uint8_t ti = para.core_idx;
145 vni_value = VNI_VALUE;
147 memset(&vxlan_specs[ti], 0, sizeof(struct rte_flow_item_vxlan));
148 memset(&vxlan_masks[ti], 0, sizeof(struct rte_flow_item_vxlan));
150 /* Set standard vxlan vni */
151 for (i = 0; i < 3; i++) {
152 vxlan_specs[ti].vni[2 - i] = vni_value >> (i * 8);
153 vxlan_masks[ti].vni[2 - i] = 0xff;
156 /* Standard vxlan flags */
157 vxlan_specs[ti].flags = 0x8;
159 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
160 items[items_counter].spec = &vxlan_specs[ti];
161 items[items_counter].mask = &vxlan_masks[ti];
165 add_vxlan_gpe(struct rte_flow_item *items,
166 uint8_t items_counter,
167 __rte_unused struct additional_para para)
169 static struct rte_flow_item_vxlan_gpe vxlan_gpe_specs[RTE_MAX_LCORE] __rte_cache_aligned;
170 static struct rte_flow_item_vxlan_gpe vxlan_gpe_masks[RTE_MAX_LCORE] __rte_cache_aligned;
171 uint8_t ti = para.core_idx;
175 vni_value = VNI_VALUE;
177 memset(&vxlan_gpe_specs[ti], 0, sizeof(struct rte_flow_item_vxlan_gpe));
178 memset(&vxlan_gpe_masks[ti], 0, sizeof(struct rte_flow_item_vxlan_gpe));
180 /* Set vxlan-gpe vni */
181 for (i = 0; i < 3; i++) {
182 vxlan_gpe_specs[ti].vni[2 - i] = vni_value >> (i * 8);
183 vxlan_gpe_masks[ti].vni[2 - i] = 0xff;
186 /* vxlan-gpe flags */
187 vxlan_gpe_specs[ti].flags = 0x0c;
189 items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
190 items[items_counter].spec = &vxlan_gpe_specs[ti];
191 items[items_counter].mask = &vxlan_gpe_masks[ti];
195 add_gre(struct rte_flow_item *items,
196 uint8_t items_counter,
197 __rte_unused struct additional_para para)
199 static struct rte_flow_item_gre gre_spec;
200 static struct rte_flow_item_gre gre_mask;
204 proto = RTE_ETHER_TYPE_TEB;
206 memset(&gre_spec, 0, sizeof(struct rte_flow_item_gre));
207 memset(&gre_mask, 0, sizeof(struct rte_flow_item_gre));
209 gre_spec.protocol = RTE_BE16(proto);
210 gre_mask.protocol = RTE_BE16(0xffff);
212 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
213 items[items_counter].spec = &gre_spec;
214 items[items_counter].mask = &gre_mask;
218 add_geneve(struct rte_flow_item *items,
219 uint8_t items_counter,
220 __rte_unused struct additional_para para)
222 static struct rte_flow_item_geneve geneve_specs[RTE_MAX_LCORE] __rte_cache_aligned;
223 static struct rte_flow_item_geneve geneve_masks[RTE_MAX_LCORE] __rte_cache_aligned;
224 uint8_t ti = para.core_idx;
228 vni_value = VNI_VALUE;
230 memset(&geneve_specs[ti], 0, sizeof(struct rte_flow_item_geneve));
231 memset(&geneve_masks[ti], 0, sizeof(struct rte_flow_item_geneve));
233 for (i = 0; i < 3; i++) {
234 geneve_specs[ti].vni[2 - i] = vni_value >> (i * 8);
235 geneve_masks[ti].vni[2 - i] = 0xff;
238 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
239 items[items_counter].spec = &geneve_specs[ti];
240 items[items_counter].mask = &geneve_masks[ti];
244 add_gtp(struct rte_flow_item *items,
245 uint8_t items_counter,
246 __rte_unused struct additional_para para)
248 static struct rte_flow_item_gtp gtp_spec;
249 static struct rte_flow_item_gtp gtp_mask;
253 teid_value = TEID_VALUE;
255 memset(>p_spec, 0, sizeof(struct rte_flow_item_gtp));
256 memset(>p_mask, 0, sizeof(struct rte_flow_item_gtp));
258 gtp_spec.teid = RTE_BE32(teid_value);
259 gtp_mask.teid = RTE_BE32(0xffffffff);
261 items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
262 items[items_counter].spec = >p_spec;
263 items[items_counter].mask = >p_mask;
267 add_meta_data(struct rte_flow_item *items,
268 uint8_t items_counter,
269 __rte_unused struct additional_para para)
271 static struct rte_flow_item_meta meta_spec;
272 static struct rte_flow_item_meta meta_mask;
278 memset(&meta_spec, 0, sizeof(struct rte_flow_item_meta));
279 memset(&meta_mask, 0, sizeof(struct rte_flow_item_meta));
281 meta_spec.data = RTE_BE32(data);
282 meta_mask.data = RTE_BE32(0xffffffff);
284 items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
285 items[items_counter].spec = &meta_spec;
286 items[items_counter].mask = &meta_mask;
291 add_meta_tag(struct rte_flow_item *items,
292 uint8_t items_counter,
293 __rte_unused struct additional_para para)
295 static struct rte_flow_item_tag tag_spec;
296 static struct rte_flow_item_tag tag_mask;
303 memset(&tag_spec, 0, sizeof(struct rte_flow_item_tag));
304 memset(&tag_mask, 0, sizeof(struct rte_flow_item_tag));
306 tag_spec.data = RTE_BE32(data);
307 tag_mask.data = RTE_BE32(0xffffffff);
308 tag_spec.index = index;
309 tag_mask.index = 0xff;
311 items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
312 items[items_counter].spec = &tag_spec;
313 items[items_counter].mask = &tag_mask;
317 add_icmpv4(struct rte_flow_item *items,
318 uint8_t items_counter,
319 __rte_unused struct additional_para para)
321 static struct rte_flow_item_icmp icmpv4_spec;
322 static struct rte_flow_item_icmp icmpv4_mask;
324 memset(&icmpv4_spec, 0, sizeof(struct rte_flow_item_icmp));
325 memset(&icmpv4_mask, 0, sizeof(struct rte_flow_item_icmp));
327 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP;
328 items[items_counter].spec = &icmpv4_spec;
329 items[items_counter].mask = &icmpv4_mask;
333 add_icmpv6(struct rte_flow_item *items,
334 uint8_t items_counter,
335 __rte_unused struct additional_para para)
337 static struct rte_flow_item_icmp6 icmpv6_spec;
338 static struct rte_flow_item_icmp6 icmpv6_mask;
340 memset(&icmpv6_spec, 0, sizeof(struct rte_flow_item_icmp6));
341 memset(&icmpv6_mask, 0, sizeof(struct rte_flow_item_icmp6));
343 items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6;
344 items[items_counter].spec = &icmpv6_spec;
345 items[items_counter].mask = &icmpv6_mask;
349 fill_items(struct rte_flow_item *items,
350 uint64_t *flow_items, uint32_t outer_ip_src,
353 uint8_t items_counter = 0;
355 struct additional_para additional_para_data = {
356 .src_ip = outer_ip_src,
357 .core_idx = core_idx,
360 /* Support outer items up to tunnel layer only. */
361 static const struct items_dict {
364 struct rte_flow_item *items,
365 uint8_t items_counter,
366 struct additional_para para
370 .mask = RTE_FLOW_ITEM_TYPE_META,
371 .funct = add_meta_data,
374 .mask = RTE_FLOW_ITEM_TYPE_TAG,
375 .funct = add_meta_tag,
378 .mask = RTE_FLOW_ITEM_TYPE_ETH,
382 .mask = RTE_FLOW_ITEM_TYPE_VLAN,
386 .mask = RTE_FLOW_ITEM_TYPE_IPV4,
390 .mask = RTE_FLOW_ITEM_TYPE_IPV6,
394 .mask = RTE_FLOW_ITEM_TYPE_TCP,
398 .mask = RTE_FLOW_ITEM_TYPE_UDP,
402 .mask = RTE_FLOW_ITEM_TYPE_VXLAN,
406 .mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
407 .funct = add_vxlan_gpe,
410 .mask = RTE_FLOW_ITEM_TYPE_GRE,
414 .mask = RTE_FLOW_ITEM_TYPE_GENEVE,
418 .mask = RTE_FLOW_ITEM_TYPE_GTP,
422 .mask = RTE_FLOW_ITEM_TYPE_ICMP,
426 .mask = RTE_FLOW_ITEM_TYPE_ICMP6,
431 for (j = 0; j < MAX_ITEMS_NUM; j++) {
432 if (flow_items[j] == 0)
434 for (i = 0; i < RTE_DIM(items_list); i++) {
436 FLOW_ITEM_MASK(items_list[i].mask)) == 0)
439 items, items_counter++,
446 items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;