dpdk.git: app/test-flow-perf/items_gen.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * This file contains the implementations of the item
 * related methods. Each item has a method that prepares
 * the item and adds it to the items array at the given index.
 */

#include <stdint.h>
#include <rte_flow.h>

#include "items_gen.h"
#include "config.h"

/* Storage for additional parameters for items */
struct additional_para {
        rte_be32_t src_ip;
        uint8_t core_idx;
};

static void
add_ether(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_eth eth_spec;
        static struct rte_flow_item_eth eth_mask;

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
        items[items_counter].spec = &eth_spec;
        items[items_counter].mask = &eth_mask;
}

static void
add_vlan(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_vlan vlan_spec = {
                .tci = RTE_BE16(VLAN_VALUE),
        };
        static struct rte_flow_item_vlan vlan_mask = {
                .tci = RTE_BE16(0xffff),
        };

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
        items[items_counter].spec = &vlan_spec;
        items[items_counter].mask = &vlan_mask;
}

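/*
 * Generators below that write their spec/mask values at run time
 * (IPv4/IPv6 source address, tunnel VNIs) keep that storage in static
 * per-lcore arrays indexed by para.core_idx, so each insertion lcore
 * writes only its own cache-aligned copy.
 */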
static void
add_ipv4(struct rte_flow_item *items,
        uint8_t items_counter, struct additional_para para)
{
        static struct rte_flow_item_ipv4 ipv4_specs[RTE_MAX_LCORE] __rte_cache_aligned;
        static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
        uint8_t ti = para.core_idx;

        ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
        ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
        items[items_counter].spec = &ipv4_specs[ti];
        items[items_counter].mask = &ipv4_masks[ti];
}

static void
add_ipv6(struct rte_flow_item *items,
        uint8_t items_counter, struct additional_para para)
{
        static struct rte_flow_item_ipv6 ipv6_specs[RTE_MAX_LCORE] __rte_cache_aligned;
        static struct rte_flow_item_ipv6 ipv6_masks[RTE_MAX_LCORE] __rte_cache_aligned;
        uint8_t ti = para.core_idx;
        uint8_t i;

        /* Set ipv6 src */
        for (i = 0; i < 16; i++) {
                /* Currently src_ip is limited to 32 bits */
                if (i < 4)
                        ipv6_specs[ti].hdr.src_addr[15 - i] = para.src_ip >> (i * 8);
                ipv6_masks[ti].hdr.src_addr[15 - i] = 0xff;
        }

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
        items[items_counter].spec = &ipv6_specs[ti];
        items[items_counter].mask = &ipv6_masks[ti];
}

static void
add_tcp(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_tcp tcp_spec;
        static struct rte_flow_item_tcp tcp_mask;

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
        items[items_counter].spec = &tcp_spec;
        items[items_counter].mask = &tcp_mask;
}

static void
add_udp(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_udp udp_spec;
        static struct rte_flow_item_udp udp_mask;

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
        items[items_counter].spec = &udp_spec;
        items[items_counter].mask = &udp_mask;
}

static void
add_vxlan(struct rte_flow_item *items,
        uint8_t items_counter,
        struct additional_para para)
{
        static struct rte_flow_item_vxlan vxlan_specs[RTE_MAX_LCORE] __rte_cache_aligned;
        static struct rte_flow_item_vxlan vxlan_masks[RTE_MAX_LCORE] __rte_cache_aligned;
        uint8_t ti = para.core_idx;
        uint32_t vni_value;
        uint8_t i;

        vni_value = VNI_VALUE;

        /* Set standard vxlan vni */
        for (i = 0; i < 3; i++) {
                vxlan_specs[ti].vni[2 - i] = vni_value >> (i * 8);
                vxlan_masks[ti].vni[2 - i] = 0xff;
        }

        /* Standard vxlan flags */
        vxlan_specs[ti].flags = 0x8;

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
        items[items_counter].spec = &vxlan_specs[ti];
        items[items_counter].mask = &vxlan_masks[ti];
}

static void
add_vxlan_gpe(struct rte_flow_item *items,
        uint8_t items_counter,
        struct additional_para para)
{
        static struct rte_flow_item_vxlan_gpe vxlan_gpe_specs[RTE_MAX_LCORE] __rte_cache_aligned;
        static struct rte_flow_item_vxlan_gpe vxlan_gpe_masks[RTE_MAX_LCORE] __rte_cache_aligned;
        uint8_t ti = para.core_idx;
        uint32_t vni_value;
        uint8_t i;

        vni_value = VNI_VALUE;

        /* Set vxlan-gpe vni */
        for (i = 0; i < 3; i++) {
                vxlan_gpe_specs[ti].vni[2 - i] = vni_value >> (i * 8);
                vxlan_gpe_masks[ti].vni[2 - i] = 0xff;
        }

        /* vxlan-gpe flags */
        vxlan_gpe_specs[ti].flags = 0x0c;

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
        items[items_counter].spec = &vxlan_gpe_specs[ti];
        items[items_counter].mask = &vxlan_gpe_masks[ti];
}

static void
add_gre(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_gre gre_spec = {
                .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
        };
        static struct rte_flow_item_gre gre_mask = {
                .protocol = RTE_BE16(0xffff),
        };

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
        items[items_counter].spec = &gre_spec;
        items[items_counter].mask = &gre_mask;
}

static void
add_geneve(struct rte_flow_item *items,
        uint8_t items_counter,
        struct additional_para para)
{
        static struct rte_flow_item_geneve geneve_specs[RTE_MAX_LCORE] __rte_cache_aligned;
        static struct rte_flow_item_geneve geneve_masks[RTE_MAX_LCORE] __rte_cache_aligned;
        uint8_t ti = para.core_idx;
        uint32_t vni_value;
        uint8_t i;

        vni_value = VNI_VALUE;

        /* Set geneve vni */
        for (i = 0; i < 3; i++) {
                geneve_specs[ti].vni[2 - i] = vni_value >> (i * 8);
                geneve_masks[ti].vni[2 - i] = 0xff;
        }

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
        items[items_counter].spec = &geneve_specs[ti];
        items[items_counter].mask = &geneve_masks[ti];
}

static void
add_gtp(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_gtp gtp_spec = {
                .teid = RTE_BE32(TEID_VALUE),
        };
        static struct rte_flow_item_gtp gtp_mask = {
                .teid = RTE_BE32(0xffffffff),
        };

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
        items[items_counter].spec = &gtp_spec;
        items[items_counter].mask = &gtp_mask;
}

static void
add_meta_data(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_meta meta_spec = {
                .data = RTE_BE32(META_DATA),
        };
        static struct rte_flow_item_meta meta_mask = {
                .data = RTE_BE32(0xffffffff),
        };

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
        items[items_counter].spec = &meta_spec;
        items[items_counter].mask = &meta_mask;
}

static void
add_meta_tag(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_tag tag_spec = {
                .data = RTE_BE32(META_DATA),
                .index = TAG_INDEX,
        };
        static struct rte_flow_item_tag tag_mask = {
                .data = RTE_BE32(0xffffffff),
                .index = 0xff,
        };

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
        items[items_counter].spec = &tag_spec;
        items[items_counter].mask = &tag_mask;
}

static void
add_icmpv4(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_icmp icmpv4_spec;
        static struct rte_flow_item_icmp icmpv4_mask;

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP;
        items[items_counter].spec = &icmpv4_spec;
        items[items_counter].mask = &icmpv4_mask;
}

static void
add_icmpv6(struct rte_flow_item *items,
        uint8_t items_counter,
        __rte_unused struct additional_para para)
{
        static struct rte_flow_item_icmp6 icmpv6_spec;
        static struct rte_flow_item_icmp6 icmpv6_mask;

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6;
        items[items_counter].spec = &icmpv6_spec;
        items[items_counter].mask = &icmpv6_mask;
}

void
fill_items(struct rte_flow_item *items,
        uint64_t *flow_items, uint32_t outer_ip_src,
        uint8_t core_idx)
{
        uint8_t items_counter = 0;
        uint8_t i, j;
        struct additional_para additional_para_data = {
                .src_ip = outer_ip_src,
                .core_idx = core_idx,
        };

        /* Support outer items up to tunnel layer only. */
        static const struct items_dict {
                uint64_t mask;
                void (*funct)(
                        struct rte_flow_item *items,
                        uint8_t items_counter,
                        struct additional_para para
                        );
        } items_list[] = {
                {
                        .mask = RTE_FLOW_ITEM_TYPE_META,
                        .funct = add_meta_data,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_TAG,
                        .funct = add_meta_tag,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_ETH,
                        .funct = add_ether,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_VLAN,
                        .funct = add_vlan,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_IPV4,
                        .funct = add_ipv4,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_IPV6,
                        .funct = add_ipv6,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_TCP,
                        .funct = add_tcp,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_UDP,
                        .funct = add_udp,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_VXLAN,
                        .funct = add_vxlan,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
                        .funct = add_vxlan_gpe,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_GRE,
                        .funct = add_gre,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_GENEVE,
                        .funct = add_geneve,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_GTP,
                        .funct = add_gtp,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_ICMP,
                        .funct = add_icmpv4,
                },
                {
                        .mask = RTE_FLOW_ITEM_TYPE_ICMP6,
                        .funct = add_icmpv6,
                },
        };

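        /*
         * Each non-zero entry in flow_items[] is a bitmask of requested
         * item types; append the first generator that matches it and move
         * to the next pattern slot, stopping at the first empty entry.
         */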
        for (j = 0; j < MAX_ITEMS_NUM; j++) {
                if (flow_items[j] == 0)
                        break;
                for (i = 0; i < RTE_DIM(items_list); i++) {
                        if ((flow_items[j] &
                                FLOW_ITEM_MASK(items_list[i].mask)) == 0)
                                continue;
                        items_list[i].funct(
                                items, items_counter++,
                                additional_para_data
                        );
                        break;
                }
        }

        items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;
}
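
/*
 * Usage sketch (illustrative, not part of this file): fill_items() is
 * expected to be called from the flow-generation path with a pattern
 * array that has room for the closing END item and a per-slot bitmask
 * of item types. The concrete values and the lcore-index expression
 * below are assumptions for illustration only.
 *
 *      struct rte_flow_item items[MAX_ITEMS_NUM] = {0};
 *      uint64_t flow_items[MAX_ITEMS_NUM] = {0};
 *
 *      flow_items[0] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH);
 *      flow_items[1] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4);
 *      flow_items[2] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP);
 *
 *      fill_items(items, flow_items, 0x0a000001,
 *                 rte_lcore_index(rte_lcore_id()));
 *
 * items[] then ends with RTE_FLOW_ITEM_TYPE_END and can be used as the
 * pattern argument of rte_flow_create() together with attributes and
 * actions built elsewhere.
 */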