0950023608c8f703ed9f7a67729cf9f35f19c08f
[dpdk.git] / app / test-flow-perf / items_gen.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  *
 * This file contains the implementations of the item-related
 * methods. Each item has a method that prepares the item and
 * adds it into the items array at the given index.
7  */
8
#include <stdint.h>
#include <string.h>

#include <rte_flow.h>

#include "items_gen.h"
#include "config.h"
14
/* Storage for additional parameters for items */
struct additional_para {
	/* Outer IPv4 source address to match on.
	 * NOTE(review): declared rte_be32_t, but fill_items() stores the
	 * host-order uint32_t outer_ip_src here and add_ipv4() converts it
	 * with RTE_BE32() — the field effectively holds a CPU-order value;
	 * confirm the intended type. */
	rte_be32_t src_ip;
	/* Index of the generating core; selects the per-core spec/mask
	 * slot in the items that keep RTE_MAX_LCORE-sized static arrays. */
	uint8_t core_idx;
};
20
21 static void
22 add_ether(struct rte_flow_item *items,
23         uint8_t items_counter,
24         __rte_unused struct additional_para para)
25 {
26         static struct rte_flow_item_eth eth_spec;
27         static struct rte_flow_item_eth eth_mask;
28
29         memset(&eth_spec, 0, sizeof(struct rte_flow_item_eth));
30         memset(&eth_mask, 0, sizeof(struct rte_flow_item_eth));
31
32         items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
33         items[items_counter].spec = &eth_spec;
34         items[items_counter].mask = &eth_mask;
35 }
36
37 static void
38 add_vlan(struct rte_flow_item *items,
39         uint8_t items_counter,
40         __rte_unused struct additional_para para)
41 {
42         static struct rte_flow_item_vlan vlan_spec;
43         static struct rte_flow_item_vlan vlan_mask;
44
45         uint16_t vlan_value = VLAN_VALUE;
46
47         memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
48         memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
49
50         vlan_spec.tci = RTE_BE16(vlan_value);
51         vlan_mask.tci = RTE_BE16(0xffff);
52
53         items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
54         items[items_counter].spec = &vlan_spec;
55         items[items_counter].mask = &vlan_mask;
56 }
57
/*
 * Append an IPv4 item at items[items_counter], matching exactly on
 * the source address taken from para.src_ip.
 *
 * Per-core spec/mask slots (indexed by para.core_idx) keep concurrent
 * flow-generation threads from overwriting each other's data.
 *
 * NOTE(review): para.src_ip is declared rte_be32_t yet RTE_BE32() is
 * applied again here — if a caller ever passed a genuinely big-endian
 * value this would double-swap. fill_items() passes a host-order
 * uint32_t, so the conversion appears intended; the field type in
 * struct additional_para looks wrong — confirm.
 */
static void
add_ipv4(struct rte_flow_item *items,
	uint8_t items_counter, struct additional_para para)
{
	static struct rte_flow_item_ipv4 ipv4_specs[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
	uint8_t ti = para.core_idx;

	/* Static storage persists across calls; reset this core's slot. */
	memset(&ipv4_specs[ti], 0, sizeof(struct rte_flow_item_ipv4));
	memset(&ipv4_masks[ti], 0, sizeof(struct rte_flow_item_ipv4));

	ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
	/* Full-width mask: exact match on the source address only. */
	ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
	items[items_counter].spec = &ipv4_specs[ti];
	items[items_counter].mask = &ipv4_masks[ti];
}
76
77
/*
 * Append an IPv6 item at items[items_counter].
 *
 * Per-core spec/mask slots (indexed by para.core_idx) keep concurrent
 * flow-generation threads from overwriting each other's data.
 *
 * The source address is synthesized with memset: the low byte of
 * para.src_ip (memset truncates its fill value to unsigned char) is
 * replicated over the first half (8 bytes) of the 16-byte address;
 * the upper half stays zero. The mask still covers all 16 bytes, so
 * generated flows differ only through that repeated byte.
 */
static void
add_ipv6(struct rte_flow_item *items,
	uint8_t items_counter, struct additional_para para)
{
	static struct rte_flow_item_ipv6 ipv6_specs[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item_ipv6 ipv6_masks[RTE_MAX_LCORE] __rte_cache_aligned;
	uint8_t ti = para.core_idx;

	memset(&ipv6_specs[ti], 0, sizeof(struct rte_flow_item_ipv6));
	memset(&ipv6_masks[ti], 0, sizeof(struct rte_flow_item_ipv6));

	/** Set ipv6 src **/
	memset(&ipv6_specs[ti].hdr.src_addr, para.src_ip,
		sizeof(ipv6_specs->hdr.src_addr) / 2);

	/** Full mask **/
	memset(&ipv6_masks[ti].hdr.src_addr, 0xff,
		sizeof(ipv6_specs->hdr.src_addr));

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
	items[items_counter].spec = &ipv6_specs[ti];
	items[items_counter].mask = &ipv6_masks[ti];
}
101
102 static void
103 add_tcp(struct rte_flow_item *items,
104         uint8_t items_counter,
105         __rte_unused struct additional_para para)
106 {
107         static struct rte_flow_item_tcp tcp_spec;
108         static struct rte_flow_item_tcp tcp_mask;
109
110         memset(&tcp_spec, 0, sizeof(struct rte_flow_item_tcp));
111         memset(&tcp_mask, 0, sizeof(struct rte_flow_item_tcp));
112
113         items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
114         items[items_counter].spec = &tcp_spec;
115         items[items_counter].mask = &tcp_mask;
116 }
117
118 static void
119 add_udp(struct rte_flow_item *items,
120         uint8_t items_counter,
121         __rte_unused struct additional_para para)
122 {
123         static struct rte_flow_item_udp udp_spec;
124         static struct rte_flow_item_udp udp_mask;
125
126         memset(&udp_spec, 0, sizeof(struct rte_flow_item_udp));
127         memset(&udp_mask, 0, sizeof(struct rte_flow_item_udp));
128
129         items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
130         items[items_counter].spec = &udp_spec;
131         items[items_counter].mask = &udp_mask;
132 }
133
134 static void
135 add_vxlan(struct rte_flow_item *items,
136         uint8_t items_counter,
137         struct additional_para para)
138 {
139         static struct rte_flow_item_vxlan vxlan_specs[RTE_MAX_LCORE] __rte_cache_aligned;
140         static struct rte_flow_item_vxlan vxlan_masks[RTE_MAX_LCORE] __rte_cache_aligned;
141         uint8_t ti = para.core_idx;
142         uint32_t vni_value;
143         uint8_t i;
144
145         vni_value = VNI_VALUE;
146
147         memset(&vxlan_specs[ti], 0, sizeof(struct rte_flow_item_vxlan));
148         memset(&vxlan_masks[ti], 0, sizeof(struct rte_flow_item_vxlan));
149
150         /* Set standard vxlan vni */
151         for (i = 0; i < 3; i++) {
152                 vxlan_specs[ti].vni[2 - i] = vni_value >> (i * 8);
153                 vxlan_masks[ti].vni[2 - i] = 0xff;
154         }
155
156         /* Standard vxlan flags */
157         vxlan_specs[ti].flags = 0x8;
158
159         items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
160         items[items_counter].spec = &vxlan_specs[ti];
161         items[items_counter].mask = &vxlan_masks[ti];
162 }
163
164 static void
165 add_vxlan_gpe(struct rte_flow_item *items,
166         uint8_t items_counter,
167         __rte_unused struct additional_para para)
168 {
169         static struct rte_flow_item_vxlan_gpe vxlan_gpe_specs[RTE_MAX_LCORE] __rte_cache_aligned;
170         static struct rte_flow_item_vxlan_gpe vxlan_gpe_masks[RTE_MAX_LCORE] __rte_cache_aligned;
171         uint8_t ti = para.core_idx;
172         uint32_t vni_value;
173         uint8_t i;
174
175         vni_value = VNI_VALUE;
176
177         memset(&vxlan_gpe_specs[ti], 0, sizeof(struct rte_flow_item_vxlan_gpe));
178         memset(&vxlan_gpe_masks[ti], 0, sizeof(struct rte_flow_item_vxlan_gpe));
179
180         /* Set vxlan-gpe vni */
181         for (i = 0; i < 3; i++) {
182                 vxlan_gpe_specs[ti].vni[2 - i] = vni_value >> (i * 8);
183                 vxlan_gpe_masks[ti].vni[2 - i] = 0xff;
184         }
185
186         /* vxlan-gpe flags */
187         vxlan_gpe_specs[ti].flags = 0x0c;
188
189         items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
190         items[items_counter].spec = &vxlan_gpe_specs[ti];
191         items[items_counter].mask = &vxlan_gpe_masks[ti];
192 }
193
194 static void
195 add_gre(struct rte_flow_item *items,
196         uint8_t items_counter,
197         __rte_unused struct additional_para para)
198 {
199         static struct rte_flow_item_gre gre_spec;
200         static struct rte_flow_item_gre gre_mask;
201
202         uint16_t proto;
203
204         proto = RTE_ETHER_TYPE_TEB;
205
206         memset(&gre_spec, 0, sizeof(struct rte_flow_item_gre));
207         memset(&gre_mask, 0, sizeof(struct rte_flow_item_gre));
208
209         gre_spec.protocol = RTE_BE16(proto);
210         gre_mask.protocol = RTE_BE16(0xffff);
211
212         items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
213         items[items_counter].spec = &gre_spec;
214         items[items_counter].mask = &gre_mask;
215 }
216
217 static void
218 add_geneve(struct rte_flow_item *items,
219         uint8_t items_counter,
220         __rte_unused struct additional_para para)
221 {
222         static struct rte_flow_item_geneve geneve_specs[RTE_MAX_LCORE] __rte_cache_aligned;
223         static struct rte_flow_item_geneve geneve_masks[RTE_MAX_LCORE] __rte_cache_aligned;
224         uint8_t ti = para.core_idx;
225         uint32_t vni_value;
226         uint8_t i;
227
228         vni_value = VNI_VALUE;
229
230         memset(&geneve_specs[ti], 0, sizeof(struct rte_flow_item_geneve));
231         memset(&geneve_masks[ti], 0, sizeof(struct rte_flow_item_geneve));
232
233         for (i = 0; i < 3; i++) {
234                 geneve_specs[ti].vni[2 - i] = vni_value >> (i * 8);
235                 geneve_masks[ti].vni[2 - i] = 0xff;
236         }
237
238         items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
239         items[items_counter].spec = &geneve_specs[ti];
240         items[items_counter].mask = &geneve_masks[ti];
241 }
242
243 static void
244 add_gtp(struct rte_flow_item *items,
245         uint8_t items_counter,
246         __rte_unused struct additional_para para)
247 {
248         static struct rte_flow_item_gtp gtp_spec;
249         static struct rte_flow_item_gtp gtp_mask;
250
251         uint32_t teid_value;
252
253         teid_value = TEID_VALUE;
254
255         memset(&gtp_spec, 0, sizeof(struct rte_flow_item_gtp));
256         memset(&gtp_mask, 0, sizeof(struct rte_flow_item_gtp));
257
258         gtp_spec.teid = RTE_BE32(teid_value);
259         gtp_mask.teid = RTE_BE32(0xffffffff);
260
261         items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
262         items[items_counter].spec = &gtp_spec;
263         items[items_counter].mask = &gtp_mask;
264 }
265
266 static void
267 add_meta_data(struct rte_flow_item *items,
268         uint8_t items_counter,
269         __rte_unused struct additional_para para)
270 {
271         static struct rte_flow_item_meta meta_spec;
272         static struct rte_flow_item_meta meta_mask;
273
274         uint32_t data;
275
276         data = META_DATA;
277
278         memset(&meta_spec, 0, sizeof(struct rte_flow_item_meta));
279         memset(&meta_mask, 0, sizeof(struct rte_flow_item_meta));
280
281         meta_spec.data = RTE_BE32(data);
282         meta_mask.data = RTE_BE32(0xffffffff);
283
284         items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
285         items[items_counter].spec = &meta_spec;
286         items[items_counter].mask = &meta_mask;
287 }
288
289
290 static void
291 add_meta_tag(struct rte_flow_item *items,
292         uint8_t items_counter,
293         __rte_unused struct additional_para para)
294 {
295         static struct rte_flow_item_tag tag_spec;
296         static struct rte_flow_item_tag tag_mask;
297         uint32_t data;
298         uint8_t index;
299
300         data = META_DATA;
301         index = TAG_INDEX;
302
303         memset(&tag_spec, 0, sizeof(struct rte_flow_item_tag));
304         memset(&tag_mask, 0, sizeof(struct rte_flow_item_tag));
305
306         tag_spec.data = RTE_BE32(data);
307         tag_mask.data = RTE_BE32(0xffffffff);
308         tag_spec.index = index;
309         tag_mask.index = 0xff;
310
311         items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
312         items[items_counter].spec = &tag_spec;
313         items[items_counter].mask = &tag_mask;
314 }
315
316 static void
317 add_icmpv4(struct rte_flow_item *items,
318         uint8_t items_counter,
319         __rte_unused struct additional_para para)
320 {
321         static struct rte_flow_item_icmp icmpv4_spec;
322         static struct rte_flow_item_icmp icmpv4_mask;
323
324         memset(&icmpv4_spec, 0, sizeof(struct rte_flow_item_icmp));
325         memset(&icmpv4_mask, 0, sizeof(struct rte_flow_item_icmp));
326
327         items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP;
328         items[items_counter].spec = &icmpv4_spec;
329         items[items_counter].mask = &icmpv4_mask;
330 }
331
332 static void
333 add_icmpv6(struct rte_flow_item *items,
334         uint8_t items_counter,
335         __rte_unused struct additional_para para)
336 {
337         static struct rte_flow_item_icmp6 icmpv6_spec;
338         static struct rte_flow_item_icmp6 icmpv6_mask;
339
340         memset(&icmpv6_spec, 0, sizeof(struct rte_flow_item_icmp6));
341         memset(&icmpv6_mask, 0, sizeof(struct rte_flow_item_icmp6));
342
343         items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6;
344         items[items_counter].spec = &icmpv6_spec;
345         items[items_counter].mask = &icmpv6_mask;
346 }
347
/*
 * Build the pattern (item) array for one flow rule.
 *
 * items        - output array; filled from index 0 and always terminated
 *                with RTE_FLOW_ITEM_TYPE_END.
 * flow_items   - up to MAX_ITEMS_NUM entries; a zero entry ends the list.
 *                Each entry is matched against items_list in the fixed
 *                order below, and the FIRST matching type is appended —
 *                at most one item per entry. The list order therefore
 *                defines the protocol precedence of the final pattern.
 * outer_ip_src - outer IPv4 source address for this flow (add_ipv6 also
 *                derives its address bytes from it).
 * core_idx     - index of the generating core; selects the per-core
 *                spec/mask slot inside the add_* helpers.
 */
void
fill_items(struct rte_flow_item *items,
	uint64_t *flow_items, uint32_t outer_ip_src,
	uint8_t core_idx)
{
	uint8_t items_counter = 0;
	uint8_t i, j;
	struct additional_para additional_para_data = {
		.src_ip = outer_ip_src,
		.core_idx = core_idx,
	};

	/* Support outer items up to tunnel layer only. */
	static const struct items_dict {
		/* Item type; converted with FLOW_ITEM_MASK() when compared
		 * against a flow_items entry (presumably a one-bit-per-type
		 * mask — see config.h). */
		uint64_t mask;
		void (*funct)(
			struct rte_flow_item *items,
			uint8_t items_counter,
			struct additional_para para
			);
	} items_list[] = {
		{
			.mask = RTE_FLOW_ITEM_TYPE_META,
			.funct = add_meta_data,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_TAG,
			.funct = add_meta_tag,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_ETH,
			.funct = add_ether,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_VLAN,
			.funct = add_vlan,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_IPV4,
			.funct = add_ipv4,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_IPV6,
			.funct = add_ipv6,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_TCP,
			.funct = add_tcp,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_UDP,
			.funct = add_udp,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_VXLAN,
			.funct = add_vxlan,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
			.funct = add_vxlan_gpe,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_GRE,
			.funct = add_gre,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_GENEVE,
			.funct = add_geneve,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_GTP,
			.funct = add_gtp,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_ICMP,
			.funct = add_icmpv4,
		},
		{
			.mask = RTE_FLOW_ITEM_TYPE_ICMP6,
			.funct = add_icmpv6,
		},
	};

	for (j = 0; j < MAX_ITEMS_NUM; j++) {
		/* A zero entry terminates the requested-items list. */
		if (flow_items[j] == 0)
			break;
		for (i = 0; i < RTE_DIM(items_list); i++) {
			if ((flow_items[j] &
				FLOW_ITEM_MASK(items_list[i].mask)) == 0)
				continue;
			items_list[i].funct(
				items, items_counter++,
				additional_para_data
			);
			/* First match wins: one item per flow_items entry. */
			break;
		}
	}

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;
}