/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * This file contains the implementations of the item
 * related methods. Each item has a method that prepares
 * the item and adds it into the items array at the given index.
 */

#include <stdint.h>
#include <string.h>
#include <rte_flow.h>

#include "items_gen.h"
#include "config.h"

/* Storage for additional parameters for items */
struct additional_para {
	uint32_t src_ip;
};
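
/* Add an ETH item at the given index; the zeroed spec/mask match any Ethernet header. */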
static void
add_ether(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_eth eth_spec;
	static struct rte_flow_item_eth eth_mask;

	memset(&eth_spec, 0, sizeof(struct rte_flow_item_eth));
	memset(&eth_mask, 0, sizeof(struct rte_flow_item_eth));

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
	items[items_counter].spec = &eth_spec;
	items[items_counter].mask = &eth_mask;
}
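
/* Add a VLAN item matching TCI == VLAN_VALUE with a full 16-bit TCI mask. */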
static void
add_vlan(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_vlan vlan_spec;
	static struct rte_flow_item_vlan vlan_mask;
	uint16_t vlan_value = VLAN_VALUE;

	memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
	memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));

	vlan_spec.tci = RTE_BE16(vlan_value);
	vlan_mask.tci = RTE_BE16(0xffff);

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
	items[items_counter].spec = &vlan_spec;
	items[items_counter].mask = &vlan_mask;
}
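
/* Add an IPv4 item matching the per-flow source address passed in para.src_ip. */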
static void
add_ipv4(struct rte_flow_item *items,
	uint8_t items_counter, struct additional_para para)
{
	static struct rte_flow_item_ipv4 ipv4_spec;
	static struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4_spec, 0, sizeof(struct rte_flow_item_ipv4));
	memset(&ipv4_mask, 0, sizeof(struct rte_flow_item_ipv4));

	ipv4_spec.hdr.src_addr = para.src_ip;
	ipv4_mask.hdr.src_addr = RTE_BE32(0xffffffff);

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
	items[items_counter].spec = &ipv4_spec;
	items[items_counter].mask = &ipv4_mask;
}
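
/* Add an IPv6 item: the first half of the source address is filled from para.src_ip and the whole address is masked. */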
static void
add_ipv6(struct rte_flow_item *items,
	uint8_t items_counter, struct additional_para para)
{
	static struct rte_flow_item_ipv6 ipv6_spec;
	static struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6_spec, 0, sizeof(struct rte_flow_item_ipv6));
	memset(&ipv6_mask, 0, sizeof(struct rte_flow_item_ipv6));

	/* Set ipv6 src address (first half only) */
	memset(&ipv6_spec.hdr.src_addr, para.src_ip,
		sizeof(ipv6_spec.hdr.src_addr) / 2);

	/* Full mask on the src address */
	memset(&ipv6_mask.hdr.src_addr, 0xff,
		sizeof(ipv6_spec.hdr.src_addr));

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
	items[items_counter].spec = &ipv6_spec;
	items[items_counter].mask = &ipv6_mask;
}
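
/* Add a TCP item with empty spec/mask (match any TCP header). */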
static void
add_tcp(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_tcp tcp_spec;
	static struct rte_flow_item_tcp tcp_mask;

	memset(&tcp_spec, 0, sizeof(struct rte_flow_item_tcp));
	memset(&tcp_mask, 0, sizeof(struct rte_flow_item_tcp));

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
	items[items_counter].spec = &tcp_spec;
	items[items_counter].mask = &tcp_mask;
}
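
/* Add a UDP item with empty spec/mask (match any UDP header). */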
static void
add_udp(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_udp udp_spec;
	static struct rte_flow_item_udp udp_mask;

	memset(&udp_spec, 0, sizeof(struct rte_flow_item_udp));
	memset(&udp_mask, 0, sizeof(struct rte_flow_item_udp));

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
	items[items_counter].spec = &udp_spec;
	items[items_counter].mask = &udp_mask;
}
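
/* Add a VXLAN item matching the 24-bit VNI == VNI_VALUE with the standard VXLAN flags. */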
static void
add_vxlan(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_vxlan vxlan_spec;
	static struct rte_flow_item_vxlan vxlan_mask;
	uint32_t vni_value;
	uint8_t i;

	vni_value = VNI_VALUE;

	memset(&vxlan_spec, 0, sizeof(struct rte_flow_item_vxlan));
	memset(&vxlan_mask, 0, sizeof(struct rte_flow_item_vxlan));

	/* Set standard vxlan vni */
	for (i = 0; i < 3; i++) {
		vxlan_spec.vni[2 - i] = vni_value >> (i * 8);
		vxlan_mask.vni[2 - i] = 0xff;
	}

	/* Standard vxlan flags */
	vxlan_spec.flags = 0x8;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	items[items_counter].spec = &vxlan_spec;
	items[items_counter].mask = &vxlan_mask;
}
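
/* Add a VXLAN-GPE item matching VNI == VNI_VALUE with the GPE flags byte. */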
static void
add_vxlan_gpe(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_vxlan_gpe vxlan_gpe_spec;
	static struct rte_flow_item_vxlan_gpe vxlan_gpe_mask;
	uint32_t vni_value;
	uint8_t i;

	vni_value = VNI_VALUE;

	memset(&vxlan_gpe_spec, 0, sizeof(struct rte_flow_item_vxlan_gpe));
	memset(&vxlan_gpe_mask, 0, sizeof(struct rte_flow_item_vxlan_gpe));

	/* Set vxlan-gpe vni */
	for (i = 0; i < 3; i++) {
		vxlan_gpe_spec.vni[2 - i] = vni_value >> (i * 8);
		vxlan_gpe_mask.vni[2 - i] = 0xff;
	}

	/* vxlan-gpe flags */
	vxlan_gpe_spec.flags = 0x0c;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
	items[items_counter].spec = &vxlan_gpe_spec;
	items[items_counter].mask = &vxlan_gpe_mask;
}
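
/* Add a GRE item matching protocol == TEB (Transparent Ethernet Bridging). */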
static void
add_gre(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_gre gre_spec;
	static struct rte_flow_item_gre gre_mask;
	uint16_t proto;

	proto = RTE_ETHER_TYPE_TEB;

	memset(&gre_spec, 0, sizeof(struct rte_flow_item_gre));
	memset(&gre_mask, 0, sizeof(struct rte_flow_item_gre));

	gre_spec.protocol = RTE_BE16(proto);
	gre_mask.protocol = RTE_BE16(0xffff);

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
	items[items_counter].spec = &gre_spec;
	items[items_counter].mask = &gre_mask;
}
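
/* Add a GENEVE item matching the 24-bit VNI == VNI_VALUE. */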
static void
add_geneve(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_geneve geneve_spec;
	static struct rte_flow_item_geneve geneve_mask;
	uint32_t vni_value;
	uint8_t i;

	vni_value = VNI_VALUE;

	memset(&geneve_spec, 0, sizeof(struct rte_flow_item_geneve));
	memset(&geneve_mask, 0, sizeof(struct rte_flow_item_geneve));

	for (i = 0; i < 3; i++) {
		geneve_spec.vni[2 - i] = vni_value >> (i * 8);
		geneve_mask.vni[2 - i] = 0xff;
	}

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
	items[items_counter].spec = &geneve_spec;
	items[items_counter].mask = &geneve_mask;
}
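
/* Add a GTP item matching TEID == TEID_VALUE with a full 32-bit mask. */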
static void
add_gtp(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_gtp gtp_spec;
	static struct rte_flow_item_gtp gtp_mask;
	uint32_t teid_value;

	teid_value = TEID_VALUE;

	memset(&gtp_spec, 0, sizeof(struct rte_flow_item_gtp));
	memset(&gtp_mask, 0, sizeof(struct rte_flow_item_gtp));

	gtp_spec.teid = RTE_BE32(teid_value);
	gtp_mask.teid = RTE_BE32(0xffffffff);

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
	items[items_counter].spec = &gtp_spec;
	items[items_counter].mask = &gtp_mask;
}
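
/* Add a META item matching the 32-bit metadata value. */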
static void
add_meta_data(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_meta meta_spec;
	static struct rte_flow_item_meta meta_mask;
	uint32_t data = META_DATA;

	memset(&meta_spec, 0, sizeof(struct rte_flow_item_meta));
	memset(&meta_mask, 0, sizeof(struct rte_flow_item_meta));

	meta_spec.data = RTE_BE32(data);
	meta_mask.data = RTE_BE32(0xffffffff);

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
	items[items_counter].spec = &meta_spec;
	items[items_counter].mask = &meta_mask;
}
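
/* Add a TAG item matching the tag data at the configured tag index. */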
static void
add_meta_tag(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_tag tag_spec;
	static struct rte_flow_item_tag tag_mask;
	uint32_t data = META_DATA;
	uint8_t index = TAG_INDEX;

	memset(&tag_spec, 0, sizeof(struct rte_flow_item_tag));
	memset(&tag_mask, 0, sizeof(struct rte_flow_item_tag));

	tag_spec.data = RTE_BE32(data);
	tag_mask.data = RTE_BE32(0xffffffff);
	tag_spec.index = index;
	tag_mask.index = 0xff;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
	items[items_counter].spec = &tag_spec;
	items[items_counter].mask = &tag_mask;
}
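
/*
 * Build the items array for one flow rule: walk the dictionary below and,
 * for every item type enabled in the flow_items bitmask, call its add_*
 * helper at the next free index, then terminate the array with an END item.
 */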
void
fill_items(struct rte_flow_item *items,
	uint64_t flow_items, uint32_t outer_ip_src)
{
	uint8_t items_counter = 0;
	uint8_t i;
	struct additional_para additional_para_data = {
		.src_ip = outer_ip_src,
	};

	/* Support outer items up to tunnel layer only. */
	static const struct items_dict {
		uint64_t mask;
		void (*funct)(
			struct rte_flow_item *items,
			uint8_t items_counter,
			struct additional_para para
			);
	} flows_items[] = {
		{ .mask = RTE_FLOW_ITEM_TYPE_META, .funct = add_meta_data },
		{ .mask = RTE_FLOW_ITEM_TYPE_TAG, .funct = add_meta_tag },
		{ .mask = RTE_FLOW_ITEM_TYPE_ETH, .funct = add_ether },
		{ .mask = RTE_FLOW_ITEM_TYPE_VLAN, .funct = add_vlan },
		{ .mask = RTE_FLOW_ITEM_TYPE_IPV4, .funct = add_ipv4 },
		{ .mask = RTE_FLOW_ITEM_TYPE_IPV6, .funct = add_ipv6 },
		{ .mask = RTE_FLOW_ITEM_TYPE_TCP, .funct = add_tcp },
		{ .mask = RTE_FLOW_ITEM_TYPE_UDP, .funct = add_udp },
		{ .mask = RTE_FLOW_ITEM_TYPE_VXLAN, .funct = add_vxlan },
		{ .mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, .funct = add_vxlan_gpe },
		{ .mask = RTE_FLOW_ITEM_TYPE_GRE, .funct = add_gre },
		{ .mask = RTE_FLOW_ITEM_TYPE_GENEVE, .funct = add_geneve },
		{ .mask = RTE_FLOW_ITEM_TYPE_GTP, .funct = add_gtp },
	};

	for (i = 0; i < RTE_DIM(flows_items); i++) {
		if ((flow_items & FLOW_ITEM_MASK(flows_items[i].mask)) == 0)
			continue;
		flows_items[i].funct(
			items, items_counter++,
			additional_para_data);
	}

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;
}