/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * This file contains the implementations of the item-related
 * methods. Each item has a method that prepares the item and
 * adds it to the items array at the given index.
 */

#include <stdint.h>
#include <rte_flow.h>

#include "items_gen.h"
#include "config.h"

/* Storage for additional parameters for items */
struct additional_para {
	uint32_t src_ip;
	uint8_t core_idx;
};

static void
add_ether(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_eth eth_spec;
	static struct rte_flow_item_eth eth_mask;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
	items[items_counter].spec = &eth_spec;
	items[items_counter].mask = &eth_mask;
}

static void
add_vlan(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_vlan vlan_spec = {
		.tci = RTE_BE16(VLAN_VALUE),
	};
	static struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0xffff),
	};

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
	items[items_counter].spec = &vlan_spec;
	items[items_counter].mask = &vlan_mask;
}

static void
add_ipv4(struct rte_flow_item *items,
	uint8_t items_counter, struct additional_para para)
{
	static struct rte_flow_item_ipv4 ipv4_specs[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
	uint8_t ti = para.core_idx;

	ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
	ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
	items[items_counter].spec = &ipv4_specs[ti];
	items[items_counter].mask = &ipv4_masks[ti];
}

static void
add_ipv6(struct rte_flow_item *items,
	uint8_t items_counter, struct additional_para para)
{
	static struct rte_flow_item_ipv6 ipv6_specs[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item_ipv6 ipv6_masks[RTE_MAX_LCORE] __rte_cache_aligned;
	uint8_t ti = para.core_idx;
	uint8_t i;

	/* Set ipv6 src address */
	for (i = 0; i < 16; i++) {
		/* Currently src_ip is limited to 32 bit */
		if (i < 4)
			ipv6_specs[ti].hdr.src_addr[15 - i] = para.src_ip >> (i * 8);
		ipv6_masks[ti].hdr.src_addr[15 - i] = 0xff;
	}

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
	items[items_counter].spec = &ipv6_specs[ti];
	items[items_counter].mask = &ipv6_masks[ti];
}

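/*
 * Illustrative sketch, not part of the original file: the loop above
 * copies the 32-bit host-order src_ip into the last four bytes of the
 * IPv6 source address (most significant byte first) and matches the
 * full 128-bit address. The helper below replays the same byte
 * placement in isolation; its name is a hypothetical example only.
 */
#if 0
static void
example_ipv6_src_from_u32(uint8_t dst[16], uint32_t src_ip)
{
	uint8_t i;

	for (i = 0; i < 16; i++)
		dst[i] = 0;
	for (i = 0; i < 4; i++)
		dst[15 - i] = (uint8_t)(src_ip >> (i * 8));
	/* src_ip == 0x0a0b0c0d ends up as dst[12..15] == 0a 0b 0c 0d,
	 * i.e. the address ::a0b:c0d in textual form.
	 */
}
#endif
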
static void
add_tcp(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_tcp tcp_spec;
	static struct rte_flow_item_tcp tcp_mask;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
	items[items_counter].spec = &tcp_spec;
	items[items_counter].mask = &tcp_mask;
}

static void
add_udp(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_udp udp_spec;
	static struct rte_flow_item_udp udp_mask;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
	items[items_counter].spec = &udp_spec;
	items[items_counter].mask = &udp_mask;
}

static void
add_vxlan(struct rte_flow_item *items,
	uint8_t items_counter,
	struct additional_para para)
{
	static struct rte_flow_item_vxlan vxlan_specs[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item_vxlan vxlan_masks[RTE_MAX_LCORE] __rte_cache_aligned;
	uint8_t ti = para.core_idx;
	uint32_t vni_value;
	uint8_t i;

	vni_value = VNI_VALUE;

	/* Set standard vxlan vni */
	for (i = 0; i < 3; i++) {
		vxlan_specs[ti].vni[2 - i] = vni_value >> (i * 8);
		vxlan_masks[ti].vni[2 - i] = 0xff;
	}

	/* Standard vxlan flags */
	vxlan_specs[ti].flags = 0x8;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	items[items_counter].spec = &vxlan_specs[ti];
	items[items_counter].mask = &vxlan_masks[ti];
}

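/*
 * Illustrative sketch, not part of the original file: the 24-bit VNI is
 * laid out in network byte order across vni[0..2], and flags value 0x8
 * is the VXLAN "I" (valid VNI) bit from RFC 7348. The snippet below
 * just replays the packing with a made-up VNI value.
 */
#if 0
static void
example_vxlan_vni_pack(void)
{
	uint32_t vni_value = 0x123456;	/* hypothetical VNI */
	uint8_t vni[3];
	uint8_t i;

	for (i = 0; i < 3; i++)
		vni[2 - i] = (uint8_t)(vni_value >> (i * 8));
	/* vni[] is now { 0x12, 0x34, 0x56 }. */
}
#endif
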
static void
add_vxlan_gpe(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_vxlan_gpe vxlan_gpe_specs[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item_vxlan_gpe vxlan_gpe_masks[RTE_MAX_LCORE] __rte_cache_aligned;
	uint8_t ti = para.core_idx;
	uint32_t vni_value;
	uint8_t i;

	vni_value = VNI_VALUE;

	/* Set vxlan-gpe vni */
	for (i = 0; i < 3; i++) {
		vxlan_gpe_specs[ti].vni[2 - i] = vni_value >> (i * 8);
		vxlan_gpe_masks[ti].vni[2 - i] = 0xff;
	}

	/* vxlan-gpe flags */
	vxlan_gpe_specs[ti].flags = 0x0c;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
	items[items_counter].spec = &vxlan_gpe_specs[ti];
	items[items_counter].mask = &vxlan_gpe_masks[ti];
}

static void
add_gre(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_gre gre_spec = {
		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
	};
	static struct rte_flow_item_gre gre_mask = {
		.protocol = RTE_BE16(0xffff),
	};

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
	items[items_counter].spec = &gre_spec;
	items[items_counter].mask = &gre_mask;
}

static void
add_geneve(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_geneve geneve_specs[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item_geneve geneve_masks[RTE_MAX_LCORE] __rte_cache_aligned;
	uint8_t ti = para.core_idx;
	uint32_t vni_value;
	uint8_t i;

	vni_value = VNI_VALUE;

	/* Set geneve vni */
	for (i = 0; i < 3; i++) {
		geneve_specs[ti].vni[2 - i] = vni_value >> (i * 8);
		geneve_masks[ti].vni[2 - i] = 0xff;
	}

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
	items[items_counter].spec = &geneve_specs[ti];
	items[items_counter].mask = &geneve_masks[ti];
}

static void
add_gtp(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_gtp gtp_spec = {
		.teid = RTE_BE32(TEID_VALUE),
	};
	static struct rte_flow_item_gtp gtp_mask = {
		.teid = RTE_BE32(0xffffffff),
	};

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
	items[items_counter].spec = &gtp_spec;
	items[items_counter].mask = &gtp_mask;
}

static void
add_meta_data(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_meta meta_spec = {
		.data = RTE_BE32(META_DATA),
	};
	static struct rte_flow_item_meta meta_mask = {
		.data = RTE_BE32(0xffffffff),
	};

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
	items[items_counter].spec = &meta_spec;
	items[items_counter].mask = &meta_mask;
}

static void
add_meta_tag(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_tag tag_spec = {
		.data = RTE_BE32(META_DATA),
	};
	static struct rte_flow_item_tag tag_mask = {
		.data = RTE_BE32(0xffffffff),
	};

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
	items[items_counter].spec = &tag_spec;
	items[items_counter].mask = &tag_mask;
}

static void
add_icmpv4(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_icmp icmpv4_spec;
	static struct rte_flow_item_icmp icmpv4_mask;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP;
	items[items_counter].spec = &icmpv4_spec;
	items[items_counter].mask = &icmpv4_mask;
}

static void
add_icmpv6(struct rte_flow_item *items,
	uint8_t items_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_item_icmp6 icmpv6_spec;
	static struct rte_flow_item_icmp6 icmpv6_mask;

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6;
	items[items_counter].spec = &icmpv6_spec;
	items[items_counter].mask = &icmpv6_mask;
}

void
fill_items(struct rte_flow_item *items,
	uint64_t *flow_items, uint32_t outer_ip_src,
	uint8_t core_idx)
{
	uint8_t items_counter = 0;
	uint8_t i, j;
	struct additional_para additional_para_data = {
		.src_ip = outer_ip_src,
		.core_idx = core_idx,
	};

	/* Support outer items up to tunnel layer only. */
	static const struct items_dict {
		uint64_t mask;
		void (*funct)(
			struct rte_flow_item *items,
			uint8_t items_counter,
			struct additional_para para
			);
	} items_list[] = {
		{ .mask = RTE_FLOW_ITEM_TYPE_META, .funct = add_meta_data },
		{ .mask = RTE_FLOW_ITEM_TYPE_TAG, .funct = add_meta_tag },
		{ .mask = RTE_FLOW_ITEM_TYPE_ETH, .funct = add_ether },
		{ .mask = RTE_FLOW_ITEM_TYPE_VLAN, .funct = add_vlan },
		{ .mask = RTE_FLOW_ITEM_TYPE_IPV4, .funct = add_ipv4 },
		{ .mask = RTE_FLOW_ITEM_TYPE_IPV6, .funct = add_ipv6 },
		{ .mask = RTE_FLOW_ITEM_TYPE_TCP, .funct = add_tcp },
		{ .mask = RTE_FLOW_ITEM_TYPE_UDP, .funct = add_udp },
		{ .mask = RTE_FLOW_ITEM_TYPE_VXLAN, .funct = add_vxlan },
		{ .mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE, .funct = add_vxlan_gpe },
		{ .mask = RTE_FLOW_ITEM_TYPE_GRE, .funct = add_gre },
		{ .mask = RTE_FLOW_ITEM_TYPE_GENEVE, .funct = add_geneve },
		{ .mask = RTE_FLOW_ITEM_TYPE_GTP, .funct = add_gtp },
		{ .mask = RTE_FLOW_ITEM_TYPE_ICMP, .funct = add_icmpv4 },
		{ .mask = RTE_FLOW_ITEM_TYPE_ICMP6, .funct = add_icmpv6 },
	};

	for (j = 0; j < MAX_ITEMS_NUM; j++) {
		if (flow_items[j] == 0)
			break;
		for (i = 0; i < RTE_DIM(items_list); i++) {
			if ((flow_items[j] &
				FLOW_ITEM_MASK(items_list[i].mask)) == 0)
				continue;
			items_list[i].funct(
				items, items_counter++,
				additional_para_data);
			break;
		}
	}

	items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;
}

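/*
 * Illustrative usage sketch, not part of the original file: the caller
 * is expected to put one FLOW_ITEM_MASK() bit per pattern level into
 * flow_items[], since the lookup above adds a single item per level and
 * stops at the first empty entry. The helper name below and the exact
 * call flow are assumptions for illustration only.
 */
#if 0
static void
example_fill_eth_ipv4_udp(uint32_t src_ip, uint8_t core_idx)
{
	struct rte_flow_item items[MAX_ITEMS_NUM] = { 0 };
	uint64_t flow_items[MAX_ITEMS_NUM] = { 0 };

	flow_items[0] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH);
	flow_items[1] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4);
	flow_items[2] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP);

	fill_items(items, flow_items, src_ip, core_idx);
	/* items[] now holds ETH, IPV4 and UDP entries followed by an
	 * RTE_FLOW_ITEM_TYPE_END terminator, ready to be used as the
	 * pattern of an rte_flow_create() call.
	 */
}
#endif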