net/ice: support flow priority for DCF switch filter
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
29 #define MAX_QGRP_NUM_TYPE       7
30 #define MAX_INPUT_SET_BYTE      32
31 #define ICE_PPP_IPV4_PROTO      0x0021
32 #define ICE_PPP_IPV6_PROTO      0x0057
33 #define ICE_IPV4_PROTO_NVGRE    0x002F
34
35 #define ICE_SW_INSET_ETHER ( \
36         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
39         ICE_INSET_VLAN_INNER)
40 #define ICE_SW_INSET_MAC_QINQ  ( \
41         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
42         ICE_INSET_VLAN_OUTER)
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
48 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
49         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
50         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
51         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
52 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
53         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
54         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
55         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
56 #define ICE_SW_INSET_MAC_IPV6 ( \
57         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
58         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
59         ICE_INSET_IPV6_NEXT_HDR)
60 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
61         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
62 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
63         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
65         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
66 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
67         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
68         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
69         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
70 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
71         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
72         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
74         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
77         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
79         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
81         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
83         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
84 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
85         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
87         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
88 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
89         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
91         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
93         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94         ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
95 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
96         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
97         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
98         ICE_INSET_TUN_IPV4_TOS)
99 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
100         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
101         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
102         ICE_INSET_TUN_IPV4_TOS)
103 #define ICE_SW_INSET_MAC_PPPOE  ( \
104         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
105         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
106 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
107         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
108         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
109         ICE_INSET_PPPOE_PROTO)
110 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
111         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
112 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
113         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
114 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
115         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
116 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
117         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
118 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
119         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
120 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
121         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
122 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
123         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
124 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
125         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
126 #define ICE_SW_INSET_MAC_IPV4_AH ( \
127         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
128 #define ICE_SW_INSET_MAC_IPV6_AH ( \
129         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
130 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
131         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
132 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
133         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
134 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
135         ICE_SW_INSET_MAC_IPV4 | \
136         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
137 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
138         ICE_SW_INSET_MAC_IPV6 | \
139         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
140 #define ICE_SW_INSET_MAC_IPV4_GTPU ( \
141         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
142 #define ICE_SW_INSET_MAC_IPV6_GTPU ( \
143         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
144 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 ( \
145         ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
146         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
147 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4 ( \
148         ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 | ICE_INSET_GTPU_QFI)
149 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 ( \
150         ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
151         ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST)
152 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6 ( \
153         ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 | ICE_INSET_GTPU_QFI)
154 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 ( \
155         ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
156         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
157 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4 ( \
158         ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 | ICE_INSET_GTPU_QFI)
159 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 ( \
160         ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
161         ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST)
162 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6 ( \
163         ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 | ICE_INSET_GTPU_QFI)
164 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_UDP ( \
165         ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 | \
166         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
167 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_UDP ( \
168         ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4 | \
169         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
170 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_TCP ( \
171         ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 | \
172         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
173 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_TCP ( \
174         ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4 | \
175         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
176 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_UDP ( \
177         ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 | \
178         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
179 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_UDP ( \
180         ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6 | \
181         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
182 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_TCP ( \
183         ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 | \
184         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
185 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_TCP ( \
186         ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6 | \
187         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
188 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_UDP ( \
189         ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 | \
190         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
191 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_UDP ( \
192         ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4 | \
193         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
194 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_TCP ( \
195         ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 | \
196         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
197 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_TCP ( \
198         ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4 | \
199         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
200 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP ( \
201         ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 | \
202         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
203 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_UDP ( \
204         ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6 | \
205         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
206 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_TCP ( \
207         ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 | \
208         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
209 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_TCP ( \
210         ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6 | \
211         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
212
/* Parsed switch-rule metadata handed from the flow parsers to
 * ice_switch_create(): the advanced-rule lookup list, its element
 * count, and the rule attributes (recipe, action, priority).
 * Both @list and the sw_meta itself are freed with rte_free() once
 * the rule has been programmed (or on error).
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;		/* lookup elements for ice_add_adv_rule() */
	uint16_t lkups_num;			/* number of valid entries in @list */
	struct ice_adv_rule_info rule_info;	/* rule attributes passed to the shared code */
};

/* Forward declarations: parsers registered for the distributor and
 * permission pipeline stages (pattern tables differ per stage).
 */
static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;
221
222 static struct
223 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
224         {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE, ICE_INSET_NONE},
225         {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE, ICE_INSET_NONE},
226         {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE, ICE_INSET_NONE},
227         {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
228         {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE, ICE_INSET_NONE},
229         {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
230         {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
231         {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE, ICE_INSET_NONE},
232         {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
233         {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
234         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_SW_INSET_DIST_VXLAN_IPV4,           ICE_INSET_NONE, ICE_INSET_NONE},
235         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_SW_INSET_DIST_VXLAN_IPV4_UDP,       ICE_INSET_NONE, ICE_INSET_NONE},
236         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_SW_INSET_DIST_VXLAN_IPV4_TCP,       ICE_INSET_NONE, ICE_INSET_NONE},
237         {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_SW_INSET_DIST_NVGRE_IPV4,           ICE_INSET_NONE, ICE_INSET_NONE},
238         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_SW_INSET_DIST_NVGRE_IPV4_UDP,       ICE_INSET_NONE, ICE_INSET_NONE},
239         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_SW_INSET_DIST_NVGRE_IPV4_TCP,       ICE_INSET_NONE, ICE_INSET_NONE},
240         {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
241         {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
242         {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
243         {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
244         {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
245         {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
246         {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
247         {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
248         {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
249         {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
250         {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
251         {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
252         {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
253         {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
254         {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
255         {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
256         {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
257         {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
258         {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
259         {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
260         {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
261         {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
262         {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
263         {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
264         {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
265         {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
266         {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
267         {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE, ICE_INSET_NONE},
268         {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE, ICE_INSET_NONE},
269         {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
270         {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
271         {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
272         {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
273         {pattern_eth_ipv4_gtpu,                         ICE_SW_INSET_MAC_IPV4_GTPU,             ICE_INSET_NONE, ICE_INSET_NONE},
274         {pattern_eth_ipv6_gtpu,                         ICE_SW_INSET_MAC_IPV6_GTPU,             ICE_INSET_NONE, ICE_INSET_NONE},
275         {pattern_eth_ipv4_gtpu_ipv4,                    ICE_SW_INSET_MAC_IPV4_GTPU_IPV4,        ICE_INSET_NONE, ICE_INSET_NONE},
276         {pattern_eth_ipv4_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4,     ICE_INSET_NONE, ICE_INSET_NONE},
277         {pattern_eth_ipv4_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
278         {pattern_eth_ipv4_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
279         {pattern_eth_ipv4_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_TCP,    ICE_INSET_NONE, ICE_INSET_NONE},
280         {pattern_eth_ipv4_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
281         {pattern_eth_ipv4_gtpu_ipv6,                    ICE_SW_INSET_MAC_IPV4_GTPU_IPV6,        ICE_INSET_NONE, ICE_INSET_NONE},
282         {pattern_eth_ipv4_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6,     ICE_INSET_NONE, ICE_INSET_NONE},
283         {pattern_eth_ipv4_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
284         {pattern_eth_ipv4_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
285         {pattern_eth_ipv4_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_TCP,    ICE_INSET_NONE, ICE_INSET_NONE},
286         {pattern_eth_ipv4_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
287         {pattern_eth_ipv6_gtpu_ipv4,                    ICE_SW_INSET_MAC_IPV6_GTPU_IPV4,        ICE_INSET_NONE, ICE_INSET_NONE},
288         {pattern_eth_ipv6_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4,     ICE_INSET_NONE, ICE_INSET_NONE},
289         {pattern_eth_ipv6_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
290         {pattern_eth_ipv6_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
291         {pattern_eth_ipv6_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_TCP,    ICE_INSET_NONE, ICE_INSET_NONE},
292         {pattern_eth_ipv6_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
293         {pattern_eth_ipv6_gtpu_ipv6,                    ICE_SW_INSET_MAC_IPV6_GTPU_IPV6,        ICE_INSET_NONE, ICE_INSET_NONE},
294         {pattern_eth_ipv6_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6,     ICE_INSET_NONE, ICE_INSET_NONE},
295         {pattern_eth_ipv6_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
296         {pattern_eth_ipv6_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
297         {pattern_eth_ipv6_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
298         {pattern_eth_ipv6_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
299 };
300
301 static struct
302 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
303         {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE, ICE_INSET_NONE},
304         {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE, ICE_INSET_NONE},
305         {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE, ICE_INSET_NONE},
306         {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
307         {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE, ICE_INSET_NONE},
308         {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
309         {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
310         {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE, ICE_INSET_NONE},
311         {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
312         {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
313         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE, ICE_INSET_NONE},
314         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE, ICE_INSET_NONE},
315         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE, ICE_INSET_NONE},
316         {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE, ICE_INSET_NONE},
317         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE, ICE_INSET_NONE},
318         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE, ICE_INSET_NONE},
319         {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
320         {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
321         {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
322         {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
323         {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
324         {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
325         {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
326         {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
327         {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
328         {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
329         {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
330         {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
331         {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
332         {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
333         {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
334         {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
335         {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
336         {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
337         {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
338         {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
339         {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
340         {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
341         {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
342         {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
343         {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
344         {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
345         {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
346         {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE, ICE_INSET_NONE},
347         {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE, ICE_INSET_NONE},
348         {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
349         {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
350         {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
351         {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
352         {pattern_eth_ipv4_gtpu,                         ICE_SW_INSET_MAC_IPV4_GTPU,             ICE_INSET_NONE, ICE_INSET_NONE},
353         {pattern_eth_ipv6_gtpu,                         ICE_SW_INSET_MAC_IPV6_GTPU,             ICE_INSET_NONE, ICE_INSET_NONE},
354         {pattern_eth_ipv4_gtpu_ipv4,                    ICE_SW_INSET_MAC_IPV4_GTPU_IPV4,        ICE_INSET_NONE, ICE_INSET_NONE},
355         {pattern_eth_ipv4_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4,     ICE_INSET_NONE, ICE_INSET_NONE},
356         {pattern_eth_ipv4_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
357         {pattern_eth_ipv4_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
358         {pattern_eth_ipv4_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_TCP,    ICE_INSET_NONE, ICE_INSET_NONE},
359         {pattern_eth_ipv4_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
360         {pattern_eth_ipv4_gtpu_ipv6,                    ICE_SW_INSET_MAC_IPV4_GTPU_IPV6,        ICE_INSET_NONE, ICE_INSET_NONE},
361         {pattern_eth_ipv4_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6,     ICE_INSET_NONE, ICE_INSET_NONE},
362         {pattern_eth_ipv4_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
363         {pattern_eth_ipv4_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
364         {pattern_eth_ipv4_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_TCP,    ICE_INSET_NONE, ICE_INSET_NONE},
365         {pattern_eth_ipv4_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
366         {pattern_eth_ipv6_gtpu_ipv4,                    ICE_SW_INSET_MAC_IPV6_GTPU_IPV4,        ICE_INSET_NONE, ICE_INSET_NONE},
367         {pattern_eth_ipv6_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4,     ICE_INSET_NONE, ICE_INSET_NONE},
368         {pattern_eth_ipv6_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
369         {pattern_eth_ipv6_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
370         {pattern_eth_ipv6_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_TCP,    ICE_INSET_NONE, ICE_INSET_NONE},
371         {pattern_eth_ipv6_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
372         {pattern_eth_ipv6_gtpu_ipv6,                    ICE_SW_INSET_MAC_IPV6_GTPU_IPV6,        ICE_INSET_NONE, ICE_INSET_NONE},
373         {pattern_eth_ipv6_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6,     ICE_INSET_NONE, ICE_INSET_NONE},
374         {pattern_eth_ipv6_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
375         {pattern_eth_ipv6_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
376         {pattern_eth_ipv6_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP,    ICE_INSET_NONE, ICE_INSET_NONE},
377         {pattern_eth_ipv6_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
378 };
379
380 static int
381 ice_switch_create(struct ice_adapter *ad,
382                 struct rte_flow *flow,
383                 void *meta,
384                 struct rte_flow_error *error)
385 {
386         int ret = 0;
387         struct ice_pf *pf = &ad->pf;
388         struct ice_hw *hw = ICE_PF_TO_HW(pf);
389         struct ice_rule_query_data rule_added = {0};
390         struct ice_rule_query_data *filter_ptr;
391         struct ice_adv_lkup_elem *list =
392                 ((struct sw_meta *)meta)->list;
393         uint16_t lkups_cnt =
394                 ((struct sw_meta *)meta)->lkups_num;
395         struct ice_adv_rule_info *rule_info =
396                 &((struct sw_meta *)meta)->rule_info;
397
398         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
399                 rte_flow_error_set(error, EINVAL,
400                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
401                         "item number too large for rule");
402                 goto error;
403         }
404         if (!list) {
405                 rte_flow_error_set(error, EINVAL,
406                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
407                         "lookup list should not be NULL");
408                 goto error;
409         }
410         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
411         if (!ret) {
412                 filter_ptr = rte_zmalloc("ice_switch_filter",
413                         sizeof(struct ice_rule_query_data), 0);
414                 if (!filter_ptr) {
415                         rte_flow_error_set(error, EINVAL,
416                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
417                                    "No memory for ice_switch_filter");
418                         goto error;
419                 }
420                 flow->rule = filter_ptr;
421                 rte_memcpy(filter_ptr,
422                         &rule_added,
423                         sizeof(struct ice_rule_query_data));
424         } else {
425                 rte_flow_error_set(error, EINVAL,
426                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
427                         "switch filter create flow fail");
428                 goto error;
429         }
430
431         rte_free(list);
432         rte_free(meta);
433         return 0;
434
435 error:
436         rte_free(list);
437         rte_free(meta);
438
439         return -rte_errno;
440 }
441
442 static int
443 ice_switch_destroy(struct ice_adapter *ad,
444                 struct rte_flow *flow,
445                 struct rte_flow_error *error)
446 {
447         struct ice_hw *hw = &ad->hw;
448         int ret;
449         struct ice_rule_query_data *filter_ptr;
450
451         filter_ptr = (struct ice_rule_query_data *)
452                 flow->rule;
453
454         if (!filter_ptr) {
455                 rte_flow_error_set(error, EINVAL,
456                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
457                         "no such flow"
458                         " create by switch filter");
459                 return -rte_errno;
460         }
461
462         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
463         if (ret) {
464                 rte_flow_error_set(error, EINVAL,
465                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
466                         "fail to destroy switch filter rule");
467                 return -rte_errno;
468         }
469
470         rte_free(filter_ptr);
471         return ret;
472 }
473
/* Release the rule handle attached to @flow without touching HW.
 * rte_free(NULL) is a no-op, so a flow with no rule is safe here.
 */
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
}
479
480 static uint64_t
481 ice_switch_inset_get(const struct rte_flow_item pattern[],
482                 struct rte_flow_error *error,
483                 struct ice_adv_lkup_elem *list,
484                 uint16_t *lkups_num,
485                 enum ice_sw_tunnel_type *tun_type)
486 {
487         const struct rte_flow_item *item = pattern;
488         enum rte_flow_item_type item_type;
489         const struct rte_flow_item_eth *eth_spec, *eth_mask;
490         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
491         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
492         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
493         const struct rte_flow_item_udp *udp_spec, *udp_mask;
494         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
495         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
496         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
497         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
498         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
499         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
500                                 *pppoe_proto_mask;
501         const struct rte_flow_item_esp *esp_spec, *esp_mask;
502         const struct rte_flow_item_ah *ah_spec, *ah_mask;
503         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
504         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
505         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
506         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
507         uint64_t input_set = ICE_INSET_NONE;
508         uint16_t input_set_byte = 0;
509         bool pppoe_elem_valid = 0;
510         bool pppoe_patt_valid = 0;
511         bool pppoe_prot_valid = 0;
512         bool inner_vlan_valid = 0;
513         bool outer_vlan_valid = 0;
514         bool tunnel_valid = 0;
515         bool profile_rule = 0;
516         bool nvgre_valid = 0;
517         bool vxlan_valid = 0;
518         bool qinq_valid = 0;
519         bool ipv6_valid = 0;
520         bool ipv4_valid = 0;
521         bool udp_valid = 0;
522         bool tcp_valid = 0;
523         bool gtpu_valid = 0;
524         bool gtpu_psc_valid = 0;
525         bool inner_ipv4_valid = 0;
526         bool inner_ipv6_valid = 0;
527         bool inner_tcp_valid = 0;
528         bool inner_udp_valid = 0;
529         uint16_t j, k, t = 0;
530
531         if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
532             *tun_type == ICE_NON_TUN_QINQ)
533                 qinq_valid = 1;
534
535         for (item = pattern; item->type !=
536                         RTE_FLOW_ITEM_TYPE_END; item++) {
537                 if (item->last) {
538                         rte_flow_error_set(error, EINVAL,
539                                         RTE_FLOW_ERROR_TYPE_ITEM,
540                                         item,
541                                         "Not support range");
542                         return 0;
543                 }
544                 item_type = item->type;
545
546                 switch (item_type) {
547                 case RTE_FLOW_ITEM_TYPE_ETH:
548                         eth_spec = item->spec;
549                         eth_mask = item->mask;
550                         if (eth_spec && eth_mask) {
551                                 const uint8_t *a = eth_mask->src.addr_bytes;
552                                 const uint8_t *b = eth_mask->dst.addr_bytes;
553                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
554                                         if (a[j] && tunnel_valid) {
555                                                 input_set |=
556                                                         ICE_INSET_TUN_SMAC;
557                                                 break;
558                                         } else if (a[j]) {
559                                                 input_set |=
560                                                         ICE_INSET_SMAC;
561                                                 break;
562                                         }
563                                 }
564                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
565                                         if (b[j] && tunnel_valid) {
566                                                 input_set |=
567                                                         ICE_INSET_TUN_DMAC;
568                                                 break;
569                                         } else if (b[j]) {
570                                                 input_set |=
571                                                         ICE_INSET_DMAC;
572                                                 break;
573                                         }
574                                 }
575                                 if (eth_mask->type)
576                                         input_set |= ICE_INSET_ETHERTYPE;
577                                 list[t].type = (tunnel_valid  == 0) ?
578                                         ICE_MAC_OFOS : ICE_MAC_IL;
579                                 struct ice_ether_hdr *h;
580                                 struct ice_ether_hdr *m;
581                                 uint16_t i = 0;
582                                 h = &list[t].h_u.eth_hdr;
583                                 m = &list[t].m_u.eth_hdr;
584                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
585                                         if (eth_mask->src.addr_bytes[j]) {
586                                                 h->src_addr[j] =
587                                                 eth_spec->src.addr_bytes[j];
588                                                 m->src_addr[j] =
589                                                 eth_mask->src.addr_bytes[j];
590                                                 i = 1;
591                                                 input_set_byte++;
592                                         }
593                                         if (eth_mask->dst.addr_bytes[j]) {
594                                                 h->dst_addr[j] =
595                                                 eth_spec->dst.addr_bytes[j];
596                                                 m->dst_addr[j] =
597                                                 eth_mask->dst.addr_bytes[j];
598                                                 i = 1;
599                                                 input_set_byte++;
600                                         }
601                                 }
602                                 if (i)
603                                         t++;
604                                 if (eth_mask->type) {
605                                         list[t].type = ICE_ETYPE_OL;
606                                         list[t].h_u.ethertype.ethtype_id =
607                                                 eth_spec->type;
608                                         list[t].m_u.ethertype.ethtype_id =
609                                                 eth_mask->type;
610                                         input_set_byte += 2;
611                                         t++;
612                                 }
613                         }
614                         break;
615
616                 case RTE_FLOW_ITEM_TYPE_IPV4:
617                         ipv4_spec = item->spec;
618                         ipv4_mask = item->mask;
619                         if (tunnel_valid)
620                                 inner_ipv4_valid = 1;
621                         else
622                                 ipv4_valid = 1;
623
624                         if (ipv4_spec && ipv4_mask) {
625                                 /* Check IPv4 mask and update input set */
626                                 if (ipv4_mask->hdr.version_ihl ||
627                                         ipv4_mask->hdr.total_length ||
628                                         ipv4_mask->hdr.packet_id ||
629                                         ipv4_mask->hdr.hdr_checksum) {
630                                         rte_flow_error_set(error, EINVAL,
631                                                    RTE_FLOW_ERROR_TYPE_ITEM,
632                                                    item,
633                                                    "Invalid IPv4 mask.");
634                                         return 0;
635                                 }
636
637                                 if (tunnel_valid) {
638                                         if (ipv4_mask->hdr.type_of_service)
639                                                 input_set |=
640                                                         ICE_INSET_TUN_IPV4_TOS;
641                                         if (ipv4_mask->hdr.src_addr)
642                                                 input_set |=
643                                                         ICE_INSET_TUN_IPV4_SRC;
644                                         if (ipv4_mask->hdr.dst_addr)
645                                                 input_set |=
646                                                         ICE_INSET_TUN_IPV4_DST;
647                                         if (ipv4_mask->hdr.time_to_live)
648                                                 input_set |=
649                                                         ICE_INSET_TUN_IPV4_TTL;
650                                         if (ipv4_mask->hdr.next_proto_id)
651                                                 input_set |=
652                                                 ICE_INSET_TUN_IPV4_PROTO;
653                                 } else {
654                                         if (ipv4_mask->hdr.src_addr)
655                                                 input_set |= ICE_INSET_IPV4_SRC;
656                                         if (ipv4_mask->hdr.dst_addr)
657                                                 input_set |= ICE_INSET_IPV4_DST;
658                                         if (ipv4_mask->hdr.time_to_live)
659                                                 input_set |= ICE_INSET_IPV4_TTL;
660                                         if (ipv4_mask->hdr.next_proto_id)
661                                                 input_set |=
662                                                 ICE_INSET_IPV4_PROTO;
663                                         if (ipv4_mask->hdr.type_of_service)
664                                                 input_set |=
665                                                         ICE_INSET_IPV4_TOS;
666                                 }
667                                 list[t].type = (tunnel_valid  == 0) ?
668                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
669                                 if (ipv4_mask->hdr.src_addr) {
670                                         list[t].h_u.ipv4_hdr.src_addr =
671                                                 ipv4_spec->hdr.src_addr;
672                                         list[t].m_u.ipv4_hdr.src_addr =
673                                                 ipv4_mask->hdr.src_addr;
674                                         input_set_byte += 2;
675                                 }
676                                 if (ipv4_mask->hdr.dst_addr) {
677                                         list[t].h_u.ipv4_hdr.dst_addr =
678                                                 ipv4_spec->hdr.dst_addr;
679                                         list[t].m_u.ipv4_hdr.dst_addr =
680                                                 ipv4_mask->hdr.dst_addr;
681                                         input_set_byte += 2;
682                                 }
683                                 if (ipv4_mask->hdr.time_to_live) {
684                                         list[t].h_u.ipv4_hdr.time_to_live =
685                                                 ipv4_spec->hdr.time_to_live;
686                                         list[t].m_u.ipv4_hdr.time_to_live =
687                                                 ipv4_mask->hdr.time_to_live;
688                                         input_set_byte++;
689                                 }
690                                 if (ipv4_mask->hdr.next_proto_id) {
691                                         list[t].h_u.ipv4_hdr.protocol =
692                                                 ipv4_spec->hdr.next_proto_id;
693                                         list[t].m_u.ipv4_hdr.protocol =
694                                                 ipv4_mask->hdr.next_proto_id;
695                                         input_set_byte++;
696                                 }
697                                 if ((ipv4_spec->hdr.next_proto_id &
698                                         ipv4_mask->hdr.next_proto_id) ==
699                                         ICE_IPV4_PROTO_NVGRE)
700                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
701                                 if (ipv4_mask->hdr.type_of_service) {
702                                         list[t].h_u.ipv4_hdr.tos =
703                                                 ipv4_spec->hdr.type_of_service;
704                                         list[t].m_u.ipv4_hdr.tos =
705                                                 ipv4_mask->hdr.type_of_service;
706                                         input_set_byte++;
707                                 }
708                                 t++;
709                         }
710                         break;
711
712                 case RTE_FLOW_ITEM_TYPE_IPV6:
713                         ipv6_spec = item->spec;
714                         ipv6_mask = item->mask;
715                         if (tunnel_valid)
716                                 inner_ipv6_valid = 1;
717                         else
718                                 ipv6_valid = 1;
719                         if (ipv6_spec && ipv6_mask) {
720                                 if (ipv6_mask->hdr.payload_len) {
721                                         rte_flow_error_set(error, EINVAL,
722                                            RTE_FLOW_ERROR_TYPE_ITEM,
723                                            item,
724                                            "Invalid IPv6 mask");
725                                         return 0;
726                                 }
727
728                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
729                                         if (ipv6_mask->hdr.src_addr[j] &&
730                                                 tunnel_valid) {
731                                                 input_set |=
732                                                 ICE_INSET_TUN_IPV6_SRC;
733                                                 break;
734                                         } else if (ipv6_mask->hdr.src_addr[j]) {
735                                                 input_set |= ICE_INSET_IPV6_SRC;
736                                                 break;
737                                         }
738                                 }
739                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
740                                         if (ipv6_mask->hdr.dst_addr[j] &&
741                                                 tunnel_valid) {
742                                                 input_set |=
743                                                 ICE_INSET_TUN_IPV6_DST;
744                                                 break;
745                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
746                                                 input_set |= ICE_INSET_IPV6_DST;
747                                                 break;
748                                         }
749                                 }
750                                 if (ipv6_mask->hdr.proto &&
751                                         tunnel_valid)
752                                         input_set |=
753                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
754                                 else if (ipv6_mask->hdr.proto)
755                                         input_set |=
756                                                 ICE_INSET_IPV6_NEXT_HDR;
757                                 if (ipv6_mask->hdr.hop_limits &&
758                                         tunnel_valid)
759                                         input_set |=
760                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
761                                 else if (ipv6_mask->hdr.hop_limits)
762                                         input_set |=
763                                                 ICE_INSET_IPV6_HOP_LIMIT;
764                                 if ((ipv6_mask->hdr.vtc_flow &
765                                                 rte_cpu_to_be_32
766                                                 (RTE_IPV6_HDR_TC_MASK)) &&
767                                         tunnel_valid)
768                                         input_set |=
769                                                         ICE_INSET_TUN_IPV6_TC;
770                                 else if (ipv6_mask->hdr.vtc_flow &
771                                                 rte_cpu_to_be_32
772                                                 (RTE_IPV6_HDR_TC_MASK))
773                                         input_set |= ICE_INSET_IPV6_TC;
774
775                                 list[t].type = (tunnel_valid  == 0) ?
776                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
777                                 struct ice_ipv6_hdr *f;
778                                 struct ice_ipv6_hdr *s;
779                                 f = &list[t].h_u.ipv6_hdr;
780                                 s = &list[t].m_u.ipv6_hdr;
781                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
782                                         if (ipv6_mask->hdr.src_addr[j]) {
783                                                 f->src_addr[j] =
784                                                 ipv6_spec->hdr.src_addr[j];
785                                                 s->src_addr[j] =
786                                                 ipv6_mask->hdr.src_addr[j];
787                                                 input_set_byte++;
788                                         }
789                                         if (ipv6_mask->hdr.dst_addr[j]) {
790                                                 f->dst_addr[j] =
791                                                 ipv6_spec->hdr.dst_addr[j];
792                                                 s->dst_addr[j] =
793                                                 ipv6_mask->hdr.dst_addr[j];
794                                                 input_set_byte++;
795                                         }
796                                 }
797                                 if (ipv6_mask->hdr.proto) {
798                                         f->next_hdr =
799                                                 ipv6_spec->hdr.proto;
800                                         s->next_hdr =
801                                                 ipv6_mask->hdr.proto;
802                                         input_set_byte++;
803                                 }
804                                 if (ipv6_mask->hdr.hop_limits) {
805                                         f->hop_limit =
806                                                 ipv6_spec->hdr.hop_limits;
807                                         s->hop_limit =
808                                                 ipv6_mask->hdr.hop_limits;
809                                         input_set_byte++;
810                                 }
811                                 if (ipv6_mask->hdr.vtc_flow &
812                                                 rte_cpu_to_be_32
813                                                 (RTE_IPV6_HDR_TC_MASK)) {
814                                         struct ice_le_ver_tc_flow vtf;
815                                         vtf.u.fld.version = 0;
816                                         vtf.u.fld.flow_label = 0;
817                                         vtf.u.fld.tc = (rte_be_to_cpu_32
818                                                 (ipv6_spec->hdr.vtc_flow) &
819                                                         RTE_IPV6_HDR_TC_MASK) >>
820                                                         RTE_IPV6_HDR_TC_SHIFT;
821                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
822                                         vtf.u.fld.tc = (rte_be_to_cpu_32
823                                                 (ipv6_mask->hdr.vtc_flow) &
824                                                         RTE_IPV6_HDR_TC_MASK) >>
825                                                         RTE_IPV6_HDR_TC_SHIFT;
826                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
827                                         input_set_byte += 4;
828                                 }
829                                 t++;
830                         }
831                         break;
832
833                 case RTE_FLOW_ITEM_TYPE_UDP:
834                         udp_spec = item->spec;
835                         udp_mask = item->mask;
836                         if (tunnel_valid)
837                                 inner_udp_valid = 1;
838                         else
839                                 udp_valid = 1;
840                         if (udp_spec && udp_mask) {
841                                 /* Check UDP mask and update input set*/
842                                 if (udp_mask->hdr.dgram_len ||
843                                     udp_mask->hdr.dgram_cksum) {
844                                         rte_flow_error_set(error, EINVAL,
845                                                    RTE_FLOW_ERROR_TYPE_ITEM,
846                                                    item,
847                                                    "Invalid UDP mask");
848                                         return 0;
849                                 }
850
851                                 if (tunnel_valid) {
852                                         if (udp_mask->hdr.src_port)
853                                                 input_set |=
854                                                 ICE_INSET_TUN_UDP_SRC_PORT;
855                                         if (udp_mask->hdr.dst_port)
856                                                 input_set |=
857                                                 ICE_INSET_TUN_UDP_DST_PORT;
858                                 } else {
859                                         if (udp_mask->hdr.src_port)
860                                                 input_set |=
861                                                 ICE_INSET_UDP_SRC_PORT;
862                                         if (udp_mask->hdr.dst_port)
863                                                 input_set |=
864                                                 ICE_INSET_UDP_DST_PORT;
865                                 }
866                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
867                                                 tunnel_valid == 0)
868                                         list[t].type = ICE_UDP_OF;
869                                 else
870                                         list[t].type = ICE_UDP_ILOS;
871                                 if (udp_mask->hdr.src_port) {
872                                         list[t].h_u.l4_hdr.src_port =
873                                                 udp_spec->hdr.src_port;
874                                         list[t].m_u.l4_hdr.src_port =
875                                                 udp_mask->hdr.src_port;
876                                         input_set_byte += 2;
877                                 }
878                                 if (udp_mask->hdr.dst_port) {
879                                         list[t].h_u.l4_hdr.dst_port =
880                                                 udp_spec->hdr.dst_port;
881                                         list[t].m_u.l4_hdr.dst_port =
882                                                 udp_mask->hdr.dst_port;
883                                         input_set_byte += 2;
884                                 }
885                                 t++;
886                         }
887                         break;
888
889                 case RTE_FLOW_ITEM_TYPE_TCP:
890                         tcp_spec = item->spec;
891                         tcp_mask = item->mask;
892                         if (tunnel_valid)
893                                 inner_tcp_valid = 1;
894                         else
895                                 tcp_valid = 1;
896                         if (tcp_spec && tcp_mask) {
897                                 /* Check TCP mask and update input set */
898                                 if (tcp_mask->hdr.sent_seq ||
899                                         tcp_mask->hdr.recv_ack ||
900                                         tcp_mask->hdr.data_off ||
901                                         tcp_mask->hdr.tcp_flags ||
902                                         tcp_mask->hdr.rx_win ||
903                                         tcp_mask->hdr.cksum ||
904                                         tcp_mask->hdr.tcp_urp) {
905                                         rte_flow_error_set(error, EINVAL,
906                                            RTE_FLOW_ERROR_TYPE_ITEM,
907                                            item,
908                                            "Invalid TCP mask");
909                                         return 0;
910                                 }
911
912                                 if (tunnel_valid) {
913                                         if (tcp_mask->hdr.src_port)
914                                                 input_set |=
915                                                 ICE_INSET_TUN_TCP_SRC_PORT;
916                                         if (tcp_mask->hdr.dst_port)
917                                                 input_set |=
918                                                 ICE_INSET_TUN_TCP_DST_PORT;
919                                 } else {
920                                         if (tcp_mask->hdr.src_port)
921                                                 input_set |=
922                                                 ICE_INSET_TCP_SRC_PORT;
923                                         if (tcp_mask->hdr.dst_port)
924                                                 input_set |=
925                                                 ICE_INSET_TCP_DST_PORT;
926                                 }
927                                 list[t].type = ICE_TCP_IL;
928                                 if (tcp_mask->hdr.src_port) {
929                                         list[t].h_u.l4_hdr.src_port =
930                                                 tcp_spec->hdr.src_port;
931                                         list[t].m_u.l4_hdr.src_port =
932                                                 tcp_mask->hdr.src_port;
933                                         input_set_byte += 2;
934                                 }
935                                 if (tcp_mask->hdr.dst_port) {
936                                         list[t].h_u.l4_hdr.dst_port =
937                                                 tcp_spec->hdr.dst_port;
938                                         list[t].m_u.l4_hdr.dst_port =
939                                                 tcp_mask->hdr.dst_port;
940                                         input_set_byte += 2;
941                                 }
942                                 t++;
943                         }
944                         break;
945
946                 case RTE_FLOW_ITEM_TYPE_SCTP:
947                         sctp_spec = item->spec;
948                         sctp_mask = item->mask;
949                         if (sctp_spec && sctp_mask) {
950                                 /* Check SCTP mask and update input set */
951                                 if (sctp_mask->hdr.cksum) {
952                                         rte_flow_error_set(error, EINVAL,
953                                            RTE_FLOW_ERROR_TYPE_ITEM,
954                                            item,
955                                            "Invalid SCTP mask");
956                                         return 0;
957                                 }
958
959                                 if (tunnel_valid) {
960                                         if (sctp_mask->hdr.src_port)
961                                                 input_set |=
962                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
963                                         if (sctp_mask->hdr.dst_port)
964                                                 input_set |=
965                                                 ICE_INSET_TUN_SCTP_DST_PORT;
966                                 } else {
967                                         if (sctp_mask->hdr.src_port)
968                                                 input_set |=
969                                                 ICE_INSET_SCTP_SRC_PORT;
970                                         if (sctp_mask->hdr.dst_port)
971                                                 input_set |=
972                                                 ICE_INSET_SCTP_DST_PORT;
973                                 }
974                                 list[t].type = ICE_SCTP_IL;
975                                 if (sctp_mask->hdr.src_port) {
976                                         list[t].h_u.sctp_hdr.src_port =
977                                                 sctp_spec->hdr.src_port;
978                                         list[t].m_u.sctp_hdr.src_port =
979                                                 sctp_mask->hdr.src_port;
980                                         input_set_byte += 2;
981                                 }
982                                 if (sctp_mask->hdr.dst_port) {
983                                         list[t].h_u.sctp_hdr.dst_port =
984                                                 sctp_spec->hdr.dst_port;
985                                         list[t].m_u.sctp_hdr.dst_port =
986                                                 sctp_mask->hdr.dst_port;
987                                         input_set_byte += 2;
988                                 }
989                                 t++;
990                         }
991                         break;
992
993                 case RTE_FLOW_ITEM_TYPE_VXLAN:
994                         vxlan_spec = item->spec;
995                         vxlan_mask = item->mask;
996                         /* Check if VXLAN item is used to describe protocol.
997                          * If yes, both spec and mask should be NULL.
998                          * If no, both spec and mask shouldn't be NULL.
999                          */
1000                         if ((!vxlan_spec && vxlan_mask) ||
1001                             (vxlan_spec && !vxlan_mask)) {
1002                                 rte_flow_error_set(error, EINVAL,
1003                                            RTE_FLOW_ERROR_TYPE_ITEM,
1004                                            item,
1005                                            "Invalid VXLAN item");
1006                                 return 0;
1007                         }
1008                         vxlan_valid = 1;
1009                         tunnel_valid = 1;
1010                         if (vxlan_spec && vxlan_mask) {
1011                                 list[t].type = ICE_VXLAN;
1012                                 if (vxlan_mask->vni[0] ||
1013                                         vxlan_mask->vni[1] ||
1014                                         vxlan_mask->vni[2]) {
1015                                         list[t].h_u.tnl_hdr.vni =
1016                                                 (vxlan_spec->vni[2] << 16) |
1017                                                 (vxlan_spec->vni[1] << 8) |
1018                                                 vxlan_spec->vni[0];
1019                                         list[t].m_u.tnl_hdr.vni =
1020                                                 (vxlan_mask->vni[2] << 16) |
1021                                                 (vxlan_mask->vni[1] << 8) |
1022                                                 vxlan_mask->vni[0];
1023                                         input_set |=
1024                                                 ICE_INSET_TUN_VXLAN_VNI;
1025                                         input_set_byte += 2;
1026                                 }
1027                                 t++;
1028                         }
1029                         break;
1030
1031                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1032                         nvgre_spec = item->spec;
1033                         nvgre_mask = item->mask;
1034                         /* Check if NVGRE item is used to describe protocol.
1035                          * If yes, both spec and mask should be NULL.
1036                          * If no, both spec and mask shouldn't be NULL.
1037                          */
1038                         if ((!nvgre_spec && nvgre_mask) ||
1039                             (nvgre_spec && !nvgre_mask)) {
1040                                 rte_flow_error_set(error, EINVAL,
1041                                            RTE_FLOW_ERROR_TYPE_ITEM,
1042                                            item,
1043                                            "Invalid NVGRE item");
1044                                 return 0;
1045                         }
1046                         nvgre_valid = 1;
1047                         tunnel_valid = 1;
1048                         if (nvgre_spec && nvgre_mask) {
1049                                 list[t].type = ICE_NVGRE;
1050                                 if (nvgre_mask->tni[0] ||
1051                                         nvgre_mask->tni[1] ||
1052                                         nvgre_mask->tni[2]) {
1053                                         list[t].h_u.nvgre_hdr.tni_flow =
1054                                                 (nvgre_spec->tni[2] << 16) |
1055                                                 (nvgre_spec->tni[1] << 8) |
1056                                                 nvgre_spec->tni[0];
1057                                         list[t].m_u.nvgre_hdr.tni_flow =
1058                                                 (nvgre_mask->tni[2] << 16) |
1059                                                 (nvgre_mask->tni[1] << 8) |
1060                                                 nvgre_mask->tni[0];
1061                                         input_set |=
1062                                                 ICE_INSET_TUN_NVGRE_TNI;
1063                                         input_set_byte += 2;
1064                                 }
1065                                 t++;
1066                         }
1067                         break;
1068
1069                 case RTE_FLOW_ITEM_TYPE_VLAN:
1070                         vlan_spec = item->spec;
1071                         vlan_mask = item->mask;
1072                         /* Check if VLAN item is used to describe protocol.
1073                          * If yes, both spec and mask should be NULL.
1074                          * If no, both spec and mask shouldn't be NULL.
1075                          */
1076                         if ((!vlan_spec && vlan_mask) ||
1077                             (vlan_spec && !vlan_mask)) {
1078                                 rte_flow_error_set(error, EINVAL,
1079                                            RTE_FLOW_ERROR_TYPE_ITEM,
1080                                            item,
1081                                            "Invalid VLAN item");
1082                                 return 0;
1083                         }
1084
1085                         if (qinq_valid) {
1086                                 if (!outer_vlan_valid)
1087                                         outer_vlan_valid = 1;
1088                                 else
1089                                         inner_vlan_valid = 1;
1090                         }
1091
1092                         if (vlan_spec && vlan_mask) {
1093                                 if (qinq_valid) {
1094                                         if (!inner_vlan_valid) {
1095                                                 list[t].type = ICE_VLAN_EX;
1096                                                 input_set |=
1097                                                         ICE_INSET_VLAN_OUTER;
1098                                         } else {
1099                                                 list[t].type = ICE_VLAN_IN;
1100                                                 input_set |=
1101                                                         ICE_INSET_VLAN_INNER;
1102                                         }
1103                                 } else {
1104                                         list[t].type = ICE_VLAN_OFOS;
1105                                         input_set |= ICE_INSET_VLAN_INNER;
1106                                 }
1107
1108                                 if (vlan_mask->tci) {
1109                                         list[t].h_u.vlan_hdr.vlan =
1110                                                 vlan_spec->tci;
1111                                         list[t].m_u.vlan_hdr.vlan =
1112                                                 vlan_mask->tci;
1113                                         input_set_byte += 2;
1114                                 }
1115                                 if (vlan_mask->inner_type) {
1116                                         rte_flow_error_set(error, EINVAL,
1117                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1118                                                 item,
1119                                                 "Invalid VLAN input set.");
1120                                         return 0;
1121                                 }
1122                                 t++;
1123                         }
1124                         break;
1125
1126                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1127                 case RTE_FLOW_ITEM_TYPE_PPPOES:
1128                         pppoe_spec = item->spec;
1129                         pppoe_mask = item->mask;
1130                         /* Check if PPPoE item is used to describe protocol.
1131                          * If yes, both spec and mask should be NULL.
1132                          * If no, both spec and mask shouldn't be NULL.
1133                          */
1134                         if ((!pppoe_spec && pppoe_mask) ||
1135                                 (pppoe_spec && !pppoe_mask)) {
1136                                 rte_flow_error_set(error, EINVAL,
1137                                         RTE_FLOW_ERROR_TYPE_ITEM,
1138                                         item,
1139                                         "Invalid pppoe item");
1140                                 return 0;
1141                         }
1142                         pppoe_patt_valid = 1;
1143                         if (pppoe_spec && pppoe_mask) {
1144                                 /* Check pppoe mask and update input set */
1145                                 if (pppoe_mask->length ||
1146                                         pppoe_mask->code ||
1147                                         pppoe_mask->version_type) {
1148                                         rte_flow_error_set(error, EINVAL,
1149                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1150                                                 item,
1151                                                 "Invalid pppoe mask");
1152                                         return 0;
1153                                 }
1154                                 list[t].type = ICE_PPPOE;
1155                                 if (pppoe_mask->session_id) {
1156                                         list[t].h_u.pppoe_hdr.session_id =
1157                                                 pppoe_spec->session_id;
1158                                         list[t].m_u.pppoe_hdr.session_id =
1159                                                 pppoe_mask->session_id;
1160                                         input_set |= ICE_INSET_PPPOE_SESSION;
1161                                         input_set_byte += 2;
1162                                 }
1163                                 t++;
1164                                 pppoe_elem_valid = 1;
1165                         }
1166                         break;
1167
1168                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1169                         pppoe_proto_spec = item->spec;
1170                         pppoe_proto_mask = item->mask;
1171                         /* Check if PPPoE optional proto_id item
1172                          * is used to describe protocol.
1173                          * If yes, both spec and mask should be NULL.
1174                          * If no, both spec and mask shouldn't be NULL.
1175                          */
1176                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1177                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1178                                 rte_flow_error_set(error, EINVAL,
1179                                         RTE_FLOW_ERROR_TYPE_ITEM,
1180                                         item,
1181                                         "Invalid pppoe proto item");
1182                                 return 0;
1183                         }
1184                         if (pppoe_proto_spec && pppoe_proto_mask) {
1185                                 if (pppoe_elem_valid)
1186                                         t--;
1187                                 list[t].type = ICE_PPPOE;
1188                                 if (pppoe_proto_mask->proto_id) {
1189                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1190                                                 pppoe_proto_spec->proto_id;
1191                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1192                                                 pppoe_proto_mask->proto_id;
1193                                         input_set |= ICE_INSET_PPPOE_PROTO;
1194                                         input_set_byte += 2;
1195                                         pppoe_prot_valid = 1;
1196                                 }
1197                                 if ((pppoe_proto_mask->proto_id &
1198                                         pppoe_proto_spec->proto_id) !=
1199                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1200                                         (pppoe_proto_mask->proto_id &
1201                                         pppoe_proto_spec->proto_id) !=
1202                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1203                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1204                                 else
1205                                         *tun_type = ICE_SW_TUN_PPPOE;
1206                                 t++;
1207                         }
1208
1209                         break;
1210
1211                 case RTE_FLOW_ITEM_TYPE_ESP:
1212                         esp_spec = item->spec;
1213                         esp_mask = item->mask;
1214                         if ((esp_spec && !esp_mask) ||
1215                                 (!esp_spec && esp_mask)) {
1216                                 rte_flow_error_set(error, EINVAL,
1217                                            RTE_FLOW_ERROR_TYPE_ITEM,
1218                                            item,
1219                                            "Invalid esp item");
1220                                 return 0;
1221                         }
1222                         /* Check esp mask and update input set */
1223                         if (esp_mask && esp_mask->hdr.seq) {
1224                                 rte_flow_error_set(error, EINVAL,
1225                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1226                                                 item,
1227                                                 "Invalid esp mask");
1228                                 return 0;
1229                         }
1230
1231                         if (!esp_spec && !esp_mask && !input_set) {
1232                                 profile_rule = 1;
1233                                 if (ipv6_valid && udp_valid)
1234                                         *tun_type =
1235                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1236                                 else if (ipv6_valid)
1237                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1238                                 else if (ipv4_valid)
1239                                         return 0;
1240                         } else if (esp_spec && esp_mask &&
1241                                                 esp_mask->hdr.spi){
1242                                 if (udp_valid)
1243                                         list[t].type = ICE_NAT_T;
1244                                 else
1245                                         list[t].type = ICE_ESP;
1246                                 list[t].h_u.esp_hdr.spi =
1247                                         esp_spec->hdr.spi;
1248                                 list[t].m_u.esp_hdr.spi =
1249                                         esp_mask->hdr.spi;
1250                                 input_set |= ICE_INSET_ESP_SPI;
1251                                 input_set_byte += 4;
1252                                 t++;
1253                         }
1254
1255                         if (!profile_rule) {
1256                                 if (ipv6_valid && udp_valid)
1257                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1258                                 else if (ipv4_valid && udp_valid)
1259                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1260                                 else if (ipv6_valid)
1261                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1262                                 else if (ipv4_valid)
1263                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1264                         }
1265                         break;
1266
1267                 case RTE_FLOW_ITEM_TYPE_AH:
1268                         ah_spec = item->spec;
1269                         ah_mask = item->mask;
1270                         if ((ah_spec && !ah_mask) ||
1271                                 (!ah_spec && ah_mask)) {
1272                                 rte_flow_error_set(error, EINVAL,
1273                                            RTE_FLOW_ERROR_TYPE_ITEM,
1274                                            item,
1275                                            "Invalid ah item");
1276                                 return 0;
1277                         }
1278                         /* Check ah mask and update input set */
1279                         if (ah_mask &&
1280                                 (ah_mask->next_hdr ||
1281                                 ah_mask->payload_len ||
1282                                 ah_mask->seq_num ||
1283                                 ah_mask->reserved)) {
1284                                 rte_flow_error_set(error, EINVAL,
1285                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1286                                                 item,
1287                                                 "Invalid ah mask");
1288                                 return 0;
1289                         }
1290
1291                         if (!ah_spec && !ah_mask && !input_set) {
1292                                 profile_rule = 1;
1293                                 if (ipv6_valid && udp_valid)
1294                                         *tun_type =
1295                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1296                                 else if (ipv6_valid)
1297                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1298                                 else if (ipv4_valid)
1299                                         return 0;
1300                         } else if (ah_spec && ah_mask &&
1301                                                 ah_mask->spi){
1302                                 list[t].type = ICE_AH;
1303                                 list[t].h_u.ah_hdr.spi =
1304                                         ah_spec->spi;
1305                                 list[t].m_u.ah_hdr.spi =
1306                                         ah_mask->spi;
1307                                 input_set |= ICE_INSET_AH_SPI;
1308                                 input_set_byte += 4;
1309                                 t++;
1310                         }
1311
1312                         if (!profile_rule) {
1313                                 if (udp_valid)
1314                                         return 0;
1315                                 else if (ipv6_valid)
1316                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1317                                 else if (ipv4_valid)
1318                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1319                         }
1320                         break;
1321
1322                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1323                         l2tp_spec = item->spec;
1324                         l2tp_mask = item->mask;
1325                         if ((l2tp_spec && !l2tp_mask) ||
1326                                 (!l2tp_spec && l2tp_mask)) {
1327                                 rte_flow_error_set(error, EINVAL,
1328                                            RTE_FLOW_ERROR_TYPE_ITEM,
1329                                            item,
1330                                            "Invalid l2tp item");
1331                                 return 0;
1332                         }
1333
1334                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1335                                 if (ipv6_valid)
1336                                         *tun_type =
1337                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1338                                 else if (ipv4_valid)
1339                                         return 0;
1340                         } else if (l2tp_spec && l2tp_mask &&
1341                                                 l2tp_mask->session_id){
1342                                 list[t].type = ICE_L2TPV3;
1343                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1344                                         l2tp_spec->session_id;
1345                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1346                                         l2tp_mask->session_id;
1347                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1348                                 input_set_byte += 4;
1349                                 t++;
1350                         }
1351
1352                         if (!profile_rule) {
1353                                 if (ipv6_valid)
1354                                         *tun_type =
1355                                         ICE_SW_TUN_IPV6_L2TPV3;
1356                                 else if (ipv4_valid)
1357                                         *tun_type =
1358                                         ICE_SW_TUN_IPV4_L2TPV3;
1359                         }
1360                         break;
1361
1362                 case RTE_FLOW_ITEM_TYPE_PFCP:
1363                         pfcp_spec = item->spec;
1364                         pfcp_mask = item->mask;
1365                         /* Check if PFCP item is used to describe protocol.
1366                          * If yes, both spec and mask should be NULL.
1367                          * If no, both spec and mask shouldn't be NULL.
1368                          */
1369                         if ((!pfcp_spec && pfcp_mask) ||
1370                             (pfcp_spec && !pfcp_mask)) {
1371                                 rte_flow_error_set(error, EINVAL,
1372                                            RTE_FLOW_ERROR_TYPE_ITEM,
1373                                            item,
1374                                            "Invalid PFCP item");
1375                                 return -ENOTSUP;
1376                         }
1377                         if (pfcp_spec && pfcp_mask) {
1378                                 /* Check pfcp mask and update input set */
1379                                 if (pfcp_mask->msg_type ||
1380                                         pfcp_mask->msg_len ||
1381                                         pfcp_mask->seid) {
1382                                         rte_flow_error_set(error, EINVAL,
1383                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1384                                                 item,
1385                                                 "Invalid pfcp mask");
1386                                         return -ENOTSUP;
1387                                 }
1388                                 if (pfcp_mask->s_field &&
1389                                         pfcp_spec->s_field == 0x01 &&
1390                                         ipv6_valid)
1391                                         *tun_type =
1392                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1393                                 else if (pfcp_mask->s_field &&
1394                                         pfcp_spec->s_field == 0x01)
1395                                         *tun_type =
1396                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1397                                 else if (pfcp_mask->s_field &&
1398                                         !pfcp_spec->s_field &&
1399                                         ipv6_valid)
1400                                         *tun_type =
1401                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1402                                 else if (pfcp_mask->s_field &&
1403                                         !pfcp_spec->s_field)
1404                                         *tun_type =
1405                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1406                                 else
1407                                         return -ENOTSUP;
1408                         }
1409                         break;
1410
1411                 case RTE_FLOW_ITEM_TYPE_GTPU:
1412                         gtp_spec = item->spec;
1413                         gtp_mask = item->mask;
1414                         if (gtp_spec && !gtp_mask) {
1415                                 rte_flow_error_set(error, EINVAL,
1416                                         RTE_FLOW_ERROR_TYPE_ITEM,
1417                                         item,
1418                                         "Invalid GTP item");
1419                                 return 0;
1420                         }
1421                         if (gtp_spec && gtp_mask) {
1422                                 if (gtp_mask->v_pt_rsv_flags ||
1423                                     gtp_mask->msg_type ||
1424                                     gtp_mask->msg_len) {
1425                                         rte_flow_error_set(error, EINVAL,
1426                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1427                                                 item,
1428                                                 "Invalid GTP mask");
1429                                         return 0;
1430                                 }
1431                                 if (gtp_mask->teid)
1432                                         input_set |= ICE_INSET_GTPU_TEID;
1433                                 list[t].type = ICE_GTP;
1434                                 list[t].h_u.gtp_hdr.teid =
1435                                         gtp_spec->teid;
1436                                 list[t].m_u.gtp_hdr.teid =
1437                                         gtp_mask->teid;
1438                                 input_set_byte += 4;
1439                                 t++;
1440                         }
1441                         tunnel_valid = 1;
1442                         gtpu_valid = 1;
1443                         break;
1444
1445                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1446                         gtp_psc_spec = item->spec;
1447                         gtp_psc_mask = item->mask;
1448                         if (gtp_psc_spec && !gtp_psc_mask) {
1449                                 rte_flow_error_set(error, EINVAL,
1450                                         RTE_FLOW_ERROR_TYPE_ITEM,
1451                                         item,
1452                                         "Invalid GTPU_EH item");
1453                                 return 0;
1454                         }
1455                         if (gtp_psc_spec && gtp_psc_mask) {
1456                                 if (gtp_psc_mask->pdu_type) {
1457                                         rte_flow_error_set(error, EINVAL,
1458                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1459                                                 item,
1460                                                 "Invalid GTPU_EH mask");
1461                                         return 0;
1462                                 }
1463                                 if (gtp_psc_mask->qfi)
1464                                         input_set |= ICE_INSET_GTPU_QFI;
1465                                 list[t].type = ICE_GTP;
1466                                 list[t].h_u.gtp_hdr.qfi =
1467                                         gtp_psc_spec->qfi;
1468                                 list[t].m_u.gtp_hdr.qfi =
1469                                         gtp_psc_mask->qfi;
1470                                 input_set_byte += 1;
1471                                 t++;
1472                         }
1473                         gtpu_psc_valid = 1;
1474                         break;
1475
1476                 case RTE_FLOW_ITEM_TYPE_VOID:
1477                         break;
1478
1479                 default:
1480                         rte_flow_error_set(error, EINVAL,
1481                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1482                                    "Invalid pattern item.");
1483                         goto out;
1484                 }
1485         }
1486
1487         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1488             inner_vlan_valid && outer_vlan_valid)
1489                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1490         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1491                  inner_vlan_valid && outer_vlan_valid)
1492                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1493         else if (*tun_type == ICE_NON_TUN &&
1494                  inner_vlan_valid && outer_vlan_valid)
1495                 *tun_type = ICE_NON_TUN_QINQ;
1496         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1497                  inner_vlan_valid && outer_vlan_valid)
1498                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1499
1500         if (pppoe_patt_valid && !pppoe_prot_valid) {
1501                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1502                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1503                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1504                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1505                 else if (inner_vlan_valid && outer_vlan_valid)
1506                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1507                 else if (ipv6_valid && udp_valid)
1508                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1509                 else if (ipv6_valid && tcp_valid)
1510                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1511                 else if (ipv4_valid && udp_valid)
1512                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1513                 else if (ipv4_valid && tcp_valid)
1514                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1515                 else if (ipv6_valid)
1516                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1517                 else if (ipv4_valid)
1518                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1519                 else
1520                         *tun_type = ICE_SW_TUN_PPPOE;
1521         }
1522
1523         if (gtpu_valid && gtpu_psc_valid) {
1524                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1525                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1526                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1527                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1528                 else if (ipv4_valid && inner_ipv4_valid)
1529                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1530                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1531                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1532                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1533                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1534                 else if (ipv4_valid && inner_ipv6_valid)
1535                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1536                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1537                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1538                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1539                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1540                 else if (ipv6_valid && inner_ipv4_valid)
1541                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1542                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1543                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1544                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1545                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1546                 else if (ipv6_valid && inner_ipv6_valid)
1547                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1548                 else if (ipv4_valid)
1549                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1550                 else if (ipv6_valid)
1551                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1552         } else if (gtpu_valid) {
1553                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1554                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1555                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1556                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1557                 else if (ipv4_valid && inner_ipv4_valid)
1558                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1559                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1560                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1561                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1562                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1563                 else if (ipv4_valid && inner_ipv6_valid)
1564                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1565                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1566                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1567                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1568                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1569                 else if (ipv6_valid && inner_ipv4_valid)
1570                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1571                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1572                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1573                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1574                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1575                 else if (ipv6_valid && inner_ipv6_valid)
1576                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1577                 else if (ipv4_valid)
1578                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1579                 else if (ipv6_valid)
1580                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1581         }
1582
1583         if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1584             *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1585                 for (k = 0; k < t; k++) {
1586                         if (list[k].type == ICE_GTP)
1587                                 list[k].type = ICE_GTP_NO_PAY;
1588                 }
1589         }
1590
1591         if (*tun_type == ICE_NON_TUN) {
1592                 if (vxlan_valid)
1593                         *tun_type = ICE_SW_TUN_VXLAN;
1594                 else if (nvgre_valid)
1595                         *tun_type = ICE_SW_TUN_NVGRE;
1596                 else if (ipv4_valid && tcp_valid)
1597                         *tun_type = ICE_SW_IPV4_TCP;
1598                 else if (ipv4_valid && udp_valid)
1599                         *tun_type = ICE_SW_IPV4_UDP;
1600                 else if (ipv6_valid && tcp_valid)
1601                         *tun_type = ICE_SW_IPV6_TCP;
1602                 else if (ipv6_valid && udp_valid)
1603                         *tun_type = ICE_SW_IPV6_UDP;
1604         }
1605
1606         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1607                 rte_flow_error_set(error, EINVAL,
1608                         RTE_FLOW_ERROR_TYPE_ITEM,
1609                         item,
1610                         "too much input set");
1611                 return -ENOTSUP;
1612         }
1613
1614         *lkups_num = t;
1615
1616         return input_set;
1617 out:
1618         return 0;
1619 }
1620
1621 static int
1622 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1623                             const struct rte_flow_action *actions,
1624                             uint32_t priority,
1625                             struct rte_flow_error *error,
1626                             struct ice_adv_rule_info *rule_info)
1627 {
1628         const struct rte_flow_action_vf *act_vf;
1629         const struct rte_flow_action *action;
1630         enum rte_flow_action_type action_type;
1631
1632         for (action = actions; action->type !=
1633                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1634                 action_type = action->type;
1635                 switch (action_type) {
1636                 case RTE_FLOW_ACTION_TYPE_VF:
1637                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1638                         act_vf = action->conf;
1639
1640                         if (act_vf->id >= ad->real_hw.num_vfs &&
1641                                 !act_vf->original) {
1642                                 rte_flow_error_set(error,
1643                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1644                                         actions,
1645                                         "Invalid vf id");
1646                                 return -rte_errno;
1647                         }
1648
1649                         if (act_vf->original)
1650                                 rule_info->sw_act.vsi_handle =
1651                                         ad->real_hw.avf.bus.func;
1652                         else
1653                                 rule_info->sw_act.vsi_handle = act_vf->id;
1654                         break;
1655
1656                 case RTE_FLOW_ACTION_TYPE_DROP:
1657                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1658                         break;
1659
1660                 default:
1661                         rte_flow_error_set(error,
1662                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1663                                            actions,
1664                                            "Invalid action type");
1665                         return -rte_errno;
1666                 }
1667         }
1668
1669         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1670         rule_info->sw_act.flag = ICE_FLTR_RX;
1671         rule_info->rx = 1;
1672         rule_info->priority = priority + 5;
1673
1674         return 0;
1675 }
1676
/**
 * Parse the action list of a PF switch rule and fill the forwarding part
 * of @rule_info. Supports RSS (as a queue group), QUEUE, DROP and VOID.
 *
 * @pf: PF instance; provides the main VSI and the queue base offset
 * @actions: rte_flow action list terminated by RTE_FLOW_ACTION_TYPE_END
 * @priority: rte_flow attribute priority, mapped onto the HW rule priority
 * @error: rte_flow error reporting structure
 * @rule_info: output, switch-rule forwarding info filled on success
 *
 * Return: 0 on success, -rte_errno on an invalid action, queue index or
 * queue-group layout.
 */
static int
ice_switch_parse_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		uint32_t priority,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	/* HW queue groups must have a power-of-two size from this set. */
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		 2, 4, 8, 16, 32, 64, 128};

	/* Translate the VSI-relative queue index into an absolute one. */
	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* RSS is interpreted as "forward to queue group":
			 * the listed queues must form a contiguous region
			 * whose size is one of valid_qgrop_number[].
			 */
			act_qgrop = action->conf;
			if (act_qgrop->queue_num <= 1)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_QGRP;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
					break;
			}
			/* Size not in the supported set. */
			if (i == MAX_QGRP_NUM_TYPE)
				goto error;
			/* Group must fit inside the configured RX queues. */
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev->data->nb_rx_queues)
				goto error1;
			/* Queues must be strictly consecutive. */
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
					goto error2;
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			if (act_q->index >= dev->data->nb_rx_queues)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
				ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			goto error;
		}
	}

	/* Rule matches traffic received on the main VSI; the rte_flow
	 * priority (0 or 1) is offset into the switch priority range.
	 */
	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	rule_info->priority = priority + 5;

	return 0;

error:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid action type or queue number");
	return -rte_errno;

error1:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid queue region indexes");
	return -rte_errno;

error2:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Discontinuous queue region");
	return -rte_errno;
}
1776
1777 static int
1778 ice_switch_check_action(const struct rte_flow_action *actions,
1779                             struct rte_flow_error *error)
1780 {
1781         const struct rte_flow_action *action;
1782         enum rte_flow_action_type action_type;
1783         uint16_t actions_num = 0;
1784
1785         for (action = actions; action->type !=
1786                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1787                 action_type = action->type;
1788                 switch (action_type) {
1789                 case RTE_FLOW_ACTION_TYPE_VF:
1790                 case RTE_FLOW_ACTION_TYPE_RSS:
1791                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1792                 case RTE_FLOW_ACTION_TYPE_DROP:
1793                         actions_num++;
1794                         break;
1795                 case RTE_FLOW_ACTION_TYPE_VOID:
1796                         continue;
1797                 default:
1798                         rte_flow_error_set(error,
1799                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1800                                            actions,
1801                                            "Invalid action type");
1802                         return -rte_errno;
1803                 }
1804         }
1805
1806         if (actions_num != 1) {
1807                 rte_flow_error_set(error,
1808                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1809                                    actions,
1810                                    "Invalid action number");
1811                 return -rte_errno;
1812         }
1813
1814         return 0;
1815 }
1816
/**
 * Top-level parse entry for the switch engine: validate the pattern and
 * actions of a flow, build the advanced lookup list and rule info, and
 * (optionally) hand them back to the caller through @meta.
 *
 * @ad: adapter; dcf_enabled selects DCF vs PF action parsing
 * @array/@array_len: supported pattern table for the active parser stage
 * @pattern: rte_flow item list terminated by RTE_FLOW_ITEM_TYPE_END
 * @actions: rte_flow action list terminated by RTE_FLOW_ACTION_TYPE_END
 * @priority: rte_flow attribute priority, forwarded to action parsing
 * @meta: if non-NULL, receives an allocated struct sw_meta (ownership
 *        transfers to the caller); if NULL, all temporaries are freed here
 * @error: rte_flow error reporting structure
 *
 * Return: 0 on success, -rte_errno on failure (all allocations released).
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* First pass: size the lookup list and pre-detect the tunnel type
	 * from ETH/VLAN items.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* Fully-masked ethertype: rule must hit both
			 * tunneled and non-tunneled packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN items imply a QinQ variant of the tunnel type. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Second pass: translate items into HW lookups and the input set;
	 * tun_type may be refined by what the pattern actually contains.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	if ((!inputset && !ice_is_prof_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask_o)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
						  error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, priority, error,
					      &rule_info);

	if (ret)
		goto error;

	if (meta) {
		/* Ownership of list and sw_meta_ptr moves to the caller. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1944
1945 static int
1946 ice_switch_query(struct ice_adapter *ad __rte_unused,
1947                 struct rte_flow *flow __rte_unused,
1948                 struct rte_flow_query_count *count __rte_unused,
1949                 struct rte_flow_error *error)
1950 {
1951         rte_flow_error_set(error, EINVAL,
1952                 RTE_FLOW_ERROR_TYPE_HANDLE,
1953                 NULL,
1954                 "count action not supported by switch filter");
1955
1956         return -rte_errno;
1957 }
1958
/**
 * Redirect an installed switch rule to a new VSI number (used when a VF
 * is reset and its VSI mapping changes): find the HW filter entry for
 * @flow, remove it, update the VSI context and re-add the rule.
 *
 * @ad: adapter owning the HW
 * @flow: the rte_flow whose rule may need redirection
 * @rd: redirect request; only ICE_FLOW_REDIRECT_VSI is supported
 *
 * Return: 0 on success or when the rule is unaffected, negative on error.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Rule belongs to a different VSI: nothing to do. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Locate the filter entry matching this rule id on the target VSI
	 * (either a direct FWD_TO_VSI or a FWD_TO_VSI_LIST entry).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			/* Copy the lookups: the originals are freed when
			 * the old rule is removed below.
			 */
			lkups_cnt = list_itr->lkups_cnt;
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			/* Collapse a VSI-list action back to a single-VSI
			 * forward before replaying.
			 */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching entry found. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
2040
2041 static int
2042 ice_switch_init(struct ice_adapter *ad)
2043 {
2044         int ret = 0;
2045         struct ice_flow_parser *dist_parser;
2046         struct ice_flow_parser *perm_parser;
2047
2048         if (ad->devargs.pipe_mode_support) {
2049                 perm_parser = &ice_switch_perm_parser;
2050                 ret = ice_register_parser(perm_parser, ad);
2051         } else {
2052                 dist_parser = &ice_switch_dist_parser;
2053                 ret = ice_register_parser(dist_parser, ad);
2054         }
2055         return ret;
2056 }
2057
2058 static void
2059 ice_switch_uninit(struct ice_adapter *ad)
2060 {
2061         struct ice_flow_parser *dist_parser;
2062         struct ice_flow_parser *perm_parser;
2063
2064         if (ad->devargs.pipe_mode_support) {
2065                 perm_parser = &ice_switch_perm_parser;
2066                 ice_unregister_parser(perm_parser, ad);
2067         } else {
2068                 dist_parser = &ice_switch_dist_parser;
2069                 ice_unregister_parser(dist_parser, ad);
2070         }
2071 }
2072
/* Switch filter engine: callbacks registered with the generic flow layer. */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
2084
/* Parser used in default (non-pipe) mode: distributor stage patterns. */
static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
2093
/* Parser used in pipe mode (pipe_mode_support devarg): permission stage. */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
2102
2103 RTE_INIT(ice_sw_engine_init)
2104 {
2105         struct ice_flow_engine *engine = &ice_switch_engine;
2106         ice_register_flow_engine(engine);
2107 }