/* drivers/net/ice/ice_switch_filter.c (dpdk.git) */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/* Limits and protocol identifiers used by the switch filter. */
#define MAX_QGRP_NUM_TYPE       7	/* max queue-group sizes tried - TODO confirm usage */
#define MAX_INPUT_SET_BYTE      32	/* max matched bytes per input set */
#define ICE_PPP_IPV4_PROTO      0x0021	/* PPP protocol id: IPv4 payload */
#define ICE_PPP_IPV6_PROTO      0x0057	/* PPP protocol id: IPv6 payload */
#define ICE_IPV4_PROTO_NVGRE    0x002F	/* IPv4 protocol number 47 (GRE), used for NVGRE */
#define ICE_SW_PRI_BASE 6	/* base switch-rule priority - NOTE(review): confirm against rule_info users */

/*
 * Input-set bitmaps: each ICE_SW_INSET_* macro ORs together the
 * ICE_INSET_* field bits a given pattern is allowed to match on.
 */

/* Plain L2 / VLAN / QinQ */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
#define ICE_SW_INSET_MAC_QINQ  ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
	ICE_INSET_VLAN_OUTER)

/* Non-tunnel IPv4/IPv6 (optionally with TCP/UDP ports, QinQ outer) */
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)

/* Tunnel inner-sets used by the distributor pattern list (NVGRE/VXLAN) */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)

/* Tunnel inner-sets used by the permission pattern list */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_IPV4_TOS)

/* PPPoE (session id, optional protocol, optional inner IPv4/IPv6 + L4) */
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)

/* IPsec (ESP/AH), L2TPv3 and PFCP over IPv4/IPv6 */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)

/* GTP-U: outer match (TEID, optional QFI) and inner IP/L4 sets */
#define ICE_SW_INSET_MAC_IPV4_GTPU ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_IPV6_GTPU ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_OUTER ( \
	ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
	ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
#define ICE_SW_INSET_GTPU_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_GTPU_IPV6 ( \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
#define ICE_SW_INSET_GTPU_IPV4_UDP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV4_TCP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_UDP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_TCP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)
176
/*
 * Parser output passed (as the opaque "meta" pointer) to rule creation:
 * ice_switch_create() casts meta back to this and reads list/lkups_num.
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;	/* array of advanced lookup elements */
	uint16_t lkups_num;		/* number of valid entries in @list */
	struct ice_adv_rule_info rule_info;	/* rule attributes for base-code rule add - see ice_switch.h */
};
182
/* Forward declarations; the parser objects themselves are defined later
 * in this file (definitions not visible in this chunk). */
static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;
185
/*
 * Pattern table for the "distributor" parser.  Each row lists a supported
 * rte_flow pattern and the ICE_INSET_* bitmaps it may match on; columns are
 * assumed to be {pattern, outer input set, inner/tunnel input set, unused}
 * per struct ice_pattern_match_item - TODO confirm against the struct def.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
	{pattern_ethertype,	ICE_SW_INSET_ETHER,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_vlan,	ICE_SW_INSET_MAC_VLAN,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_qinq,	ICE_SW_INSET_MAC_QINQ,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_arp,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4,	ICE_SW_INSET_MAC_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_SW_INSET_MAC_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_SW_INSET_MAC_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_SW_INSET_MAC_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_SW_INSET_MAC_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_SW_INSET_MAC_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	/* VXLAN/NVGRE: outer IPv4 dst plus tunnel-specific inner set */
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,	ICE_INSET_IPV4_DST,	ICE_SW_INSET_DIST_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_INSET_IPV4_DST,	ICE_SW_INSET_DIST_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_INSET_IPV4_DST,	ICE_SW_INSET_DIST_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,	ICE_INSET_IPV4_DST,	ICE_SW_INSET_DIST_NVGRE_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,	ICE_INSET_IPV4_DST,	ICE_SW_INSET_DIST_NVGRE_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,	ICE_INSET_IPV4_DST,	ICE_SW_INSET_DIST_NVGRE_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_pppoes,	ICE_SW_INSET_MAC_PPPOE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,	ICE_SW_INSET_MAC_PPPOE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,	ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,	ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,	ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,	ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,	ICE_SW_INSET_MAC_IPV4_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,	ICE_SW_INSET_MAC_IPV4_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,	ICE_SW_INSET_MAC_IPV6_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,	ICE_SW_INSET_MAC_IPV6_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,	ICE_SW_INSET_MAC_IPV4_AH,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,	ICE_SW_INSET_MAC_IPV6_AH,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,	ICE_SW_INSET_MAC_IPV4_L2TP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,	ICE_SW_INSET_MAC_IPV6_L2TP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4,	ICE_SW_INSET_MAC_QINQ_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_tcp,	ICE_SW_INSET_MAC_QINQ_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_udp,	ICE_SW_INSET_MAC_QINQ_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6,	ICE_SW_INSET_MAC_QINQ_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_tcp,	ICE_SW_INSET_MAC_QINQ_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_udp,	ICE_SW_INSET_MAC_QINQ_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes,	ICE_SW_INSET_MAC_PPPOE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	/* GTP-U: outer TEID (+QFI for extension header) and inner IP/L4 */
	{pattern_eth_ipv4_gtpu,	ICE_SW_INSET_MAC_IPV4_GTPU,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu,	ICE_SW_INSET_MAC_IPV6_GTPU,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
};
268
/*
 * Pattern table for the "permission" parser.  Identical to the distributor
 * list except for the VXLAN/NVGRE rows, which use no outer set and the
 * ICE_SW_INSET_PERM_TUNNEL_* inner sets.  Column meaning assumed to be
 * {pattern, outer input set, inner/tunnel input set, unused} - TODO confirm.
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm_list[] = {
	{pattern_ethertype,	ICE_SW_INSET_ETHER,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_vlan,	ICE_SW_INSET_MAC_VLAN,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_qinq,	ICE_SW_INSET_MAC_QINQ,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_arp,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4,	ICE_SW_INSET_MAC_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_SW_INSET_MAC_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_SW_INSET_MAC_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_SW_INSET_MAC_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_SW_INSET_MAC_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_SW_INSET_MAC_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	/* VXLAN/NVGRE: inner-only match via the PERM_TUNNEL sets */
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,	ICE_INSET_NONE,	ICE_SW_INSET_PERM_TUNNEL_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_INSET_NONE,	ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_INSET_NONE,	ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,	ICE_INSET_NONE,	ICE_SW_INSET_PERM_TUNNEL_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,	ICE_INSET_NONE,	ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,	ICE_INSET_NONE,	ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_pppoes,	ICE_SW_INSET_MAC_PPPOE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,	ICE_SW_INSET_MAC_PPPOE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,	ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,	ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,	ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,	ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,	ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,	ICE_SW_INSET_MAC_IPV4_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,	ICE_SW_INSET_MAC_IPV4_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,	ICE_SW_INSET_MAC_IPV6_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,	ICE_SW_INSET_MAC_IPV6_ESP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,	ICE_SW_INSET_MAC_IPV4_AH,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,	ICE_SW_INSET_MAC_IPV6_AH,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,	ICE_SW_INSET_MAC_IPV4_L2TP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,	ICE_SW_INSET_MAC_IPV6_L2TP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,	ICE_INSET_NONE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4,	ICE_SW_INSET_MAC_QINQ_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_tcp,	ICE_SW_INSET_MAC_QINQ_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_udp,	ICE_SW_INSET_MAC_QINQ_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6,	ICE_SW_INSET_MAC_QINQ_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_tcp,	ICE_SW_INSET_MAC_QINQ_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_udp,	ICE_SW_INSET_MAC_QINQ_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes,	ICE_SW_INSET_MAC_PPPOE,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6,	ICE_INSET_NONE,	ICE_INSET_NONE},
	/* GTP-U rows match the distributor list exactly */
	{pattern_eth_ipv4_gtpu,	ICE_SW_INSET_MAC_IPV4_GTPU,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu,	ICE_SW_INSET_MAC_IPV6_GTPU,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_udp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_tcp,	ICE_SW_INSET_MAC_GTPU_EH_OUTER,	ICE_SW_INSET_GTPU_IPV6_TCP,	ICE_INSET_NONE},
};
351
352 static int
353 ice_switch_create(struct ice_adapter *ad,
354                 struct rte_flow *flow,
355                 void *meta,
356                 struct rte_flow_error *error)
357 {
358         int ret = 0;
359         struct ice_pf *pf = &ad->pf;
360         struct ice_hw *hw = ICE_PF_TO_HW(pf);
361         struct ice_rule_query_data rule_added = {0};
362         struct ice_rule_query_data *filter_ptr;
363         struct ice_adv_lkup_elem *list =
364                 ((struct sw_meta *)meta)->list;
365         uint16_t lkups_cnt =
366                 ((struct sw_meta *)meta)->lkups_num;
367         struct ice_adv_rule_info *rule_info =
368                 &((struct sw_meta *)meta)->rule_info;
369
370         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
371                 rte_flow_error_set(error, EINVAL,
372                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
373                         "item number too large for rule");
374                 goto error;
375         }
376         if (!list) {
377                 rte_flow_error_set(error, EINVAL,
378                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
379                         "lookup list should not be NULL");
380                 goto error;
381         }
382         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
383         if (!ret) {
384                 filter_ptr = rte_zmalloc("ice_switch_filter",
385                         sizeof(struct ice_rule_query_data), 0);
386                 if (!filter_ptr) {
387                         rte_flow_error_set(error, EINVAL,
388                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
389                                    "No memory for ice_switch_filter");
390                         goto error;
391                 }
392                 flow->rule = filter_ptr;
393                 rte_memcpy(filter_ptr,
394                         &rule_added,
395                         sizeof(struct ice_rule_query_data));
396         } else {
397                 rte_flow_error_set(error, EINVAL,
398                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
399                         "switch filter create flow fail");
400                 goto error;
401         }
402
403         rte_free(list);
404         rte_free(meta);
405         return 0;
406
407 error:
408         rte_free(list);
409         rte_free(meta);
410
411         return -rte_errno;
412 }
413
414 static int
415 ice_switch_destroy(struct ice_adapter *ad,
416                 struct rte_flow *flow,
417                 struct rte_flow_error *error)
418 {
419         struct ice_hw *hw = &ad->hw;
420         int ret;
421         struct ice_rule_query_data *filter_ptr;
422
423         filter_ptr = (struct ice_rule_query_data *)
424                 flow->rule;
425
426         if (!filter_ptr) {
427                 rte_flow_error_set(error, EINVAL,
428                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
429                         "no such flow"
430                         " create by switch filter");
431                 return -rte_errno;
432         }
433
434         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
435         if (ret) {
436                 rte_flow_error_set(error, EINVAL,
437                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
438                         "fail to destroy switch filter rule");
439                 return -rte_errno;
440         }
441
442         rte_free(filter_ptr);
443         return ret;
444 }
445
446 static void
447 ice_switch_filter_rule_free(struct rte_flow *flow)
448 {
449         rte_free(flow->rule);
450 }
451
452 static bool
453 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
454                 struct rte_flow_error *error,
455                 struct ice_adv_lkup_elem *list,
456                 uint16_t *lkups_num,
457                 enum ice_sw_tunnel_type *tun_type,
458                 const struct ice_pattern_match_item *pattern_match_item)
459 {
460         const struct rte_flow_item *item = pattern;
461         enum rte_flow_item_type item_type;
462         const struct rte_flow_item_eth *eth_spec, *eth_mask;
463         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
464         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
465         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
466         const struct rte_flow_item_udp *udp_spec, *udp_mask;
467         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
468         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
469         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
470         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
471         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
472         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
473                                 *pppoe_proto_mask;
474         const struct rte_flow_item_esp *esp_spec, *esp_mask;
475         const struct rte_flow_item_ah *ah_spec, *ah_mask;
476         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
477         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
478         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
479         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
480         uint64_t outer_input_set = ICE_INSET_NONE;
481         uint64_t inner_input_set = ICE_INSET_NONE;
482         uint64_t *input = NULL;
483         uint16_t input_set_byte = 0;
484         bool pppoe_elem_valid = 0;
485         bool pppoe_patt_valid = 0;
486         bool pppoe_prot_valid = 0;
487         bool inner_vlan_valid = 0;
488         bool outer_vlan_valid = 0;
489         bool tunnel_valid = 0;
490         bool profile_rule = 0;
491         bool nvgre_valid = 0;
492         bool vxlan_valid = 0;
493         bool qinq_valid = 0;
494         bool ipv6_valid = 0;
495         bool ipv4_valid = 0;
496         bool udp_valid = 0;
497         bool tcp_valid = 0;
498         bool gtpu_valid = 0;
499         bool gtpu_psc_valid = 0;
500         bool inner_ipv4_valid = 0;
501         bool inner_ipv6_valid = 0;
502         bool inner_tcp_valid = 0;
503         bool inner_udp_valid = 0;
504         uint16_t j, k, t = 0;
505
506         if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
507             *tun_type == ICE_NON_TUN_QINQ)
508                 qinq_valid = 1;
509
510         for (item = pattern; item->type !=
511                         RTE_FLOW_ITEM_TYPE_END; item++) {
512                 if (item->last) {
513                         rte_flow_error_set(error, EINVAL,
514                                         RTE_FLOW_ERROR_TYPE_ITEM,
515                                         item,
516                                         "Not support range");
517                         return false;
518                 }
519                 item_type = item->type;
520
521                 switch (item_type) {
522                 case RTE_FLOW_ITEM_TYPE_ETH:
523                         eth_spec = item->spec;
524                         eth_mask = item->mask;
525                         if (eth_spec && eth_mask) {
526                                 const uint8_t *a = eth_mask->src.addr_bytes;
527                                 const uint8_t *b = eth_mask->dst.addr_bytes;
528                                 if (tunnel_valid)
529                                         input = &inner_input_set;
530                                 else
531                                         input = &outer_input_set;
532                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
533                                         if (a[j]) {
534                                                 *input |= ICE_INSET_SMAC;
535                                                 break;
536                                         }
537                                 }
538                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
539                                         if (b[j]) {
540                                                 *input |= ICE_INSET_DMAC;
541                                                 break;
542                                         }
543                                 }
544                                 if (eth_mask->type)
545                                         *input |= ICE_INSET_ETHERTYPE;
546                                 list[t].type = (tunnel_valid  == 0) ?
547                                         ICE_MAC_OFOS : ICE_MAC_IL;
548                                 struct ice_ether_hdr *h;
549                                 struct ice_ether_hdr *m;
550                                 uint16_t i = 0;
551                                 h = &list[t].h_u.eth_hdr;
552                                 m = &list[t].m_u.eth_hdr;
553                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
554                                         if (eth_mask->src.addr_bytes[j]) {
555                                                 h->src_addr[j] =
556                                                 eth_spec->src.addr_bytes[j];
557                                                 m->src_addr[j] =
558                                                 eth_mask->src.addr_bytes[j];
559                                                 i = 1;
560                                                 input_set_byte++;
561                                         }
562                                         if (eth_mask->dst.addr_bytes[j]) {
563                                                 h->dst_addr[j] =
564                                                 eth_spec->dst.addr_bytes[j];
565                                                 m->dst_addr[j] =
566                                                 eth_mask->dst.addr_bytes[j];
567                                                 i = 1;
568                                                 input_set_byte++;
569                                         }
570                                 }
571                                 if (i)
572                                         t++;
573                                 if (eth_mask->type) {
574                                         list[t].type = ICE_ETYPE_OL;
575                                         list[t].h_u.ethertype.ethtype_id =
576                                                 eth_spec->type;
577                                         list[t].m_u.ethertype.ethtype_id =
578                                                 eth_mask->type;
579                                         input_set_byte += 2;
580                                         t++;
581                                 }
582                         }
583                         break;
584
585                 case RTE_FLOW_ITEM_TYPE_IPV4:
586                         ipv4_spec = item->spec;
587                         ipv4_mask = item->mask;
588                         if (tunnel_valid) {
589                                 inner_ipv4_valid = 1;
590                                 input = &inner_input_set;
591                         } else {
592                                 ipv4_valid = 1;
593                                 input = &outer_input_set;
594                         }
595
596                         if (ipv4_spec && ipv4_mask) {
597                                 /* Check IPv4 mask and update input set */
598                                 if (ipv4_mask->hdr.version_ihl ||
599                                         ipv4_mask->hdr.total_length ||
600                                         ipv4_mask->hdr.packet_id ||
601                                         ipv4_mask->hdr.hdr_checksum) {
602                                         rte_flow_error_set(error, EINVAL,
603                                                    RTE_FLOW_ERROR_TYPE_ITEM,
604                                                    item,
605                                                    "Invalid IPv4 mask.");
606                                         return false;
607                                 }
608
609                                 if (ipv4_mask->hdr.src_addr)
610                                         *input |= ICE_INSET_IPV4_SRC;
611                                 if (ipv4_mask->hdr.dst_addr)
612                                         *input |= ICE_INSET_IPV4_DST;
613                                 if (ipv4_mask->hdr.time_to_live)
614                                         *input |= ICE_INSET_IPV4_TTL;
615                                 if (ipv4_mask->hdr.next_proto_id)
616                                         *input |= ICE_INSET_IPV4_PROTO;
617                                 if (ipv4_mask->hdr.type_of_service)
618                                         *input |= ICE_INSET_IPV4_TOS;
619
620                                 list[t].type = (tunnel_valid  == 0) ?
621                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
622                                 if (ipv4_mask->hdr.src_addr) {
623                                         list[t].h_u.ipv4_hdr.src_addr =
624                                                 ipv4_spec->hdr.src_addr;
625                                         list[t].m_u.ipv4_hdr.src_addr =
626                                                 ipv4_mask->hdr.src_addr;
627                                         input_set_byte += 2;
628                                 }
629                                 if (ipv4_mask->hdr.dst_addr) {
630                                         list[t].h_u.ipv4_hdr.dst_addr =
631                                                 ipv4_spec->hdr.dst_addr;
632                                         list[t].m_u.ipv4_hdr.dst_addr =
633                                                 ipv4_mask->hdr.dst_addr;
634                                         input_set_byte += 2;
635                                 }
636                                 if (ipv4_mask->hdr.time_to_live) {
637                                         list[t].h_u.ipv4_hdr.time_to_live =
638                                                 ipv4_spec->hdr.time_to_live;
639                                         list[t].m_u.ipv4_hdr.time_to_live =
640                                                 ipv4_mask->hdr.time_to_live;
641                                         input_set_byte++;
642                                 }
643                                 if (ipv4_mask->hdr.next_proto_id) {
644                                         list[t].h_u.ipv4_hdr.protocol =
645                                                 ipv4_spec->hdr.next_proto_id;
646                                         list[t].m_u.ipv4_hdr.protocol =
647                                                 ipv4_mask->hdr.next_proto_id;
648                                         input_set_byte++;
649                                 }
650                                 if ((ipv4_spec->hdr.next_proto_id &
651                                         ipv4_mask->hdr.next_proto_id) ==
652                                         ICE_IPV4_PROTO_NVGRE)
653                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
654                                 if (ipv4_mask->hdr.type_of_service) {
655                                         list[t].h_u.ipv4_hdr.tos =
656                                                 ipv4_spec->hdr.type_of_service;
657                                         list[t].m_u.ipv4_hdr.tos =
658                                                 ipv4_mask->hdr.type_of_service;
659                                         input_set_byte++;
660                                 }
661                                 t++;
662                         }
663                         break;
664
665                 case RTE_FLOW_ITEM_TYPE_IPV6:
666                         ipv6_spec = item->spec;
667                         ipv6_mask = item->mask;
668                         if (tunnel_valid) {
669                                 inner_ipv6_valid = 1;
670                                 input = &inner_input_set;
671                         } else {
672                                 ipv6_valid = 1;
673                                 input = &outer_input_set;
674                         }
675
676                         if (ipv6_spec && ipv6_mask) {
677                                 if (ipv6_mask->hdr.payload_len) {
678                                         rte_flow_error_set(error, EINVAL,
679                                            RTE_FLOW_ERROR_TYPE_ITEM,
680                                            item,
681                                            "Invalid IPv6 mask");
682                                         return false;
683                                 }
684
685                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
686                                         if (ipv6_mask->hdr.src_addr[j]) {
687                                                 *input |= ICE_INSET_IPV6_SRC;
688                                                 break;
689                                         }
690                                 }
691                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
692                                         if (ipv6_mask->hdr.dst_addr[j]) {
693                                                 *input |= ICE_INSET_IPV6_DST;
694                                                 break;
695                                         }
696                                 }
697                                 if (ipv6_mask->hdr.proto)
698                                         *input |= ICE_INSET_IPV6_NEXT_HDR;
699                                 if (ipv6_mask->hdr.hop_limits)
700                                         *input |= ICE_INSET_IPV6_HOP_LIMIT;
701                                 if (ipv6_mask->hdr.vtc_flow &
702                                     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
703                                         *input |= ICE_INSET_IPV6_TC;
704
705                                 list[t].type = (tunnel_valid  == 0) ?
706                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
707                                 struct ice_ipv6_hdr *f;
708                                 struct ice_ipv6_hdr *s;
709                                 f = &list[t].h_u.ipv6_hdr;
710                                 s = &list[t].m_u.ipv6_hdr;
711                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
712                                         if (ipv6_mask->hdr.src_addr[j]) {
713                                                 f->src_addr[j] =
714                                                 ipv6_spec->hdr.src_addr[j];
715                                                 s->src_addr[j] =
716                                                 ipv6_mask->hdr.src_addr[j];
717                                                 input_set_byte++;
718                                         }
719                                         if (ipv6_mask->hdr.dst_addr[j]) {
720                                                 f->dst_addr[j] =
721                                                 ipv6_spec->hdr.dst_addr[j];
722                                                 s->dst_addr[j] =
723                                                 ipv6_mask->hdr.dst_addr[j];
724                                                 input_set_byte++;
725                                         }
726                                 }
727                                 if (ipv6_mask->hdr.proto) {
728                                         f->next_hdr =
729                                                 ipv6_spec->hdr.proto;
730                                         s->next_hdr =
731                                                 ipv6_mask->hdr.proto;
732                                         input_set_byte++;
733                                 }
734                                 if (ipv6_mask->hdr.hop_limits) {
735                                         f->hop_limit =
736                                                 ipv6_spec->hdr.hop_limits;
737                                         s->hop_limit =
738                                                 ipv6_mask->hdr.hop_limits;
739                                         input_set_byte++;
740                                 }
741                                 if (ipv6_mask->hdr.vtc_flow &
742                                                 rte_cpu_to_be_32
743                                                 (RTE_IPV6_HDR_TC_MASK)) {
744                                         struct ice_le_ver_tc_flow vtf;
745                                         vtf.u.fld.version = 0;
746                                         vtf.u.fld.flow_label = 0;
747                                         vtf.u.fld.tc = (rte_be_to_cpu_32
748                                                 (ipv6_spec->hdr.vtc_flow) &
749                                                         RTE_IPV6_HDR_TC_MASK) >>
750                                                         RTE_IPV6_HDR_TC_SHIFT;
751                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
752                                         vtf.u.fld.tc = (rte_be_to_cpu_32
753                                                 (ipv6_mask->hdr.vtc_flow) &
754                                                         RTE_IPV6_HDR_TC_MASK) >>
755                                                         RTE_IPV6_HDR_TC_SHIFT;
756                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
757                                         input_set_byte += 4;
758                                 }
759                                 t++;
760                         }
761                         break;
762
763                 case RTE_FLOW_ITEM_TYPE_UDP:
764                         udp_spec = item->spec;
765                         udp_mask = item->mask;
766                         if (tunnel_valid) {
767                                 inner_udp_valid = 1;
768                                 input = &inner_input_set;
769                         } else {
770                                 udp_valid = 1;
771                                 input = &outer_input_set;
772                         }
773
774                         if (udp_spec && udp_mask) {
775                                 /* Check UDP mask and update input set*/
776                                 if (udp_mask->hdr.dgram_len ||
777                                     udp_mask->hdr.dgram_cksum) {
778                                         rte_flow_error_set(error, EINVAL,
779                                                    RTE_FLOW_ERROR_TYPE_ITEM,
780                                                    item,
781                                                    "Invalid UDP mask");
782                                         return false;
783                                 }
784
785                                 if (udp_mask->hdr.src_port)
786                                         *input |= ICE_INSET_UDP_SRC_PORT;
787                                 if (udp_mask->hdr.dst_port)
788                                         *input |= ICE_INSET_UDP_DST_PORT;
789
790                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
791                                                 tunnel_valid == 0)
792                                         list[t].type = ICE_UDP_OF;
793                                 else
794                                         list[t].type = ICE_UDP_ILOS;
795                                 if (udp_mask->hdr.src_port) {
796                                         list[t].h_u.l4_hdr.src_port =
797                                                 udp_spec->hdr.src_port;
798                                         list[t].m_u.l4_hdr.src_port =
799                                                 udp_mask->hdr.src_port;
800                                         input_set_byte += 2;
801                                 }
802                                 if (udp_mask->hdr.dst_port) {
803                                         list[t].h_u.l4_hdr.dst_port =
804                                                 udp_spec->hdr.dst_port;
805                                         list[t].m_u.l4_hdr.dst_port =
806                                                 udp_mask->hdr.dst_port;
807                                         input_set_byte += 2;
808                                 }
809                                 t++;
810                         }
811                         break;
812
813                 case RTE_FLOW_ITEM_TYPE_TCP:
814                         tcp_spec = item->spec;
815                         tcp_mask = item->mask;
816                         if (tunnel_valid) {
817                                 inner_tcp_valid = 1;
818                                 input = &inner_input_set;
819                         } else {
820                                 tcp_valid = 1;
821                                 input = &outer_input_set;
822                         }
823
824                         if (tcp_spec && tcp_mask) {
825                                 /* Check TCP mask and update input set */
826                                 if (tcp_mask->hdr.sent_seq ||
827                                         tcp_mask->hdr.recv_ack ||
828                                         tcp_mask->hdr.data_off ||
829                                         tcp_mask->hdr.tcp_flags ||
830                                         tcp_mask->hdr.rx_win ||
831                                         tcp_mask->hdr.cksum ||
832                                         tcp_mask->hdr.tcp_urp) {
833                                         rte_flow_error_set(error, EINVAL,
834                                            RTE_FLOW_ERROR_TYPE_ITEM,
835                                            item,
836                                            "Invalid TCP mask");
837                                         return false;
838                                 }
839
840                                 if (tcp_mask->hdr.src_port)
841                                         *input |= ICE_INSET_TCP_SRC_PORT;
842                                 if (tcp_mask->hdr.dst_port)
843                                         *input |= ICE_INSET_TCP_DST_PORT;
844                                 list[t].type = ICE_TCP_IL;
845                                 if (tcp_mask->hdr.src_port) {
846                                         list[t].h_u.l4_hdr.src_port =
847                                                 tcp_spec->hdr.src_port;
848                                         list[t].m_u.l4_hdr.src_port =
849                                                 tcp_mask->hdr.src_port;
850                                         input_set_byte += 2;
851                                 }
852                                 if (tcp_mask->hdr.dst_port) {
853                                         list[t].h_u.l4_hdr.dst_port =
854                                                 tcp_spec->hdr.dst_port;
855                                         list[t].m_u.l4_hdr.dst_port =
856                                                 tcp_mask->hdr.dst_port;
857                                         input_set_byte += 2;
858                                 }
859                                 t++;
860                         }
861                         break;
862
863                 case RTE_FLOW_ITEM_TYPE_SCTP:
864                         sctp_spec = item->spec;
865                         sctp_mask = item->mask;
866                         if (sctp_spec && sctp_mask) {
867                                 /* Check SCTP mask and update input set */
868                                 if (sctp_mask->hdr.cksum) {
869                                         rte_flow_error_set(error, EINVAL,
870                                            RTE_FLOW_ERROR_TYPE_ITEM,
871                                            item,
872                                            "Invalid SCTP mask");
873                                         return false;
874                                 }
875                                 if (tunnel_valid)
876                                         input = &inner_input_set;
877                                 else
878                                         input = &outer_input_set;
879
880                                 if (sctp_mask->hdr.src_port)
881                                         *input |= ICE_INSET_SCTP_SRC_PORT;
882                                 if (sctp_mask->hdr.dst_port)
883                                         *input |= ICE_INSET_SCTP_DST_PORT;
884
885                                 list[t].type = ICE_SCTP_IL;
886                                 if (sctp_mask->hdr.src_port) {
887                                         list[t].h_u.sctp_hdr.src_port =
888                                                 sctp_spec->hdr.src_port;
889                                         list[t].m_u.sctp_hdr.src_port =
890                                                 sctp_mask->hdr.src_port;
891                                         input_set_byte += 2;
892                                 }
893                                 if (sctp_mask->hdr.dst_port) {
894                                         list[t].h_u.sctp_hdr.dst_port =
895                                                 sctp_spec->hdr.dst_port;
896                                         list[t].m_u.sctp_hdr.dst_port =
897                                                 sctp_mask->hdr.dst_port;
898                                         input_set_byte += 2;
899                                 }
900                                 t++;
901                         }
902                         break;
903
904                 case RTE_FLOW_ITEM_TYPE_VXLAN:
905                         vxlan_spec = item->spec;
906                         vxlan_mask = item->mask;
907                         /* Check if VXLAN item is used to describe protocol.
908                          * If yes, both spec and mask should be NULL.
909                          * If no, both spec and mask shouldn't be NULL.
910                          */
911                         if ((!vxlan_spec && vxlan_mask) ||
912                             (vxlan_spec && !vxlan_mask)) {
913                                 rte_flow_error_set(error, EINVAL,
914                                            RTE_FLOW_ERROR_TYPE_ITEM,
915                                            item,
916                                            "Invalid VXLAN item");
917                                 return false;
918                         }
919                         vxlan_valid = 1;
920                         tunnel_valid = 1;
921                         input = &inner_input_set;
922                         if (vxlan_spec && vxlan_mask) {
923                                 list[t].type = ICE_VXLAN;
924                                 if (vxlan_mask->vni[0] ||
925                                         vxlan_mask->vni[1] ||
926                                         vxlan_mask->vni[2]) {
927                                         list[t].h_u.tnl_hdr.vni =
928                                                 (vxlan_spec->vni[2] << 16) |
929                                                 (vxlan_spec->vni[1] << 8) |
930                                                 vxlan_spec->vni[0];
931                                         list[t].m_u.tnl_hdr.vni =
932                                                 (vxlan_mask->vni[2] << 16) |
933                                                 (vxlan_mask->vni[1] << 8) |
934                                                 vxlan_mask->vni[0];
935                                         *input |= ICE_INSET_VXLAN_VNI;
936                                         input_set_byte += 2;
937                                 }
938                                 t++;
939                         }
940                         break;
941
942                 case RTE_FLOW_ITEM_TYPE_NVGRE:
943                         nvgre_spec = item->spec;
944                         nvgre_mask = item->mask;
945                         /* Check if NVGRE item is used to describe protocol.
946                          * If yes, both spec and mask should be NULL.
947                          * If no, both spec and mask shouldn't be NULL.
948                          */
949                         if ((!nvgre_spec && nvgre_mask) ||
950                             (nvgre_spec && !nvgre_mask)) {
951                                 rte_flow_error_set(error, EINVAL,
952                                            RTE_FLOW_ERROR_TYPE_ITEM,
953                                            item,
954                                            "Invalid NVGRE item");
955                                 return false;
956                         }
957                         nvgre_valid = 1;
958                         tunnel_valid = 1;
959                         input = &inner_input_set;
960                         if (nvgre_spec && nvgre_mask) {
961                                 list[t].type = ICE_NVGRE;
962                                 if (nvgre_mask->tni[0] ||
963                                         nvgre_mask->tni[1] ||
964                                         nvgre_mask->tni[2]) {
965                                         list[t].h_u.nvgre_hdr.tni_flow =
966                                                 (nvgre_spec->tni[2] << 16) |
967                                                 (nvgre_spec->tni[1] << 8) |
968                                                 nvgre_spec->tni[0];
969                                         list[t].m_u.nvgre_hdr.tni_flow =
970                                                 (nvgre_mask->tni[2] << 16) |
971                                                 (nvgre_mask->tni[1] << 8) |
972                                                 nvgre_mask->tni[0];
973                                         *input |= ICE_INSET_NVGRE_TNI;
974                                         input_set_byte += 2;
975                                 }
976                                 t++;
977                         }
978                         break;
979
980                 case RTE_FLOW_ITEM_TYPE_VLAN:
981                         vlan_spec = item->spec;
982                         vlan_mask = item->mask;
983                         /* Check if VLAN item is used to describe protocol.
984                          * If yes, both spec and mask should be NULL.
985                          * If no, both spec and mask shouldn't be NULL.
986                          */
987                         if ((!vlan_spec && vlan_mask) ||
988                             (vlan_spec && !vlan_mask)) {
989                                 rte_flow_error_set(error, EINVAL,
990                                            RTE_FLOW_ERROR_TYPE_ITEM,
991                                            item,
992                                            "Invalid VLAN item");
993                                 return false;
994                         }
995
996                         if (qinq_valid) {
997                                 if (!outer_vlan_valid)
998                                         outer_vlan_valid = 1;
999                                 else
1000                                         inner_vlan_valid = 1;
1001                         }
1002
1003                         input = &outer_input_set;
1004
1005                         if (vlan_spec && vlan_mask) {
1006                                 if (qinq_valid) {
1007                                         if (!inner_vlan_valid) {
1008                                                 list[t].type = ICE_VLAN_EX;
1009                                                 *input |=
1010                                                         ICE_INSET_VLAN_OUTER;
1011                                         } else {
1012                                                 list[t].type = ICE_VLAN_IN;
1013                                                 *input |=
1014                                                         ICE_INSET_VLAN_INNER;
1015                                         }
1016                                 } else {
1017                                         list[t].type = ICE_VLAN_OFOS;
1018                                         *input |= ICE_INSET_VLAN_INNER;
1019                                 }
1020
1021                                 if (vlan_mask->tci) {
1022                                         list[t].h_u.vlan_hdr.vlan =
1023                                                 vlan_spec->tci;
1024                                         list[t].m_u.vlan_hdr.vlan =
1025                                                 vlan_mask->tci;
1026                                         input_set_byte += 2;
1027                                 }
1028                                 if (vlan_mask->inner_type) {
1029                                         rte_flow_error_set(error, EINVAL,
1030                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1031                                                 item,
1032                                                 "Invalid VLAN input set.");
1033                                         return false;
1034                                 }
1035                                 t++;
1036                         }
1037                         break;
1038
1039                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1040                 case RTE_FLOW_ITEM_TYPE_PPPOES:
1041                         pppoe_spec = item->spec;
1042                         pppoe_mask = item->mask;
1043                         /* Check if PPPoE item is used to describe protocol.
1044                          * If yes, both spec and mask should be NULL.
1045                          * If no, both spec and mask shouldn't be NULL.
1046                          */
1047                         if ((!pppoe_spec && pppoe_mask) ||
1048                                 (pppoe_spec && !pppoe_mask)) {
1049                                 rte_flow_error_set(error, EINVAL,
1050                                         RTE_FLOW_ERROR_TYPE_ITEM,
1051                                         item,
1052                                         "Invalid pppoe item");
1053                                 return false;
1054                         }
1055                         pppoe_patt_valid = 1;
1056                         input = &outer_input_set;
1057                         if (pppoe_spec && pppoe_mask) {
1058                                 /* Check pppoe mask and update input set */
1059                                 if (pppoe_mask->length ||
1060                                         pppoe_mask->code ||
1061                                         pppoe_mask->version_type) {
1062                                         rte_flow_error_set(error, EINVAL,
1063                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1064                                                 item,
1065                                                 "Invalid pppoe mask");
1066                                         return false;
1067                                 }
1068                                 list[t].type = ICE_PPPOE;
1069                                 if (pppoe_mask->session_id) {
1070                                         list[t].h_u.pppoe_hdr.session_id =
1071                                                 pppoe_spec->session_id;
1072                                         list[t].m_u.pppoe_hdr.session_id =
1073                                                 pppoe_mask->session_id;
1074                                         *input |= ICE_INSET_PPPOE_SESSION;
1075                                         input_set_byte += 2;
1076                                 }
1077                                 t++;
1078                                 pppoe_elem_valid = 1;
1079                         }
1080                         break;
1081
1082                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1083                         pppoe_proto_spec = item->spec;
1084                         pppoe_proto_mask = item->mask;
1085                         /* Check if PPPoE optional proto_id item
1086                          * is used to describe protocol.
1087                          * If yes, both spec and mask should be NULL.
1088                          * If no, both spec and mask shouldn't be NULL.
1089                          */
1090                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1091                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1092                                 rte_flow_error_set(error, EINVAL,
1093                                         RTE_FLOW_ERROR_TYPE_ITEM,
1094                                         item,
1095                                         "Invalid pppoe proto item");
1096                                 return false;
1097                         }
1098                         input = &outer_input_set;
1099                         if (pppoe_proto_spec && pppoe_proto_mask) {
1100                                 if (pppoe_elem_valid)
1101                                         t--;
1102                                 list[t].type = ICE_PPPOE;
1103                                 if (pppoe_proto_mask->proto_id) {
1104                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1105                                                 pppoe_proto_spec->proto_id;
1106                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1107                                                 pppoe_proto_mask->proto_id;
1108                                         *input |= ICE_INSET_PPPOE_PROTO;
1109                                         input_set_byte += 2;
1110                                         pppoe_prot_valid = 1;
1111                                 }
1112                                 if ((pppoe_proto_mask->proto_id &
1113                                         pppoe_proto_spec->proto_id) !=
1114                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1115                                         (pppoe_proto_mask->proto_id &
1116                                         pppoe_proto_spec->proto_id) !=
1117                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1118                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1119                                 else
1120                                         *tun_type = ICE_SW_TUN_PPPOE;
1121                                 t++;
1122                         }
1123
1124                         break;
1125
1126                 case RTE_FLOW_ITEM_TYPE_ESP:
1127                         esp_spec = item->spec;
1128                         esp_mask = item->mask;
1129                         if ((esp_spec && !esp_mask) ||
1130                                 (!esp_spec && esp_mask)) {
1131                                 rte_flow_error_set(error, EINVAL,
1132                                            RTE_FLOW_ERROR_TYPE_ITEM,
1133                                            item,
1134                                            "Invalid esp item");
1135                                 return false;
1136                         }
1137                         /* Check esp mask and update input set */
1138                         if (esp_mask && esp_mask->hdr.seq) {
1139                                 rte_flow_error_set(error, EINVAL,
1140                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1141                                                 item,
1142                                                 "Invalid esp mask");
1143                                 return false;
1144                         }
1145                         input = &outer_input_set;
1146                         if (!esp_spec && !esp_mask && !(*input)) {
1147                                 profile_rule = 1;
1148                                 if (ipv6_valid && udp_valid)
1149                                         *tun_type =
1150                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1151                                 else if (ipv6_valid)
1152                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1153                                 else if (ipv4_valid)
1154                                         goto inset_check;
1155                         } else if (esp_spec && esp_mask &&
1156                                                 esp_mask->hdr.spi){
1157                                 if (udp_valid)
1158                                         list[t].type = ICE_NAT_T;
1159                                 else
1160                                         list[t].type = ICE_ESP;
1161                                 list[t].h_u.esp_hdr.spi =
1162                                         esp_spec->hdr.spi;
1163                                 list[t].m_u.esp_hdr.spi =
1164                                         esp_mask->hdr.spi;
1165                                 *input |= ICE_INSET_ESP_SPI;
1166                                 input_set_byte += 4;
1167                                 t++;
1168                         }
1169
1170                         if (!profile_rule) {
1171                                 if (ipv6_valid && udp_valid)
1172                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1173                                 else if (ipv4_valid && udp_valid)
1174                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1175                                 else if (ipv6_valid)
1176                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1177                                 else if (ipv4_valid)
1178                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1179                         }
1180                         break;
1181
1182                 case RTE_FLOW_ITEM_TYPE_AH:
1183                         ah_spec = item->spec;
1184                         ah_mask = item->mask;
1185                         if ((ah_spec && !ah_mask) ||
1186                                 (!ah_spec && ah_mask)) {
1187                                 rte_flow_error_set(error, EINVAL,
1188                                            RTE_FLOW_ERROR_TYPE_ITEM,
1189                                            item,
1190                                            "Invalid ah item");
1191                                 return false;
1192                         }
1193                         /* Check ah mask and update input set */
1194                         if (ah_mask &&
1195                                 (ah_mask->next_hdr ||
1196                                 ah_mask->payload_len ||
1197                                 ah_mask->seq_num ||
1198                                 ah_mask->reserved)) {
1199                                 rte_flow_error_set(error, EINVAL,
1200                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1201                                                 item,
1202                                                 "Invalid ah mask");
1203                                 return false;
1204                         }
1205
1206                         input = &outer_input_set;
1207                         if (!ah_spec && !ah_mask && !(*input)) {
1208                                 profile_rule = 1;
1209                                 if (ipv6_valid && udp_valid)
1210                                         *tun_type =
1211                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1212                                 else if (ipv6_valid)
1213                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1214                                 else if (ipv4_valid)
1215                                         goto inset_check;
1216                         } else if (ah_spec && ah_mask &&
1217                                                 ah_mask->spi){
1218                                 list[t].type = ICE_AH;
1219                                 list[t].h_u.ah_hdr.spi =
1220                                         ah_spec->spi;
1221                                 list[t].m_u.ah_hdr.spi =
1222                                         ah_mask->spi;
1223                                 *input |= ICE_INSET_AH_SPI;
1224                                 input_set_byte += 4;
1225                                 t++;
1226                         }
1227
1228                         if (!profile_rule) {
1229                                 if (udp_valid)
1230                                         goto inset_check;
1231                                 else if (ipv6_valid)
1232                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1233                                 else if (ipv4_valid)
1234                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1235                         }
1236                         break;
1237
1238                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1239                         l2tp_spec = item->spec;
1240                         l2tp_mask = item->mask;
1241                         if ((l2tp_spec && !l2tp_mask) ||
1242                                 (!l2tp_spec && l2tp_mask)) {
1243                                 rte_flow_error_set(error, EINVAL,
1244                                            RTE_FLOW_ERROR_TYPE_ITEM,
1245                                            item,
1246                                            "Invalid l2tp item");
1247                                 return false;
1248                         }
1249
1250                         input = &outer_input_set;
1251                         if (!l2tp_spec && !l2tp_mask && !(*input)) {
1252                                 if (ipv6_valid)
1253                                         *tun_type =
1254                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1255                                 else if (ipv4_valid)
1256                                         goto inset_check;
1257                         } else if (l2tp_spec && l2tp_mask &&
1258                                                 l2tp_mask->session_id){
1259                                 list[t].type = ICE_L2TPV3;
1260                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1261                                         l2tp_spec->session_id;
1262                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1263                                         l2tp_mask->session_id;
1264                                 *input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1265                                 input_set_byte += 4;
1266                                 t++;
1267                         }
1268
1269                         if (!profile_rule) {
1270                                 if (ipv6_valid)
1271                                         *tun_type =
1272                                         ICE_SW_TUN_IPV6_L2TPV3;
1273                                 else if (ipv4_valid)
1274                                         *tun_type =
1275                                         ICE_SW_TUN_IPV4_L2TPV3;
1276                         }
1277                         break;
1278
1279                 case RTE_FLOW_ITEM_TYPE_PFCP:
1280                         pfcp_spec = item->spec;
1281                         pfcp_mask = item->mask;
1282                         /* Check if PFCP item is used to describe protocol.
1283                          * If yes, both spec and mask should be NULL.
1284                          * If no, both spec and mask shouldn't be NULL.
1285                          */
1286                         if ((!pfcp_spec && pfcp_mask) ||
1287                             (pfcp_spec && !pfcp_mask)) {
1288                                 rte_flow_error_set(error, EINVAL,
1289                                            RTE_FLOW_ERROR_TYPE_ITEM,
1290                                            item,
1291                                            "Invalid PFCP item");
1292                                 return false;
1293                         }
1294                         if (pfcp_spec && pfcp_mask) {
1295                                 /* Check pfcp mask and update input set */
1296                                 if (pfcp_mask->msg_type ||
1297                                         pfcp_mask->msg_len ||
1298                                         pfcp_mask->seid) {
1299                                         rte_flow_error_set(error, EINVAL,
1300                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1301                                                 item,
1302                                                 "Invalid pfcp mask");
1303                                         return false;
1304                                 }
1305                                 if (pfcp_mask->s_field &&
1306                                         pfcp_spec->s_field == 0x01 &&
1307                                         ipv6_valid)
1308                                         *tun_type =
1309                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1310                                 else if (pfcp_mask->s_field &&
1311                                         pfcp_spec->s_field == 0x01)
1312                                         *tun_type =
1313                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1314                                 else if (pfcp_mask->s_field &&
1315                                         !pfcp_spec->s_field &&
1316                                         ipv6_valid)
1317                                         *tun_type =
1318                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1319                                 else if (pfcp_mask->s_field &&
1320                                         !pfcp_spec->s_field)
1321                                         *tun_type =
1322                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1323                                 else
1324                                         return false;
1325                         }
1326                         break;
1327
1328                 case RTE_FLOW_ITEM_TYPE_GTPU:
1329                         gtp_spec = item->spec;
1330                         gtp_mask = item->mask;
1331                         if (gtp_spec && !gtp_mask) {
1332                                 rte_flow_error_set(error, EINVAL,
1333                                         RTE_FLOW_ERROR_TYPE_ITEM,
1334                                         item,
1335                                         "Invalid GTP item");
1336                                 return false;
1337                         }
1338                         if (gtp_spec && gtp_mask) {
1339                                 if (gtp_mask->v_pt_rsv_flags ||
1340                                     gtp_mask->msg_type ||
1341                                     gtp_mask->msg_len) {
1342                                         rte_flow_error_set(error, EINVAL,
1343                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1344                                                 item,
1345                                                 "Invalid GTP mask");
1346                                         return false;
1347                                 }
1348                                 input = &outer_input_set;
1349                                 if (gtp_mask->teid)
1350                                         *input |= ICE_INSET_GTPU_TEID;
1351                                 list[t].type = ICE_GTP;
1352                                 list[t].h_u.gtp_hdr.teid =
1353                                         gtp_spec->teid;
1354                                 list[t].m_u.gtp_hdr.teid =
1355                                         gtp_mask->teid;
1356                                 input_set_byte += 4;
1357                                 t++;
1358                         }
1359                         tunnel_valid = 1;
1360                         gtpu_valid = 1;
1361                         break;
1362
1363                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1364                         gtp_psc_spec = item->spec;
1365                         gtp_psc_mask = item->mask;
1366                         if (gtp_psc_spec && !gtp_psc_mask) {
1367                                 rte_flow_error_set(error, EINVAL,
1368                                         RTE_FLOW_ERROR_TYPE_ITEM,
1369                                         item,
1370                                         "Invalid GTPU_EH item");
1371                                 return false;
1372                         }
1373                         if (gtp_psc_spec && gtp_psc_mask) {
1374                                 if (gtp_psc_mask->hdr.type) {
1375                                         rte_flow_error_set(error, EINVAL,
1376                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1377                                                 item,
1378                                                 "Invalid GTPU_EH mask");
1379                                         return false;
1380                                 }
1381                                 input = &outer_input_set;
1382                                 if (gtp_psc_mask->hdr.qfi)
1383                                         *input |= ICE_INSET_GTPU_QFI;
1384                                 list[t].type = ICE_GTP;
1385                                 list[t].h_u.gtp_hdr.qfi =
1386                                         gtp_psc_spec->hdr.qfi;
1387                                 list[t].m_u.gtp_hdr.qfi =
1388                                         gtp_psc_mask->hdr.qfi;
1389                                 input_set_byte += 1;
1390                                 t++;
1391                         }
1392                         gtpu_psc_valid = 1;
1393                         break;
1394
1395                 case RTE_FLOW_ITEM_TYPE_VOID:
1396                         break;
1397
1398                 default:
1399                         rte_flow_error_set(error, EINVAL,
1400                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1401                                    "Invalid pattern item.");
1402                         return false;
1403                 }
1404         }
1405
1406         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1407             inner_vlan_valid && outer_vlan_valid)
1408                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1409         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1410                  inner_vlan_valid && outer_vlan_valid)
1411                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1412         else if (*tun_type == ICE_NON_TUN &&
1413                  inner_vlan_valid && outer_vlan_valid)
1414                 *tun_type = ICE_NON_TUN_QINQ;
1415         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1416                  inner_vlan_valid && outer_vlan_valid)
1417                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1418
1419         if (pppoe_patt_valid && !pppoe_prot_valid) {
1420                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1421                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1422                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1423                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1424                 else if (inner_vlan_valid && outer_vlan_valid)
1425                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1426                 else if (ipv6_valid && udp_valid)
1427                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1428                 else if (ipv6_valid && tcp_valid)
1429                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1430                 else if (ipv4_valid && udp_valid)
1431                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1432                 else if (ipv4_valid && tcp_valid)
1433                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1434                 else if (ipv6_valid)
1435                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1436                 else if (ipv4_valid)
1437                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1438                 else
1439                         *tun_type = ICE_SW_TUN_PPPOE;
1440         }
1441
1442         if (gtpu_valid && gtpu_psc_valid) {
1443                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1444                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1445                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1446                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1447                 else if (ipv4_valid && inner_ipv4_valid)
1448                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1449                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1450                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1451                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1452                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1453                 else if (ipv4_valid && inner_ipv6_valid)
1454                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1455                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1456                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1457                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1458                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1459                 else if (ipv6_valid && inner_ipv4_valid)
1460                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1461                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1462                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1463                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1464                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1465                 else if (ipv6_valid && inner_ipv6_valid)
1466                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1467                 else if (ipv4_valid)
1468                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1469                 else if (ipv6_valid)
1470                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1471         } else if (gtpu_valid) {
1472                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1473                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1474                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1475                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1476                 else if (ipv4_valid && inner_ipv4_valid)
1477                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1478                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1479                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1480                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1481                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1482                 else if (ipv4_valid && inner_ipv6_valid)
1483                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1484                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1485                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1486                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1487                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1488                 else if (ipv6_valid && inner_ipv4_valid)
1489                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1490                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1491                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1492                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1493                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1494                 else if (ipv6_valid && inner_ipv6_valid)
1495                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1496                 else if (ipv4_valid)
1497                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1498                 else if (ipv6_valid)
1499                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1500         }
1501
1502         if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1503             *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1504                 for (k = 0; k < t; k++) {
1505                         if (list[k].type == ICE_GTP)
1506                                 list[k].type = ICE_GTP_NO_PAY;
1507                 }
1508         }
1509
1510         if (*tun_type == ICE_NON_TUN) {
1511                 if (vxlan_valid)
1512                         *tun_type = ICE_SW_TUN_VXLAN;
1513                 else if (nvgre_valid)
1514                         *tun_type = ICE_SW_TUN_NVGRE;
1515                 else if (ipv4_valid && tcp_valid)
1516                         *tun_type = ICE_SW_IPV4_TCP;
1517                 else if (ipv4_valid && udp_valid)
1518                         *tun_type = ICE_SW_IPV4_UDP;
1519                 else if (ipv6_valid && tcp_valid)
1520                         *tun_type = ICE_SW_IPV6_TCP;
1521                 else if (ipv6_valid && udp_valid)
1522                         *tun_type = ICE_SW_IPV6_UDP;
1523         }
1524
1525         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1526                 rte_flow_error_set(error, EINVAL,
1527                         RTE_FLOW_ERROR_TYPE_ITEM,
1528                         item,
1529                         "too much input set");
1530                 return false;
1531         }
1532
1533         *lkups_num = t;
1534
1535 inset_check:
1536         if ((!outer_input_set && !inner_input_set &&
1537             !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1538             ~pattern_match_item->input_set_mask_o) ||
1539             (inner_input_set & ~pattern_match_item->input_set_mask_i))
1540                 return false;
1541
1542         return true;
1543 }
1544
1545 static int
1546 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1547                             const struct rte_flow_action *actions,
1548                             uint32_t priority,
1549                             struct rte_flow_error *error,
1550                             struct ice_adv_rule_info *rule_info)
1551 {
1552         const struct rte_flow_action_vf *act_vf;
1553         const struct rte_flow_action *action;
1554         enum rte_flow_action_type action_type;
1555
1556         for (action = actions; action->type !=
1557                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1558                 action_type = action->type;
1559                 switch (action_type) {
1560                 case RTE_FLOW_ACTION_TYPE_VF:
1561                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1562                         act_vf = action->conf;
1563
1564                         if (act_vf->id >= ad->real_hw.num_vfs &&
1565                                 !act_vf->original) {
1566                                 rte_flow_error_set(error,
1567                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1568                                         actions,
1569                                         "Invalid vf id");
1570                                 return -rte_errno;
1571                         }
1572
1573                         if (act_vf->original)
1574                                 rule_info->sw_act.vsi_handle =
1575                                         ad->real_hw.avf.bus.func;
1576                         else
1577                                 rule_info->sw_act.vsi_handle = act_vf->id;
1578                         break;
1579
1580                 case RTE_FLOW_ACTION_TYPE_DROP:
1581                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1582                         break;
1583
1584                 default:
1585                         rte_flow_error_set(error,
1586                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1587                                            actions,
1588                                            "Invalid action type");
1589                         return -rte_errno;
1590                 }
1591         }
1592
1593         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1594         rule_info->sw_act.flag = ICE_FLTR_RX;
1595         rule_info->rx = 1;
1596         /* 0 denotes lowest priority of recipe and highest priority
1597          * of rte_flow. Change rte_flow priority into recipe priority.
1598          */
1599         rule_info->priority = ICE_SW_PRI_BASE - priority;
1600
1601         return 0;
1602 }
1603
/*
 * Translate the rte_flow action list into a switch rule action for the
 * PF path. Supported actions: RSS (interpreted as a queue group), QUEUE,
 * DROP and VOID. Exactly one effective action is expected (enforced by
 * ice_switch_check_action before this is called).
 *
 * Returns 0 on success, -rte_errno on failure (error is populated).
 */
static int
ice_switch_parse_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		uint32_t priority,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	/* Hardware only supports power-of-two queue group sizes. */
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		 2, 4, 8, 16, 32, 64, 128};

	/* Queue ids in the rule are absolute: PF offset + VSI offset. */
	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* The RSS action is used to express a queue group:
			 * it must contain >= 2 queues, a supported group
			 * size, stay within the device's Rx queue range and
			 * be a contiguous run of queue indexes.
			 */
			act_qgrop = action->conf;
			if (act_qgrop->queue_num <= 1)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_QGRP;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
					break;
			}
			if (i == MAX_QGRP_NUM_TYPE)
				goto error;
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev_data->nb_rx_queues)
				goto error1;
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
					goto error2;
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			if (act_q->index >= dev_data->nb_rx_queues)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
				ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			goto error;
		}
	}

	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	/* 0 denotes lowest priority of recipe and highest priority
	 * of rte_flow. Change rte_flow priority into recipe priority.
	 */
	rule_info->priority = ICE_SW_PRI_BASE - priority;

	return 0;

error:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid action type or queue number");
	return -rte_errno;

error1:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid queue region indexes");
	return -rte_errno;

error2:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Discontinuous queue region");
	return -rte_errno;
}
1706
1707 static int
1708 ice_switch_check_action(const struct rte_flow_action *actions,
1709                             struct rte_flow_error *error)
1710 {
1711         const struct rte_flow_action *action;
1712         enum rte_flow_action_type action_type;
1713         uint16_t actions_num = 0;
1714
1715         for (action = actions; action->type !=
1716                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1717                 action_type = action->type;
1718                 switch (action_type) {
1719                 case RTE_FLOW_ACTION_TYPE_VF:
1720                 case RTE_FLOW_ACTION_TYPE_RSS:
1721                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1722                 case RTE_FLOW_ACTION_TYPE_DROP:
1723                         actions_num++;
1724                         break;
1725                 case RTE_FLOW_ACTION_TYPE_VOID:
1726                         continue;
1727                 default:
1728                         rte_flow_error_set(error,
1729                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1730                                            actions,
1731                                            "Invalid action type");
1732                         return -rte_errno;
1733                 }
1734         }
1735
1736         if (actions_num != 1) {
1737                 rte_flow_error_set(error,
1738                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1739                                    actions,
1740                                    "Invalid action number");
1741                 return -rte_errno;
1742         }
1743
1744         return 0;
1745 }
1746
/*
 * Engine entry point for parsing one rte_flow (pattern + actions).
 *
 * Scans the pattern to size the lookup list and pre-classify the tunnel
 * type, matches the pattern against the supported-pattern array, parses
 * the input sets, then parses the actions (DCF or PF variant). On
 * success and when @meta is non-NULL, ownership of the lookup list and
 * meta struct transfers to the caller via *meta; otherwise they are
 * freed here.
 *
 * Returns 0 on success, -rte_errno on failure (error is populated).
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* First pass over the pattern: count items (to size the lookup
	 * list allocation) and pre-classify the tunnel type from ETH
	 * ethertype masks and the number of VLAN layers.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* A fully-masked ethertype means the rule must hit
			 * both tunneled and non-tunneled traffic.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN layers turn the rule into a QinQ variant. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Second pass: fill the lookup list and refine the tunnel type. */
	if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
				   &tun_type, pattern_match_item)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF and PF use different action translations. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
						  error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, priority, error,
					      &rule_info);

	if (ret)
		goto error;

	if (meta) {
		/* Hand the parsed rule over to the caller. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		/* Validation-only call: nothing to keep. */
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1871
1872 static int
1873 ice_switch_query(struct ice_adapter *ad __rte_unused,
1874                 struct rte_flow *flow __rte_unused,
1875                 struct rte_flow_query_count *count __rte_unused,
1876                 struct rte_flow_error *error)
1877 {
1878         rte_flow_error_set(error, EINVAL,
1879                 RTE_FLOW_ERROR_TYPE_HANDLE,
1880                 NULL,
1881                 "count action not supported by switch filter");
1882
1883         return -rte_errno;
1884 }
1885
/*
 * Redirect an existing switch rule to a new VSI number (e.g. after the
 * target VSI is recreated). Finds the rule in the recipe's filter list,
 * duplicates its lookups, removes the old hardware rule, updates the
 * VSI context, then replays the rule with the saved lookups.
 *
 * Returns 0 on success (or when the rule does not target rd->vsi_handle),
 * negative errno on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Rule targets a different VSI: nothing to do. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Walk the recipe's filter list looking for this rule id with a
	 * matching VSI fate (direct VSI forward or VSI-list forward).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Copy the lookups: the list entry (and its lookups)
			 * is destroyed when the old rule is removed below.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			/* Replay as a direct VSI forward to the new VSI. */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching entry found in the filter list. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1967
1968 static int
1969 ice_switch_init(struct ice_adapter *ad)
1970 {
1971         int ret = 0;
1972         struct ice_flow_parser *dist_parser;
1973         struct ice_flow_parser *perm_parser;
1974
1975         if (ad->devargs.pipe_mode_support) {
1976                 perm_parser = &ice_switch_perm_parser;
1977                 ret = ice_register_parser(perm_parser, ad);
1978         } else {
1979                 dist_parser = &ice_switch_dist_parser;
1980                 ret = ice_register_parser(dist_parser, ad);
1981         }
1982         return ret;
1983 }
1984
1985 static void
1986 ice_switch_uninit(struct ice_adapter *ad)
1987 {
1988         struct ice_flow_parser *dist_parser;
1989         struct ice_flow_parser *perm_parser;
1990
1991         if (ad->devargs.pipe_mode_support) {
1992                 perm_parser = &ice_switch_perm_parser;
1993                 ice_unregister_parser(perm_parser, ad);
1994         } else {
1995                 dist_parser = &ice_switch_dist_parser;
1996                 ice_unregister_parser(dist_parser, ad);
1997         }
1998 }
1999
/* Switch filter engine ops exposed to the generic flow framework. */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
2011
/* Parser used in default (distributor-stage) mode. */
static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
2020
/* Parser used in pipe-mode (permission-stage) operation. */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
2029
2030 RTE_INIT(ice_sw_engine_init)
2031 {
2032         struct ice_flow_engine *engine = &ice_switch_engine;
2033         ice_register_flow_engine(engine);
2034 }