net/ice/base: init metainit table for parser
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/* Limits and protocol identifiers used by the switch filter. */
#define MAX_QGRP_NUM_TYPE       7    /* max queue-group count supported per type -- TODO confirm against user of this macro (not visible in this chunk) */
#define MAX_INPUT_SET_BYTE      32   /* max bytes an input set may occupy -- presumably checked during parse; verify against caller */
#define ICE_PPP_IPV4_PROTO      0x0021  /* PPP DLL protocol number: IPv4 payload */
#define ICE_PPP_IPV6_PROTO      0x0057  /* PPP DLL protocol number: IPv6 payload */
#define ICE_IPV4_PROTO_NVGRE    0x002F  /* IP protocol number 47 (GRE), used for NVGRE */
34
/* Input-set bit masks for the switch filter.
 * Each ICE_SW_INSET_* value is the set of packet fields a rule with the
 * corresponding rte_flow pattern is allowed to match on; the pattern
 * tables below pair one mask with each supported pattern.
 */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
#define ICE_SW_INSET_MAC_QINQ  ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
	ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Note: the plain IPV4_TCP/UDP sets drop IPV4_PROTO relative to
 * ICE_SW_INSET_MAC_IPV4 -- the L4 type already fixes the protocol.
 */
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel (NVGRE/VXLAN) input sets used by the "dist" pattern list. */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
/* Tunnel input sets used by the "perm" pattern list (no DMAC/TNI). */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_IPV4_TOS)
/* PPPoE input sets; the _PROTO variant additionally matches the PPP
 * protocol ID carried in the PPPoE session payload.
 */
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* IPsec (ESP/AH), L2TPv3-over-IP and PFCP input sets. */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* GTP-U input sets: *_GTPU match on the outer headers plus TEID,
 * *_OUTER/_EH_OUTER cover the tunnel headers (QFI only with the
 * extension header), and plain GTPU_IPVx[_UDP/_TCP] cover the inner
 * payload fields.
 */
#define ICE_SW_INSET_MAC_IPV4_GTPU ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_IPV6_GTPU ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_OUTER ( \
	ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
	ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
#define ICE_SW_INSET_GTPU_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_GTPU_IPV6 ( \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
#define ICE_SW_INSET_GTPU_IPV4_UDP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV4_TCP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_UDP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_TCP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)
175
/* Parsed-rule metadata passed (as the opaque "meta" pointer) from the
 * pattern-parsing stage to ice_switch_create().
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;     /* array of lookup elements */
	uint16_t lkups_num;                 /* number of entries in @list */
	struct ice_adv_rule_info rule_info; /* rule attributes for ice_switch */
};
181
/* Forward declarations: the "dist" and "perm" parsers are defined later
 * in this file and select between the two pattern lists below.
 */
static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;
184
/* Patterns accepted by the distributor ("dist") switch parser.
 * Entry layout appears to be {pattern, outer input set, inner input
 * set, meta} -- the tunnel (VXLAN/NVGRE/GTP-U) rows carry outer-header
 * fields in column 2 and inner-payload fields in column 3; verify
 * against struct ice_pattern_match_item.  Table order may affect
 * pattern selection, so entries must not be reordered.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
	{pattern_ethertype,				ICE_SW_INSET_ETHER,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_ethertype_vlan,			ICE_SW_INSET_MAC_VLAN,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_ethertype_qinq,			ICE_SW_INSET_MAC_QINQ,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_arp,				ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4,				ICE_SW_INSET_MAC_IPV4,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,				ICE_SW_INSET_MAC_IPV4_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,				ICE_SW_INSET_MAC_IPV4_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6,				ICE_SW_INSET_MAC_IPV6,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,				ICE_SW_INSET_MAC_IPV6_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,				ICE_SW_INSET_MAC_IPV6_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,		ICE_INSET_IPV4_DST,			ICE_SW_INSET_DIST_VXLAN_IPV4,		ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_INSET_IPV4_DST,			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_INSET_IPV4_DST,			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,		ICE_INSET_IPV4_DST,			ICE_SW_INSET_DIST_NVGRE_IPV4,		ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,		ICE_INSET_IPV4_DST,			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,		ICE_INSET_IPV4_DST,			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_pppoes,				ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,		ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,		ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,				ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,			ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,				ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,			ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,				ICE_SW_INSET_MAC_IPV4_AH,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,				ICE_SW_INSET_MAC_IPV6_AH,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,			ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,				ICE_SW_INSET_MAC_IPV4_L2TP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,				ICE_SW_INSET_MAC_IPV6_L2TP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4,				ICE_SW_INSET_MAC_QINQ_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_tcp,			ICE_SW_INSET_MAC_QINQ_IPV4_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_udp,			ICE_SW_INSET_MAC_QINQ_IPV4_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6,				ICE_SW_INSET_MAC_QINQ_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_tcp,			ICE_SW_INSET_MAC_QINQ_IPV6_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_udp,			ICE_SW_INSET_MAC_QINQ_IPV6_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu,				ICE_SW_INSET_MAC_IPV4_GTPU,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu,				ICE_SW_INSET_MAC_IPV6_GTPU,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
};
267
/* Patterns accepted by the permission ("perm") switch parser.  Same
 * entry layout as ice_switch_pattern_dist_list; it differs only in the
 * tunnel rows, which use the ICE_SW_INSET_PERM_TUNNEL_* inner sets and
 * no outer fields.  Table order may affect pattern selection, so
 * entries must not be reordered.
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm_list[] = {
	{pattern_ethertype,				ICE_SW_INSET_ETHER,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_ethertype_vlan,			ICE_SW_INSET_MAC_VLAN,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_ethertype_qinq,			ICE_SW_INSET_MAC_QINQ,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_arp,				ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4,				ICE_SW_INSET_MAC_IPV4,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,				ICE_SW_INSET_MAC_IPV4_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,				ICE_SW_INSET_MAC_IPV4_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6,				ICE_SW_INSET_MAC_IPV6,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,				ICE_SW_INSET_MAC_IPV6_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,				ICE_SW_INSET_MAC_IPV6_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,		ICE_INSET_NONE,				ICE_SW_INSET_PERM_TUNNEL_IPV4,		ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_INSET_NONE,				ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_INSET_NONE,				ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,		ICE_INSET_NONE,				ICE_SW_INSET_PERM_TUNNEL_IPV4,		ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,		ICE_INSET_NONE,				ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,		ICE_INSET_NONE,				ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_pppoes,				ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,		ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,		ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,				ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,			ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,				ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,			ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,				ICE_SW_INSET_MAC_IPV4_AH,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,				ICE_SW_INSET_MAC_IPV6_AH,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,			ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,				ICE_SW_INSET_MAC_IPV4_L2TP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,				ICE_SW_INSET_MAC_IPV6_L2TP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4,				ICE_SW_INSET_MAC_QINQ_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_tcp,			ICE_SW_INSET_MAC_QINQ_IPV4_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4_udp,			ICE_SW_INSET_MAC_QINQ_IPV4_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6,				ICE_SW_INSET_MAC_QINQ_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_tcp,			ICE_SW_INSET_MAC_QINQ_IPV6_TCP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6_udp,			ICE_SW_INSET_MAC_QINQ_IPV6_UDP,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu,				ICE_SW_INSET_MAC_IPV4_GTPU,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu,				ICE_SW_INSET_MAC_IPV6_GTPU,		ICE_INSET_NONE,				ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv4_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV4_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6,			ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6,			ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6,			ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_udp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_UDP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh_ipv6_tcp,		ICE_SW_INSET_MAC_GTPU_EH_OUTER,		ICE_SW_INSET_GTPU_IPV6_TCP,		ICE_INSET_NONE},
};
350
351 static int
352 ice_switch_create(struct ice_adapter *ad,
353                 struct rte_flow *flow,
354                 void *meta,
355                 struct rte_flow_error *error)
356 {
357         int ret = 0;
358         struct ice_pf *pf = &ad->pf;
359         struct ice_hw *hw = ICE_PF_TO_HW(pf);
360         struct ice_rule_query_data rule_added = {0};
361         struct ice_rule_query_data *filter_ptr;
362         struct ice_adv_lkup_elem *list =
363                 ((struct sw_meta *)meta)->list;
364         uint16_t lkups_cnt =
365                 ((struct sw_meta *)meta)->lkups_num;
366         struct ice_adv_rule_info *rule_info =
367                 &((struct sw_meta *)meta)->rule_info;
368
369         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
370                 rte_flow_error_set(error, EINVAL,
371                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
372                         "item number too large for rule");
373                 goto error;
374         }
375         if (!list) {
376                 rte_flow_error_set(error, EINVAL,
377                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
378                         "lookup list should not be NULL");
379                 goto error;
380         }
381         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
382         if (!ret) {
383                 filter_ptr = rte_zmalloc("ice_switch_filter",
384                         sizeof(struct ice_rule_query_data), 0);
385                 if (!filter_ptr) {
386                         rte_flow_error_set(error, EINVAL,
387                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
388                                    "No memory for ice_switch_filter");
389                         goto error;
390                 }
391                 flow->rule = filter_ptr;
392                 rte_memcpy(filter_ptr,
393                         &rule_added,
394                         sizeof(struct ice_rule_query_data));
395         } else {
396                 rte_flow_error_set(error, EINVAL,
397                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
398                         "switch filter create flow fail");
399                 goto error;
400         }
401
402         rte_free(list);
403         rte_free(meta);
404         return 0;
405
406 error:
407         rte_free(list);
408         rte_free(meta);
409
410         return -rte_errno;
411 }
412
413 static int
414 ice_switch_destroy(struct ice_adapter *ad,
415                 struct rte_flow *flow,
416                 struct rte_flow_error *error)
417 {
418         struct ice_hw *hw = &ad->hw;
419         int ret;
420         struct ice_rule_query_data *filter_ptr;
421
422         filter_ptr = (struct ice_rule_query_data *)
423                 flow->rule;
424
425         if (!filter_ptr) {
426                 rte_flow_error_set(error, EINVAL,
427                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
428                         "no such flow"
429                         " create by switch filter");
430                 return -rte_errno;
431         }
432
433         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
434         if (ret) {
435                 rte_flow_error_set(error, EINVAL,
436                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
437                         "fail to destroy switch filter rule");
438                 return -rte_errno;
439         }
440
441         rte_free(filter_ptr);
442         return ret;
443 }
444
445 static void
446 ice_switch_filter_rule_free(struct rte_flow *flow)
447 {
448         rte_free(flow->rule);
449 }
450
451 static bool
452 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
453                 struct rte_flow_error *error,
454                 struct ice_adv_lkup_elem *list,
455                 uint16_t *lkups_num,
456                 enum ice_sw_tunnel_type *tun_type,
457                 const struct ice_pattern_match_item *pattern_match_item)
458 {
459         const struct rte_flow_item *item = pattern;
460         enum rte_flow_item_type item_type;
461         const struct rte_flow_item_eth *eth_spec, *eth_mask;
462         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
463         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
464         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
465         const struct rte_flow_item_udp *udp_spec, *udp_mask;
466         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
467         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
468         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
469         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
470         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
471         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
472                                 *pppoe_proto_mask;
473         const struct rte_flow_item_esp *esp_spec, *esp_mask;
474         const struct rte_flow_item_ah *ah_spec, *ah_mask;
475         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
476         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
477         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
478         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
479         uint64_t outer_input_set = ICE_INSET_NONE;
480         uint64_t inner_input_set = ICE_INSET_NONE;
481         uint64_t *input = NULL;
482         uint16_t input_set_byte = 0;
483         bool pppoe_elem_valid = 0;
484         bool pppoe_patt_valid = 0;
485         bool pppoe_prot_valid = 0;
486         bool inner_vlan_valid = 0;
487         bool outer_vlan_valid = 0;
488         bool tunnel_valid = 0;
489         bool profile_rule = 0;
490         bool nvgre_valid = 0;
491         bool vxlan_valid = 0;
492         bool qinq_valid = 0;
493         bool ipv6_valid = 0;
494         bool ipv4_valid = 0;
495         bool udp_valid = 0;
496         bool tcp_valid = 0;
497         bool gtpu_valid = 0;
498         bool gtpu_psc_valid = 0;
499         bool inner_ipv4_valid = 0;
500         bool inner_ipv6_valid = 0;
501         bool inner_tcp_valid = 0;
502         bool inner_udp_valid = 0;
503         uint16_t j, k, t = 0;
504
505         if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
506             *tun_type == ICE_NON_TUN_QINQ)
507                 qinq_valid = 1;
508
509         for (item = pattern; item->type !=
510                         RTE_FLOW_ITEM_TYPE_END; item++) {
511                 if (item->last) {
512                         rte_flow_error_set(error, EINVAL,
513                                         RTE_FLOW_ERROR_TYPE_ITEM,
514                                         item,
515                                         "Not support range");
516                         return false;
517                 }
518                 item_type = item->type;
519
520                 switch (item_type) {
521                 case RTE_FLOW_ITEM_TYPE_ETH:
522                         eth_spec = item->spec;
523                         eth_mask = item->mask;
524                         if (eth_spec && eth_mask) {
525                                 const uint8_t *a = eth_mask->src.addr_bytes;
526                                 const uint8_t *b = eth_mask->dst.addr_bytes;
527                                 if (tunnel_valid)
528                                         input = &inner_input_set;
529                                 else
530                                         input = &outer_input_set;
531                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
532                                         if (a[j]) {
533                                                 *input |= ICE_INSET_SMAC;
534                                                 break;
535                                         }
536                                 }
537                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
538                                         if (b[j]) {
539                                                 *input |= ICE_INSET_DMAC;
540                                                 break;
541                                         }
542                                 }
543                                 if (eth_mask->type)
544                                         *input |= ICE_INSET_ETHERTYPE;
545                                 list[t].type = (tunnel_valid  == 0) ?
546                                         ICE_MAC_OFOS : ICE_MAC_IL;
547                                 struct ice_ether_hdr *h;
548                                 struct ice_ether_hdr *m;
549                                 uint16_t i = 0;
550                                 h = &list[t].h_u.eth_hdr;
551                                 m = &list[t].m_u.eth_hdr;
552                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
553                                         if (eth_mask->src.addr_bytes[j]) {
554                                                 h->src_addr[j] =
555                                                 eth_spec->src.addr_bytes[j];
556                                                 m->src_addr[j] =
557                                                 eth_mask->src.addr_bytes[j];
558                                                 i = 1;
559                                                 input_set_byte++;
560                                         }
561                                         if (eth_mask->dst.addr_bytes[j]) {
562                                                 h->dst_addr[j] =
563                                                 eth_spec->dst.addr_bytes[j];
564                                                 m->dst_addr[j] =
565                                                 eth_mask->dst.addr_bytes[j];
566                                                 i = 1;
567                                                 input_set_byte++;
568                                         }
569                                 }
570                                 if (i)
571                                         t++;
572                                 if (eth_mask->type) {
573                                         list[t].type = ICE_ETYPE_OL;
574                                         list[t].h_u.ethertype.ethtype_id =
575                                                 eth_spec->type;
576                                         list[t].m_u.ethertype.ethtype_id =
577                                                 eth_mask->type;
578                                         input_set_byte += 2;
579                                         t++;
580                                 }
581                         }
582                         break;
583
584                 case RTE_FLOW_ITEM_TYPE_IPV4:
585                         ipv4_spec = item->spec;
586                         ipv4_mask = item->mask;
587                         if (tunnel_valid) {
588                                 inner_ipv4_valid = 1;
589                                 input = &inner_input_set;
590                         } else {
591                                 ipv4_valid = 1;
592                                 input = &outer_input_set;
593                         }
594
595                         if (ipv4_spec && ipv4_mask) {
596                                 /* Check IPv4 mask and update input set */
597                                 if (ipv4_mask->hdr.version_ihl ||
598                                         ipv4_mask->hdr.total_length ||
599                                         ipv4_mask->hdr.packet_id ||
600                                         ipv4_mask->hdr.hdr_checksum) {
601                                         rte_flow_error_set(error, EINVAL,
602                                                    RTE_FLOW_ERROR_TYPE_ITEM,
603                                                    item,
604                                                    "Invalid IPv4 mask.");
605                                         return false;
606                                 }
607
608                                 if (ipv4_mask->hdr.src_addr)
609                                         *input |= ICE_INSET_IPV4_SRC;
610                                 if (ipv4_mask->hdr.dst_addr)
611                                         *input |= ICE_INSET_IPV4_DST;
612                                 if (ipv4_mask->hdr.time_to_live)
613                                         *input |= ICE_INSET_IPV4_TTL;
614                                 if (ipv4_mask->hdr.next_proto_id)
615                                         *input |= ICE_INSET_IPV4_PROTO;
616                                 if (ipv4_mask->hdr.type_of_service)
617                                         *input |= ICE_INSET_IPV4_TOS;
618
619                                 list[t].type = (tunnel_valid  == 0) ?
620                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
621                                 if (ipv4_mask->hdr.src_addr) {
622                                         list[t].h_u.ipv4_hdr.src_addr =
623                                                 ipv4_spec->hdr.src_addr;
624                                         list[t].m_u.ipv4_hdr.src_addr =
625                                                 ipv4_mask->hdr.src_addr;
626                                         input_set_byte += 2;
627                                 }
628                                 if (ipv4_mask->hdr.dst_addr) {
629                                         list[t].h_u.ipv4_hdr.dst_addr =
630                                                 ipv4_spec->hdr.dst_addr;
631                                         list[t].m_u.ipv4_hdr.dst_addr =
632                                                 ipv4_mask->hdr.dst_addr;
633                                         input_set_byte += 2;
634                                 }
635                                 if (ipv4_mask->hdr.time_to_live) {
636                                         list[t].h_u.ipv4_hdr.time_to_live =
637                                                 ipv4_spec->hdr.time_to_live;
638                                         list[t].m_u.ipv4_hdr.time_to_live =
639                                                 ipv4_mask->hdr.time_to_live;
640                                         input_set_byte++;
641                                 }
642                                 if (ipv4_mask->hdr.next_proto_id) {
643                                         list[t].h_u.ipv4_hdr.protocol =
644                                                 ipv4_spec->hdr.next_proto_id;
645                                         list[t].m_u.ipv4_hdr.protocol =
646                                                 ipv4_mask->hdr.next_proto_id;
647                                         input_set_byte++;
648                                 }
649                                 if ((ipv4_spec->hdr.next_proto_id &
650                                         ipv4_mask->hdr.next_proto_id) ==
651                                         ICE_IPV4_PROTO_NVGRE)
652                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
653                                 if (ipv4_mask->hdr.type_of_service) {
654                                         list[t].h_u.ipv4_hdr.tos =
655                                                 ipv4_spec->hdr.type_of_service;
656                                         list[t].m_u.ipv4_hdr.tos =
657                                                 ipv4_mask->hdr.type_of_service;
658                                         input_set_byte++;
659                                 }
660                                 t++;
661                         }
662                         break;
663
664                 case RTE_FLOW_ITEM_TYPE_IPV6:
665                         ipv6_spec = item->spec;
666                         ipv6_mask = item->mask;
667                         if (tunnel_valid) {
668                                 inner_ipv6_valid = 1;
669                                 input = &inner_input_set;
670                         } else {
671                                 ipv6_valid = 1;
672                                 input = &outer_input_set;
673                         }
674
675                         if (ipv6_spec && ipv6_mask) {
676                                 if (ipv6_mask->hdr.payload_len) {
677                                         rte_flow_error_set(error, EINVAL,
678                                            RTE_FLOW_ERROR_TYPE_ITEM,
679                                            item,
680                                            "Invalid IPv6 mask");
681                                         return false;
682                                 }
683
684                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
685                                         if (ipv6_mask->hdr.src_addr[j]) {
686                                                 *input |= ICE_INSET_IPV6_SRC;
687                                                 break;
688                                         }
689                                 }
690                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
691                                         if (ipv6_mask->hdr.dst_addr[j]) {
692                                                 *input |= ICE_INSET_IPV6_DST;
693                                                 break;
694                                         }
695                                 }
696                                 if (ipv6_mask->hdr.proto)
697                                         *input |= ICE_INSET_IPV6_NEXT_HDR;
698                                 if (ipv6_mask->hdr.hop_limits)
699                                         *input |= ICE_INSET_IPV6_HOP_LIMIT;
700                                 if (ipv6_mask->hdr.vtc_flow &
701                                     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
702                                         *input |= ICE_INSET_IPV6_TC;
703
704                                 list[t].type = (tunnel_valid  == 0) ?
705                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
706                                 struct ice_ipv6_hdr *f;
707                                 struct ice_ipv6_hdr *s;
708                                 f = &list[t].h_u.ipv6_hdr;
709                                 s = &list[t].m_u.ipv6_hdr;
710                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
711                                         if (ipv6_mask->hdr.src_addr[j]) {
712                                                 f->src_addr[j] =
713                                                 ipv6_spec->hdr.src_addr[j];
714                                                 s->src_addr[j] =
715                                                 ipv6_mask->hdr.src_addr[j];
716                                                 input_set_byte++;
717                                         }
718                                         if (ipv6_mask->hdr.dst_addr[j]) {
719                                                 f->dst_addr[j] =
720                                                 ipv6_spec->hdr.dst_addr[j];
721                                                 s->dst_addr[j] =
722                                                 ipv6_mask->hdr.dst_addr[j];
723                                                 input_set_byte++;
724                                         }
725                                 }
726                                 if (ipv6_mask->hdr.proto) {
727                                         f->next_hdr =
728                                                 ipv6_spec->hdr.proto;
729                                         s->next_hdr =
730                                                 ipv6_mask->hdr.proto;
731                                         input_set_byte++;
732                                 }
733                                 if (ipv6_mask->hdr.hop_limits) {
734                                         f->hop_limit =
735                                                 ipv6_spec->hdr.hop_limits;
736                                         s->hop_limit =
737                                                 ipv6_mask->hdr.hop_limits;
738                                         input_set_byte++;
739                                 }
740                                 if (ipv6_mask->hdr.vtc_flow &
741                                                 rte_cpu_to_be_32
742                                                 (RTE_IPV6_HDR_TC_MASK)) {
743                                         struct ice_le_ver_tc_flow vtf;
744                                         vtf.u.fld.version = 0;
745                                         vtf.u.fld.flow_label = 0;
746                                         vtf.u.fld.tc = (rte_be_to_cpu_32
747                                                 (ipv6_spec->hdr.vtc_flow) &
748                                                         RTE_IPV6_HDR_TC_MASK) >>
749                                                         RTE_IPV6_HDR_TC_SHIFT;
750                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
751                                         vtf.u.fld.tc = (rte_be_to_cpu_32
752                                                 (ipv6_mask->hdr.vtc_flow) &
753                                                         RTE_IPV6_HDR_TC_MASK) >>
754                                                         RTE_IPV6_HDR_TC_SHIFT;
755                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
756                                         input_set_byte += 4;
757                                 }
758                                 t++;
759                         }
760                         break;
761
762                 case RTE_FLOW_ITEM_TYPE_UDP:
763                         udp_spec = item->spec;
764                         udp_mask = item->mask;
765                         if (tunnel_valid) {
766                                 inner_udp_valid = 1;
767                                 input = &inner_input_set;
768                         } else {
769                                 udp_valid = 1;
770                                 input = &outer_input_set;
771                         }
772
773                         if (udp_spec && udp_mask) {
774                                 /* Check UDP mask and update input set*/
775                                 if (udp_mask->hdr.dgram_len ||
776                                     udp_mask->hdr.dgram_cksum) {
777                                         rte_flow_error_set(error, EINVAL,
778                                                    RTE_FLOW_ERROR_TYPE_ITEM,
779                                                    item,
780                                                    "Invalid UDP mask");
781                                         return false;
782                                 }
783
784                                 if (udp_mask->hdr.src_port)
785                                         *input |= ICE_INSET_UDP_SRC_PORT;
786                                 if (udp_mask->hdr.dst_port)
787                                         *input |= ICE_INSET_UDP_DST_PORT;
788
789                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
790                                                 tunnel_valid == 0)
791                                         list[t].type = ICE_UDP_OF;
792                                 else
793                                         list[t].type = ICE_UDP_ILOS;
794                                 if (udp_mask->hdr.src_port) {
795                                         list[t].h_u.l4_hdr.src_port =
796                                                 udp_spec->hdr.src_port;
797                                         list[t].m_u.l4_hdr.src_port =
798                                                 udp_mask->hdr.src_port;
799                                         input_set_byte += 2;
800                                 }
801                                 if (udp_mask->hdr.dst_port) {
802                                         list[t].h_u.l4_hdr.dst_port =
803                                                 udp_spec->hdr.dst_port;
804                                         list[t].m_u.l4_hdr.dst_port =
805                                                 udp_mask->hdr.dst_port;
806                                         input_set_byte += 2;
807                                 }
808                                 t++;
809                         }
810                         break;
811
812                 case RTE_FLOW_ITEM_TYPE_TCP:
813                         tcp_spec = item->spec;
814                         tcp_mask = item->mask;
815                         if (tunnel_valid) {
816                                 inner_tcp_valid = 1;
817                                 input = &inner_input_set;
818                         } else {
819                                 tcp_valid = 1;
820                                 input = &outer_input_set;
821                         }
822
823                         if (tcp_spec && tcp_mask) {
824                                 /* Check TCP mask and update input set */
825                                 if (tcp_mask->hdr.sent_seq ||
826                                         tcp_mask->hdr.recv_ack ||
827                                         tcp_mask->hdr.data_off ||
828                                         tcp_mask->hdr.tcp_flags ||
829                                         tcp_mask->hdr.rx_win ||
830                                         tcp_mask->hdr.cksum ||
831                                         tcp_mask->hdr.tcp_urp) {
832                                         rte_flow_error_set(error, EINVAL,
833                                            RTE_FLOW_ERROR_TYPE_ITEM,
834                                            item,
835                                            "Invalid TCP mask");
836                                         return false;
837                                 }
838
839                                 if (tcp_mask->hdr.src_port)
840                                         *input |= ICE_INSET_TCP_SRC_PORT;
841                                 if (tcp_mask->hdr.dst_port)
842                                         *input |= ICE_INSET_TCP_DST_PORT;
843                                 list[t].type = ICE_TCP_IL;
844                                 if (tcp_mask->hdr.src_port) {
845                                         list[t].h_u.l4_hdr.src_port =
846                                                 tcp_spec->hdr.src_port;
847                                         list[t].m_u.l4_hdr.src_port =
848                                                 tcp_mask->hdr.src_port;
849                                         input_set_byte += 2;
850                                 }
851                                 if (tcp_mask->hdr.dst_port) {
852                                         list[t].h_u.l4_hdr.dst_port =
853                                                 tcp_spec->hdr.dst_port;
854                                         list[t].m_u.l4_hdr.dst_port =
855                                                 tcp_mask->hdr.dst_port;
856                                         input_set_byte += 2;
857                                 }
858                                 t++;
859                         }
860                         break;
861
862                 case RTE_FLOW_ITEM_TYPE_SCTP:
863                         sctp_spec = item->spec;
864                         sctp_mask = item->mask;
865                         if (sctp_spec && sctp_mask) {
866                                 /* Check SCTP mask and update input set */
867                                 if (sctp_mask->hdr.cksum) {
868                                         rte_flow_error_set(error, EINVAL,
869                                            RTE_FLOW_ERROR_TYPE_ITEM,
870                                            item,
871                                            "Invalid SCTP mask");
872                                         return false;
873                                 }
874                                 if (tunnel_valid)
875                                         input = &inner_input_set;
876                                 else
877                                         input = &outer_input_set;
878
879                                 if (sctp_mask->hdr.src_port)
880                                         *input |= ICE_INSET_SCTP_SRC_PORT;
881                                 if (sctp_mask->hdr.dst_port)
882                                         *input |= ICE_INSET_SCTP_DST_PORT;
883
884                                 list[t].type = ICE_SCTP_IL;
885                                 if (sctp_mask->hdr.src_port) {
886                                         list[t].h_u.sctp_hdr.src_port =
887                                                 sctp_spec->hdr.src_port;
888                                         list[t].m_u.sctp_hdr.src_port =
889                                                 sctp_mask->hdr.src_port;
890                                         input_set_byte += 2;
891                                 }
892                                 if (sctp_mask->hdr.dst_port) {
893                                         list[t].h_u.sctp_hdr.dst_port =
894                                                 sctp_spec->hdr.dst_port;
895                                         list[t].m_u.sctp_hdr.dst_port =
896                                                 sctp_mask->hdr.dst_port;
897                                         input_set_byte += 2;
898                                 }
899                                 t++;
900                         }
901                         break;
902
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return false;
			}
			vxlan_valid = 1;
			tunnel_valid = 1;
			/* From the tunnel header onward, matched fields are
			 * accumulated into the inner input set.
			 */
			input = &inner_input_set;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				/* Emit a VNI lookup only when at least one of
				 * the three VNI bytes is masked.
				 */
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					/* Pack the 3-byte VNI into one word,
					 * vni[2] in the most significant of
					 * the three used byte positions.
					 */
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					*input |= ICE_INSET_VXLAN_VNI;
					/* NOTE(review): VNI is 3 bytes but
					 * only 2 are charged against the
					 * MAX_INPUT_SET_BYTE budget - confirm
					 * this is intentional.
					 */
					input_set_byte += 2;
				}
				t++;
			}
			break;
940
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return false;
			}
			nvgre_valid = 1;
			tunnel_valid = 1;
			/* Fields after the tunnel header belong to the inner
			 * input set.
			 */
			input = &inner_input_set;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				/* Emit a TNI lookup only when at least one of
				 * the three TNI bytes is masked.
				 */
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					/* Pack the 3-byte TNI into the
					 * tni_flow word, tni[2] in the most
					 * significant used byte position.
					 */
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					*input |= ICE_INSET_NVGRE_TNI;
					/* NOTE(review): TNI is 3 bytes but
					 * only 2 are counted here - confirm
					 * against the input-set byte budget.
					 */
					input_set_byte += 2;
				}
				t++;
			}
			break;
978
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VLAN item");
				return false;
			}

			/* In a QinQ pattern the first VLAN item encountered
			 * is the outer tag, the second the inner tag.
			 */
			if (qinq_valid) {
				if (!outer_vlan_valid)
					outer_vlan_valid = 1;
				else
					inner_vlan_valid = 1;
			}

			input = &outer_input_set;

			if (vlan_spec && vlan_mask) {
				if (qinq_valid) {
					if (!inner_vlan_valid) {
						list[t].type = ICE_VLAN_EX;
						*input |=
							ICE_INSET_VLAN_OUTER;
					} else {
						list[t].type = ICE_VLAN_IN;
						*input |=
							ICE_INSET_VLAN_INNER;
					}
				} else {
					/* Single-VLAN pattern: the lone tag is
					 * recorded as ICE_VLAN_OFOS and - per
					 * this driver's convention - flagged
					 * as the "inner" VLAN input set.
					 */
					list[t].type = ICE_VLAN_OFOS;
					*input |= ICE_INSET_VLAN_INNER;
				}

				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set_byte += 2;
				}
				/* Matching on the inner EtherType of a VLAN
				 * item is not supported by this filter path.
				 */
				if (vlan_mask->inner_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VLAN input set.");
					return false;
				}
				t++;
			}
			break;
1037
		/* PPPoE discovery and session items share one handler; both
		 * map to the same ICE_PPPOE lookup entry.
		 */
		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return false;
			}
			pppoe_patt_valid = 1;
			input = &outer_input_set;
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set:
				 * only the session id may be matched; length,
				 * code and version_type are rejected.
				 */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return false;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					*input |= ICE_INSET_PPPOE_SESSION;
					input_set_byte += 2;
				}
				t++;
				/* Remember that an ICE_PPPOE entry exists so a
				 * following PPPOE_PROTO_ID item can merge into
				 * it instead of adding a second entry.
				 */
				pppoe_elem_valid = 1;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
				(pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return false;
			}
			input = &outer_input_set;
			if (pppoe_proto_spec && pppoe_proto_mask) {
				/* Reuse the ICE_PPPOE entry created by the
				 * preceding PPPOES/PPPOED item, if any, by
				 * stepping back one slot.
				 */
				if (pppoe_elem_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					*input |= ICE_INSET_PPPOE_PROTO;
					input_set_byte += 2;
					pppoe_prot_valid = 1;
				}
				/* If the masked PPP protocol id is neither
				 * IPv4 (0x0021) nor IPv6 (0x0057), treat the
				 * rule as plain PPPoE payload; otherwise as a
				 * PPPoE tunnel.
				 */
				if ((pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
					(pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
					*tun_type = ICE_SW_TUN_PPPOE_PAY;
				else
					*tun_type = ICE_SW_TUN_PPPOE;
				t++;
			}

			break;
1124
		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;
			if ((esp_spec && !esp_mask) ||
				(!esp_spec && esp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid esp item");
				return false;
			}
			/* Check esp mask and update input set: matching on
			 * the ESP sequence number is not supported.
			 */
			if (esp_mask && esp_mask->hdr.seq) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid esp mask");
				return false;
			}
			input = &outer_input_set;
			/* Bare ESP item with an otherwise empty outer input
			 * set: select a profile-ID based rule instead of a
			 * field-match rule.
			 */
			if (!esp_spec && !esp_mask && !(*input)) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
				else if (ipv4_valid)
					goto inset_check;
			} else if (esp_spec && esp_mask &&
						esp_mask->hdr.spi){
				/* ESP over UDP is matched as NAT-T traversal,
				 * otherwise as plain ESP; only the SPI field
				 * is used.
				 */
				if (udp_valid)
					list[t].type = ICE_NAT_T;
				else
					list[t].type = ICE_ESP;
				list[t].h_u.esp_hdr.spi =
					esp_spec->hdr.spi;
				list[t].m_u.esp_hdr.spi =
					esp_mask->hdr.spi;
				*input |= ICE_INSET_ESP_SPI;
				input_set_byte += 4;
				t++;
			}

			/* Field-match path: pick the concrete ESP/NAT-T
			 * tunnel type from the IP version and UDP presence.
			 */
			if (!profile_rule) {
				if (ipv6_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
				else if (ipv4_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_ESP;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_ESP;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;
			if ((ah_spec && !ah_mask) ||
				(!ah_spec && ah_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid ah item");
				return false;
			}
			/* Check ah mask and update input set: only the SPI
			 * may be matched; all other AH fields are rejected.
			 */
			if (ah_mask &&
				(ah_mask->next_hdr ||
				ah_mask->payload_len ||
				ah_mask->seq_num ||
				ah_mask->reserved)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid ah mask");
				return false;
			}

			input = &outer_input_set;
			/* Bare AH item with an empty outer input set: use a
			 * profile-ID based rule.
			 */
			if (!ah_spec && !ah_mask && !(*input)) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
				else if (ipv4_valid)
					goto inset_check;
			} else if (ah_spec && ah_mask &&
						ah_mask->spi){
				list[t].type = ICE_AH;
				list[t].h_u.ah_hdr.spi =
					ah_spec->spi;
				list[t].m_u.ah_hdr.spi =
					ah_mask->spi;
				*input |= ICE_INSET_AH_SPI;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				/* AH over UDP has no dedicated tunnel type
				 * here; fall through to the input-set check.
				 */
				if (udp_valid)
					goto inset_check;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_AH;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_AH;
			}
			break;
1236
		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tp_spec = item->spec;
			l2tp_mask = item->mask;
			if ((l2tp_spec && !l2tp_mask) ||
				(!l2tp_spec && l2tp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid l2tp item");
				return false;
			}

			input = &outer_input_set;
			/* Bare L2TPv3 item with an empty outer input set:
			 * select a profile-ID based rule (IPv6 only; IPv4
			 * falls through to the input-set check).
			 */
			if (!l2tp_spec && !l2tp_mask && !(*input)) {
				if (ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
				else if (ipv4_valid)
					goto inset_check;
			} else if (l2tp_spec && l2tp_mask &&
						l2tp_mask->session_id){
				/* Only the L2TPv3 session id is matched. */
				list[t].type = ICE_L2TPV3;
				list[t].h_u.l2tpv3_sess_hdr.session_id =
					l2tp_spec->session_id;
				list[t].m_u.l2tpv3_sess_hdr.session_id =
					l2tp_mask->session_id;
				*input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (ipv6_valid)
					*tun_type =
					ICE_SW_TUN_IPV6_L2TPV3;
				else if (ipv4_valid)
					*tun_type =
					ICE_SW_TUN_IPV4_L2TPV3;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;
			/* Check if PFCP item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pfcp_spec && pfcp_mask) ||
			    (pfcp_spec && !pfcp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid PFCP item");
				return false;
			}
			if (pfcp_spec && pfcp_mask) {
				/* Check pfcp mask and update input set: only
				 * the S field may be matched; msg_type,
				 * msg_len and seid are rejected.
				 */
				if (pfcp_mask->msg_type ||
					pfcp_mask->msg_len ||
					pfcp_mask->seid) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pfcp mask");
					return false;
				}
				/* S field set -> PFCP session rule, clear ->
				 * PFCP node rule; IP version picks the
				 * profile-ID tunnel type.
				 */
				if (pfcp_mask->s_field &&
					pfcp_spec->s_field == 0x01 &&
					ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					pfcp_spec->s_field == 0x01)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					!pfcp_spec->s_field &&
					ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				else if (pfcp_mask->s_field &&
					!pfcp_spec->s_field)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				else
					/* NOTE(review): fails without calling
					 * rte_flow_error_set, unlike every
					 * other rejection in this switch -
					 * confirm the caller reports a
					 * default error in this path.
					 */
					return false;
			}
			break;
1326
		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;
			if (gtp_spec && !gtp_mask) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid GTP item");
				return false;
			}
			if (gtp_spec && gtp_mask) {
				/* Only the TEID may be matched; flags,
				 * message type and message length masks are
				 * rejected.
				 */
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTP mask");
					return false;
				}
				input = &outer_input_set;
				if (gtp_mask->teid)
					*input |= ICE_INSET_GTPU_TEID;
				/* NOTE(review): the TEID spec/mask are copied
				 * into the lookup entry and 4 bytes are
				 * charged even when gtp_mask->teid == 0 -
				 * confirm this is intentional (a zero mask
				 * should make the copy a no-match anyway).
				 */
				list[t].type = ICE_GTP;
				list[t].h_u.gtp_hdr.teid =
					gtp_spec->teid;
				list[t].m_u.gtp_hdr.teid =
					gtp_mask->teid;
				input_set_byte += 4;
				t++;
			}
			tunnel_valid = 1;
			gtpu_valid = 1;
			break;

		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;
			if (gtp_psc_spec && !gtp_psc_mask) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid GTPU_EH item");
				return false;
			}
			if (gtp_psc_spec && gtp_psc_mask) {
				/* Only the QFI of the GTP-U extension header
				 * may be matched; pdu_type is rejected.
				 */
				if (gtp_psc_mask->pdu_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTPU_EH mask");
					return false;
				}
				input = &outer_input_set;
				if (gtp_psc_mask->qfi)
					*input |= ICE_INSET_GTPU_QFI;
				list[t].type = ICE_GTP;
				list[t].h_u.gtp_hdr.qfi =
					gtp_psc_spec->qfi;
				list[t].m_u.gtp_hdr.qfi =
					gtp_psc_mask->qfi;
				input_set_byte += 1;
				t++;
			}
			gtpu_psc_valid = 1;
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		/* Any item type not handled above is unsupported by the
		 * switch filter.
		 */
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				   "Invalid pattern item.");
			return false;
1402                 }
1403         }
1404
	/* Post-parse refinement: when both an outer and an inner VLAN were
	 * seen, upgrade the already-chosen tunnel type to its QinQ variant.
	 */
	if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
	    inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (*tun_type == ICE_SW_TUN_PPPOE &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (*tun_type == ICE_NON_TUN &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_NON_TUN_QINQ;
	else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;

	/* A PPPoE pattern without an explicit PPP protocol-id match: pick the
	 * most specific PPPoE tunnel type from the VLAN/IP/L4 layers that were
	 * present, falling back to plain ICE_SW_TUN_PPPOE.
	 */
	if (pppoe_patt_valid && !pppoe_prot_valid) {
		if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
		else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
		else if (inner_vlan_valid && outer_vlan_valid)
			*tun_type = ICE_SW_TUN_PPPOE_QINQ;
		else if (ipv6_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
		else if (ipv6_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
		else if (ipv4_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
		else if (ipv4_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
		else if (ipv6_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
		else if (ipv4_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
		else
			*tun_type = ICE_SW_TUN_PPPOE;
	}
1440
1441         if (gtpu_valid && gtpu_psc_valid) {
1442                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1443                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1444                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1445                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1446                 else if (ipv4_valid && inner_ipv4_valid)
1447                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1448                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1449                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1450                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1451                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1452                 else if (ipv4_valid && inner_ipv6_valid)
1453                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1454                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1455                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1456                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1457                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1458                 else if (ipv6_valid && inner_ipv4_valid)
1459                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1460                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1461                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1462                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1463                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1464                 else if (ipv6_valid && inner_ipv6_valid)
1465                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1466                 else if (ipv4_valid)
1467                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1468                 else if (ipv6_valid)
1469                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1470         } else if (gtpu_valid) {
1471                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1472                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1473                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1474                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1475                 else if (ipv4_valid && inner_ipv4_valid)
1476                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1477                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1478                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1479                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1480                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1481                 else if (ipv4_valid && inner_ipv6_valid)
1482                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1483                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1484                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1485                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1486                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1487                 else if (ipv6_valid && inner_ipv4_valid)
1488                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1489                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1490                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1491                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1492                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1493                 else if (ipv6_valid && inner_ipv6_valid)
1494                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1495                 else if (ipv4_valid)
1496                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1497                 else if (ipv6_valid)
1498                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1499         }
1500
1501         if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1502             *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1503                 for (k = 0; k < t; k++) {
1504                         if (list[k].type == ICE_GTP)
1505                                 list[k].type = ICE_GTP_NO_PAY;
1506                 }
1507         }
1508
1509         if (*tun_type == ICE_NON_TUN) {
1510                 if (vxlan_valid)
1511                         *tun_type = ICE_SW_TUN_VXLAN;
1512                 else if (nvgre_valid)
1513                         *tun_type = ICE_SW_TUN_NVGRE;
1514                 else if (ipv4_valid && tcp_valid)
1515                         *tun_type = ICE_SW_IPV4_TCP;
1516                 else if (ipv4_valid && udp_valid)
1517                         *tun_type = ICE_SW_IPV4_UDP;
1518                 else if (ipv6_valid && tcp_valid)
1519                         *tun_type = ICE_SW_IPV6_TCP;
1520                 else if (ipv6_valid && udp_valid)
1521                         *tun_type = ICE_SW_IPV6_UDP;
1522         }
1523
1524         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1525                 rte_flow_error_set(error, EINVAL,
1526                         RTE_FLOW_ERROR_TYPE_ITEM,
1527                         item,
1528                         "too much input set");
1529                 return false;
1530         }
1531
1532         *lkups_num = t;
1533
1534 inset_check:
1535         if ((!outer_input_set && !inner_input_set &&
1536             !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1537             ~pattern_match_item->input_set_mask_o) ||
1538             (inner_input_set & ~pattern_match_item->input_set_mask_i))
1539                 return false;
1540
1541         return true;
1542 }
1543
1544 static int
1545 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1546                             const struct rte_flow_action *actions,
1547                             uint32_t priority,
1548                             struct rte_flow_error *error,
1549                             struct ice_adv_rule_info *rule_info)
1550 {
1551         const struct rte_flow_action_vf *act_vf;
1552         const struct rte_flow_action *action;
1553         enum rte_flow_action_type action_type;
1554
1555         for (action = actions; action->type !=
1556                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1557                 action_type = action->type;
1558                 switch (action_type) {
1559                 case RTE_FLOW_ACTION_TYPE_VF:
1560                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1561                         act_vf = action->conf;
1562
1563                         if (act_vf->id >= ad->real_hw.num_vfs &&
1564                                 !act_vf->original) {
1565                                 rte_flow_error_set(error,
1566                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1567                                         actions,
1568                                         "Invalid vf id");
1569                                 return -rte_errno;
1570                         }
1571
1572                         if (act_vf->original)
1573                                 rule_info->sw_act.vsi_handle =
1574                                         ad->real_hw.avf.bus.func;
1575                         else
1576                                 rule_info->sw_act.vsi_handle = act_vf->id;
1577                         break;
1578
1579                 case RTE_FLOW_ACTION_TYPE_DROP:
1580                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1581                         break;
1582
1583                 default:
1584                         rte_flow_error_set(error,
1585                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1586                                            actions,
1587                                            "Invalid action type");
1588                         return -rte_errno;
1589                 }
1590         }
1591
1592         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1593         rule_info->sw_act.flag = ICE_FLTR_RX;
1594         rule_info->rx = 1;
1595         rule_info->priority = 6 - priority;
1596
1597         return 0;
1598 }
1599
1600 static int
1601 ice_switch_parse_action(struct ice_pf *pf,
1602                 const struct rte_flow_action *actions,
1603                 uint32_t priority,
1604                 struct rte_flow_error *error,
1605                 struct ice_adv_rule_info *rule_info)
1606 {
1607         struct ice_vsi *vsi = pf->main_vsi;
1608         struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
1609         const struct rte_flow_action_queue *act_q;
1610         const struct rte_flow_action_rss *act_qgrop;
1611         uint16_t base_queue, i;
1612         const struct rte_flow_action *action;
1613         enum rte_flow_action_type action_type;
1614         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1615                  2, 4, 8, 16, 32, 64, 128};
1616
1617         base_queue = pf->base_queue + vsi->base_queue;
1618         for (action = actions; action->type !=
1619                         RTE_FLOW_ACTION_TYPE_END; action++) {
1620                 action_type = action->type;
1621                 switch (action_type) {
1622                 case RTE_FLOW_ACTION_TYPE_RSS:
1623                         act_qgrop = action->conf;
1624                         if (act_qgrop->queue_num <= 1)
1625                                 goto error;
1626                         rule_info->sw_act.fltr_act =
1627                                 ICE_FWD_TO_QGRP;
1628                         rule_info->sw_act.fwd_id.q_id =
1629                                 base_queue + act_qgrop->queue[0];
1630                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1631                                 if (act_qgrop->queue_num ==
1632                                         valid_qgrop_number[i])
1633                                         break;
1634                         }
1635                         if (i == MAX_QGRP_NUM_TYPE)
1636                                 goto error;
1637                         if ((act_qgrop->queue[0] +
1638                                 act_qgrop->queue_num) >
1639                                 dev_data->nb_rx_queues)
1640                                 goto error1;
1641                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1642                                 if (act_qgrop->queue[i + 1] !=
1643                                         act_qgrop->queue[i] + 1)
1644                                         goto error2;
1645                         rule_info->sw_act.qgrp_size =
1646                                 act_qgrop->queue_num;
1647                         break;
1648                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1649                         act_q = action->conf;
1650                         if (act_q->index >= dev_data->nb_rx_queues)
1651                                 goto error;
1652                         rule_info->sw_act.fltr_act =
1653                                 ICE_FWD_TO_Q;
1654                         rule_info->sw_act.fwd_id.q_id =
1655                                 base_queue + act_q->index;
1656                         break;
1657
1658                 case RTE_FLOW_ACTION_TYPE_DROP:
1659                         rule_info->sw_act.fltr_act =
1660                                 ICE_DROP_PACKET;
1661                         break;
1662
1663                 case RTE_FLOW_ACTION_TYPE_VOID:
1664                         break;
1665
1666                 default:
1667                         goto error;
1668                 }
1669         }
1670
1671         rule_info->sw_act.vsi_handle = vsi->idx;
1672         rule_info->rx = 1;
1673         rule_info->sw_act.src = vsi->idx;
1674         rule_info->priority = priority + 5;
1675
1676         return 0;
1677
1678 error:
1679         rte_flow_error_set(error,
1680                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1681                 actions,
1682                 "Invalid action type or queue number");
1683         return -rte_errno;
1684
1685 error1:
1686         rte_flow_error_set(error,
1687                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1688                 actions,
1689                 "Invalid queue region indexes");
1690         return -rte_errno;
1691
1692 error2:
1693         rte_flow_error_set(error,
1694                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1695                 actions,
1696                 "Discontinuous queue region");
1697         return -rte_errno;
1698 }
1699
1700 static int
1701 ice_switch_check_action(const struct rte_flow_action *actions,
1702                             struct rte_flow_error *error)
1703 {
1704         const struct rte_flow_action *action;
1705         enum rte_flow_action_type action_type;
1706         uint16_t actions_num = 0;
1707
1708         for (action = actions; action->type !=
1709                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1710                 action_type = action->type;
1711                 switch (action_type) {
1712                 case RTE_FLOW_ACTION_TYPE_VF:
1713                 case RTE_FLOW_ACTION_TYPE_RSS:
1714                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1715                 case RTE_FLOW_ACTION_TYPE_DROP:
1716                         actions_num++;
1717                         break;
1718                 case RTE_FLOW_ACTION_TYPE_VOID:
1719                         continue;
1720                 default:
1721                         rte_flow_error_set(error,
1722                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1723                                            actions,
1724                                            "Invalid action type");
1725                         return -rte_errno;
1726                 }
1727         }
1728
1729         if (actions_num != 1) {
1730                 rte_flow_error_set(error,
1731                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1732                                    actions,
1733                                    "Invalid action number");
1734                 return -rte_errno;
1735         }
1736
1737         return 0;
1738 }
1739
/* Parser entry point for the switch engine: validate @pattern against the
 * engine's supported pattern @array, build the advanced-rule lookup list
 * and rule info, and (when @meta is non-NULL) hand ownership of the
 * resulting sw_meta to the caller via *meta.  Returns 0 on success,
 * -rte_errno (with @error set) on failure; on failure all allocations
 * made here are released.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Pre-scan the pattern: count items (to size the lookup list),
	 * count VLAN layers (QinQ detection) and detect a fully-masked
	 * ether type, which requests matching both tunneled and
	 * non-tunneled traffic.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN layers means a QinQ flavor of the tunnel type. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Fill @list / @lkups_num and refine @tun_type from the pattern. */
	if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
				   &tun_type, pattern_match_item)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF and PF paths build the rule action differently. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
						  error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, priority, error,
					      &rule_info);

	if (ret)
		goto error;

	/* On success, ownership of @list and @sw_meta_ptr transfers to the
	 * caller through *meta; otherwise free them here.
	 */
	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1864
1865 static int
1866 ice_switch_query(struct ice_adapter *ad __rte_unused,
1867                 struct rte_flow *flow __rte_unused,
1868                 struct rte_flow_query_count *count __rte_unused,
1869                 struct rte_flow_error *error)
1870 {
1871         rte_flow_error_set(error, EINVAL,
1872                 RTE_FLOW_ERROR_TYPE_HANDLE,
1873                 NULL,
1874                 "count action not supported by switch filter");
1875
1876         return -rte_errno;
1877 }
1878
/* Re-target an installed switch rule at a new VSI number (used when a VF
 * is reset and its VSI mapping changes).  The matching rule is located in
 * the recipe's filter list, its lookups are duplicated, the old HW rule
 * is removed, the VSI context is updated, and the rule is replayed.
 * Returns 0 on success (or when @flow does not belong to the redirected
 * VSI), negative errno on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Rule belongs to a different VSI: nothing to do. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is handled here. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Find this flow's rule entry in the recipe's filter list; the
	 * rule id plus a matching forward action identifies it.
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Duplicate the lookups: the originals are freed
			 * when the old rule is removed below.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			/* Replay a VSI-list rule as a single-VSI rule
			 * pointing at the redirected VSI.
			 */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule found. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1960
1961 static int
1962 ice_switch_init(struct ice_adapter *ad)
1963 {
1964         int ret = 0;
1965         struct ice_flow_parser *dist_parser;
1966         struct ice_flow_parser *perm_parser;
1967
1968         if (ad->devargs.pipe_mode_support) {
1969                 perm_parser = &ice_switch_perm_parser;
1970                 ret = ice_register_parser(perm_parser, ad);
1971         } else {
1972                 dist_parser = &ice_switch_dist_parser;
1973                 ret = ice_register_parser(dist_parser, ad);
1974         }
1975         return ret;
1976 }
1977
1978 static void
1979 ice_switch_uninit(struct ice_adapter *ad)
1980 {
1981         struct ice_flow_parser *dist_parser;
1982         struct ice_flow_parser *perm_parser;
1983
1984         if (ad->devargs.pipe_mode_support) {
1985                 perm_parser = &ice_switch_perm_parser;
1986                 ice_unregister_parser(perm_parser, ad);
1987         } else {
1988                 dist_parser = &ice_switch_dist_parser;
1989                 ice_unregister_parser(dist_parser, ad);
1990         }
1991 }
1992
/* Switch filter engine descriptor registered with the generic flow
 * framework; callbacks implement rule lifecycle for switch (recipe
 * based) filters.
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,   /* COUNT unsupported: always fails */
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
2004
/* Parser used in default (non-pipeline) mode: runs at the distributor
 * stage over the distributor pattern list.
 */
static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
2013
/* Parser used in pipeline mode (devargs pipe_mode_support): runs at the
 * permission stage over the permission pattern list.
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
2022
2023 RTE_INIT(ice_sw_engine_init)
2024 {
2025         struct ice_flow_engine *engine = &ice_switch_engine;
2026         ice_register_flow_engine(engine);
2027 }