/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include "base/ice_type.h"
#include "base/ice_switch.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"


#define MAX_QGRP_NUM_TYPE       7
#define MAX_INPUT_SET_BYTE      32
#define ICE_PPP_IPV4_PROTO      0x0021
#define ICE_PPP_IPV6_PROTO      0x0057
#define ICE_IPV4_PROTO_NVGRE    0x002F

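/* The ICE_SW_INSET_* values below are input-set bitmaps: each OR-combines
 * the ICE_INSET_* field bits that a given pattern may match on. The parser
 * compares the input set collected from a flow pattern against these masks
 * (via the pattern tables further down) to decide whether the switch
 * filter supports the requested rule.
 */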
#define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
        ICE_INSET_VLAN_INNER)
#define ICE_SW_INSET_MAC_QINQ  ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
        ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
        ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
        ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
        ICE_SW_INSET_MAC_IPV4 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
        ICE_SW_INSET_MAC_IPV6 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)

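/* Parsed-rule metadata handed from the pattern parser to
 * ice_switch_create(): the lookup element list consumed by
 * ice_add_adv_rule(), its length, and the rule attributes.
 */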
struct sw_meta {
        struct ice_adv_lkup_elem *list;
        uint16_t lkups_num;
        struct ice_adv_rule_info rule_info;
};

static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;

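/* Pattern tables for the two parsers declared above. Each entry pairs a
 * pattern template with the input-set bits supported for it; a second
 * column of ICE_INSET_NONE means the pattern is accepted but none of its
 * fields may be matched. The "dist" table serves the distributor stage
 * and the "perm" table the permission stage of the switch filter.
 *
 * As an illustrative example (not taken from this file), a testpmd rule
 * such as
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *        tcp dst is 80 / end actions queue index 2 / end
 * would be expected to match the pattern_eth_ipv4_tcp entry below.
 */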
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_ethertype_vlan,
                        ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
        {pattern_ethertype_qinq,
                        ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
        {pattern_eth_arp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_ah,
                        ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_ah,
                        ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_ah,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_l2tp,
                        ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv6_l2tp,
                        ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv4_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,
                        ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,
                        ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
};

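/* The permission-stage table differs from the distributor table mainly in
 * its tunnel entries: the VXLAN/NVGRE patterns use the
 * ICE_SW_INSET_PERM_TUNNEL_IPV4* input sets, which match on inner IP and
 * L4 fields rather than on the tunnel ID and outer destination.
 */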
static struct
ice_pattern_match_item ice_switch_pattern_perm_list[] = {
        {pattern_ethertype,
                        ICE_SW_INSET_ETHER, ICE_INSET_NONE},
        {pattern_ethertype_vlan,
                        ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
        {pattern_ethertype_qinq,
                        ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
        {pattern_eth_arp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,
                        ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,
                        ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,
                        ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6,
                        ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,
                        ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,
                        ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
                        ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_tcp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_udp,
                        ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,
                        ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,
                        ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
        {pattern_eth_ipv4_ah,
                        ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_ah,
                        ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_ah,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_l2tp,
                        ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv6_l2tp,
                        ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
        {pattern_eth_ipv4_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,
                        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,
                        ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,
                        ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,
                        ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,
                        ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,
                        ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv6,
                        ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
};

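/* Create stage: program the parsed rule into hardware with
 * ice_add_adv_rule() and remember the returned rule ID on the rte_flow
 * handle so it can be destroyed later. The meta buffers allocated by the
 * parser are freed on both the success and the error path.
 */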
static int
ice_switch_create(struct ice_adapter *ad,
                struct rte_flow *flow,
                void *meta,
                struct rte_flow_error *error)
{
        int ret = 0;
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_rule_query_data rule_added = {0};
        struct ice_rule_query_data *filter_ptr;
        struct ice_adv_lkup_elem *list =
                ((struct sw_meta *)meta)->list;
        uint16_t lkups_cnt =
                ((struct sw_meta *)meta)->lkups_num;
        struct ice_adv_rule_info *rule_info =
                &((struct sw_meta *)meta)->rule_info;

        if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "item number too large for rule");
                goto error;
        }
        if (!list) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                        "lookup list should not be NULL");
                goto error;
        }
        ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
        if (!ret) {
                filter_ptr = rte_zmalloc("ice_switch_filter",
                        sizeof(struct ice_rule_query_data), 0);
                if (!filter_ptr) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "No memory for ice_switch_filter");
                        goto error;
                }
                flow->rule = filter_ptr;
                rte_memcpy(filter_ptr,
                        &rule_added,
                        sizeof(struct ice_rule_query_data));
        } else {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "switch filter failed to create flow rule");
                goto error;
        }

        rte_free(list);
        rte_free(meta);
        return 0;

error:
        rte_free(list);
        rte_free(meta);

        return -rte_errno;
}

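/* Destroy stage: remove the hardware rule recorded on the flow handle
 * via ice_rem_adv_rule_by_id() and release the stored rule ID.
 */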
static int
ice_switch_destroy(struct ice_adapter *ad,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct ice_hw *hw = &ad->hw;
        int ret;
        struct ice_rule_query_data *filter_ptr;

        filter_ptr = (struct ice_rule_query_data *)
                flow->rule;

        if (!filter_ptr) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "no such flow created by switch filter");
                return -rte_errno;
        }

        ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
        if (ret) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "failed to destroy switch filter rule");
                return -rte_errno;
        }

        rte_free(filter_ptr);
        return ret;
}

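/* Release the rule-query data attached to a flow handle without touching
 * hardware.
 */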
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
        rte_free(flow->rule);
}

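/* Walk an rte_flow pattern, validate each item's mask, translate the
 * matched fields into the ice_adv_lkup_elem array understood by the
 * shared switch code, and accumulate the ICE_INSET_* bits seen. Returns
 * the collected input set, or 0 with a populated rte_flow_error on
 * failure. Outer vs. inner (tunnelled) fields are distinguished by the
 * tunnel_valid flag, which flips once a VXLAN/NVGRE item is seen.
 */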
static uint64_t
ice_switch_inset_get(const struct rte_flow_item pattern[],
                struct rte_flow_error *error,
                struct ice_adv_lkup_elem *list,
                uint16_t *lkups_num,
                enum ice_sw_tunnel_type *tun_type)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
        const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
                                *pppoe_proto_mask;
        const struct rte_flow_item_esp *esp_spec, *esp_mask;
        const struct rte_flow_item_ah *ah_spec, *ah_mask;
        const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
        const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint16_t input_set_byte = 0;
        bool pppoe_elem_valid = 0;
        bool pppoe_patt_valid = 0;
        bool pppoe_prot_valid = 0;
        bool inner_vlan_valid = 0;
        bool outer_vlan_valid = 0;
        bool tunnel_valid = 0;
        bool profile_rule = 0;
        bool nvgre_valid = 0;
        bool vxlan_valid = 0;
        bool ipv6_valid = 0;
        bool ipv4_valid = 0;
        bool udp_valid = 0;
        bool tcp_valid = 0;
        uint16_t j, t = 0;

        for (item = pattern; item->type !=
                        RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range matching is not supported");
                        return 0;
                }
                item_type = item->type;

                switch (item_type) {
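                /* Each case below rejects masks on unsupported fields,
                 * ORs the matching ICE_INSET_* bits into input_set, and
                 * fills list[t] with header/mask values before advancing
                 * the element counter t.
                 */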
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        if (eth_spec && eth_mask) {
                                const uint8_t *a = eth_mask->src.addr_bytes;
                                const uint8_t *b = eth_mask->dst.addr_bytes;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (a[j] && tunnel_valid) {
                                                input_set |=
                                                        ICE_INSET_TUN_SMAC;
                                                break;
                                        } else if (a[j]) {
                                                input_set |=
                                                        ICE_INSET_SMAC;
                                                break;
                                        }
                                }
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (b[j] && tunnel_valid) {
                                                input_set |=
                                                        ICE_INSET_TUN_DMAC;
                                                break;
                                        } else if (b[j]) {
                                                input_set |=
                                                        ICE_INSET_DMAC;
                                                break;
                                        }
                                }
                                if (eth_mask->type)
                                        input_set |= ICE_INSET_ETHERTYPE;
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_MAC_OFOS : ICE_MAC_IL;
                                struct ice_ether_hdr *h;
                                struct ice_ether_hdr *m;
                                uint16_t i = 0;
                                h = &list[t].h_u.eth_hdr;
                                m = &list[t].m_u.eth_hdr;
                                for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
                                        if (eth_mask->src.addr_bytes[j]) {
                                                h->src_addr[j] =
                                                eth_spec->src.addr_bytes[j];
                                                m->src_addr[j] =
                                                eth_mask->src.addr_bytes[j];
                                                i = 1;
                                                input_set_byte++;
                                        }
                                        if (eth_mask->dst.addr_bytes[j]) {
                                                h->dst_addr[j] =
                                                eth_spec->dst.addr_bytes[j];
                                                m->dst_addr[j] =
                                                eth_mask->dst.addr_bytes[j];
                                                i = 1;
                                                input_set_byte++;
                                        }
                                }
                                if (i)
                                        t++;
                                if (eth_mask->type) {
                                        list[t].type = ICE_ETYPE_OL;
                                        list[t].h_u.ethertype.ethtype_id =
                                                eth_spec->type;
                                        list[t].m_u.ethertype.ethtype_id =
                                                eth_mask->type;
                                        input_set_byte += 2;
                                        t++;
                                }
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        ipv4_valid = 1;
                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                        ipv4_mask->hdr.total_length ||
                                        ipv4_mask->hdr.packet_id ||
                                        ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (ipv4_mask->hdr.type_of_service)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TOS;
                                        if (ipv4_mask->hdr.src_addr)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live)
                                                input_set |=
                                                        ICE_INSET_TUN_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id)
                                                input_set |=
                                                ICE_INSET_TUN_IPV4_PROTO;
                                } else {
                                        if (ipv4_mask->hdr.src_addr)
                                                input_set |= ICE_INSET_IPV4_SRC;
                                        if (ipv4_mask->hdr.dst_addr)
                                                input_set |= ICE_INSET_IPV4_DST;
                                        if (ipv4_mask->hdr.time_to_live)
                                                input_set |= ICE_INSET_IPV4_TTL;
                                        if (ipv4_mask->hdr.next_proto_id)
                                                input_set |=
                                                ICE_INSET_IPV4_PROTO;
                                        if (ipv4_mask->hdr.type_of_service)
                                                input_set |=
                                                        ICE_INSET_IPV4_TOS;
                                }
                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV4_OFOS : ICE_IPV4_IL;
                                if (ipv4_mask->hdr.src_addr) {
                                        list[t].h_u.ipv4_hdr.src_addr =
                                                ipv4_spec->hdr.src_addr;
                                        list[t].m_u.ipv4_hdr.src_addr =
                                                ipv4_mask->hdr.src_addr;
                                        input_set_byte += 2;
                                }
                                if (ipv4_mask->hdr.dst_addr) {
                                        list[t].h_u.ipv4_hdr.dst_addr =
                                                ipv4_spec->hdr.dst_addr;
                                        list[t].m_u.ipv4_hdr.dst_addr =
                                                ipv4_mask->hdr.dst_addr;
                                        input_set_byte += 2;
                                }
                                if (ipv4_mask->hdr.time_to_live) {
                                        list[t].h_u.ipv4_hdr.time_to_live =
                                                ipv4_spec->hdr.time_to_live;
                                        list[t].m_u.ipv4_hdr.time_to_live =
                                                ipv4_mask->hdr.time_to_live;
                                        input_set_byte++;
                                }
                                if (ipv4_mask->hdr.next_proto_id) {
                                        list[t].h_u.ipv4_hdr.protocol =
                                                ipv4_spec->hdr.next_proto_id;
                                        list[t].m_u.ipv4_hdr.protocol =
                                                ipv4_mask->hdr.next_proto_id;
                                        input_set_byte++;
                                }
                                if ((ipv4_spec->hdr.next_proto_id &
                                        ipv4_mask->hdr.next_proto_id) ==
                                        ICE_IPV4_PROTO_NVGRE)
                                        *tun_type = ICE_SW_TUN_AND_NON_TUN;
                                if (ipv4_mask->hdr.type_of_service) {
                                        list[t].h_u.ipv4_hdr.tos =
                                                ipv4_spec->hdr.type_of_service;
                                        list[t].m_u.ipv4_hdr.tos =
                                                ipv4_mask->hdr.type_of_service;
                                        input_set_byte++;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;
                        ipv6_valid = 1;
                        if (ipv6_spec && ipv6_mask) {
                                if (ipv6_mask->hdr.payload_len) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask");
                                        return 0;
                                }

                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j] &&
                                                tunnel_valid) {
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_SRC;
                                                break;
                                        } else if (ipv6_mask->hdr.src_addr[j]) {
                                                input_set |= ICE_INSET_IPV6_SRC;
                                                break;
                                        }
                                }
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.dst_addr[j] &&
                                                tunnel_valid) {
                                                input_set |=
                                                ICE_INSET_TUN_IPV6_DST;
                                                break;
                                        } else if (ipv6_mask->hdr.dst_addr[j]) {
                                                input_set |= ICE_INSET_IPV6_DST;
                                                break;
                                        }
                                }
                                if (ipv6_mask->hdr.proto &&
                                        tunnel_valid)
                                        input_set |=
                                                ICE_INSET_TUN_IPV6_NEXT_HDR;
                                else if (ipv6_mask->hdr.proto)
                                        input_set |=
                                                ICE_INSET_IPV6_NEXT_HDR;
                                if (ipv6_mask->hdr.hop_limits &&
                                        tunnel_valid)
                                        input_set |=
                                                ICE_INSET_TUN_IPV6_HOP_LIMIT;
                                else if (ipv6_mask->hdr.hop_limits)
                                        input_set |=
                                                ICE_INSET_IPV6_HOP_LIMIT;
                                if ((ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) &&
                                        tunnel_valid)
                                        input_set |=
                                                        ICE_INSET_TUN_IPV6_TC;
                                else if (ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TC;

                                list[t].type = (tunnel_valid == 0) ?
                                        ICE_IPV6_OFOS : ICE_IPV6_IL;
                                struct ice_ipv6_hdr *f;
                                struct ice_ipv6_hdr *s;
                                f = &list[t].h_u.ipv6_hdr;
                                s = &list[t].m_u.ipv6_hdr;
                                for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
                                        if (ipv6_mask->hdr.src_addr[j]) {
                                                f->src_addr[j] =
                                                ipv6_spec->hdr.src_addr[j];
                                                s->src_addr[j] =
                                                ipv6_mask->hdr.src_addr[j];
                                                input_set_byte++;
                                        }
                                        if (ipv6_mask->hdr.dst_addr[j]) {
                                                f->dst_addr[j] =
                                                ipv6_spec->hdr.dst_addr[j];
                                                s->dst_addr[j] =
                                                ipv6_mask->hdr.dst_addr[j];
                                                input_set_byte++;
                                        }
                                }
                                if (ipv6_mask->hdr.proto) {
                                        f->next_hdr =
                                                ipv6_spec->hdr.proto;
                                        s->next_hdr =
                                                ipv6_mask->hdr.proto;
                                        input_set_byte++;
                                }
                                if (ipv6_mask->hdr.hop_limits) {
                                        f->hop_limit =
                                                ipv6_spec->hdr.hop_limits;
                                        s->hop_limit =
                                                ipv6_mask->hdr.hop_limits;
                                        input_set_byte++;
                                }
                                if (ipv6_mask->hdr.vtc_flow &
                                                rte_cpu_to_be_32
                                                (RTE_IPV6_HDR_TC_MASK)) {
                                        struct ice_le_ver_tc_flow vtf;
                                        vtf.u.fld.version = 0;
                                        vtf.u.fld.flow_label = 0;
                                        vtf.u.fld.tc = (rte_be_to_cpu_32
                                                (ipv6_spec->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                        vtf.u.fld.tc = (rte_be_to_cpu_32
                                                (ipv6_mask->hdr.vtc_flow) &
                                                        RTE_IPV6_HDR_TC_MASK) >>
                                                        RTE_IPV6_HDR_TC_SHIFT;
                                        s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
                                        input_set_byte += 4;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;
                        udp_valid = 1;
                        if (udp_spec && udp_mask) {
                                /* Check UDP mask and update input set */
                                if (udp_mask->hdr.dgram_len ||
                                    udp_mask->hdr.dgram_cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (udp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_UDP_DST_PORT;
                                } else {
                                        if (udp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_UDP_SRC_PORT;
                                        if (udp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_UDP_DST_PORT;
                                }
                                if (*tun_type == ICE_SW_TUN_VXLAN &&
                                                tunnel_valid == 0)
                                        list[t].type = ICE_UDP_OF;
                                else
                                        list[t].type = ICE_UDP_ILOS;
                                if (udp_mask->hdr.src_port) {
                                        list[t].h_u.l4_hdr.src_port =
                                                udp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                udp_mask->hdr.src_port;
                                        input_set_byte += 2;
                                }
                                if (udp_mask->hdr.dst_port) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                udp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                udp_mask->hdr.dst_port;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;
                        tcp_valid = 1;
                        if (tcp_spec && tcp_mask) {
                                /* Check TCP mask and update input set */
                                if (tcp_mask->hdr.sent_seq ||
                                        tcp_mask->hdr.recv_ack ||
                                        tcp_mask->hdr.data_off ||
                                        tcp_mask->hdr.tcp_flags ||
                                        tcp_mask->hdr.rx_win ||
                                        tcp_mask->hdr.cksum ||
                                        tcp_mask->hdr.tcp_urp) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (tcp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_TCP_DST_PORT;
                                } else {
                                        if (tcp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TCP_SRC_PORT;
                                        if (tcp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TCP_DST_PORT;
                                }
                                list[t].type = ICE_TCP_IL;
                                if (tcp_mask->hdr.src_port) {
                                        list[t].h_u.l4_hdr.src_port =
                                                tcp_spec->hdr.src_port;
                                        list[t].m_u.l4_hdr.src_port =
                                                tcp_mask->hdr.src_port;
                                        input_set_byte += 2;
                                }
                                if (tcp_mask->hdr.dst_port) {
                                        list[t].h_u.l4_hdr.dst_port =
                                                tcp_spec->hdr.dst_port;
                                        list[t].m_u.l4_hdr.dst_port =
                                                tcp_mask->hdr.dst_port;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;
                        if (sctp_spec && sctp_mask) {
                                /* Check SCTP mask and update input set */
                                if (sctp_mask->hdr.cksum) {
                                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid SCTP mask");
                                        return 0;
                                }

                                if (tunnel_valid) {
                                        if (sctp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_TUN_SCTP_DST_PORT;
                                } else {
                                        if (sctp_mask->hdr.src_port)
                                                input_set |=
                                                ICE_INSET_SCTP_SRC_PORT;
                                        if (sctp_mask->hdr.dst_port)
                                                input_set |=
                                                ICE_INSET_SCTP_DST_PORT;
                                }
                                list[t].type = ICE_SCTP_IL;
                                if (sctp_mask->hdr.src_port) {
                                        list[t].h_u.sctp_hdr.src_port =
                                                sctp_spec->hdr.src_port;
                                        list[t].m_u.sctp_hdr.src_port =
                                                sctp_mask->hdr.src_port;
                                        input_set_byte += 2;
                                }
                                if (sctp_mask->hdr.dst_port) {
                                        list[t].h_u.sctp_hdr.dst_port =
                                                sctp_spec->hdr.dst_port;
                                        list[t].m_u.sctp_hdr.dst_port =
                                                sctp_mask->hdr.dst_port;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return 0;
                        }
                        vxlan_valid = 1;
                        tunnel_valid = 1;
                        if (vxlan_spec && vxlan_mask) {
                                list[t].type = ICE_VXLAN;
                                if (vxlan_mask->vni[0] ||
                                        vxlan_mask->vni[1] ||
                                        vxlan_mask->vni[2]) {
                                        list[t].h_u.tnl_hdr.vni =
                                                (vxlan_spec->vni[2] << 16) |
                                                (vxlan_spec->vni[1] << 8) |
                                                vxlan_spec->vni[0];
                                        list[t].m_u.tnl_hdr.vni =
                                                (vxlan_mask->vni[2] << 16) |
                                                (vxlan_mask->vni[1] << 8) |
                                                vxlan_mask->vni[0];
                                        input_set |=
                                                ICE_INSET_TUN_VXLAN_VNI;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return 0;
                        }
                        nvgre_valid = 1;
                        tunnel_valid = 1;
                        if (nvgre_spec && nvgre_mask) {
                                list[t].type = ICE_NVGRE;
                                if (nvgre_mask->tni[0] ||
                                        nvgre_mask->tni[1] ||
                                        nvgre_mask->tni[2]) {
                                        list[t].h_u.nvgre_hdr.tni_flow =
                                                (nvgre_spec->tni[2] << 16) |
                                                (nvgre_spec->tni[1] << 8) |
                                                nvgre_spec->tni[0];
                                        list[t].m_u.nvgre_hdr.tni_flow =
                                                (nvgre_mask->tni[2] << 16) |
                                                (nvgre_mask->tni[1] << 8) |
                                                nvgre_mask->tni[0];
                                        input_set |=
                                                ICE_INSET_TUN_NVGRE_TNI;
                                        input_set_byte += 2;
                                }
                                t++;
                        }
                        break;

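                /* For QinQ tunnel types the first VLAN item in the pattern
                 * is taken as the outer tag and the second as the inner
                 * tag; for all other types a single VLAN item is treated
                 * as the inner tag.
                 */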
1017                 case RTE_FLOW_ITEM_TYPE_VLAN:
1018                         vlan_spec = item->spec;
1019                         vlan_mask = item->mask;
1020                         /* A VLAN item may be used only to name the
1021                          * protocol, in which case both spec and mask must
1022                          * be NULL; otherwise neither may be NULL.
1023                          */
1024                         if ((!vlan_spec && vlan_mask) ||
1025                             (vlan_spec && !vlan_mask)) {
1026                                 rte_flow_error_set(error, EINVAL,
1027                                            RTE_FLOW_ERROR_TYPE_ITEM,
1028                                            item,
1029                                            "Invalid VLAN item");
1030                                 return 0;
1031                         }
1032
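                        /* Classify this VLAN item: for the QinQ tunnel types
                         * the first VLAN item seen is the outer tag and the
                         * second is the inner tag; for every other tunnel
                         * type a VLAN item always matches the inner tag.
                         * Below, the outer tag maps to an ICE_VLAN_EX lookup
                         * and the inner tag to ICE_VLAN_OFOS.
                         */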
1033                         if (!outer_vlan_valid &&
1034                             (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1035                              *tun_type == ICE_NON_TUN_QINQ))
1036                                 outer_vlan_valid = 1;
1037                         else if (!inner_vlan_valid &&
1038                                  (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
1039                                   *tun_type == ICE_NON_TUN_QINQ))
1040                                 inner_vlan_valid = 1;
1041                         else if (!inner_vlan_valid)
1042                                 inner_vlan_valid = 1;
1043
1044                         if (vlan_spec && vlan_mask) {
1045                                 if (outer_vlan_valid && !inner_vlan_valid) {
1046                                         list[t].type = ICE_VLAN_EX;
1047                                         input_set |= ICE_INSET_VLAN_OUTER;
1048                                 } else if (inner_vlan_valid) {
1049                                         list[t].type = ICE_VLAN_OFOS;
1050                                         input_set |= ICE_INSET_VLAN_INNER;
1051                                 }
1052
1053                                 if (vlan_mask->tci) {
1054                                         list[t].h_u.vlan_hdr.vlan =
1055                                                 vlan_spec->tci;
1056                                         list[t].m_u.vlan_hdr.vlan =
1057                                                 vlan_mask->tci;
1058                                         input_set_byte += 2;
1059                                 }
1060                                 if (vlan_mask->inner_type) {
1061                                         rte_flow_error_set(error, EINVAL,
1062                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1063                                                 item,
1064                                                 "Invalid VLAN input set.");
1065                                         return 0;
1066                                 }
1067                                 t++;
1068                         }
1069                         break;
1070
1071                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1072                 case RTE_FLOW_ITEM_TYPE_PPPOES:
1073                         pppoe_spec = item->spec;
1074                         pppoe_mask = item->mask;
1075                         /* A PPPoE item may be used only to name the
1076                          * protocol, in which case both spec and mask must
1077                          * be NULL; otherwise neither may be NULL.
1078                          */
1079                         if ((!pppoe_spec && pppoe_mask) ||
1080                                 (pppoe_spec && !pppoe_mask)) {
1081                                 rte_flow_error_set(error, EINVAL,
1082                                         RTE_FLOW_ERROR_TYPE_ITEM,
1083                                         item,
1084                                         "Invalid PPPoE item");
1085                                 return 0;
1086                         }
1087                         pppoe_patt_valid = 1;
1088                         if (pppoe_spec && pppoe_mask) {
1089                                 /* Check pppoe mask and update input set */
1090                                 if (pppoe_mask->length ||
1091                                         pppoe_mask->code ||
1092                                         pppoe_mask->version_type) {
1093                                         rte_flow_error_set(error, EINVAL,
1094                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1095                                                 item,
1096                                                 "Invalid PPPoE mask");
1097                                         return 0;
1098                                 }
1099                                 list[t].type = ICE_PPPOE;
1100                                 if (pppoe_mask->session_id) {
1101                                         list[t].h_u.pppoe_hdr.session_id =
1102                                                 pppoe_spec->session_id;
1103                                         list[t].m_u.pppoe_hdr.session_id =
1104                                                 pppoe_mask->session_id;
1105                                         input_set |= ICE_INSET_PPPOE_SESSION;
1106                                         input_set_byte += 2;
1107                                 }
1108                                 t++;
1109                                 pppoe_elem_valid = 1;
1110                         }
1111                         break;
1112
1113                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1114                         pppoe_proto_spec = item->spec;
1115                         pppoe_proto_mask = item->mask;
1116                         /* The optional PPPoE proto_id item may be used
1117                          * only to name the protocol, in which case both
1118                          * spec and mask must be NULL; otherwise neither
1119                          * may be NULL.
1120                          */
1121                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1122                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1123                                 rte_flow_error_set(error, EINVAL,
1124                                         RTE_FLOW_ERROR_TYPE_ITEM,
1125                                         item,
1126                                         "Invalid PPPoE proto item");
1127                                 return 0;
1128                         }
1129                         if (pppoe_proto_spec && pppoe_proto_mask) {
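                                /* The proto_id pattern refines the PPPoE
                                 * header already captured: if an ICE_PPPOE
                                 * lookup element was emitted for the PPPoE
                                 * item, step back and fill proto_id into that
                                 * same element instead of adding a new one.
                                 */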
1130                                 if (pppoe_elem_valid)
1131                                         t--;
1132                                 list[t].type = ICE_PPPOE;
1133                                 if (pppoe_proto_mask->proto_id) {
1134                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1135                                                 pppoe_proto_spec->proto_id;
1136                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1137                                                 pppoe_proto_mask->proto_id;
1138                                         input_set |= ICE_INSET_PPPOE_PROTO;
1139                                         input_set_byte += 2;
1140                                         pppoe_prot_valid = 1;
1141                                 }
1142                                 if ((pppoe_proto_mask->proto_id &
1143                                         pppoe_proto_spec->proto_id) !=
1144                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1145                                         (pppoe_proto_mask->proto_id &
1146                                         pppoe_proto_spec->proto_id) !=
1147                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1148                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1149                                 else
1150                                         *tun_type = ICE_SW_TUN_PPPOE;
1151                                 t++;
1152                         }
1153
1154                         break;
1155
1156                 case RTE_FLOW_ITEM_TYPE_ESP:
1157                         esp_spec = item->spec;
1158                         esp_mask = item->mask;
1159                         if ((esp_spec && !esp_mask) ||
1160                                 (!esp_spec && esp_mask)) {
1161                                 rte_flow_error_set(error, EINVAL,
1162                                            RTE_FLOW_ERROR_TYPE_ITEM,
1163                                            item,
1164                                            "Invalid ESP item");
1165                                 return 0;
1166                         }
1167                         /* Check esp mask and update input set */
1168                         if (esp_mask && esp_mask->hdr.seq) {
1169                                 rte_flow_error_set(error, EINVAL,
1170                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1171                                                 item,
1172                                                 "Invalid ESP mask");
1173                                 return 0;
1174                         }
1175
1176                         if (!esp_spec && !esp_mask && !input_set) {
1177                                 profile_rule = 1;
1178                                 if (ipv6_valid && udp_valid)
1179                                         *tun_type =
1180                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1181                                 else if (ipv6_valid)
1182                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1183                                 else if (ipv4_valid)
1184                                         return 0;
1185                         } else if (esp_spec && esp_mask &&
1186                                                 esp_mask->hdr.spi) {
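                                /* ESP carried over UDP is NAT traversal
                                 * (UDP-encapsulated ESP, RFC 3948), so match
                                 * on the NAT-T header type; plain ESP over IP
                                 * uses the ESP header type.
                                 */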
1187                                 if (udp_valid)
1188                                         list[t].type = ICE_NAT_T;
1189                                 else
1190                                         list[t].type = ICE_ESP;
1191                                 list[t].h_u.esp_hdr.spi =
1192                                         esp_spec->hdr.spi;
1193                                 list[t].m_u.esp_hdr.spi =
1194                                         esp_mask->hdr.spi;
1195                                 input_set |= ICE_INSET_ESP_SPI;
1196                                 input_set_byte += 4;
1197                                 t++;
1198                         }
1199
1200                         if (!profile_rule) {
1201                                 if (ipv6_valid && udp_valid)
1202                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1203                                 else if (ipv4_valid && udp_valid)
1204                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1205                                 else if (ipv6_valid)
1206                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1207                                 else if (ipv4_valid)
1208                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1209                         }
1210                         break;
1211
1212                 case RTE_FLOW_ITEM_TYPE_AH:
1213                         ah_spec = item->spec;
1214                         ah_mask = item->mask;
1215                         if ((ah_spec && !ah_mask) ||
1216                                 (!ah_spec && ah_mask)) {
1217                                 rte_flow_error_set(error, EINVAL,
1218                                            RTE_FLOW_ERROR_TYPE_ITEM,
1219                                            item,
1220                                            "Invalid AH item");
1221                                 return 0;
1222                         }
1223                         /* Check ah mask and update input set */
1224                         if (ah_mask &&
1225                                 (ah_mask->next_hdr ||
1226                                 ah_mask->payload_len ||
1227                                 ah_mask->seq_num ||
1228                                 ah_mask->reserved)) {
1229                                 rte_flow_error_set(error, EINVAL,
1230                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1231                                                 item,
1232                                                 "Invalid AH mask");
1233                                 return 0;
1234                         }
1235
1236                         if (!ah_spec && !ah_mask && !input_set) {
1237                                 profile_rule = 1;
1238                                 if (ipv6_valid && udp_valid)
1239                                         *tun_type =
1240                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1241                                 else if (ipv6_valid)
1242                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1243                                 else if (ipv4_valid)
1244                                         return 0;
1245                         } else if (ah_spec && ah_mask &&
1246                                                 ah_mask->spi) {
1247                                 list[t].type = ICE_AH;
1248                                 list[t].h_u.ah_hdr.spi =
1249                                         ah_spec->spi;
1250                                 list[t].m_u.ah_hdr.spi =
1251                                         ah_mask->spi;
1252                                 input_set |= ICE_INSET_AH_SPI;
1253                                 input_set_byte += 4;
1254                                 t++;
1255                         }
1256
1257                         if (!profile_rule) {
1258                                 if (udp_valid)
1259                                         return 0;
1260                                 else if (ipv6_valid)
1261                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1262                                 else if (ipv4_valid)
1263                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1264                         }
1265                         break;
1266
1267                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1268                         l2tp_spec = item->spec;
1269                         l2tp_mask = item->mask;
1270                         if ((l2tp_spec && !l2tp_mask) ||
1271                                 (!l2tp_spec && l2tp_mask)) {
1272                                 rte_flow_error_set(error, EINVAL,
1273                                            RTE_FLOW_ERROR_TYPE_ITEM,
1274                                            item,
1275                                            "Invalid L2TPv3 item");
1276                                 return 0;
1277                         }
1278
1279                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1280                                 profile_rule = 1;
1281                                 if (ipv6_valid)
1282                                         *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1283                                 else if (ipv4_valid)
1284                                         return 0;
1285                         } else if (l2tp_spec && l2tp_mask &&
1286                                                 l2tp_mask->session_id) {
1287                                 list[t].type = ICE_L2TPV3;
1288                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1289                                         l2tp_spec->session_id;
1290                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1291                                         l2tp_mask->session_id;
1292                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1293                                 input_set_byte += 4;
1294                                 t++;
1295                         }
1296
1297                         if (!profile_rule) {
1298                                 if (ipv6_valid)
1299                                         *tun_type =
1300                                         ICE_SW_TUN_IPV6_L2TPV3;
1301                                 else if (ipv4_valid)
1302                                         *tun_type =
1303                                         ICE_SW_TUN_IPV4_L2TPV3;
1304                         }
1305                         break;
1306
1307                 case RTE_FLOW_ITEM_TYPE_PFCP:
1308                         pfcp_spec = item->spec;
1309                         pfcp_mask = item->mask;
1310                         /* A PFCP item may be used only to name the
1311                          * protocol, in which case both spec and mask must
1312                          * be NULL; otherwise neither may be NULL.
1313                          */
1314                         if ((!pfcp_spec && pfcp_mask) ||
1315                             (pfcp_spec && !pfcp_mask)) {
1316                                 rte_flow_error_set(error, EINVAL,
1317                                            RTE_FLOW_ERROR_TYPE_ITEM,
1318                                            item,
1319                                            "Invalid PFCP item");
1320                                 return -ENOTSUP;
1321                         }
1322                         if (pfcp_spec && pfcp_mask) {
1323                                 /* Check pfcp mask and update input set */
1324                                 if (pfcp_mask->msg_type ||
1325                                         pfcp_mask->msg_len ||
1326                                         pfcp_mask->seid) {
1327                                         rte_flow_error_set(error, EINVAL,
1328                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1329                                                 item,
1330                                                 "Invalid PFCP mask");
1331                                         return -ENOTSUP;
1332                                 }
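                                /* The PFCP S field flags whether a SEID is
                                 * present: S = 1 is a session-level message,
                                 * S = 0 a node-level message.  Pick the
                                 * matching PFCP profile for the IP version.
                                 */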
1333                                 if (pfcp_mask->s_field &&
1334                                         pfcp_spec->s_field == 0x01 &&
1335                                         ipv6_valid)
1336                                         *tun_type =
1337                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1338                                 else if (pfcp_mask->s_field &&
1339                                         pfcp_spec->s_field == 0x01)
1340                                         *tun_type =
1341                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1342                                 else if (pfcp_mask->s_field &&
1343                                         !pfcp_spec->s_field &&
1344                                         ipv6_valid)
1345                                         *tun_type =
1346                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1347                                 else if (pfcp_mask->s_field &&
1348                                         !pfcp_spec->s_field)
1349                                         *tun_type =
1350                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1351                                 else
1352                                         return -ENOTSUP;
1353                         }
1354                         break;
1355
1356                 case RTE_FLOW_ITEM_TYPE_VOID:
1357                         break;
1358
1359                 default:
1360                         rte_flow_error_set(error, EINVAL,
1361                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1362                                    "Invalid pattern item.");
1363                         goto out;
1364                 }
1365         }
1366
1367         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1368             inner_vlan_valid && outer_vlan_valid)
1369                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1370         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1371                  inner_vlan_valid && outer_vlan_valid)
1372                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1373         else if (*tun_type == ICE_NON_TUN &&
1374                  inner_vlan_valid && outer_vlan_valid)
1375                 *tun_type = ICE_NON_TUN_QINQ;
1376         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1377                  inner_vlan_valid && outer_vlan_valid)
1378                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1379
1380         if (pppoe_patt_valid && !pppoe_prot_valid) {
1381                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1382                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1383                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1384                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1385                 else if (inner_vlan_valid && outer_vlan_valid)
1386                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1387                 else if (ipv6_valid && udp_valid)
1388                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1389                 else if (ipv6_valid && tcp_valid)
1390                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1391                 else if (ipv4_valid && udp_valid)
1392                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1393                 else if (ipv4_valid && tcp_valid)
1394                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1395                 else if (ipv6_valid)
1396                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1397                 else if (ipv4_valid)
1398                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1399                 else
1400                         *tun_type = ICE_SW_TUN_PPPOE;
1401         }
1402
1403         if (*tun_type == ICE_NON_TUN) {
1404                 if (vxlan_valid)
1405                         *tun_type = ICE_SW_TUN_VXLAN;
1406                 else if (nvgre_valid)
1407                         *tun_type = ICE_SW_TUN_NVGRE;
1408                 else if (ipv4_valid && tcp_valid)
1409                         *tun_type = ICE_SW_IPV4_TCP;
1410                 else if (ipv4_valid && udp_valid)
1411                         *tun_type = ICE_SW_IPV4_UDP;
1412                 else if (ipv6_valid && tcp_valid)
1413                         *tun_type = ICE_SW_IPV6_TCP;
1414                 else if (ipv6_valid && udp_valid)
1415                         *tun_type = ICE_SW_IPV6_UDP;
1416         }
1417
1418         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1419                 rte_flow_error_set(error, EINVAL,
1420                         RTE_FLOW_ERROR_TYPE_ITEM,
1421                         item,
1422                         "too many input set bytes");
1423                 return -ENOTSUP;
1424         }
1425
1426         *lkups_num = t;
1427
1428         return input_set;
1429 out:
1430         return 0;
1431 }
1432
1433 static int
1434 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1435                             const struct rte_flow_action *actions,
1436                             struct rte_flow_error *error,
1437                             struct ice_adv_rule_info *rule_info)
1438 {
1439         const struct rte_flow_action_vf *act_vf;
1440         const struct rte_flow_action *action;
1441         enum rte_flow_action_type action_type;
1442
1443         for (action = actions; action->type !=
1444                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1445                 action_type = action->type;
1446                 switch (action_type) {
1447                 case RTE_FLOW_ACTION_TYPE_VF:
1448                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1449                         act_vf = action->conf;
1450
1451                         if (act_vf->id >= ad->real_hw.num_vfs &&
1452                                 !act_vf->original) {
1453                                 rte_flow_error_set(error,
1454                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1455                                         actions,
1456                                         "Invalid vf id");
1457                                 return -rte_errno;
1458                         }
1459
1460                         if (act_vf->original)
1461                                 rule_info->sw_act.vsi_handle =
1462                                         ad->real_hw.avf.bus.func;
1463                         else
1464                                 rule_info->sw_act.vsi_handle = act_vf->id;
1465                         break;
1466
1467                 case RTE_FLOW_ACTION_TYPE_DROP:
1468                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1469                         break;
1470
1471                 default:
1472                         rte_flow_error_set(error,
1473                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1474                                            actions,
1475                                            "Invalid action type");
1476                         return -rte_errno;
1477                 }
1478         }
1479
1480         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1481         rule_info->sw_act.flag = ICE_FLTR_RX;
1482         rule_info->rx = 1;
1483         rule_info->priority = 5;
1484
1485         return 0;
1486 }
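/* A minimal sketch of an action list this DCF parser accepts, assuming a
 * hypothetical setup in which VF 1 exists under the DCF:
 *
 *	struct rte_flow_action_vf vf_act = { .original = 0, .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf_act },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * With .original unset, vf_act.id is checked against real_hw.num_vfs and
 * then used as the VSI handle that matched packets are forwarded to.
 */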
1487
1488 static int
1489 ice_switch_parse_action(struct ice_pf *pf,
1490                 const struct rte_flow_action *actions,
1491                 struct rte_flow_error *error,
1492                 struct ice_adv_rule_info *rule_info)
1493 {
1494         struct ice_vsi *vsi = pf->main_vsi;
1495         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1496         const struct rte_flow_action_queue *act_q;
1497         const struct rte_flow_action_rss *act_qgrop;
1498         uint16_t base_queue, i;
1499         const struct rte_flow_action *action;
1500         enum rte_flow_action_type action_type;
1501         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1502                  2, 4, 8, 16, 32, 64, 128};
1503
1504         base_queue = pf->base_queue + vsi->base_queue;
1505         for (action = actions; action->type !=
1506                         RTE_FLOW_ACTION_TYPE_END; action++) {
1507                 action_type = action->type;
1508                 switch (action_type) {
1509                 case RTE_FLOW_ACTION_TYPE_RSS:
1510                         act_qgrop = action->conf;
1511                         if (act_qgrop->queue_num <= 1)
1512                                 goto error;
1513                         rule_info->sw_act.fltr_act =
1514                                 ICE_FWD_TO_QGRP;
1515                         rule_info->sw_act.fwd_id.q_id =
1516                                 base_queue + act_qgrop->queue[0];
1517                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1518                                 if (act_qgrop->queue_num ==
1519                                         valid_qgrop_number[i])
1520                                         break;
1521                         }
1522                         if (i == MAX_QGRP_NUM_TYPE)
1523                                 goto error;
1524                         if ((act_qgrop->queue[0] +
1525                                 act_qgrop->queue_num) >
1526                                 dev->data->nb_rx_queues)
1527                                 goto error1;
1528                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1529                                 if (act_qgrop->queue[i + 1] !=
1530                                         act_qgrop->queue[i] + 1)
1531                                         goto error2;
1532                         rule_info->sw_act.qgrp_size =
1533                                 act_qgrop->queue_num;
1534                         break;
1535                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1536                         act_q = action->conf;
1537                         if (act_q->index >= dev->data->nb_rx_queues)
1538                                 goto error;
1539                         rule_info->sw_act.fltr_act =
1540                                 ICE_FWD_TO_Q;
1541                         rule_info->sw_act.fwd_id.q_id =
1542                                 base_queue + act_q->index;
1543                         break;
1544
1545                 case RTE_FLOW_ACTION_TYPE_DROP:
1546                         rule_info->sw_act.fltr_act =
1547                                 ICE_DROP_PACKET;
1548                         break;
1549
1550                 case RTE_FLOW_ACTION_TYPE_VOID:
1551                         break;
1552
1553                 default:
1554                         goto error;
1555                 }
1556         }
1557
1558         rule_info->sw_act.vsi_handle = vsi->idx;
1559         rule_info->rx = 1;
1560         rule_info->sw_act.src = vsi->idx;
1561         rule_info->priority = 5;
1562
1563         return 0;
1564
1565 error:
1566         rte_flow_error_set(error,
1567                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1568                 actions,
1569                 "Invalid action type or queue number");
1570         return -rte_errno;
1571
1572 error1:
1573         rte_flow_error_set(error,
1574                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1575                 actions,
1576                 "Invalid queue region indexes");
1577         return -rte_errno;
1578
1579 error2:
1580         rte_flow_error_set(error,
1581                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1582                 actions,
1583                 "Discontinuous queue region");
1584         return -rte_errno;
1585 }
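/* A sketch of an RSS queue-group action that passes the checks above, with
 * hypothetical queue indexes: queue_num must be one of 2, 4, 8, 16, 32, 64
 * or 128, the queues must be consecutive, and the whole region must fit
 * within the device's Rx queue count:
 *
 *	uint16_t queues[] = { 8, 9, 10, 11 };
 *	struct rte_flow_action_rss qgrp = {
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &qgrp },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */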
1586
1587 static int
1588 ice_switch_check_action(const struct rte_flow_action *actions,
1589                             struct rte_flow_error *error)
1590 {
1591         const struct rte_flow_action *action;
1592         enum rte_flow_action_type action_type;
1593         uint16_t actions_num = 0;
1594
1595         for (action = actions; action->type !=
1596                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1597                 action_type = action->type;
1598                 switch (action_type) {
1599                 case RTE_FLOW_ACTION_TYPE_VF:
1600                 case RTE_FLOW_ACTION_TYPE_RSS:
1601                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1602                 case RTE_FLOW_ACTION_TYPE_DROP:
1603                         actions_num++;
1604                         break;
1605                 case RTE_FLOW_ACTION_TYPE_VOID:
1606                         continue;
1607                 default:
1608                         rte_flow_error_set(error,
1609                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1610                                            actions,
1611                                            "Invalid action type");
1612                         return -rte_errno;
1613                 }
1614         }
1615
1616         if (actions_num != 1) {
1617                 rte_flow_error_set(error,
1618                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1619                                    actions,
1620                                    "Invalid action number");
1621                 return -rte_errno;
1622         }
1623
1624         return 0;
1625 }
1626
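/* "Profile" rules match packets by switch profile ID alone, carrying no
 * explicit input-set fields, so an empty input set is legal for the tunnel
 * types listed below.
 */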
1627 static bool
1628 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1629 {
1630         switch (tun_type) {
1631         case ICE_SW_TUN_PROFID_IPV6_ESP:
1632         case ICE_SW_TUN_PROFID_IPV6_AH:
1633         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1634         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1635         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1636         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1637         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1638         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1639                 return true;
1640         default:
1641                 break;
1642         }
1643
1644         return false;
1645 }
1646
1647 static int
1648 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1649                 struct ice_pattern_match_item *array,
1650                 uint32_t array_len,
1651                 const struct rte_flow_item pattern[],
1652                 const struct rte_flow_action actions[],
1653                 void **meta,
1654                 struct rte_flow_error *error)
1655 {
1656         struct ice_pf *pf = &ad->pf;
1657         uint64_t inputset = 0;
1658         int ret = 0;
1659         struct sw_meta *sw_meta_ptr = NULL;
1660         struct ice_adv_rule_info rule_info;
1661         struct ice_adv_lkup_elem *list = NULL;
1662         uint16_t lkups_num = 0;
1663         const struct rte_flow_item *item = pattern;
1664         uint16_t item_num = 0;
1665         uint16_t vlan_num = 0;
1666         enum ice_sw_tunnel_type tun_type =
1667                         ICE_NON_TUN;
1668         struct ice_pattern_match_item *pattern_match_item = NULL;
1669
1670         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1671                 item_num++;
1672                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1673                         const struct rte_flow_item_eth *eth_mask;
1674                         if (item->mask)
1675                                 eth_mask = item->mask;
1676                         else
1677                                 continue;
1678                         if (eth_mask->type == UINT16_MAX)
1679                                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1680                 }
1681
1682                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1683                         vlan_num++;
1684
1685                 /* Reserve one more memory slot for ETH, which may
1686                  * consume two lookup items.
1687                  */
1688                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1689                         item_num++;
1690         }
1691
1692         if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1693                 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1694         else if (vlan_num == 2)
1695                 tun_type = ICE_NON_TUN_QINQ;
1696
1697         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1698         if (!list) {
1699                 rte_flow_error_set(error, EINVAL,
1700                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1701                                    "No memory for PMD internal items");
1702                 return -rte_errno;
1703         }
1704
1705         sw_meta_ptr =
1706                 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1707         if (!sw_meta_ptr) {
1708                 rte_flow_error_set(error, EINVAL,
1709                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1710                                    "No memory for sw_pattern_meta_ptr");
1711                 goto error;
1712         }
1713
1714         pattern_match_item =
1715                 ice_search_pattern_match_item(ad, pattern, array, array_len,
1716                                               error);
1717         if (!pattern_match_item) {
1718                 rte_flow_error_set(error, EINVAL,
1719                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1720                                    "Invalid input pattern");
1721                 goto error;
1722         }
1723
1724         inputset = ice_switch_inset_get(pattern, error, list,
1725                                         &lkups_num, &tun_type);
1726         if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1727                 (inputset & ~pattern_match_item->input_set_mask)) {
1728                 rte_flow_error_set(error, EINVAL,
1729                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1730                                    pattern,
1731                                    "Invalid input set");
1732                 goto error;
1733         }
1734
1735         memset(&rule_info, 0, sizeof(rule_info));
1736         rule_info.tun_type = tun_type;
1737
1738         ret = ice_switch_check_action(actions, error);
1739         if (ret)
1740                 goto error;
1741
1742         if (ad->hw.dcf_enabled)
1743                 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1744                                                   &rule_info);
1745         else
1746                 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1747
1748         if (ret)
1749                 goto error;
1750
1751         if (meta) {
1752                 *meta = sw_meta_ptr;
1753                 ((struct sw_meta *)*meta)->list = list;
1754                 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1755                 ((struct sw_meta *)*meta)->rule_info = rule_info;
1756         } else {
1757                 rte_free(list);
1758                 rte_free(sw_meta_ptr);
1759         }
1760
1761         rte_free(pattern_match_item);
1762
1763         return 0;
1764
1765 error:
1766         rte_free(list);
1767         rte_free(sw_meta_ptr);
1768         rte_free(pattern_match_item);
1769
1770         return -rte_errno;
1771 }
1772
1773 static int
1774 ice_switch_query(struct ice_adapter *ad __rte_unused,
1775                 struct rte_flow *flow __rte_unused,
1776                 struct rte_flow_query_count *count __rte_unused,
1777                 struct rte_flow_error *error)
1778 {
1779         rte_flow_error_set(error, EINVAL,
1780                 RTE_FLOW_ERROR_TYPE_HANDLE,
1781                 NULL,
1782                 "count action not supported by switch filter");
1783
1784         return -rte_errno;
1785 }
1786
1787 static int
1788 ice_switch_redirect(struct ice_adapter *ad,
1789                     struct rte_flow *flow,
1790                     struct ice_flow_redirect *rd)
1791 {
1792         struct ice_rule_query_data *rdata = flow->rule;
1793         struct ice_adv_fltr_mgmt_list_entry *list_itr;
1794         struct ice_adv_lkup_elem *lkups_dp = NULL;
1795         struct LIST_HEAD_TYPE *list_head;
1796         struct ice_adv_rule_info rinfo;
1797         struct ice_hw *hw = &ad->hw;
1798         struct ice_switch_info *sw;
1799         uint16_t lkups_cnt;
1800         int ret;
1801
1802         if (rdata->vsi_handle != rd->vsi_handle)
1803                 return 0;
1804
1805         sw = hw->switch_info;
1806         if (!sw->recp_list[rdata->rid].recp_created)
1807                 return -EINVAL;
1808
1809         if (rd->type != ICE_FLOW_REDIRECT_VSI)
1810                 return -ENOTSUP;
1811
1812         list_head = &sw->recp_list[rdata->rid].filt_rules;
1813         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1814                             list_entry) {
1815                 rinfo = list_itr->rule_info;
1816                 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1817                     rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1818                     rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1819                     (rinfo.fltr_rule_id == rdata->rule_id &&
1820                     rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) {
1821                         lkups_cnt = list_itr->lkups_cnt;
1822                         lkups_dp = (struct ice_adv_lkup_elem *)
1823                                 ice_memdup(hw, list_itr->lkups,
1824                                            sizeof(*list_itr->lkups) *
1825                                            lkups_cnt, ICE_NONDMA_TO_NONDMA);
1826
1827                         if (!lkups_dp) {
1828                                 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
1829                                 return -EINVAL;
1830                         }
1831
1832                         if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
1833                                 rinfo.sw_act.vsi_handle = rd->vsi_handle;
1834                                 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
1835                         }
1836                         break;
1837                 }
1838         }
1839
1840         if (!lkups_dp)
1841                 return -EINVAL;
1842
1843         /* Remove the old rule */
1844         ret = ice_rem_adv_rule(hw, list_itr->lkups,
1845                                lkups_cnt, &rinfo);
1846         if (ret) {
1847                 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
1848                             rdata->rule_id);
1849                 ret = -EINVAL;
1850                 goto out;
1851         }
1852
1853         /* Update VSI context */
1854         hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
1855
1856         /* Replay the rule */
1857         ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
1858                                &rinfo, rdata);
1859         if (ret) {
1860                 PMD_DRV_LOG(ERR, "Failed to replay the rule");
1861                 ret = -EINVAL;
1862         }
1863
1864 out:
1865         ice_free(hw, lkups_dp);
1866         return ret;
1867 }
1868
1869 static int
1870 ice_switch_init(struct ice_adapter *ad)
1871 {
1872         int ret = 0;
1873         struct ice_flow_parser *dist_parser;
1874         struct ice_flow_parser *perm_parser;
1875
1876         if (ad->devargs.pipe_mode_support) {
1877                 perm_parser = &ice_switch_perm_parser;
1878                 ret = ice_register_parser(perm_parser, ad);
1879         } else {
1880                 dist_parser = &ice_switch_dist_parser;
1881                 ret = ice_register_parser(dist_parser, ad);
1882         }
1883         return ret;
1884 }
1885
1886 static void
1887 ice_switch_uninit(struct ice_adapter *ad)
1888 {
1889         struct ice_flow_parser *dist_parser;
1890         struct ice_flow_parser *perm_parser;
1891
1892         if (ad->devargs.pipe_mode_support) {
1893                 perm_parser = &ice_switch_perm_parser;
1894                 ice_unregister_parser(perm_parser, ad);
1895         } else {
1896                 dist_parser = &ice_switch_dist_parser;
1897                 ice_unregister_parser(dist_parser, ad);
1898         }
1899 }
1900
1901 static struct
1902 ice_flow_engine ice_switch_engine = {
1903         .init = ice_switch_init,
1904         .uninit = ice_switch_uninit,
1905         .create = ice_switch_create,
1906         .destroy = ice_switch_destroy,
1907         .query_count = ice_switch_query,
1908         .redirect = ice_switch_redirect,
1909         .free = ice_switch_filter_rule_free,
1910         .type = ICE_FLOW_ENGINE_SWITCH,
1911 };
1912
1913 static struct
1914 ice_flow_parser ice_switch_dist_parser = {
1915         .engine = &ice_switch_engine,
1916         .array = ice_switch_pattern_dist_list,
1917         .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1918         .parse_pattern_action = ice_switch_parse_pattern_action,
1919         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1920 };
1921
1922 static struct
1923 ice_flow_parser ice_switch_perm_parser = {
1924         .engine = &ice_switch_engine,
1925         .array = ice_switch_pattern_perm_list,
1926         .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1927         .parse_pattern_action = ice_switch_parse_pattern_action,
1928         .stage = ICE_FLOW_STAGE_PERMISSION,
1929 };
1930
1931 RTE_INIT(ice_sw_engine_init)
1932 {
1933         struct ice_flow_engine *engine = &ice_switch_engine;
1934         ice_register_flow_engine(engine);
1935 }
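
/* Taken together, a rule handled by this engine is created through the
 * generic rte_flow API.  A minimal sketch, with a hypothetical port and
 * addresses, matching a destination IPv4 address and steering to queue 3:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 *
 * The generic flow layer tries each registered parser in its stage; when
 * this switch parser accepts the pattern and action, the rule is programmed
 * through the switch filter.
 */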