net/ice/base: add AQ LLDP filter control command
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
29 #define MAX_QGRP_NUM_TYPE       7
30 #define MAX_INPUT_SET_BYTE      32
31 #define ICE_PPP_IPV4_PROTO      0x0021
32 #define ICE_PPP_IPV6_PROTO      0x0057
33 #define ICE_IPV4_PROTO_NVGRE    0x002F
34
35 #define ICE_SW_INSET_ETHER ( \
36         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38                 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
39                 ICE_INSET_VLAN_OUTER)
40 #define ICE_SW_INSET_MAC_IPV4 ( \
41         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
42         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
43 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
44         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
46         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
47 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
48         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
49         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
50         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV6 ( \
52         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
54         ICE_INSET_IPV6_NEXT_HDR)
55 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
56         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
58         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
59 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
60         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
61         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
62         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
63 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
64         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
65         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
66 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
67         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
68         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
70         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
72         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
74         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
76         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
77 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
78         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
80         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
81 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
82         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
83         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
84         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
85 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
86         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
87         ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
88 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
89         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
91         ICE_INSET_TUN_IPV4_TOS)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
93         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
95         ICE_INSET_TUN_IPV4_TOS)
96 #define ICE_SW_INSET_MAC_PPPOE  ( \
97         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
98         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
99 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
100         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
101         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
102         ICE_INSET_PPPOE_PROTO)
103 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
104         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
105 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
106         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
107 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
108         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
109 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
110         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
111 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
112         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
113 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
114         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
115 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
116         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
117 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
118         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
119 #define ICE_SW_INSET_MAC_IPV4_AH ( \
120         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
121 #define ICE_SW_INSET_MAC_IPV6_AH ( \
122         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
123 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
124         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
125 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
126         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
127 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
128         ICE_SW_INSET_MAC_IPV4 | \
129         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
130 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
131         ICE_SW_INSET_MAC_IPV6 | \
132         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
133
134 struct sw_meta {
135         struct ice_adv_lkup_elem *list;
136         uint16_t lkups_num;
137         struct ice_adv_rule_info rule_info;
138 };
139
140 static struct ice_flow_parser ice_switch_dist_parser_os;
141 static struct ice_flow_parser ice_switch_dist_parser_comms;
142 static struct ice_flow_parser ice_switch_perm_parser_os;
143 static struct ice_flow_parser ice_switch_perm_parser_comms;
144
145 static struct
146 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
147         {pattern_ethertype,
148                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
149         {pattern_ethertype_vlan,
150                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
151         {pattern_eth_arp,
152                         ICE_INSET_NONE, ICE_INSET_NONE},
153         {pattern_eth_ipv4,
154                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
155         {pattern_eth_ipv4_udp,
156                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
157         {pattern_eth_ipv4_tcp,
158                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
159         {pattern_eth_ipv6,
160                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
161         {pattern_eth_ipv6_udp,
162                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
163         {pattern_eth_ipv6_tcp,
164                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
165         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
166                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
167         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
168                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
169         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
170                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
171         {pattern_eth_ipv4_nvgre_eth_ipv4,
172                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
173         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
174                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
175         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
176                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
177 };
178
179 static struct
180 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
181         {pattern_ethertype,
182                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
183         {pattern_ethertype_vlan,
184                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
185         {pattern_eth_arp,
186                         ICE_INSET_NONE, ICE_INSET_NONE},
187         {pattern_eth_ipv4,
188                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
189         {pattern_eth_ipv4_udp,
190                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
191         {pattern_eth_ipv4_tcp,
192                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
193         {pattern_eth_ipv6,
194                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
195         {pattern_eth_ipv6_udp,
196                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
197         {pattern_eth_ipv6_tcp,
198                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
199         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
200                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
201         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
202                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
203         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
204                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
205         {pattern_eth_ipv4_nvgre_eth_ipv4,
206                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
207         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
208                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
209         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
210                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
211         {pattern_eth_pppoes,
212                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
213         {pattern_eth_vlan_pppoes,
214                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
215         {pattern_eth_pppoes_proto,
216                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
217         {pattern_eth_vlan_pppoes_proto,
218                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
219         {pattern_eth_pppoes_ipv4,
220                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
221         {pattern_eth_pppoes_ipv4_tcp,
222                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
223         {pattern_eth_pppoes_ipv4_udp,
224                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
225         {pattern_eth_pppoes_ipv6,
226                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
227         {pattern_eth_pppoes_ipv6_tcp,
228                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
229         {pattern_eth_pppoes_ipv6_udp,
230                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
231         {pattern_eth_vlan_pppoes_ipv4,
232                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
233         {pattern_eth_vlan_pppoes_ipv4_tcp,
234                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
235         {pattern_eth_vlan_pppoes_ipv4_udp,
236                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
237         {pattern_eth_vlan_pppoes_ipv6,
238                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
239         {pattern_eth_vlan_pppoes_ipv6_tcp,
240                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
241         {pattern_eth_vlan_pppoes_ipv6_udp,
242                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
243         {pattern_eth_ipv4_esp,
244                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
245         {pattern_eth_ipv4_udp_esp,
246                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
247         {pattern_eth_ipv6_esp,
248                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
249         {pattern_eth_ipv6_udp_esp,
250                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
251         {pattern_eth_ipv4_ah,
252                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
253         {pattern_eth_ipv6_ah,
254                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
255         {pattern_eth_ipv6_udp_ah,
256                         ICE_INSET_NONE, ICE_INSET_NONE},
257         {pattern_eth_ipv4_l2tp,
258                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
259         {pattern_eth_ipv6_l2tp,
260                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
261         {pattern_eth_ipv4_pfcp,
262                         ICE_INSET_NONE, ICE_INSET_NONE},
263         {pattern_eth_ipv6_pfcp,
264                         ICE_INSET_NONE, ICE_INSET_NONE},
265 };
266
267 static struct
268 ice_pattern_match_item ice_switch_pattern_perm_os[] = {
269         {pattern_ethertype,
270                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
271         {pattern_ethertype_vlan,
272                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
273         {pattern_eth_arp,
274                         ICE_INSET_NONE, ICE_INSET_NONE},
275         {pattern_eth_ipv4,
276                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
277         {pattern_eth_ipv4_udp,
278                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
279         {pattern_eth_ipv4_tcp,
280                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
281         {pattern_eth_ipv6,
282                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
283         {pattern_eth_ipv6_udp,
284                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
285         {pattern_eth_ipv6_tcp,
286                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
287         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
288                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
289         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
290                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
291         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
292                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
293         {pattern_eth_ipv4_nvgre_eth_ipv4,
294                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
295         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
296                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
297         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
298                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
299 };
300
301 static struct
302 ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
303         {pattern_ethertype,
304                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
305         {pattern_ethertype_vlan,
306                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
307         {pattern_eth_arp,
308                 ICE_INSET_NONE, ICE_INSET_NONE},
309         {pattern_eth_ipv4,
310                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
311         {pattern_eth_ipv4_udp,
312                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
313         {pattern_eth_ipv4_tcp,
314                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
315         {pattern_eth_ipv6,
316                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
317         {pattern_eth_ipv6_udp,
318                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
319         {pattern_eth_ipv6_tcp,
320                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
321         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
322                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
323         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
324                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
325         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
326                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
327         {pattern_eth_ipv4_nvgre_eth_ipv4,
328                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
329         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
330                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
331         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
332                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
333         {pattern_eth_pppoes,
334                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
335         {pattern_eth_vlan_pppoes,
336                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
337         {pattern_eth_pppoes_proto,
338                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
339         {pattern_eth_vlan_pppoes_proto,
340                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
341         {pattern_eth_pppoes_ipv4,
342                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
343         {pattern_eth_pppoes_ipv4_tcp,
344                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
345         {pattern_eth_pppoes_ipv4_udp,
346                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
347         {pattern_eth_pppoes_ipv6,
348                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
349         {pattern_eth_pppoes_ipv6_tcp,
350                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
351         {pattern_eth_pppoes_ipv6_udp,
352                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
353         {pattern_eth_vlan_pppoes_ipv4,
354                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
355         {pattern_eth_vlan_pppoes_ipv4_tcp,
356                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
357         {pattern_eth_vlan_pppoes_ipv4_udp,
358                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
359         {pattern_eth_vlan_pppoes_ipv6,
360                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
361         {pattern_eth_vlan_pppoes_ipv6_tcp,
362                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
363         {pattern_eth_vlan_pppoes_ipv6_udp,
364                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
365         {pattern_eth_ipv4_esp,
366                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
367         {pattern_eth_ipv4_udp_esp,
368                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
369         {pattern_eth_ipv6_esp,
370                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
371         {pattern_eth_ipv6_udp_esp,
372                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
373         {pattern_eth_ipv4_ah,
374                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
375         {pattern_eth_ipv6_ah,
376                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
377         {pattern_eth_ipv6_udp_ah,
378                         ICE_INSET_NONE, ICE_INSET_NONE},
379         {pattern_eth_ipv4_l2tp,
380                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
381         {pattern_eth_ipv6_l2tp,
382                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
383         {pattern_eth_ipv4_pfcp,
384                         ICE_INSET_NONE, ICE_INSET_NONE},
385         {pattern_eth_ipv6_pfcp,
386                         ICE_INSET_NONE, ICE_INSET_NONE},
387 };
388
389 static int
390 ice_switch_create(struct ice_adapter *ad,
391                 struct rte_flow *flow,
392                 void *meta,
393                 struct rte_flow_error *error)
394 {
395         int ret = 0;
396         struct ice_pf *pf = &ad->pf;
397         struct ice_hw *hw = ICE_PF_TO_HW(pf);
398         struct ice_rule_query_data rule_added = {0};
399         struct ice_rule_query_data *filter_ptr;
400         struct ice_adv_lkup_elem *list =
401                 ((struct sw_meta *)meta)->list;
402         uint16_t lkups_cnt =
403                 ((struct sw_meta *)meta)->lkups_num;
404         struct ice_adv_rule_info *rule_info =
405                 &((struct sw_meta *)meta)->rule_info;
406
407         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
408                 rte_flow_error_set(error, EINVAL,
409                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
410                         "item number too large for rule");
411                 goto error;
412         }
413         if (!list) {
414                 rte_flow_error_set(error, EINVAL,
415                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
416                         "lookup list should not be NULL");
417                 goto error;
418         }
419         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
420         if (!ret) {
421                 filter_ptr = rte_zmalloc("ice_switch_filter",
422                         sizeof(struct ice_rule_query_data), 0);
423                 if (!filter_ptr) {
424                         rte_flow_error_set(error, EINVAL,
425                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
426                                    "No memory for ice_switch_filter");
427                         goto error;
428                 }
429                 flow->rule = filter_ptr;
430                 rte_memcpy(filter_ptr,
431                         &rule_added,
432                         sizeof(struct ice_rule_query_data));
433         } else {
434                 rte_flow_error_set(error, EINVAL,
435                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
436                         "switch filter create flow fail");
437                 goto error;
438         }
439
440         rte_free(list);
441         rte_free(meta);
442         return 0;
443
444 error:
445         rte_free(list);
446         rte_free(meta);
447
448         return -rte_errno;
449 }
450
451 static int
452 ice_switch_destroy(struct ice_adapter *ad,
453                 struct rte_flow *flow,
454                 struct rte_flow_error *error)
455 {
456         struct ice_hw *hw = &ad->hw;
457         int ret;
458         struct ice_rule_query_data *filter_ptr;
459
460         filter_ptr = (struct ice_rule_query_data *)
461                 flow->rule;
462
463         if (!filter_ptr) {
464                 rte_flow_error_set(error, EINVAL,
465                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
466                         "no such flow"
467                         " create by switch filter");
468                 return -rte_errno;
469         }
470
471         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
472         if (ret) {
473                 rte_flow_error_set(error, EINVAL,
474                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
475                         "fail to destroy switch filter rule");
476                 return -rte_errno;
477         }
478
479         rte_free(filter_ptr);
480         return ret;
481 }
482
483 static void
484 ice_switch_filter_rule_free(struct rte_flow *flow)
485 {
486         rte_free(flow->rule);
487 }
488
489 static uint64_t
490 ice_switch_inset_get(const struct rte_flow_item pattern[],
491                 struct rte_flow_error *error,
492                 struct ice_adv_lkup_elem *list,
493                 uint16_t *lkups_num,
494                 enum ice_sw_tunnel_type *tun_type)
495 {
496         const struct rte_flow_item *item = pattern;
497         enum rte_flow_item_type item_type;
498         const struct rte_flow_item_eth *eth_spec, *eth_mask;
499         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
500         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
501         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
502         const struct rte_flow_item_udp *udp_spec, *udp_mask;
503         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
504         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
505         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
506         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
507         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
508         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
509                                 *pppoe_proto_mask;
510         const struct rte_flow_item_esp *esp_spec, *esp_mask;
511         const struct rte_flow_item_ah *ah_spec, *ah_mask;
512         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
513         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
514         uint64_t input_set = ICE_INSET_NONE;
515         uint16_t input_set_byte = 0;
516         bool pppoe_elem_valid = 0;
517         bool pppoe_patt_valid = 0;
518         bool pppoe_prot_valid = 0;
519         bool tunnel_valid = 0;
520         bool profile_rule = 0;
521         bool nvgre_valid = 0;
522         bool vxlan_valid = 0;
523         bool ipv6_valid = 0;
524         bool ipv4_valid = 0;
525         bool udp_valid = 0;
526         bool tcp_valid = 0;
527         uint16_t j, t = 0;
528
529         for (item = pattern; item->type !=
530                         RTE_FLOW_ITEM_TYPE_END; item++) {
531                 if (item->last) {
532                         rte_flow_error_set(error, EINVAL,
533                                         RTE_FLOW_ERROR_TYPE_ITEM,
534                                         item,
535                                         "Not support range");
536                         return 0;
537                 }
538                 item_type = item->type;
539
540                 switch (item_type) {
541                 case RTE_FLOW_ITEM_TYPE_ETH:
542                         eth_spec = item->spec;
543                         eth_mask = item->mask;
544                         if (eth_spec && eth_mask) {
545                                 const uint8_t *a = eth_mask->src.addr_bytes;
546                                 const uint8_t *b = eth_mask->dst.addr_bytes;
547                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
548                                         if (a[j] && tunnel_valid) {
549                                                 input_set |=
550                                                         ICE_INSET_TUN_SMAC;
551                                                 break;
552                                         } else if (a[j]) {
553                                                 input_set |=
554                                                         ICE_INSET_SMAC;
555                                                 break;
556                                         }
557                                 }
558                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
559                                         if (b[j] && tunnel_valid) {
560                                                 input_set |=
561                                                         ICE_INSET_TUN_DMAC;
562                                                 break;
563                                         } else if (b[j]) {
564                                                 input_set |=
565                                                         ICE_INSET_DMAC;
566                                                 break;
567                                         }
568                                 }
569                                 if (eth_mask->type)
570                                         input_set |= ICE_INSET_ETHERTYPE;
571                                 list[t].type = (tunnel_valid  == 0) ?
572                                         ICE_MAC_OFOS : ICE_MAC_IL;
573                                 struct ice_ether_hdr *h;
574                                 struct ice_ether_hdr *m;
575                                 uint16_t i = 0;
576                                 h = &list[t].h_u.eth_hdr;
577                                 m = &list[t].m_u.eth_hdr;
578                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
579                                         if (eth_mask->src.addr_bytes[j]) {
580                                                 h->src_addr[j] =
581                                                 eth_spec->src.addr_bytes[j];
582                                                 m->src_addr[j] =
583                                                 eth_mask->src.addr_bytes[j];
584                                                 i = 1;
585                                                 input_set_byte++;
586                                         }
587                                         if (eth_mask->dst.addr_bytes[j]) {
588                                                 h->dst_addr[j] =
589                                                 eth_spec->dst.addr_bytes[j];
590                                                 m->dst_addr[j] =
591                                                 eth_mask->dst.addr_bytes[j];
592                                                 i = 1;
593                                                 input_set_byte++;
594                                         }
595                                 }
596                                 if (i)
597                                         t++;
598                                 if (eth_mask->type) {
599                                         list[t].type = ICE_ETYPE_OL;
600                                         list[t].h_u.ethertype.ethtype_id =
601                                                 eth_spec->type;
602                                         list[t].m_u.ethertype.ethtype_id =
603                                                 eth_mask->type;
604                                         input_set_byte += 2;
605                                         t++;
606                                 }
607                         }
608                         break;
609
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* Translate one IPv4 pattern item into a lookup-list
			 * entry.  When both spec and mask are NULL the item
			 * only records IPv4 presence (ipv4_valid) and adds no
			 * match fields.
			 */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			ipv4_valid = 1;
			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set.
				 * Reject mask bits on fields the switch filter
				 * cannot match (version/IHL, total length,
				 * packet id, checksum).
				 */
				if (ipv4_mask->hdr.version_ihl ||
					ipv4_mask->hdr.total_length ||
					ipv4_mask->hdr.packet_id ||
					ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return 0;
				}

				/* A tunnel item seen earlier in the pattern
				 * means this IPv4 header is the inner one, so
				 * the TUN_* input-set bits are used instead.
				 */
				if (tunnel_valid) {
					if (ipv4_mask->hdr.type_of_service)
						input_set |=
							ICE_INSET_TUN_IPV4_TOS;
					if (ipv4_mask->hdr.src_addr)
						input_set |=
							ICE_INSET_TUN_IPV4_SRC;
					if (ipv4_mask->hdr.dst_addr)
						input_set |=
							ICE_INSET_TUN_IPV4_DST;
					if (ipv4_mask->hdr.time_to_live)
						input_set |=
							ICE_INSET_TUN_IPV4_TTL;
					if (ipv4_mask->hdr.next_proto_id)
						input_set |=
						ICE_INSET_TUN_IPV4_PROTO;
				} else {
					if (ipv4_mask->hdr.src_addr)
						input_set |= ICE_INSET_IPV4_SRC;
					if (ipv4_mask->hdr.dst_addr)
						input_set |= ICE_INSET_IPV4_DST;
					if (ipv4_mask->hdr.time_to_live)
						input_set |= ICE_INSET_IPV4_TTL;
					if (ipv4_mask->hdr.next_proto_id)
						input_set |=
						ICE_INSET_IPV4_PROTO;
					if (ipv4_mask->hdr.type_of_service)
						input_set |=
							ICE_INSET_IPV4_TOS;
				}
				/* OFOS = outer header (no tunnel item seen
				 * yet), IL = inner header (after a tunnel
				 * item).
				 */
				list[t].type = (tunnel_valid  == 0) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
				if (ipv4_mask->hdr.src_addr) {
					list[t].h_u.ipv4_hdr.src_addr =
						ipv4_spec->hdr.src_addr;
					list[t].m_u.ipv4_hdr.src_addr =
						ipv4_mask->hdr.src_addr;
					/* NOTE(review): src/dst are 4-byte
					 * fields but only 2 is added toward
					 * MAX_INPUT_SET_BYTE — confirm the
					 * intended accounting unit.
					 */
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.dst_addr) {
					list[t].h_u.ipv4_hdr.dst_addr =
						ipv4_spec->hdr.dst_addr;
					list[t].m_u.ipv4_hdr.dst_addr =
						ipv4_mask->hdr.dst_addr;
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.time_to_live) {
					list[t].h_u.ipv4_hdr.time_to_live =
						ipv4_spec->hdr.time_to_live;
					list[t].m_u.ipv4_hdr.time_to_live =
						ipv4_mask->hdr.time_to_live;
					input_set_byte++;
				}
				if (ipv4_mask->hdr.next_proto_id) {
					list[t].h_u.ipv4_hdr.protocol =
						ipv4_spec->hdr.next_proto_id;
					list[t].m_u.ipv4_hdr.protocol =
						ipv4_mask->hdr.next_proto_id;
					input_set_byte++;
				}
				/* Protocol 0x2F (GRE) after masking: the flow
				 * may carry an NVGRE payload, so mark the rule
				 * as matching both tunneled and plain traffic.
				 */
				if ((ipv4_spec->hdr.next_proto_id &
					ipv4_mask->hdr.next_proto_id) ==
					ICE_IPV4_PROTO_NVGRE)
					*tun_type = ICE_SW_TUN_AND_NON_TUN;
				if (ipv4_mask->hdr.type_of_service) {
					list[t].h_u.ipv4_hdr.tos =
						ipv4_spec->hdr.type_of_service;
					list[t].m_u.ipv4_hdr.tos =
						ipv4_mask->hdr.type_of_service;
					input_set_byte++;
				}
				/* One lookup entry consumed. */
				t++;
			}
			break;
701
		case RTE_FLOW_ITEM_TYPE_IPV6:
			/* Translate one IPv6 pattern item into a lookup-list
			 * entry.  A NULL spec/mask pair only records IPv6
			 * presence (ipv6_valid).
			 */
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			ipv6_valid = 1;
			if (ipv6_spec && ipv6_mask) {
				/* The payload length field cannot be matched
				 * by the switch filter.
				 */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask");
					return 0;
				}

				/* Scan the 16 address bytes: any non-zero
				 * mask byte enables the (tunnel-aware)
				 * src/dst input-set bit.
				 */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_SRC;
						break;
					} else if (ipv6_mask->hdr.src_addr[j]) {
						input_set |= ICE_INSET_IPV6_SRC;
						break;
					}
				}
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.dst_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_DST;
						break;
					} else if (ipv6_mask->hdr.dst_addr[j]) {
						input_set |= ICE_INSET_IPV6_DST;
						break;
					}
				}
				if (ipv6_mask->hdr.proto &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_NEXT_HDR;
				else if (ipv6_mask->hdr.proto)
					input_set |=
						ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_HOP_LIMIT;
				else if (ipv6_mask->hdr.hop_limits)
					input_set |=
						ICE_INSET_IPV6_HOP_LIMIT;
				/* Only the traffic-class bits of vtc_flow are
				 * supported; flow label is ignored.
				 */
				if ((ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) &&
					tunnel_valid)
					input_set |=
							ICE_INSET_TUN_IPV6_TC;
				else if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;

				/* OFOS = outer header, IL = inner header. */
				list[t].type = (tunnel_valid  == 0) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				/* f/s alias the value/mask halves of the
				 * lookup entry being filled.
				 */
				struct ice_ipv6_hdr *f;
				struct ice_ipv6_hdr *s;
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				/* Copy only the masked address bytes; each
				 * copied byte counts toward
				 * MAX_INPUT_SET_BYTE.
				 */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
						input_set_byte++;
					}
					if (ipv6_mask->hdr.dst_addr[j]) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
						input_set_byte++;
					}
				}
				if (ipv6_mask->hdr.proto) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr =
						ipv6_mask->hdr.proto;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.hop_limits) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit =
						ipv6_mask->hdr.hop_limits;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) {
					/* Repack just the TC field into the
					 * 32-bit version/TC/flow-label word,
					 * zeroing version and flow label in
					 * both value and mask.
					 */
					struct ice_le_ver_tc_flow vtf;
					vtf.u.fld.version = 0;
					vtf.u.fld.flow_label = 0;
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_spec->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_mask->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					/* Whole 4-byte word is written. */
					input_set_byte += 4;
				}
				t++;
			}
			break;
819
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* Translate one UDP pattern item into a lookup-list
			 * entry; only src/dst ports are matchable.
			 */
			udp_spec = item->spec;
			udp_mask = item->mask;
			udp_valid = 1;
			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set:
				 * length/checksum matching is not supported.
				 */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return 0;
				}

				/* Inner (tunneled) vs outer port bits. */
				if (tunnel_valid) {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_UDP_DST_PORT;
				} else {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_UDP_DST_PORT;
				}
				/* The outer UDP header of a VXLAN flow uses
				 * the tunnel-of (ICE_UDP_OF) protocol id;
				 * everything else is inner/last (ILOS).
				 */
				if (*tun_type == ICE_SW_TUN_VXLAN &&
						tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (udp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;
872
		case RTE_FLOW_ITEM_TYPE_TCP:
			/* Translate one TCP pattern item into a lookup-list
			 * entry; only src/dst ports are matchable.
			 */
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			tcp_valid = 1;
			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set: any
				 * field other than the ports is rejected.
				 */
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid TCP mask");
					return 0;
				}

				/* Inner (tunneled) vs outer port bits. */
				if (tunnel_valid) {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_TCP_DST_PORT;
				} else {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TCP_DST_PORT;
				}
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (tcp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;
926
		case RTE_FLOW_ITEM_TYPE_SCTP:
			/* Translate one SCTP pattern item into a lookup-list
			 * entry; only src/dst ports are matchable (checksum
			 * mask is rejected, tag is silently ignored).
			 */
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid SCTP mask");
					return 0;
				}

				/* Inner (tunneled) vs outer port bits. */
				if (tunnel_valid) {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_SCTP_DST_PORT;
				} else {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_SCTP_DST_PORT;
				}
				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (sctp_mask->hdr.dst_port) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;
973
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return 0;
			}
			/* From here on, later items describe the inner
			 * headers (tunnel_valid gates the TUN_* bits above).
			 */
			vxlan_valid = 1;
			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					/* Fold the 3 VNI bytes into the
					 * 32-bit tnl_hdr.vni field,
					 * vni[0] in the low byte.
					 */
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					input_set |=
						ICE_INSET_TUN_VXLAN_VNI;
					/* NOTE(review): VNI is 3 bytes but
					 * counted as 2 — confirm accounting.
					 */
					input_set_byte += 2;
				}
				t++;
			}
			break;
1011
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return 0;
			}
			/* Later items now describe the inner headers. */
			nvgre_valid = 1;
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					/* Fold the 3 TNI bytes into the
					 * 32-bit tni_flow field, tni[0] in
					 * the low byte.
					 */
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					input_set |=
						ICE_INSET_TUN_NVGRE_TNI;
					input_set_byte += 2;
				}
				t++;
			}
			break;
1049
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VLAN item");
				return 0;
			}
			if (vlan_spec && vlan_mask) {
				/* Only the outer VLAN (OFOS) is supported:
				 * TCI maps to the VLAN id/prio match, the
				 * encapsulated ethertype to ETHERTYPE.
				 */
				list[t].type = ICE_VLAN_OFOS;
				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set |= ICE_INSET_VLAN_OUTER;
					input_set_byte += 2;
				}
				if (vlan_mask->inner_type) {
					list[t].h_u.vlan_hdr.type =
						vlan_spec->inner_type;
					list[t].m_u.vlan_hdr.type =
						vlan_mask->inner_type;
					input_set |= ICE_INSET_ETHERTYPE;
					input_set_byte += 2;
				}
				t++;
			}
			break;
1086
		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return 0;
			}
			/* pppoe_patt_valid: a PPPoE item appeared at all;
			 * pppoe_elem_valid (set below): a lookup entry was
			 * actually emitted for it.
			 */
			pppoe_patt_valid = 1;
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set:
				 * only the session id is matchable.
				 */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return 0;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					input_set |= ICE_INSET_PPPOE_SESSION;
					input_set_byte += 2;
				}
				t++;
				pppoe_elem_valid = 1;
			}
			break;
1128
		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
				(pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return 0;
			}
			if (pppoe_proto_spec && pppoe_proto_mask) {
				/* If the preceding PPPoE item already
				 * emitted an ICE_PPPOE entry, step back and
				 * merge the PPP protocol id into that same
				 * entry instead of adding a new one.
				 */
				if (pppoe_elem_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					input_set |= ICE_INSET_PPPOE_PROTO;
					input_set_byte += 2;
					pppoe_prot_valid = 1;
				}
				/* Masked PPP protocol other than IPv4
				 * (0x0021) or IPv6 (0x0057) payload selects
				 * the plain PPPoE-pay tunnel type; IPv4/IPv6
				 * payloads use the full PPPoE tunnel type.
				 */
				if ((pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
					(pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
					*tun_type = ICE_SW_TUN_PPPOE_PAY;
				else
					*tun_type = ICE_SW_TUN_PPPOE;
				t++;
			}

			break;
1171
1172                 case RTE_FLOW_ITEM_TYPE_ESP:
1173                         esp_spec = item->spec;
1174                         esp_mask = item->mask;
1175                         if ((esp_spec && !esp_mask) ||
1176                                 (!esp_spec && esp_mask)) {
1177                                 rte_flow_error_set(error, EINVAL,
1178                                            RTE_FLOW_ERROR_TYPE_ITEM,
1179                                            item,
1180                                            "Invalid esp item");
1181                                 return 0;
1182                         }
1183                         /* Check esp mask and update input set */
1184                         if (esp_mask && esp_mask->hdr.seq) {
1185                                 rte_flow_error_set(error, EINVAL,
1186                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1187                                                 item,
1188                                                 "Invalid esp mask");
1189                                 return 0;
1190                         }
1191
1192                         if (!esp_spec && !esp_mask && !input_set) {
1193                                 profile_rule = 1;
1194                                 if (ipv6_valid && udp_valid)
1195                                         *tun_type =
1196                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1197                                 else if (ipv6_valid)
1198                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1199                                 else if (ipv4_valid)
1200                                         return 0;
1201                         } else if (esp_spec && esp_mask &&
1202                                                 esp_mask->hdr.spi){
1203                                 if (udp_valid)
1204                                         list[t].type = ICE_NAT_T;
1205                                 else
1206                                         list[t].type = ICE_ESP;
1207                                 list[t].h_u.esp_hdr.spi =
1208                                         esp_spec->hdr.spi;
1209                                 list[t].m_u.esp_hdr.spi =
1210                                         esp_mask->hdr.spi;
1211                                 input_set |= ICE_INSET_ESP_SPI;
1212                                 input_set_byte += 4;
1213                                 t++;
1214                         }
1215
1216                         if (!profile_rule) {
1217                                 if (ipv6_valid && udp_valid)
1218                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1219                                 else if (ipv4_valid && udp_valid)
1220                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1221                                 else if (ipv6_valid)
1222                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1223                                 else if (ipv4_valid)
1224                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1225                         }
1226                         break;
1227
1228                 case RTE_FLOW_ITEM_TYPE_AH:
1229                         ah_spec = item->spec;
1230                         ah_mask = item->mask;
1231                         if ((ah_spec && !ah_mask) ||
1232                                 (!ah_spec && ah_mask)) {
1233                                 rte_flow_error_set(error, EINVAL,
1234                                            RTE_FLOW_ERROR_TYPE_ITEM,
1235                                            item,
1236                                            "Invalid ah item");
1237                                 return 0;
1238                         }
1239                         /* Check ah mask and update input set */
1240                         if (ah_mask &&
1241                                 (ah_mask->next_hdr ||
1242                                 ah_mask->payload_len ||
1243                                 ah_mask->seq_num ||
1244                                 ah_mask->reserved)) {
1245                                 rte_flow_error_set(error, EINVAL,
1246                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1247                                                 item,
1248                                                 "Invalid ah mask");
1249                                 return 0;
1250                         }
1251
1252                         if (!ah_spec && !ah_mask && !input_set) {
1253                                 profile_rule = 1;
1254                                 if (ipv6_valid && udp_valid)
1255                                         *tun_type =
1256                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1257                                 else if (ipv6_valid)
1258                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1259                                 else if (ipv4_valid)
1260                                         return 0;
1261                         } else if (ah_spec && ah_mask &&
1262                                                 ah_mask->spi){
1263                                 list[t].type = ICE_AH;
1264                                 list[t].h_u.ah_hdr.spi =
1265                                         ah_spec->spi;
1266                                 list[t].m_u.ah_hdr.spi =
1267                                         ah_mask->spi;
1268                                 input_set |= ICE_INSET_AH_SPI;
1269                                 input_set_byte += 4;
1270                                 t++;
1271                         }
1272
1273                         if (!profile_rule) {
1274                                 if (udp_valid)
1275                                         return 0;
1276                                 else if (ipv6_valid)
1277                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1278                                 else if (ipv4_valid)
1279                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1280                         }
1281                         break;
1282
1283                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1284                         l2tp_spec = item->spec;
1285                         l2tp_mask = item->mask;
1286                         if ((l2tp_spec && !l2tp_mask) ||
1287                                 (!l2tp_spec && l2tp_mask)) {
1288                                 rte_flow_error_set(error, EINVAL,
1289                                            RTE_FLOW_ERROR_TYPE_ITEM,
1290                                            item,
1291                                            "Invalid l2tp item");
1292                                 return 0;
1293                         }
1294
1295                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1296                                 if (ipv6_valid)
1297                                         *tun_type =
1298                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1299                                 else if (ipv4_valid)
1300                                         return 0;
1301                         } else if (l2tp_spec && l2tp_mask &&
1302                                                 l2tp_mask->session_id){
1303                                 list[t].type = ICE_L2TPV3;
1304                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1305                                         l2tp_spec->session_id;
1306                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1307                                         l2tp_mask->session_id;
1308                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1309                                 input_set_byte += 4;
1310                                 t++;
1311                         }
1312
1313                         if (!profile_rule) {
1314                                 if (ipv6_valid)
1315                                         *tun_type =
1316                                         ICE_SW_TUN_IPV6_L2TPV3;
1317                                 else if (ipv4_valid)
1318                                         *tun_type =
1319                                         ICE_SW_TUN_IPV4_L2TPV3;
1320                         }
1321                         break;
1322
1323                 case RTE_FLOW_ITEM_TYPE_PFCP:
1324                         pfcp_spec = item->spec;
1325                         pfcp_mask = item->mask;
1326                         /* Check if PFCP item is used to describe protocol.
1327                          * If yes, both spec and mask should be NULL.
1328                          * If no, both spec and mask shouldn't be NULL.
1329                          */
1330                         if ((!pfcp_spec && pfcp_mask) ||
1331                             (pfcp_spec && !pfcp_mask)) {
1332                                 rte_flow_error_set(error, EINVAL,
1333                                            RTE_FLOW_ERROR_TYPE_ITEM,
1334                                            item,
1335                                            "Invalid PFCP item");
1336                                 return -ENOTSUP;
1337                         }
1338                         if (pfcp_spec && pfcp_mask) {
1339                                 /* Check pfcp mask and update input set */
1340                                 if (pfcp_mask->msg_type ||
1341                                         pfcp_mask->msg_len ||
1342                                         pfcp_mask->seid) {
1343                                         rte_flow_error_set(error, EINVAL,
1344                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1345                                                 item,
1346                                                 "Invalid pfcp mask");
1347                                         return -ENOTSUP;
1348                                 }
1349                                 if (pfcp_mask->s_field &&
1350                                         pfcp_spec->s_field == 0x01 &&
1351                                         ipv6_valid)
1352                                         *tun_type =
1353                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1354                                 else if (pfcp_mask->s_field &&
1355                                         pfcp_spec->s_field == 0x01)
1356                                         *tun_type =
1357                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1358                                 else if (pfcp_mask->s_field &&
1359                                         !pfcp_spec->s_field &&
1360                                         ipv6_valid)
1361                                         *tun_type =
1362                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1363                                 else if (pfcp_mask->s_field &&
1364                                         !pfcp_spec->s_field)
1365                                         *tun_type =
1366                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1367                                 else
1368                                         return -ENOTSUP;
1369                         }
1370                         break;
1371
1372                 case RTE_FLOW_ITEM_TYPE_VOID:
1373                         break;
1374
1375                 default:
1376                         rte_flow_error_set(error, EINVAL,
1377                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1378                                    "Invalid pattern item.");
1379                         goto out;
1380                 }
1381         }
1382
1383         if (pppoe_patt_valid && !pppoe_prot_valid) {
1384                 if (ipv6_valid && udp_valid)
1385                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1386                 else if (ipv6_valid && tcp_valid)
1387                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1388                 else if (ipv4_valid && udp_valid)
1389                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1390                 else if (ipv4_valid && tcp_valid)
1391                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1392                 else if (ipv6_valid)
1393                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1394                 else if (ipv4_valid)
1395                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1396                 else
1397                         *tun_type = ICE_SW_TUN_PPPOE;
1398         }
1399
1400         if (*tun_type == ICE_NON_TUN) {
1401                 if (vxlan_valid)
1402                         *tun_type = ICE_SW_TUN_VXLAN;
1403                 else if (nvgre_valid)
1404                         *tun_type = ICE_SW_TUN_NVGRE;
1405                 else if (ipv4_valid && tcp_valid)
1406                         *tun_type = ICE_SW_IPV4_TCP;
1407                 else if (ipv4_valid && udp_valid)
1408                         *tun_type = ICE_SW_IPV4_UDP;
1409                 else if (ipv6_valid && tcp_valid)
1410                         *tun_type = ICE_SW_IPV6_TCP;
1411                 else if (ipv6_valid && udp_valid)
1412                         *tun_type = ICE_SW_IPV6_UDP;
1413         }
1414
1415         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1416                 rte_flow_error_set(error, EINVAL,
1417                         RTE_FLOW_ERROR_TYPE_ITEM,
1418                         item,
1419                         "too much input set");
1420                 return -ENOTSUP;
1421         }
1422
1423         *lkups_num = t;
1424
1425         return input_set;
1426 out:
1427         return 0;
1428 }
1429
1430 static int
1431 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1432                             const struct rte_flow_action *actions,
1433                             struct rte_flow_error *error,
1434                             struct ice_adv_rule_info *rule_info)
1435 {
1436         const struct rte_flow_action_vf *act_vf;
1437         const struct rte_flow_action *action;
1438         enum rte_flow_action_type action_type;
1439
1440         for (action = actions; action->type !=
1441                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1442                 action_type = action->type;
1443                 switch (action_type) {
1444                 case RTE_FLOW_ACTION_TYPE_VF:
1445                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1446                         act_vf = action->conf;
1447
1448                         if (act_vf->id >= ad->real_hw.num_vfs &&
1449                                 !act_vf->original) {
1450                                 rte_flow_error_set(error,
1451                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1452                                         actions,
1453                                         "Invalid vf id");
1454                                 return -rte_errno;
1455                         }
1456
1457                         if (act_vf->original)
1458                                 rule_info->sw_act.vsi_handle =
1459                                         ad->real_hw.avf.bus.func;
1460                         else
1461                                 rule_info->sw_act.vsi_handle = act_vf->id;
1462                         break;
1463                 default:
1464                         rte_flow_error_set(error,
1465                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1466                                            actions,
1467                                            "Invalid action type");
1468                         return -rte_errno;
1469                 }
1470         }
1471
1472         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1473         rule_info->sw_act.flag = ICE_FLTR_RX;
1474         rule_info->rx = 1;
1475         rule_info->priority = 5;
1476
1477         return 0;
1478 }
1479
1480 static int
1481 ice_switch_parse_action(struct ice_pf *pf,
1482                 const struct rte_flow_action *actions,
1483                 struct rte_flow_error *error,
1484                 struct ice_adv_rule_info *rule_info)
1485 {
1486         struct ice_vsi *vsi = pf->main_vsi;
1487         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1488         const struct rte_flow_action_queue *act_q;
1489         const struct rte_flow_action_rss *act_qgrop;
1490         uint16_t base_queue, i;
1491         const struct rte_flow_action *action;
1492         enum rte_flow_action_type action_type;
1493         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1494                  2, 4, 8, 16, 32, 64, 128};
1495
1496         base_queue = pf->base_queue + vsi->base_queue;
1497         for (action = actions; action->type !=
1498                         RTE_FLOW_ACTION_TYPE_END; action++) {
1499                 action_type = action->type;
1500                 switch (action_type) {
1501                 case RTE_FLOW_ACTION_TYPE_RSS:
1502                         act_qgrop = action->conf;
1503                         if (act_qgrop->queue_num <= 1)
1504                                 goto error;
1505                         rule_info->sw_act.fltr_act =
1506                                 ICE_FWD_TO_QGRP;
1507                         rule_info->sw_act.fwd_id.q_id =
1508                                 base_queue + act_qgrop->queue[0];
1509                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1510                                 if (act_qgrop->queue_num ==
1511                                         valid_qgrop_number[i])
1512                                         break;
1513                         }
1514                         if (i == MAX_QGRP_NUM_TYPE)
1515                                 goto error;
1516                         if ((act_qgrop->queue[0] +
1517                                 act_qgrop->queue_num) >
1518                                 dev->data->nb_rx_queues)
1519                                 goto error1;
1520                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1521                                 if (act_qgrop->queue[i + 1] !=
1522                                         act_qgrop->queue[i] + 1)
1523                                         goto error2;
1524                         rule_info->sw_act.qgrp_size =
1525                                 act_qgrop->queue_num;
1526                         break;
1527                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1528                         act_q = action->conf;
1529                         if (act_q->index >= dev->data->nb_rx_queues)
1530                                 goto error;
1531                         rule_info->sw_act.fltr_act =
1532                                 ICE_FWD_TO_Q;
1533                         rule_info->sw_act.fwd_id.q_id =
1534                                 base_queue + act_q->index;
1535                         break;
1536
1537                 case RTE_FLOW_ACTION_TYPE_DROP:
1538                         rule_info->sw_act.fltr_act =
1539                                 ICE_DROP_PACKET;
1540                         break;
1541
1542                 case RTE_FLOW_ACTION_TYPE_VOID:
1543                         break;
1544
1545                 default:
1546                         goto error;
1547                 }
1548         }
1549
1550         rule_info->sw_act.vsi_handle = vsi->idx;
1551         rule_info->rx = 1;
1552         rule_info->sw_act.src = vsi->idx;
1553         rule_info->priority = 5;
1554
1555         return 0;
1556
1557 error:
1558         rte_flow_error_set(error,
1559                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1560                 actions,
1561                 "Invalid action type or queue number");
1562         return -rte_errno;
1563
1564 error1:
1565         rte_flow_error_set(error,
1566                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1567                 actions,
1568                 "Invalid queue region indexes");
1569         return -rte_errno;
1570
1571 error2:
1572         rte_flow_error_set(error,
1573                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1574                 actions,
1575                 "Discontinuous queue region");
1576         return -rte_errno;
1577 }
1578
1579 static int
1580 ice_switch_check_action(const struct rte_flow_action *actions,
1581                             struct rte_flow_error *error)
1582 {
1583         const struct rte_flow_action *action;
1584         enum rte_flow_action_type action_type;
1585         uint16_t actions_num = 0;
1586
1587         for (action = actions; action->type !=
1588                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1589                 action_type = action->type;
1590                 switch (action_type) {
1591                 case RTE_FLOW_ACTION_TYPE_VF:
1592                 case RTE_FLOW_ACTION_TYPE_RSS:
1593                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1594                 case RTE_FLOW_ACTION_TYPE_DROP:
1595                         actions_num++;
1596                         break;
1597                 case RTE_FLOW_ACTION_TYPE_VOID:
1598                         continue;
1599                 default:
1600                         rte_flow_error_set(error,
1601                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1602                                            actions,
1603                                            "Invalid action type");
1604                         return -rte_errno;
1605                 }
1606         }
1607
1608         if (actions_num != 1) {
1609                 rte_flow_error_set(error,
1610                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1611                                    actions,
1612                                    "Invalid action number");
1613                 return -rte_errno;
1614         }
1615
1616         return 0;
1617 }
1618
1619 static bool
1620 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1621 {
1622         switch (tun_type) {
1623         case ICE_SW_TUN_PROFID_IPV6_ESP:
1624         case ICE_SW_TUN_PROFID_IPV6_AH:
1625         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1626         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1627         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1628         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1629         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1630         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1631                 return true;
1632         default:
1633                 break;
1634         }
1635
1636         return false;
1637 }
1638
/**
 * Top-level switch-filter parser: validate @pattern against the
 * supported pattern table, build the advanced lookup element list and
 * rule info from pattern and actions, and hand them back through
 * @meta for the later rule-creation step.
 *
 * @param ad         adapter private data.
 * @param array      table of supported patterns with their input-set masks.
 * @param array_len  number of entries in @array.
 * @param pattern    rte_flow item list, RTE_FLOW_ITEM_TYPE_END terminated.
 * @param actions    rte_flow action list, RTE_FLOW_ACTION_TYPE_END terminated.
 * @param meta       if non-NULL, receives an allocated struct sw_meta
 *                   (lookup list, lookup count, rule info); ownership
 *                   transfers to the caller.  If NULL, the temporary
 *                   allocations are freed here.
 * @param error      rte_flow error to populate on failure.
 * @return 0 on success, -rte_errno on failure.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* First pass: count items to size the lookup list, and detect a
	 * fully-masked ETH type, which makes the rule match both
	 * tunneled and non-tunneled traffic.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Second pass (inside the helper): fill the lookup list and
	 * compute the input set and tunnel type from the pattern.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	/* An empty input set is only legal for profile-id rules; any
	 * field outside the pattern's supported mask is rejected.
	 */
	if ((!inputset && !ice_is_profile_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF and PF rules have different action parsers. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
						  &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret)
		goto error;

	if (meta) {
		/* Hand list/lkups_num/rule_info to the caller via meta. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1753
1754 static int
1755 ice_switch_query(struct ice_adapter *ad __rte_unused,
1756                 struct rte_flow *flow __rte_unused,
1757                 struct rte_flow_query_count *count __rte_unused,
1758                 struct rte_flow_error *error)
1759 {
1760         rte_flow_error_set(error, EINVAL,
1761                 RTE_FLOW_ERROR_TYPE_HANDLE,
1762                 NULL,
1763                 "count action not supported by switch filter");
1764
1765         return -rte_errno;
1766 }
1767
/**
 * Redirect an existing switch rule to a new VSI number, used when a
 * target VSI is reset and gets a different HW VSI id.
 *
 * The rule is located in the HW recipe's filter list, removed, and
 * replayed after the VSI context has been updated, so the replayed
 * rule picks up the new VSI number.
 *
 * @param ad    adapter private data.
 * @param flow  flow whose rule data (rid/rule_id/vsi_handle) identifies
 *              the rule to redirect.
 * @param rd    redirect request (type, vsi_handle, new_vsi_num).
 * @return 0 on success or when the rule does not target @rd's VSI,
 *         negative errno on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Not our VSI: nothing to do for this flow. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Find the rule by id in the recipe's filter list; accept a
	 * direct VSI forward to the redirected VSI, or a VSI-list
	 * forward (which is rewritten to a single-VSI forward below).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Copy the lookups: the list entry is freed by
			 * ice_rem_adv_rule() before the replay.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule found in the filter list. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1849
1850 static int
1851 ice_switch_init(struct ice_adapter *ad)
1852 {
1853         int ret = 0;
1854         struct ice_flow_parser *dist_parser;
1855         struct ice_flow_parser *perm_parser;
1856
1857         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1858                 dist_parser = &ice_switch_dist_parser_comms;
1859         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1860                 dist_parser = &ice_switch_dist_parser_os;
1861         else
1862                 return -EINVAL;
1863
1864         if (ad->devargs.pipe_mode_support) {
1865                 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1866                         perm_parser = &ice_switch_perm_parser_comms;
1867                 else
1868                         perm_parser = &ice_switch_perm_parser_os;
1869
1870                 ret = ice_register_parser(perm_parser, ad);
1871         } else {
1872                 ret = ice_register_parser(dist_parser, ad);
1873         }
1874         return ret;
1875 }
1876
1877 static void
1878 ice_switch_uninit(struct ice_adapter *ad)
1879 {
1880         struct ice_flow_parser *dist_parser;
1881         struct ice_flow_parser *perm_parser;
1882
1883         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1884                 dist_parser = &ice_switch_dist_parser_comms;
1885         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1886                 dist_parser = &ice_switch_dist_parser_os;
1887         else
1888                 return;
1889
1890         if (ad->devargs.pipe_mode_support) {
1891                 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1892                         perm_parser = &ice_switch_perm_parser_comms;
1893                 else
1894                         perm_parser = &ice_switch_perm_parser_os;
1895
1896                 ice_unregister_parser(perm_parser, ad);
1897         } else {
1898                 ice_unregister_parser(dist_parser, ad);
1899         }
1900 }
1901
1902 static struct
1903 ice_flow_engine ice_switch_engine = {
1904         .init = ice_switch_init,
1905         .uninit = ice_switch_uninit,
1906         .create = ice_switch_create,
1907         .destroy = ice_switch_destroy,
1908         .query_count = ice_switch_query,
1909         .redirect = ice_switch_redirect,
1910         .free = ice_switch_filter_rule_free,
1911         .type = ICE_FLOW_ENGINE_SWITCH,
1912 };
1913
1914 static struct
1915 ice_flow_parser ice_switch_dist_parser_os = {
1916         .engine = &ice_switch_engine,
1917         .array = ice_switch_pattern_dist_os,
1918         .array_len = RTE_DIM(ice_switch_pattern_dist_os),
1919         .parse_pattern_action = ice_switch_parse_pattern_action,
1920         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1921 };
1922
1923 static struct
1924 ice_flow_parser ice_switch_dist_parser_comms = {
1925         .engine = &ice_switch_engine,
1926         .array = ice_switch_pattern_dist_comms,
1927         .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
1928         .parse_pattern_action = ice_switch_parse_pattern_action,
1929         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1930 };
1931
1932 static struct
1933 ice_flow_parser ice_switch_perm_parser_os = {
1934         .engine = &ice_switch_engine,
1935         .array = ice_switch_pattern_perm_os,
1936         .array_len = RTE_DIM(ice_switch_pattern_perm_os),
1937         .parse_pattern_action = ice_switch_parse_pattern_action,
1938         .stage = ICE_FLOW_STAGE_PERMISSION,
1939 };
1940
1941 static struct
1942 ice_flow_parser ice_switch_perm_parser_comms = {
1943         .engine = &ice_switch_engine,
1944         .array = ice_switch_pattern_perm_comms,
1945         .array_len = RTE_DIM(ice_switch_pattern_perm_comms),
1946         .parse_pattern_action = ice_switch_parse_pattern_action,
1947         .stage = ICE_FLOW_STAGE_PERMISSION,
1948 };
1949
1950 RTE_INIT(ice_sw_engine_init)
1951 {
1952         struct ice_flow_engine *engine = &ice_switch_engine;
1953         ice_register_flow_engine(engine);
1954 }