net/ice: refactor packet type parsing
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
29 #define MAX_QGRP_NUM_TYPE       7
30 #define MAX_INPUT_SET_BYTE      32
31 #define ICE_PPP_IPV4_PROTO      0x0021
32 #define ICE_PPP_IPV6_PROTO      0x0057
33 #define ICE_IPV4_PROTO_NVGRE    0x002F
34
35 #define ICE_SW_INSET_ETHER ( \
36         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38                 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
39                 ICE_INSET_VLAN_OUTER)
40 #define ICE_SW_INSET_MAC_IPV4 ( \
41         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
42         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
43 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
44         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
46         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
47 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
48         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
49         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
50         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV6 ( \
52         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
53         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
54         ICE_INSET_IPV6_NEXT_HDR)
55 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
56         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
58         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
59 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
60         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
61         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
62         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
63 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
64         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
65         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
66 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
67         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
68         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
70         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
71         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
72         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
74         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
76         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
77 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
78         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
79         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
80         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
81 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
82         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
83         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
84         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
85 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
86         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
87         ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
88 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
89         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
91         ICE_INSET_TUN_IPV4_TOS)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
93         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
95         ICE_INSET_TUN_IPV4_TOS)
96 #define ICE_SW_INSET_MAC_PPPOE  ( \
97         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
98         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
99 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
100         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
101         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
102         ICE_INSET_PPPOE_PROTO)
103 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
104         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
105 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
106         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
107 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
108         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
109 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
110         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
111 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
112         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
113 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
114         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
115 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
116         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
117 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
118         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
119 #define ICE_SW_INSET_MAC_IPV4_AH ( \
120         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
121 #define ICE_SW_INSET_MAC_IPV6_AH ( \
122         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
123 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
124         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
125 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
126         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
127 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
128         ICE_SW_INSET_MAC_IPV4 | \
129         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
130 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
131         ICE_SW_INSET_MAC_IPV6 | \
132         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
133
134 struct sw_meta {
135         struct ice_adv_lkup_elem *list;
136         uint16_t lkups_num;
137         struct ice_adv_rule_info rule_info;
138 };
139
140 static struct ice_flow_parser ice_switch_dist_parser;
141 static struct ice_flow_parser ice_switch_perm_parser;
142
143 static struct
144 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
145         {pattern_ethertype,
146                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
147         {pattern_ethertype_vlan,
148                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
149         {pattern_eth_arp,
150                         ICE_INSET_NONE, ICE_INSET_NONE},
151         {pattern_eth_ipv4,
152                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
153         {pattern_eth_ipv4_udp,
154                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
155         {pattern_eth_ipv4_tcp,
156                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
157         {pattern_eth_ipv6,
158                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
159         {pattern_eth_ipv6_udp,
160                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
161         {pattern_eth_ipv6_tcp,
162                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
163         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
164                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
165         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
166                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
167         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
168                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
169         {pattern_eth_ipv4_nvgre_eth_ipv4,
170                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
171         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
172                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
173         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
174                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
175         {pattern_eth_pppoes,
176                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
177         {pattern_eth_vlan_pppoes,
178                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
179         {pattern_eth_pppoes_proto,
180                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
181         {pattern_eth_vlan_pppoes_proto,
182                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
183         {pattern_eth_pppoes_ipv4,
184                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
185         {pattern_eth_pppoes_ipv4_tcp,
186                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
187         {pattern_eth_pppoes_ipv4_udp,
188                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
189         {pattern_eth_pppoes_ipv6,
190                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
191         {pattern_eth_pppoes_ipv6_tcp,
192                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
193         {pattern_eth_pppoes_ipv6_udp,
194                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
195         {pattern_eth_vlan_pppoes_ipv4,
196                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
197         {pattern_eth_vlan_pppoes_ipv4_tcp,
198                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
199         {pattern_eth_vlan_pppoes_ipv4_udp,
200                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
201         {pattern_eth_vlan_pppoes_ipv6,
202                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
203         {pattern_eth_vlan_pppoes_ipv6_tcp,
204                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
205         {pattern_eth_vlan_pppoes_ipv6_udp,
206                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
207         {pattern_eth_ipv4_esp,
208                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
209         {pattern_eth_ipv4_udp_esp,
210                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
211         {pattern_eth_ipv6_esp,
212                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
213         {pattern_eth_ipv6_udp_esp,
214                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
215         {pattern_eth_ipv4_ah,
216                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
217         {pattern_eth_ipv6_ah,
218                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
219         {pattern_eth_ipv6_udp_ah,
220                         ICE_INSET_NONE, ICE_INSET_NONE},
221         {pattern_eth_ipv4_l2tp,
222                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
223         {pattern_eth_ipv6_l2tp,
224                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
225         {pattern_eth_ipv4_pfcp,
226                         ICE_INSET_NONE, ICE_INSET_NONE},
227         {pattern_eth_ipv6_pfcp,
228                         ICE_INSET_NONE, ICE_INSET_NONE},
229 };
230
231 static struct
232 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
233         {pattern_ethertype,
234                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
235         {pattern_ethertype_vlan,
236                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
237         {pattern_eth_arp,
238                 ICE_INSET_NONE, ICE_INSET_NONE},
239         {pattern_eth_ipv4,
240                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
241         {pattern_eth_ipv4_udp,
242                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
243         {pattern_eth_ipv4_tcp,
244                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
245         {pattern_eth_ipv6,
246                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
247         {pattern_eth_ipv6_udp,
248                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
249         {pattern_eth_ipv6_tcp,
250                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
251         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
252                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
253         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
254                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
255         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
256                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
257         {pattern_eth_ipv4_nvgre_eth_ipv4,
258                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
259         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
260                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
261         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
262                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
263         {pattern_eth_pppoes,
264                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
265         {pattern_eth_vlan_pppoes,
266                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
267         {pattern_eth_pppoes_proto,
268                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
269         {pattern_eth_vlan_pppoes_proto,
270                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
271         {pattern_eth_pppoes_ipv4,
272                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
273         {pattern_eth_pppoes_ipv4_tcp,
274                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
275         {pattern_eth_pppoes_ipv4_udp,
276                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
277         {pattern_eth_pppoes_ipv6,
278                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
279         {pattern_eth_pppoes_ipv6_tcp,
280                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
281         {pattern_eth_pppoes_ipv6_udp,
282                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
283         {pattern_eth_vlan_pppoes_ipv4,
284                         ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
285         {pattern_eth_vlan_pppoes_ipv4_tcp,
286                         ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE},
287         {pattern_eth_vlan_pppoes_ipv4_udp,
288                         ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE},
289         {pattern_eth_vlan_pppoes_ipv6,
290                         ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
291         {pattern_eth_vlan_pppoes_ipv6_tcp,
292                         ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE},
293         {pattern_eth_vlan_pppoes_ipv6_udp,
294                         ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE},
295         {pattern_eth_ipv4_esp,
296                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
297         {pattern_eth_ipv4_udp_esp,
298                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
299         {pattern_eth_ipv6_esp,
300                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
301         {pattern_eth_ipv6_udp_esp,
302                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
303         {pattern_eth_ipv4_ah,
304                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
305         {pattern_eth_ipv6_ah,
306                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
307         {pattern_eth_ipv6_udp_ah,
308                         ICE_INSET_NONE, ICE_INSET_NONE},
309         {pattern_eth_ipv4_l2tp,
310                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
311         {pattern_eth_ipv6_l2tp,
312                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
313         {pattern_eth_ipv4_pfcp,
314                         ICE_INSET_NONE, ICE_INSET_NONE},
315         {pattern_eth_ipv6_pfcp,
316                         ICE_INSET_NONE, ICE_INSET_NONE},
317 };
318
319 static int
320 ice_switch_create(struct ice_adapter *ad,
321                 struct rte_flow *flow,
322                 void *meta,
323                 struct rte_flow_error *error)
324 {
325         int ret = 0;
326         struct ice_pf *pf = &ad->pf;
327         struct ice_hw *hw = ICE_PF_TO_HW(pf);
328         struct ice_rule_query_data rule_added = {0};
329         struct ice_rule_query_data *filter_ptr;
330         struct ice_adv_lkup_elem *list =
331                 ((struct sw_meta *)meta)->list;
332         uint16_t lkups_cnt =
333                 ((struct sw_meta *)meta)->lkups_num;
334         struct ice_adv_rule_info *rule_info =
335                 &((struct sw_meta *)meta)->rule_info;
336
337         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
338                 rte_flow_error_set(error, EINVAL,
339                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
340                         "item number too large for rule");
341                 goto error;
342         }
343         if (!list) {
344                 rte_flow_error_set(error, EINVAL,
345                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
346                         "lookup list should not be NULL");
347                 goto error;
348         }
349         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
350         if (!ret) {
351                 filter_ptr = rte_zmalloc("ice_switch_filter",
352                         sizeof(struct ice_rule_query_data), 0);
353                 if (!filter_ptr) {
354                         rte_flow_error_set(error, EINVAL,
355                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
356                                    "No memory for ice_switch_filter");
357                         goto error;
358                 }
359                 flow->rule = filter_ptr;
360                 rte_memcpy(filter_ptr,
361                         &rule_added,
362                         sizeof(struct ice_rule_query_data));
363         } else {
364                 rte_flow_error_set(error, EINVAL,
365                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
366                         "switch filter create flow fail");
367                 goto error;
368         }
369
370         rte_free(list);
371         rte_free(meta);
372         return 0;
373
374 error:
375         rte_free(list);
376         rte_free(meta);
377
378         return -rte_errno;
379 }
380
381 static int
382 ice_switch_destroy(struct ice_adapter *ad,
383                 struct rte_flow *flow,
384                 struct rte_flow_error *error)
385 {
386         struct ice_hw *hw = &ad->hw;
387         int ret;
388         struct ice_rule_query_data *filter_ptr;
389
390         filter_ptr = (struct ice_rule_query_data *)
391                 flow->rule;
392
393         if (!filter_ptr) {
394                 rte_flow_error_set(error, EINVAL,
395                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
396                         "no such flow"
397                         " create by switch filter");
398                 return -rte_errno;
399         }
400
401         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
402         if (ret) {
403                 rte_flow_error_set(error, EINVAL,
404                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
405                         "fail to destroy switch filter rule");
406                 return -rte_errno;
407         }
408
409         rte_free(filter_ptr);
410         return ret;
411 }
412
413 static void
414 ice_switch_filter_rule_free(struct rte_flow *flow)
415 {
416         rte_free(flow->rule);
417 }
418
419 static uint64_t
420 ice_switch_inset_get(const struct rte_flow_item pattern[],
421                 struct rte_flow_error *error,
422                 struct ice_adv_lkup_elem *list,
423                 uint16_t *lkups_num,
424                 enum ice_sw_tunnel_type *tun_type)
425 {
426         const struct rte_flow_item *item = pattern;
427         enum rte_flow_item_type item_type;
428         const struct rte_flow_item_eth *eth_spec, *eth_mask;
429         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
430         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
431         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
432         const struct rte_flow_item_udp *udp_spec, *udp_mask;
433         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
434         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
435         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
436         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
437         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
438         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
439                                 *pppoe_proto_mask;
440         const struct rte_flow_item_esp *esp_spec, *esp_mask;
441         const struct rte_flow_item_ah *ah_spec, *ah_mask;
442         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
443         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
444         uint64_t input_set = ICE_INSET_NONE;
445         uint16_t input_set_byte = 0;
446         bool pppoe_elem_valid = 0;
447         bool pppoe_patt_valid = 0;
448         bool pppoe_prot_valid = 0;
449         bool tunnel_valid = 0;
450         bool profile_rule = 0;
451         bool nvgre_valid = 0;
452         bool vxlan_valid = 0;
453         bool ipv6_valid = 0;
454         bool ipv4_valid = 0;
455         bool udp_valid = 0;
456         bool tcp_valid = 0;
457         uint16_t j, t = 0;
458
459         for (item = pattern; item->type !=
460                         RTE_FLOW_ITEM_TYPE_END; item++) {
461                 if (item->last) {
462                         rte_flow_error_set(error, EINVAL,
463                                         RTE_FLOW_ERROR_TYPE_ITEM,
464                                         item,
465                                         "Not support range");
466                         return 0;
467                 }
468                 item_type = item->type;
469
470                 switch (item_type) {
471                 case RTE_FLOW_ITEM_TYPE_ETH:
472                         eth_spec = item->spec;
473                         eth_mask = item->mask;
474                         if (eth_spec && eth_mask) {
475                                 const uint8_t *a = eth_mask->src.addr_bytes;
476                                 const uint8_t *b = eth_mask->dst.addr_bytes;
477                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
478                                         if (a[j] && tunnel_valid) {
479                                                 input_set |=
480                                                         ICE_INSET_TUN_SMAC;
481                                                 break;
482                                         } else if (a[j]) {
483                                                 input_set |=
484                                                         ICE_INSET_SMAC;
485                                                 break;
486                                         }
487                                 }
488                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
489                                         if (b[j] && tunnel_valid) {
490                                                 input_set |=
491                                                         ICE_INSET_TUN_DMAC;
492                                                 break;
493                                         } else if (b[j]) {
494                                                 input_set |=
495                                                         ICE_INSET_DMAC;
496                                                 break;
497                                         }
498                                 }
499                                 if (eth_mask->type)
500                                         input_set |= ICE_INSET_ETHERTYPE;
501                                 list[t].type = (tunnel_valid  == 0) ?
502                                         ICE_MAC_OFOS : ICE_MAC_IL;
503                                 struct ice_ether_hdr *h;
504                                 struct ice_ether_hdr *m;
505                                 uint16_t i = 0;
506                                 h = &list[t].h_u.eth_hdr;
507                                 m = &list[t].m_u.eth_hdr;
508                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
509                                         if (eth_mask->src.addr_bytes[j]) {
510                                                 h->src_addr[j] =
511                                                 eth_spec->src.addr_bytes[j];
512                                                 m->src_addr[j] =
513                                                 eth_mask->src.addr_bytes[j];
514                                                 i = 1;
515                                                 input_set_byte++;
516                                         }
517                                         if (eth_mask->dst.addr_bytes[j]) {
518                                                 h->dst_addr[j] =
519                                                 eth_spec->dst.addr_bytes[j];
520                                                 m->dst_addr[j] =
521                                                 eth_mask->dst.addr_bytes[j];
522                                                 i = 1;
523                                                 input_set_byte++;
524                                         }
525                                 }
526                                 if (i)
527                                         t++;
528                                 if (eth_mask->type) {
529                                         list[t].type = ICE_ETYPE_OL;
530                                         list[t].h_u.ethertype.ethtype_id =
531                                                 eth_spec->type;
532                                         list[t].m_u.ethertype.ethtype_id =
533                                                 eth_mask->type;
534                                         input_set_byte += 2;
535                                         t++;
536                                 }
537                         }
538                         break;
539
540                 case RTE_FLOW_ITEM_TYPE_IPV4:
541                         ipv4_spec = item->spec;
542                         ipv4_mask = item->mask;
543                         ipv4_valid = 1;
544                         if (ipv4_spec && ipv4_mask) {
545                                 /* Check IPv4 mask and update input set */
546                                 if (ipv4_mask->hdr.version_ihl ||
547                                         ipv4_mask->hdr.total_length ||
548                                         ipv4_mask->hdr.packet_id ||
549                                         ipv4_mask->hdr.hdr_checksum) {
550                                         rte_flow_error_set(error, EINVAL,
551                                                    RTE_FLOW_ERROR_TYPE_ITEM,
552                                                    item,
553                                                    "Invalid IPv4 mask.");
554                                         return 0;
555                                 }
556
557                                 if (tunnel_valid) {
558                                         if (ipv4_mask->hdr.type_of_service)
559                                                 input_set |=
560                                                         ICE_INSET_TUN_IPV4_TOS;
561                                         if (ipv4_mask->hdr.src_addr)
562                                                 input_set |=
563                                                         ICE_INSET_TUN_IPV4_SRC;
564                                         if (ipv4_mask->hdr.dst_addr)
565                                                 input_set |=
566                                                         ICE_INSET_TUN_IPV4_DST;
567                                         if (ipv4_mask->hdr.time_to_live)
568                                                 input_set |=
569                                                         ICE_INSET_TUN_IPV4_TTL;
570                                         if (ipv4_mask->hdr.next_proto_id)
571                                                 input_set |=
572                                                 ICE_INSET_TUN_IPV4_PROTO;
573                                 } else {
574                                         if (ipv4_mask->hdr.src_addr)
575                                                 input_set |= ICE_INSET_IPV4_SRC;
576                                         if (ipv4_mask->hdr.dst_addr)
577                                                 input_set |= ICE_INSET_IPV4_DST;
578                                         if (ipv4_mask->hdr.time_to_live)
579                                                 input_set |= ICE_INSET_IPV4_TTL;
580                                         if (ipv4_mask->hdr.next_proto_id)
581                                                 input_set |=
582                                                 ICE_INSET_IPV4_PROTO;
583                                         if (ipv4_mask->hdr.type_of_service)
584                                                 input_set |=
585                                                         ICE_INSET_IPV4_TOS;
586                                 }
587                                 list[t].type = (tunnel_valid  == 0) ?
588                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
589                                 if (ipv4_mask->hdr.src_addr) {
590                                         list[t].h_u.ipv4_hdr.src_addr =
591                                                 ipv4_spec->hdr.src_addr;
592                                         list[t].m_u.ipv4_hdr.src_addr =
593                                                 ipv4_mask->hdr.src_addr;
594                                         input_set_byte += 2;
595                                 }
596                                 if (ipv4_mask->hdr.dst_addr) {
597                                         list[t].h_u.ipv4_hdr.dst_addr =
598                                                 ipv4_spec->hdr.dst_addr;
599                                         list[t].m_u.ipv4_hdr.dst_addr =
600                                                 ipv4_mask->hdr.dst_addr;
601                                         input_set_byte += 2;
602                                 }
603                                 if (ipv4_mask->hdr.time_to_live) {
604                                         list[t].h_u.ipv4_hdr.time_to_live =
605                                                 ipv4_spec->hdr.time_to_live;
606                                         list[t].m_u.ipv4_hdr.time_to_live =
607                                                 ipv4_mask->hdr.time_to_live;
608                                         input_set_byte++;
609                                 }
610                                 if (ipv4_mask->hdr.next_proto_id) {
611                                         list[t].h_u.ipv4_hdr.protocol =
612                                                 ipv4_spec->hdr.next_proto_id;
613                                         list[t].m_u.ipv4_hdr.protocol =
614                                                 ipv4_mask->hdr.next_proto_id;
615                                         input_set_byte++;
616                                 }
617                                 if ((ipv4_spec->hdr.next_proto_id &
618                                         ipv4_mask->hdr.next_proto_id) ==
619                                         ICE_IPV4_PROTO_NVGRE)
620                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
621                                 if (ipv4_mask->hdr.type_of_service) {
622                                         list[t].h_u.ipv4_hdr.tos =
623                                                 ipv4_spec->hdr.type_of_service;
624                                         list[t].m_u.ipv4_hdr.tos =
625                                                 ipv4_mask->hdr.type_of_service;
626                                         input_set_byte++;
627                                 }
628                                 t++;
629                         }
630                         break;
631
632                 case RTE_FLOW_ITEM_TYPE_IPV6:
                        /* IPv6 item: record matched fields in input_set
                         * (tunnel-inner variants once a tunnel item was seen)
                         * and fill one switch lookup element with spec/mask.
                         */
633                         ipv6_spec = item->spec;
634                         ipv6_mask = item->mask;
635                         ipv6_valid = 1;
636                         if (ipv6_spec && ipv6_mask) {
                                /* payload_len cannot be used as a match key */
637                                 if (ipv6_mask->hdr.payload_len) {
638                                         rte_flow_error_set(error, EINVAL,
639                                            RTE_FLOW_ERROR_TYPE_ITEM,
640                                            item,
641                                            "Invalid IPv6 mask");
642                                         return 0;
643                                 }
644
                                /* Any nonzero mask byte enables the whole
                                 * 128-bit source-address input-set bit.
                                 */
645                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
646                                         if (ipv6_mask->hdr.src_addr[j] &&
647                                                 tunnel_valid) {
648                                                 input_set |=
649                                                 ICE_INSET_TUN_IPV6_SRC;
650                                                 break;
651                                         } else if (ipv6_mask->hdr.src_addr[j]) {
652                                                 input_set |= ICE_INSET_IPV6_SRC;
653                                                 break;
654                                         }
655                                 }
656                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
657                                         if (ipv6_mask->hdr.dst_addr[j] &&
658                                                 tunnel_valid) {
659                                                 input_set |=
660                                                 ICE_INSET_TUN_IPV6_DST;
661                                                 break;
662                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
663                                                 input_set |= ICE_INSET_IPV6_DST;
664                                                 break;
665                                         }
666                                 }
667                                 if (ipv6_mask->hdr.proto &&
668                                         tunnel_valid)
669                                         input_set |=
670                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
671                                 else if (ipv6_mask->hdr.proto)
672                                         input_set |=
673                                                 ICE_INSET_IPV6_NEXT_HDR;
674                                 if (ipv6_mask->hdr.hop_limits &&
675                                         tunnel_valid)
676                                         input_set |=
677                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
678                                 else if (ipv6_mask->hdr.hop_limits)
679                                         input_set |=
680                                                 ICE_INSET_IPV6_HOP_LIMIT;
                                /* Only the TC bits of vtc_flow are matchable;
                                 * version/flow-label bits in the mask are ignored.
                                 */
681                                 if ((ipv6_mask->hdr.vtc_flow &
682                                                 rte_cpu_to_be_32
683                                                 (RTE_IPV6_HDR_TC_MASK)) &&
684                                         tunnel_valid)
685                                         input_set |=
686                                                         ICE_INSET_TUN_IPV6_TC;
687                                 else if (ipv6_mask->hdr.vtc_flow &
688                                                 rte_cpu_to_be_32
689                                                 (RTE_IPV6_HDR_TC_MASK))
690                                         input_set |= ICE_INSET_IPV6_TC;
691
                                /* Outer-first-of-segment before any tunnel
                                 * item, inner-last otherwise.
                                 */
692                                 list[t].type = (tunnel_valid  == 0) ?
693                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
694                                 struct ice_ipv6_hdr *f;
695                                 struct ice_ipv6_hdr *s;
696                                 f = &list[t].h_u.ipv6_hdr;
697                                 s = &list[t].m_u.ipv6_hdr;
698                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
699                                         if (ipv6_mask->hdr.src_addr[j]) {
700                                                 f->src_addr[j] =
701                                                 ipv6_spec->hdr.src_addr[j];
702                                                 s->src_addr[j] =
703                                                 ipv6_mask->hdr.src_addr[j];
704                                                 input_set_byte++;
705                                         }
706                                         if (ipv6_mask->hdr.dst_addr[j]) {
707                                                 f->dst_addr[j] =
708                                                 ipv6_spec->hdr.dst_addr[j];
709                                                 s->dst_addr[j] =
710                                                 ipv6_mask->hdr.dst_addr[j];
711                                                 input_set_byte++;
712                                         }
713                                 }
714                                 if (ipv6_mask->hdr.proto) {
715                                         f->next_hdr =
716                                                 ipv6_spec->hdr.proto;
717                                         s->next_hdr =
718                                                 ipv6_mask->hdr.proto;
719                                         input_set_byte++;
720                                 }
721                                 if (ipv6_mask->hdr.hop_limits) {
722                                         f->hop_limit =
723                                                 ipv6_spec->hdr.hop_limits;
724                                         s->hop_limit =
725                                                 ipv6_mask->hdr.hop_limits;
726                                         input_set_byte++;
727                                 }
728                                 if (ipv6_mask->hdr.vtc_flow &
729                                                 rte_cpu_to_be_32
730                                                 (RTE_IPV6_HDR_TC_MASK)) {
                                        /* Rebuild the big-endian ver/tc/flow
                                         * word with only the TC field set,
                                         * for both spec and mask.
                                         */
731                                         struct ice_le_ver_tc_flow vtf;
732                                         vtf.u.fld.version = 0;
733                                         vtf.u.fld.flow_label = 0;
734                                         vtf.u.fld.tc = (rte_be_to_cpu_32
735                                                 (ipv6_spec->hdr.vtc_flow) &
736                                                         RTE_IPV6_HDR_TC_MASK) >>
737                                                         RTE_IPV6_HDR_TC_SHIFT;
738                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
739                                         vtf.u.fld.tc = (rte_be_to_cpu_32
740                                                 (ipv6_mask->hdr.vtc_flow) &
741                                                         RTE_IPV6_HDR_TC_MASK) >>
742                                                         RTE_IPV6_HDR_TC_SHIFT;
743                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
744                                         input_set_byte += 4;
745                                 }
746                                 t++;
747                         }
748                         break;
749
750                 case RTE_FLOW_ITEM_TYPE_UDP:
                        /* UDP item: only src/dst ports are matchable;
                         * length and checksum masks are rejected.
                         */
751                         udp_spec = item->spec;
752                         udp_mask = item->mask;
753                         udp_valid = 1;
754                         if (udp_spec && udp_mask) {
755                                 /* Check UDP mask and update input set */
756                                 if (udp_mask->hdr.dgram_len ||
757                                     udp_mask->hdr.dgram_cksum) {
758                                         rte_flow_error_set(error, EINVAL,
759                                                    RTE_FLOW_ERROR_TYPE_ITEM,
760                                                    item,
761                                                    "Invalid UDP mask");
762                                         return 0;
763                                 }
764
765                                 if (tunnel_valid) {
766                                         if (udp_mask->hdr.src_port)
767                                                 input_set |=
768                                                 ICE_INSET_TUN_UDP_SRC_PORT;
769                                         if (udp_mask->hdr.dst_port)
770                                                 input_set |=
771                                                 ICE_INSET_TUN_UDP_DST_PORT;
772                                 } else {
773                                         if (udp_mask->hdr.src_port)
774                                                 input_set |=
775                                                 ICE_INSET_UDP_SRC_PORT;
776                                         if (udp_mask->hdr.dst_port)
777                                                 input_set |=
778                                                 ICE_INSET_UDP_DST_PORT;
779                                 }
                                /* Outer UDP of a VXLAN tunnel uses the
                                 * tunnel-outer-frame type; anything else is
                                 * "inner/last of segment".
                                 */
780                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
781                                                 tunnel_valid == 0)
782                                         list[t].type = ICE_UDP_OF;
783                                 else
784                                         list[t].type = ICE_UDP_ILOS;
785                                 if (udp_mask->hdr.src_port) {
786                                         list[t].h_u.l4_hdr.src_port =
787                                                 udp_spec->hdr.src_port;
788                                         list[t].m_u.l4_hdr.src_port =
789                                                 udp_mask->hdr.src_port;
790                                         input_set_byte += 2;
791                                 }
792                                 if (udp_mask->hdr.dst_port) {
793                                         list[t].h_u.l4_hdr.dst_port =
794                                                 udp_spec->hdr.dst_port;
795                                         list[t].m_u.l4_hdr.dst_port =
796                                                 udp_mask->hdr.dst_port;
797                                         input_set_byte += 2;
798                                 }
799                                 t++;
800                         }
801                         break;
802
803                 case RTE_FLOW_ITEM_TYPE_TCP:
                        /* TCP item: only src/dst ports are matchable; any
                         * other nonzero mask field is rejected.
                         */
804                         tcp_spec = item->spec;
805                         tcp_mask = item->mask;
806                         tcp_valid = 1;
807                         if (tcp_spec && tcp_mask) {
808                                 /* Check TCP mask and update input set */
809                                 if (tcp_mask->hdr.sent_seq ||
810                                         tcp_mask->hdr.recv_ack ||
811                                         tcp_mask->hdr.data_off ||
812                                         tcp_mask->hdr.tcp_flags ||
813                                         tcp_mask->hdr.rx_win ||
814                                         tcp_mask->hdr.cksum ||
815                                         tcp_mask->hdr.tcp_urp) {
816                                         rte_flow_error_set(error, EINVAL,
817                                            RTE_FLOW_ERROR_TYPE_ITEM,
818                                            item,
819                                            "Invalid TCP mask");
820                                         return 0;
821                                 }
822
823                                 if (tunnel_valid) {
824                                         if (tcp_mask->hdr.src_port)
825                                                 input_set |=
826                                                 ICE_INSET_TUN_TCP_SRC_PORT;
827                                         if (tcp_mask->hdr.dst_port)
828                                                 input_set |=
829                                                 ICE_INSET_TUN_TCP_DST_PORT;
830                                 } else {
831                                         if (tcp_mask->hdr.src_port)
832                                                 input_set |=
833                                                 ICE_INSET_TCP_SRC_PORT;
834                                         if (tcp_mask->hdr.dst_port)
835                                                 input_set |=
836                                                 ICE_INSET_TCP_DST_PORT;
837                                 }
838                                 list[t].type = ICE_TCP_IL;
839                                 if (tcp_mask->hdr.src_port) {
840                                         list[t].h_u.l4_hdr.src_port =
841                                                 tcp_spec->hdr.src_port;
842                                         list[t].m_u.l4_hdr.src_port =
843                                                 tcp_mask->hdr.src_port;
844                                         input_set_byte += 2;
845                                 }
846                                 if (tcp_mask->hdr.dst_port) {
847                                         list[t].h_u.l4_hdr.dst_port =
848                                                 tcp_spec->hdr.dst_port;
849                                         list[t].m_u.l4_hdr.dst_port =
850                                                 tcp_mask->hdr.dst_port;
851                                         input_set_byte += 2;
852                                 }
853                                 t++;
854                         }
855                         break;
856
857                 case RTE_FLOW_ITEM_TYPE_SCTP:
                        /* SCTP item: only src/dst ports are matchable;
                         * a checksum mask is rejected.
                         */
858                         sctp_spec = item->spec;
859                         sctp_mask = item->mask;
860                         if (sctp_spec && sctp_mask) {
861                                 /* Check SCTP mask and update input set */
862                                 if (sctp_mask->hdr.cksum) {
863                                         rte_flow_error_set(error, EINVAL,
864                                            RTE_FLOW_ERROR_TYPE_ITEM,
865                                            item,
866                                            "Invalid SCTP mask");
867                                         return 0;
868                                 }
869
870                                 if (tunnel_valid) {
871                                         if (sctp_mask->hdr.src_port)
872                                                 input_set |=
873                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
874                                         if (sctp_mask->hdr.dst_port)
875                                                 input_set |=
876                                                 ICE_INSET_TUN_SCTP_DST_PORT;
877                                 } else {
878                                         if (sctp_mask->hdr.src_port)
879                                                 input_set |=
880                                                 ICE_INSET_SCTP_SRC_PORT;
881                                         if (sctp_mask->hdr.dst_port)
882                                                 input_set |=
883                                                 ICE_INSET_SCTP_DST_PORT;
884                                 }
885                                 list[t].type = ICE_SCTP_IL;
886                                 if (sctp_mask->hdr.src_port) {
887                                         list[t].h_u.sctp_hdr.src_port =
888                                                 sctp_spec->hdr.src_port;
889                                         list[t].m_u.sctp_hdr.src_port =
890                                                 sctp_mask->hdr.src_port;
891                                         input_set_byte += 2;
892                                 }
893                                 if (sctp_mask->hdr.dst_port) {
894                                         list[t].h_u.sctp_hdr.dst_port =
895                                                 sctp_spec->hdr.dst_port;
896                                         list[t].m_u.sctp_hdr.dst_port =
897                                                 sctp_mask->hdr.dst_port;
898                                         input_set_byte += 2;
899                                 }
900                                 t++;
901                         }
902                         break;
903
904                 case RTE_FLOW_ITEM_TYPE_VXLAN:
                        /* VXLAN item: marks the start of the inner headers
                         * (tunnel_valid) even when only used as a protocol
                         * hint (spec == mask == NULL).
                         */
905                         vxlan_spec = item->spec;
906                         vxlan_mask = item->mask;
907                         /* Check if VXLAN item is used to describe protocol.
908                          * If yes, both spec and mask should be NULL.
909                          * If no, both spec and mask shouldn't be NULL.
910                          */
911                         if ((!vxlan_spec && vxlan_mask) ||
912                             (vxlan_spec && !vxlan_mask)) {
913                                 rte_flow_error_set(error, EINVAL,
914                                            RTE_FLOW_ERROR_TYPE_ITEM,
915                                            item,
916                                            "Invalid VXLAN item");
917                                 return 0;
918                         }
919                         vxlan_valid = 1;
920                         tunnel_valid = 1;
921                         if (vxlan_spec && vxlan_mask) {
922                                 list[t].type = ICE_VXLAN;
923                                 if (vxlan_mask->vni[0] ||
924                                         vxlan_mask->vni[1] ||
925                                         vxlan_mask->vni[2]) {
                                        /* Pack the 3 VNI bytes into the
                                         * 32-bit field, vni[2] highest.
                                         */
926                                         list[t].h_u.tnl_hdr.vni =
927                                                 (vxlan_spec->vni[2] << 16) |
928                                                 (vxlan_spec->vni[1] << 8) |
929                                                 vxlan_spec->vni[0];
930                                         list[t].m_u.tnl_hdr.vni =
931                                                 (vxlan_mask->vni[2] << 16) |
932                                                 (vxlan_mask->vni[1] << 8) |
933                                                 vxlan_mask->vni[0];
934                                         input_set |=
935                                                 ICE_INSET_TUN_VXLAN_VNI;
                                        /* NOTE(review): VNI is 3 bytes but
                                         * only 2 are counted here — confirm
                                         * against MAX_INPUT_SET_BYTE intent.
                                         */
936                                         input_set_byte += 2;
937                                 }
938                                 t++;
939                         }
940                         break;
941
942                 case RTE_FLOW_ITEM_TYPE_NVGRE:
                        /* NVGRE item: marks the start of the inner headers
                         * (tunnel_valid) even when only used as a protocol
                         * hint (spec == mask == NULL).
                         */
943                         nvgre_spec = item->spec;
944                         nvgre_mask = item->mask;
945                         /* Check if NVGRE item is used to describe protocol.
946                          * If yes, both spec and mask should be NULL.
947                          * If no, both spec and mask shouldn't be NULL.
948                          */
949                         if ((!nvgre_spec && nvgre_mask) ||
950                             (nvgre_spec && !nvgre_mask)) {
951                                 rte_flow_error_set(error, EINVAL,
952                                            RTE_FLOW_ERROR_TYPE_ITEM,
953                                            item,
954                                            "Invalid NVGRE item");
955                                 return 0;
956                         }
957                         nvgre_valid = 1;
958                         tunnel_valid = 1;
959                         if (nvgre_spec && nvgre_mask) {
960                                 list[t].type = ICE_NVGRE;
961                                 if (nvgre_mask->tni[0] ||
962                                         nvgre_mask->tni[1] ||
963                                         nvgre_mask->tni[2]) {
                                        /* Pack the 3 TNI bytes into the
                                         * 32-bit tni_flow, tni[2] highest.
                                         */
964                                         list[t].h_u.nvgre_hdr.tni_flow =
965                                                 (nvgre_spec->tni[2] << 16) |
966                                                 (nvgre_spec->tni[1] << 8) |
967                                                 nvgre_spec->tni[0];
968                                         list[t].m_u.nvgre_hdr.tni_flow =
969                                                 (nvgre_mask->tni[2] << 16) |
970                                                 (nvgre_mask->tni[1] << 8) |
971                                                 nvgre_mask->tni[0];
972                                         input_set |=
973                                                 ICE_INSET_TUN_NVGRE_TNI;
                                        /* NOTE(review): TNI is 3 bytes but
                                         * only 2 are counted here — confirm
                                         * against MAX_INPUT_SET_BYTE intent.
                                         */
974                                         input_set_byte += 2;
975                                 }
976                                 t++;
977                         }
978                         break;
979
980                 case RTE_FLOW_ITEM_TYPE_VLAN:
                        /* VLAN item: matches outer VLAN TCI and/or the
                         * encapsulated ethertype (inner_type).
                         */
981                         vlan_spec = item->spec;
982                         vlan_mask = item->mask;
983                         /* Check if VLAN item is used to describe protocol.
984                          * If yes, both spec and mask should be NULL.
985                          * If no, both spec and mask shouldn't be NULL.
986                          */
987                         if ((!vlan_spec && vlan_mask) ||
988                             (vlan_spec && !vlan_mask)) {
989                                 rte_flow_error_set(error, EINVAL,
990                                            RTE_FLOW_ERROR_TYPE_ITEM,
991                                            item,
992                                            "Invalid VLAN item");
993                                 return 0;
994                         }
995                         if (vlan_spec && vlan_mask) {
996                                 list[t].type = ICE_VLAN_OFOS;
997                                 if (vlan_mask->tci) {
998                                         list[t].h_u.vlan_hdr.vlan =
999                                                 vlan_spec->tci;
1000                                         list[t].m_u.vlan_hdr.vlan =
1001                                                 vlan_mask->tci;
1002                                         input_set |= ICE_INSET_VLAN_OUTER;
1003                                         input_set_byte += 2;
1004                                 }
1005                                 if (vlan_mask->inner_type) {
1006                                         list[t].h_u.vlan_hdr.type =
1007                                                 vlan_spec->inner_type;
1008                                         list[t].m_u.vlan_hdr.type =
1009                                                 vlan_mask->inner_type;
1010                                         input_set |= ICE_INSET_ETHERTYPE;
1011                                         input_set_byte += 2;
1012                                 }
1013                                 t++;
1014                         }
1015                         break;
1016
1017                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1018                 case RTE_FLOW_ITEM_TYPE_PPPOES:
                        /* PPPoE (discovery or session) item: only the
                         * session_id is matchable. Sets pppoe_elem_valid so
                         * that a following PPPOE_PROTO_ID item can merge its
                         * fields into this same ICE_PPPOE lookup element.
                         */
1019                         pppoe_spec = item->spec;
1020                         pppoe_mask = item->mask;
1021                         /* Check if PPPoE item is used to describe protocol.
1022                          * If yes, both spec and mask should be NULL.
1023                          * If no, both spec and mask shouldn't be NULL.
1024                          */
1025                         if ((!pppoe_spec && pppoe_mask) ||
1026                                 (pppoe_spec && !pppoe_mask)) {
1027                                 rte_flow_error_set(error, EINVAL,
1028                                         RTE_FLOW_ERROR_TYPE_ITEM,
1029                                         item,
1030                                         "Invalid pppoe item");
1031                                 return 0;
1032                         }
1033                         pppoe_patt_valid = 1;
1034                         if (pppoe_spec && pppoe_mask) {
1035                                 /* Check pppoe mask and update input set */
1036                                 if (pppoe_mask->length ||
1037                                         pppoe_mask->code ||
1038                                         pppoe_mask->version_type) {
1039                                         rte_flow_error_set(error, EINVAL,
1040                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1041                                                 item,
1042                                                 "Invalid pppoe mask");
1043                                         return 0;
1044                                 }
1045                                 list[t].type = ICE_PPPOE;
1046                                 if (pppoe_mask->session_id) {
1047                                         list[t].h_u.pppoe_hdr.session_id =
1048                                                 pppoe_spec->session_id;
1049                                         list[t].m_u.pppoe_hdr.session_id =
1050                                                 pppoe_mask->session_id;
1051                                         input_set |= ICE_INSET_PPPOE_SESSION;
1052                                         input_set_byte += 2;
1053                                 }
1054                                 t++;
1055                                 pppoe_elem_valid = 1;
1056                         }
1057                         break;
1058
1059                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
                        /* Optional PPP protocol-id item following a PPPoE
                         * item. If a PPPoE element was already emitted,
                         * back up (t--) and merge proto_id into it instead
                         * of emitting a second element.
                         */
1060                         pppoe_proto_spec = item->spec;
1061                         pppoe_proto_mask = item->mask;
1062                         /* Check if PPPoE optional proto_id item
1063                          * is used to describe protocol.
1064                          * If yes, both spec and mask should be NULL.
1065                          * If no, both spec and mask shouldn't be NULL.
1066                          */
1067                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1068                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1069                                 rte_flow_error_set(error, EINVAL,
1070                                         RTE_FLOW_ERROR_TYPE_ITEM,
1071                                         item,
1072                                         "Invalid pppoe proto item");
1073                                 return 0;
1074                         }
1075                         if (pppoe_proto_spec && pppoe_proto_mask) {
1076                                 if (pppoe_elem_valid)
1077                                         t--;
1078                                 list[t].type = ICE_PPPOE;
1079                                 if (pppoe_proto_mask->proto_id) {
1080                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1081                                                 pppoe_proto_spec->proto_id;
1082                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1083                                                 pppoe_proto_mask->proto_id;
1084                                         input_set |= ICE_INSET_PPPOE_PROTO;
1085                                         input_set_byte += 2;
1086                                         pppoe_prot_valid = 1;
1087                                 }
                                /* PPP protocol 0x0021 (IPv4) / 0x0057 (IPv6)
                                 * means an IP payload pattern follows; any
                                 * other protocol is treated as PPPoE payload.
                                 */
1088                                 if ((pppoe_proto_mask->proto_id &
1089                                         pppoe_proto_spec->proto_id) !=
1090                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1091                                         (pppoe_proto_mask->proto_id &
1092                                         pppoe_proto_spec->proto_id) !=
1093                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1094                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1095                                 else
1096                                         *tun_type = ICE_SW_TUN_PPPOE;
1097                                 t++;
1098                         }
1099
1100                         break;
1101
1102                 case RTE_FLOW_ITEM_TYPE_ESP:
                        /* ESP item: a bare item with an otherwise empty
                         * input set selects a profile-id rule; a spec/mask
                         * with SPI emits an ESP (or NAT-T when over UDP)
                         * lookup element. Only the SPI is matchable.
                         */
1103                         esp_spec = item->spec;
1104                         esp_mask = item->mask;
1105                         if ((esp_spec && !esp_mask) ||
1106                                 (!esp_spec && esp_mask)) {
1107                                 rte_flow_error_set(error, EINVAL,
1108                                            RTE_FLOW_ERROR_TYPE_ITEM,
1109                                            item,
1110                                            "Invalid esp item");
1111                                 return 0;
1112                         }
1113                         /* Check esp mask and update input set */
1114                         if (esp_mask && esp_mask->hdr.seq) {
1115                                 rte_flow_error_set(error, EINVAL,
1116                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1117                                                 item,
1118                                                 "Invalid esp mask");
1119                                 return 0;
1120                         }
1121
                        /* Protocol-hint-only ESP with nothing matched yet:
                         * choose a profile rule (IPv6 only; IPv4 rejected).
                         */
1122                         if (!esp_spec && !esp_mask && !input_set) {
1123                                 profile_rule = 1;
1124                                 if (ipv6_valid && udp_valid)
1125                                         *tun_type =
1126                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1127                                 else if (ipv6_valid)
1128                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1129                                 else if (ipv4_valid)
1130                                         return 0;
1131                         } else if (esp_spec && esp_mask &&
1132                                                 esp_mask->hdr.spi){
1133                                 if (udp_valid)
1134                                         list[t].type = ICE_NAT_T;
1135                                 else
1136                                         list[t].type = ICE_ESP;
1137                                 list[t].h_u.esp_hdr.spi =
1138                                         esp_spec->hdr.spi;
1139                                 list[t].m_u.esp_hdr.spi =
1140                                         esp_mask->hdr.spi;
1141                                 input_set |= ICE_INSET_ESP_SPI;
1142                                 input_set_byte += 4;
1143                                 t++;
1144                         }
1145
                        /* Non-profile rules pick the tunnel type from the
                         * IP version and whether UDP (NAT-T) was present.
                         */
1146                         if (!profile_rule) {
1147                                 if (ipv6_valid && udp_valid)
1148                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1149                                 else if (ipv4_valid && udp_valid)
1150                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1151                                 else if (ipv6_valid)
1152                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1153                                 else if (ipv4_valid)
1154                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1155                         }
1156                         break;
1157
1158                 case RTE_FLOW_ITEM_TYPE_AH:
1159                         ah_spec = item->spec;
1160                         ah_mask = item->mask;
1161                         if ((ah_spec && !ah_mask) ||
1162                                 (!ah_spec && ah_mask)) {
1163                                 rte_flow_error_set(error, EINVAL,
1164                                            RTE_FLOW_ERROR_TYPE_ITEM,
1165                                            item,
1166                                            "Invalid ah item");
1167                                 return 0;
1168                         }
1169                         /* Check ah mask and update input set */
1170                         if (ah_mask &&
1171                                 (ah_mask->next_hdr ||
1172                                 ah_mask->payload_len ||
1173                                 ah_mask->seq_num ||
1174                                 ah_mask->reserved)) {
1175                                 rte_flow_error_set(error, EINVAL,
1176                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1177                                                 item,
1178                                                 "Invalid ah mask");
1179                                 return 0;
1180                         }
1181
1182                         if (!ah_spec && !ah_mask && !input_set) {
1183                                 profile_rule = 1;
1184                                 if (ipv6_valid && udp_valid)
1185                                         *tun_type =
1186                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1187                                 else if (ipv6_valid)
1188                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1189                                 else if (ipv4_valid)
1190                                         return 0;
1191                         } else if (ah_spec && ah_mask &&
1192                                                 ah_mask->spi){
1193                                 list[t].type = ICE_AH;
1194                                 list[t].h_u.ah_hdr.spi =
1195                                         ah_spec->spi;
1196                                 list[t].m_u.ah_hdr.spi =
1197                                         ah_mask->spi;
1198                                 input_set |= ICE_INSET_AH_SPI;
1199                                 input_set_byte += 4;
1200                                 t++;
1201                         }
1202
1203                         if (!profile_rule) {
1204                                 if (udp_valid)
1205                                         return 0;
1206                                 else if (ipv6_valid)
1207                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1208                                 else if (ipv4_valid)
1209                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1210                         }
1211                         break;
1212
1213                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1214                         l2tp_spec = item->spec;
1215                         l2tp_mask = item->mask;
1216                         if ((l2tp_spec && !l2tp_mask) ||
1217                                 (!l2tp_spec && l2tp_mask)) {
1218                                 rte_flow_error_set(error, EINVAL,
1219                                            RTE_FLOW_ERROR_TYPE_ITEM,
1220                                            item,
1221                                            "Invalid l2tp item");
1222                                 return 0;
1223                         }
1224
1225                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1226                                 if (ipv6_valid)
1227                                         *tun_type =
1228                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1229                                 else if (ipv4_valid)
1230                                         return 0;
1231                         } else if (l2tp_spec && l2tp_mask &&
1232                                                 l2tp_mask->session_id){
1233                                 list[t].type = ICE_L2TPV3;
1234                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1235                                         l2tp_spec->session_id;
1236                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1237                                         l2tp_mask->session_id;
1238                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1239                                 input_set_byte += 4;
1240                                 t++;
1241                         }
1242
1243                         if (!profile_rule) {
1244                                 if (ipv6_valid)
1245                                         *tun_type =
1246                                         ICE_SW_TUN_IPV6_L2TPV3;
1247                                 else if (ipv4_valid)
1248                                         *tun_type =
1249                                         ICE_SW_TUN_IPV4_L2TPV3;
1250                         }
1251                         break;
1252
1253                 case RTE_FLOW_ITEM_TYPE_PFCP:
1254                         pfcp_spec = item->spec;
1255                         pfcp_mask = item->mask;
1256                         /* Check if PFCP item is used to describe protocol.
1257                          * If yes, both spec and mask should be NULL.
1258                          * If no, both spec and mask shouldn't be NULL.
1259                          */
1260                         if ((!pfcp_spec && pfcp_mask) ||
1261                             (pfcp_spec && !pfcp_mask)) {
1262                                 rte_flow_error_set(error, EINVAL,
1263                                            RTE_FLOW_ERROR_TYPE_ITEM,
1264                                            item,
1265                                            "Invalid PFCP item");
1266                                 return -ENOTSUP;
1267                         }
1268                         if (pfcp_spec && pfcp_mask) {
1269                                 /* Check pfcp mask and update input set */
1270                                 if (pfcp_mask->msg_type ||
1271                                         pfcp_mask->msg_len ||
1272                                         pfcp_mask->seid) {
1273                                         rte_flow_error_set(error, EINVAL,
1274                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1275                                                 item,
1276                                                 "Invalid pfcp mask");
1277                                         return -ENOTSUP;
1278                                 }
1279                                 if (pfcp_mask->s_field &&
1280                                         pfcp_spec->s_field == 0x01 &&
1281                                         ipv6_valid)
1282                                         *tun_type =
1283                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1284                                 else if (pfcp_mask->s_field &&
1285                                         pfcp_spec->s_field == 0x01)
1286                                         *tun_type =
1287                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1288                                 else if (pfcp_mask->s_field &&
1289                                         !pfcp_spec->s_field &&
1290                                         ipv6_valid)
1291                                         *tun_type =
1292                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1293                                 else if (pfcp_mask->s_field &&
1294                                         !pfcp_spec->s_field)
1295                                         *tun_type =
1296                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1297                                 else
1298                                         return -ENOTSUP;
1299                         }
1300                         break;
1301
1302                 case RTE_FLOW_ITEM_TYPE_VOID:
1303                         break;
1304
1305                 default:
1306                         rte_flow_error_set(error, EINVAL,
1307                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1308                                    "Invalid pattern item.");
1309                         goto out;
1310                 }
1311         }
1312
1313         if (pppoe_patt_valid && !pppoe_prot_valid) {
1314                 if (ipv6_valid && udp_valid)
1315                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1316                 else if (ipv6_valid && tcp_valid)
1317                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1318                 else if (ipv4_valid && udp_valid)
1319                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1320                 else if (ipv4_valid && tcp_valid)
1321                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1322                 else if (ipv6_valid)
1323                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1324                 else if (ipv4_valid)
1325                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1326                 else
1327                         *tun_type = ICE_SW_TUN_PPPOE;
1328         }
1329
1330         if (*tun_type == ICE_NON_TUN) {
1331                 if (vxlan_valid)
1332                         *tun_type = ICE_SW_TUN_VXLAN;
1333                 else if (nvgre_valid)
1334                         *tun_type = ICE_SW_TUN_NVGRE;
1335                 else if (ipv4_valid && tcp_valid)
1336                         *tun_type = ICE_SW_IPV4_TCP;
1337                 else if (ipv4_valid && udp_valid)
1338                         *tun_type = ICE_SW_IPV4_UDP;
1339                 else if (ipv6_valid && tcp_valid)
1340                         *tun_type = ICE_SW_IPV6_TCP;
1341                 else if (ipv6_valid && udp_valid)
1342                         *tun_type = ICE_SW_IPV6_UDP;
1343         }
1344
1345         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1346                 rte_flow_error_set(error, EINVAL,
1347                         RTE_FLOW_ERROR_TYPE_ITEM,
1348                         item,
1349                         "too much input set");
1350                 return -ENOTSUP;
1351         }
1352
1353         *lkups_num = t;
1354
1355         return input_set;
1356 out:
1357         return 0;
1358 }
1359
1360 static int
1361 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1362                             const struct rte_flow_action *actions,
1363                             struct rte_flow_error *error,
1364                             struct ice_adv_rule_info *rule_info)
1365 {
1366         const struct rte_flow_action_vf *act_vf;
1367         const struct rte_flow_action *action;
1368         enum rte_flow_action_type action_type;
1369
1370         for (action = actions; action->type !=
1371                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1372                 action_type = action->type;
1373                 switch (action_type) {
1374                 case RTE_FLOW_ACTION_TYPE_VF:
1375                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1376                         act_vf = action->conf;
1377
1378                         if (act_vf->id >= ad->real_hw.num_vfs &&
1379                                 !act_vf->original) {
1380                                 rte_flow_error_set(error,
1381                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1382                                         actions,
1383                                         "Invalid vf id");
1384                                 return -rte_errno;
1385                         }
1386
1387                         if (act_vf->original)
1388                                 rule_info->sw_act.vsi_handle =
1389                                         ad->real_hw.avf.bus.func;
1390                         else
1391                                 rule_info->sw_act.vsi_handle = act_vf->id;
1392                         break;
1393
1394                 case RTE_FLOW_ACTION_TYPE_DROP:
1395                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1396                         break;
1397
1398                 default:
1399                         rte_flow_error_set(error,
1400                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1401                                            actions,
1402                                            "Invalid action type");
1403                         return -rte_errno;
1404                 }
1405         }
1406
1407         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1408         rule_info->sw_act.flag = ICE_FLTR_RX;
1409         rule_info->rx = 1;
1410         rule_info->priority = 5;
1411
1412         return 0;
1413 }
1414
1415 static int
1416 ice_switch_parse_action(struct ice_pf *pf,
1417                 const struct rte_flow_action *actions,
1418                 struct rte_flow_error *error,
1419                 struct ice_adv_rule_info *rule_info)
1420 {
1421         struct ice_vsi *vsi = pf->main_vsi;
1422         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1423         const struct rte_flow_action_queue *act_q;
1424         const struct rte_flow_action_rss *act_qgrop;
1425         uint16_t base_queue, i;
1426         const struct rte_flow_action *action;
1427         enum rte_flow_action_type action_type;
1428         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1429                  2, 4, 8, 16, 32, 64, 128};
1430
1431         base_queue = pf->base_queue + vsi->base_queue;
1432         for (action = actions; action->type !=
1433                         RTE_FLOW_ACTION_TYPE_END; action++) {
1434                 action_type = action->type;
1435                 switch (action_type) {
1436                 case RTE_FLOW_ACTION_TYPE_RSS:
1437                         act_qgrop = action->conf;
1438                         if (act_qgrop->queue_num <= 1)
1439                                 goto error;
1440                         rule_info->sw_act.fltr_act =
1441                                 ICE_FWD_TO_QGRP;
1442                         rule_info->sw_act.fwd_id.q_id =
1443                                 base_queue + act_qgrop->queue[0];
1444                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1445                                 if (act_qgrop->queue_num ==
1446                                         valid_qgrop_number[i])
1447                                         break;
1448                         }
1449                         if (i == MAX_QGRP_NUM_TYPE)
1450                                 goto error;
1451                         if ((act_qgrop->queue[0] +
1452                                 act_qgrop->queue_num) >
1453                                 dev->data->nb_rx_queues)
1454                                 goto error1;
1455                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1456                                 if (act_qgrop->queue[i + 1] !=
1457                                         act_qgrop->queue[i] + 1)
1458                                         goto error2;
1459                         rule_info->sw_act.qgrp_size =
1460                                 act_qgrop->queue_num;
1461                         break;
1462                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1463                         act_q = action->conf;
1464                         if (act_q->index >= dev->data->nb_rx_queues)
1465                                 goto error;
1466                         rule_info->sw_act.fltr_act =
1467                                 ICE_FWD_TO_Q;
1468                         rule_info->sw_act.fwd_id.q_id =
1469                                 base_queue + act_q->index;
1470                         break;
1471
1472                 case RTE_FLOW_ACTION_TYPE_DROP:
1473                         rule_info->sw_act.fltr_act =
1474                                 ICE_DROP_PACKET;
1475                         break;
1476
1477                 case RTE_FLOW_ACTION_TYPE_VOID:
1478                         break;
1479
1480                 default:
1481                         goto error;
1482                 }
1483         }
1484
1485         rule_info->sw_act.vsi_handle = vsi->idx;
1486         rule_info->rx = 1;
1487         rule_info->sw_act.src = vsi->idx;
1488         rule_info->priority = 5;
1489
1490         return 0;
1491
1492 error:
1493         rte_flow_error_set(error,
1494                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1495                 actions,
1496                 "Invalid action type or queue number");
1497         return -rte_errno;
1498
1499 error1:
1500         rte_flow_error_set(error,
1501                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1502                 actions,
1503                 "Invalid queue region indexes");
1504         return -rte_errno;
1505
1506 error2:
1507         rte_flow_error_set(error,
1508                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1509                 actions,
1510                 "Discontinuous queue region");
1511         return -rte_errno;
1512 }
1513
1514 static int
1515 ice_switch_check_action(const struct rte_flow_action *actions,
1516                             struct rte_flow_error *error)
1517 {
1518         const struct rte_flow_action *action;
1519         enum rte_flow_action_type action_type;
1520         uint16_t actions_num = 0;
1521
1522         for (action = actions; action->type !=
1523                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1524                 action_type = action->type;
1525                 switch (action_type) {
1526                 case RTE_FLOW_ACTION_TYPE_VF:
1527                 case RTE_FLOW_ACTION_TYPE_RSS:
1528                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1529                 case RTE_FLOW_ACTION_TYPE_DROP:
1530                         actions_num++;
1531                         break;
1532                 case RTE_FLOW_ACTION_TYPE_VOID:
1533                         continue;
1534                 default:
1535                         rte_flow_error_set(error,
1536                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1537                                            actions,
1538                                            "Invalid action type");
1539                         return -rte_errno;
1540                 }
1541         }
1542
1543         if (actions_num != 1) {
1544                 rte_flow_error_set(error,
1545                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1546                                    actions,
1547                                    "Invalid action number");
1548                 return -rte_errno;
1549         }
1550
1551         return 0;
1552 }
1553
1554 static bool
1555 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1556 {
1557         switch (tun_type) {
1558         case ICE_SW_TUN_PROFID_IPV6_ESP:
1559         case ICE_SW_TUN_PROFID_IPV6_AH:
1560         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1561         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1562         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1563         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1564         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1565         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1566                 return true;
1567         default:
1568                 break;
1569         }
1570
1571         return false;
1572 }
1573
/**
 * Parse a switch filter pattern and its actions into the driver's
 * internal representation (lookup element list + advanced rule info),
 * handed back to the caller through @meta.
 *
 * @param ad        ice adapter
 * @param array     supported pattern table for this parser stage
 * @param array_len number of entries in @array
 * @param pattern   item list, terminated by RTE_FLOW_ITEM_TYPE_END
 * @param actions   action list, terminated by RTE_FLOW_ACTION_TYPE_END
 * @param meta      output: on success points to a freshly allocated
 *                  struct sw_meta whose ownership passes to the
 *                  caller; may be NULL for a validate-only call, in
 *                  which case all allocations are released here.
 * @param error     flow error to populate on failure
 * @return 0 on success, -rte_errno on failure.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* First pass: count items to size the lookup list, and detect a
	 * fully-masked ether type, which selects the combined
	 * tunnel-and-non-tunnel rule type.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		/* Nothing allocated yet, so no cleanup needed. */
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Second pass: fill the lookup list and compute the input set.
	 * A zero input set is only valid for profile rules (they match
	 * on a profile id, not on explicit fields).
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	if ((!inputset && !ice_is_profile_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
						  &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret)
		goto error;

	/* Hand the parsed data to the caller, or release it for a
	 * validate-only invocation (meta == NULL).
	 */
	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	/* rte_free(NULL) is a no-op, so partial allocation is fine. */
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1689
1690 static int
1691 ice_switch_query(struct ice_adapter *ad __rte_unused,
1692                 struct rte_flow *flow __rte_unused,
1693                 struct rte_flow_query_count *count __rte_unused,
1694                 struct rte_flow_error *error)
1695 {
1696         rte_flow_error_set(error, EINVAL,
1697                 RTE_FLOW_ERROR_TYPE_HANDLE,
1698                 NULL,
1699                 "count action not supported by switch filter");
1700
1701         return -rte_errno;
1702 }
1703
/**
 * Redirect an existing switch rule to a VSI whose hardware VSI number
 * has changed: the old rule is removed, the VSI context is updated
 * with the new VSI number, and the rule is replayed.
 *
 * @param ad   ice adapter
 * @param flow the flow whose rule may need redirecting
 * @param rd   redirect request (target vsi_handle and new VSI number)
 * @return 0 on success or when the rule doesn't target the redirected
 *         VSI; negative value on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Nothing to do unless the rule targets the redirected VSI. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is handled here. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Find the rule by id in the recipe's filter list; match either
	 * a direct forward to the redirected VSI or a VSI-list forward.
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Duplicate the lookup elements for the replay:
			 * the list entry presumably becomes invalid once
			 * the old rule is removed below — TODO confirm
			 * against ice_rem_adv_rule().
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			/* A VSI-list rule is replayed as a plain forward
			 * to the redirected VSI.
			 */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule was found. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1785
1786 static int
1787 ice_switch_init(struct ice_adapter *ad)
1788 {
1789         int ret = 0;
1790         struct ice_flow_parser *dist_parser;
1791         struct ice_flow_parser *perm_parser;
1792
1793         if (ad->devargs.pipe_mode_support) {
1794                 perm_parser = &ice_switch_perm_parser;
1795                 ret = ice_register_parser(perm_parser, ad);
1796         } else {
1797                 dist_parser = &ice_switch_dist_parser;
1798                 ret = ice_register_parser(dist_parser, ad);
1799         }
1800         return ret;
1801 }
1802
1803 static void
1804 ice_switch_uninit(struct ice_adapter *ad)
1805 {
1806         struct ice_flow_parser *dist_parser;
1807         struct ice_flow_parser *perm_parser;
1808
1809         if (ad->devargs.pipe_mode_support) {
1810                 perm_parser = &ice_switch_perm_parser;
1811                 ice_unregister_parser(perm_parser, ad);
1812         } else {
1813                 dist_parser = &ice_switch_dist_parser;
1814                 ice_unregister_parser(dist_parser, ad);
1815         }
1816 }
1817
/* Switch filter engine ops, registered with the generic flow
 * framework at load time (see RTE_INIT below).
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1829
/* Distributor-stage parser, used when pipe_mode_support is off. */
static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1838
/* Permission-stage parser, used when pipe_mode_support is on. */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1847
1848 RTE_INIT(ice_sw_engine_init)
1849 {
1850         struct ice_flow_engine *engine = &ice_switch_engine;
1851         ice_register_flow_engine(engine);
1852 }