net/ice: track DCF state of PF
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/* NOTE(review): presumably the number of queue-group sizes considered when
 * building a queue-group (RSS-style) action -- confirm against the action
 * parsing code later in this file. */
29 #define MAX_QGRP_NUM_TYPE       7
/* NOTE(review): looks like an upper bound, in bytes, on the total matched
 * input-set fields of one rule -- confirm where it is checked. */
30 #define MAX_INPUT_SET_BYTE      32
/* PPP protocol-field values: 0x0021 = IPv4 (RFC 1332), 0x0057 = IPv6
 * (RFC 5072).  Used to recognize the payload type of PPPoE sessions. */
31 #define ICE_PPP_IPV4_PROTO      0x0021
32 #define ICE_PPP_IPV6_PROTO      0x0057
/* IANA IP protocol number 47 (GRE), used to detect NVGRE over IPv4. */
33 #define ICE_IPV4_PROTO_NVGRE    0x002F
/* NOTE(review): base value for switch-rule priority -- confirm how the
 * rule priority is derived from it elsewhere in this file. */
34 #define ICE_SW_PRI_BASE 6
35
/*
 * Supported input-set masks for non-tunnel patterns.  Each ICE_SW_INSET_*
 * macro ORs together the ICE_INSET_* field bits that the switch filter can
 * match for the corresponding rte_flow pattern; they are referenced by the
 * pattern tables below.
 */
/* Plain Ethernet / VLAN / QinQ L2 matching. */
36 #define ICE_SW_INSET_ETHER ( \
37         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
38 #define ICE_SW_INSET_MAC_VLAN ( \
39         ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
40 #define ICE_SW_INSET_MAC_QINQ  ( \
41         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
42         ICE_INSET_VLAN_OUTER)
/* IPv4 over Ethernet, optionally under QinQ, with L4 port variants. */
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
48 #define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
49         ICE_SW_INSET_MAC_QINQ_IPV4 | \
50         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
52         ICE_SW_INSET_MAC_QINQ_IPV4 | \
53         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Note: the TCP/UDP variants drop ICE_INSET_IPV4_PROTO -- the L4 header
 * in the pattern already fixes the IP protocol. */
54 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
55         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
56         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
57         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
58 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
59         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
60         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
61         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* IPv6 counterparts of the above. */
62 #define ICE_SW_INSET_MAC_IPV6 ( \
63         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
65         ICE_INSET_IPV6_NEXT_HDR)
66 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
67         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
68 #define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
69         ICE_SW_INSET_MAC_QINQ_IPV6 | \
70         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
71 #define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
72         ICE_SW_INSET_MAC_QINQ_IPV6 | \
73         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
74 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
75         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
76         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
77         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
78 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
79         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
80         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
81         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Tunnel (NVGRE / VXLAN) input sets.  The DIST_* variants match the inner
 * headers plus the tunnel ID (TNI / VNI); they are used by the "dist"
 * pattern table below.  The PERM_* variants match inner IPv4 fields only
 * and are used by the "perm" pattern table.
 */
82 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
83         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
84         ICE_INSET_NVGRE_TNI)
85 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
86         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
87         ICE_INSET_VXLAN_VNI)
88 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
89         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
90         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
91         ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
92 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
93         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
94         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
95         ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
96 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
97         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
98         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
99         ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
100 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
101         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
102         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
103         ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
/* Inner-only matching for tunnels in the "perm" table (no tunnel ID). */
104 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
105         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
106         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
107 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
108         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
109         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
110         ICE_INSET_IPV4_TOS)
111 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
112         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
113         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
114         ICE_INSET_IPV4_TOS)
/*
 * PPPoE input sets: session matching, optional PPP protocol field
 * (see ICE_PPP_IPV4_PROTO/ICE_PPP_IPV6_PROTO above), and PPPoE-carried
 * IPv4/IPv6 with L4 port variants.
 */
115 #define ICE_SW_INSET_MAC_PPPOE  ( \
116         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
117         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
118 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
119         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
120         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
121         ICE_INSET_PPPOE_PROTO)
122 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
123         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
124 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
125         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
126 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
127         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
128 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
129         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
130 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
131         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
132 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
133         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/*
 * Security / tunneling protocol input sets: base IPv4/IPv6 match plus the
 * protocol's own identifier field (ESP/AH SPI, L2TPv3 session ID, PFCP
 * S-field + SEID).
 */
134 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
135         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
136 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
137         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
138 #define ICE_SW_INSET_MAC_IPV4_AH ( \
139         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
140 #define ICE_SW_INSET_MAC_IPV6_AH ( \
141         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
142 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
143         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
144 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
145         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
146 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
147         ICE_SW_INSET_MAC_IPV4 | \
148         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
149 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
150         ICE_SW_INSET_MAC_IPV6 | \
151         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/*
 * GTP-U input sets.  The *_OUTER macros cover the outer headers and the
 * GTP-U TEID (plus QFI when the PSC extension header, "_EH", is present);
 * the GTPU_IPV4/IPV6[_UDP/_TCP] macros cover the inner headers.
 */
152 #define ICE_SW_INSET_MAC_IPV4_GTPU ( \
153         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
154 #define ICE_SW_INSET_MAC_IPV6_GTPU ( \
155         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
156 #define ICE_SW_INSET_MAC_GTPU_OUTER ( \
157         ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
158 #define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
159         ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
160 #define ICE_SW_INSET_GTPU_IPV4 ( \
161         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
162 #define ICE_SW_INSET_GTPU_IPV6 ( \
163         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
164 #define ICE_SW_INSET_GTPU_IPV4_UDP ( \
165         ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
166         ICE_INSET_UDP_DST_PORT)
167 #define ICE_SW_INSET_GTPU_IPV4_TCP ( \
168         ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
169         ICE_INSET_TCP_DST_PORT)
170 #define ICE_SW_INSET_GTPU_IPV6_UDP ( \
171         ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
172         ICE_INSET_UDP_DST_PORT)
173 #define ICE_SW_INSET_GTPU_IPV6_TCP ( \
174         ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
175         ICE_INSET_TCP_DST_PORT)
176
/*
 * Parsed representation of one switch filter, handed from the pattern/
 * action parser to the rule-creation stage.
 */
177 struct sw_meta {
        /* Array of lookup elements describing the matched fields. */
178         struct ice_adv_lkup_elem *list;
        /* Number of valid entries in @list. */
179         uint16_t lkups_num;
        /* Rule attributes (action, priority, direction, ...). */
180         struct ice_adv_rule_info rule_info;
181 };
182
/*
 * Lifecycle status of a switch filter with respect to rule redirection.
 * NOTE(review): "RIDRECT" looks like a typo for "REDIRECT"; the spelling
 * is kept because these enumerators are referenced elsewhere in this file.
 */
183 enum ice_sw_fltr_status {
        /* Rule is installed in hardware. */
184         ICE_SW_FLTR_ADDED,
        /* Removing the old rule failed during a redirect. */
185         ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT,
        /* Re-adding the rule failed during a redirect. */
186         ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT,
187 };
188
/*
 * Per-flow private data kept for a created switch filter so the rule can
 * be queried, removed, or redirected later.
 */
189 struct ice_switch_filter_conf {
        /* Current add/redirect state of the rule (see enum above). */
190         enum ice_sw_fltr_status fltr_status;

        /* Rule identification data returned by hardware on rule add. */
192         struct ice_rule_query_data sw_query_data;

193 
194         /*
195          * The lookup elements and rule info are saved here when filter creation
196          * succeeds.
197          */
        /* VSI the rule was programmed against. */
198         uint16_t vsi_num;
        /* Number of valid entries in @lkups. */
199         uint16_t lkups_num;
        /* Saved copy of the lookup elements (owned by this struct). */
200         struct ice_adv_lkup_elem *lkups;
        /* Saved copy of the rule attributes. */
201         struct ice_adv_rule_info rule_info;
202 };
203
/* Forward declarations: the two switch-filter parsers (one per pipeline
 * stage) are defined after the pattern tables below. */
204 static struct ice_flow_parser ice_switch_dist_parser;
205 static struct ice_flow_parser ice_switch_perm_parser;
206
/*
 * Pattern table for the "dist" parser.  Each row maps an rte_flow pattern
 * to its supported input-set masks (outer mask, inner/tunnel mask, and a
 * third mask unused here -- always ICE_INSET_NONE).  NOTE(review): "dist"
 * vs "perm" presumably selects the pipeline mode (distributor vs
 * permission stage) -- confirm against the parser registration code.
 */
207 static struct
208 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
209         {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE,                         ICE_INSET_NONE},
210         {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
211         {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
212         {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
213         {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
214         {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
215         {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
216         {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
217         {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
218         {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
219         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_VXLAN_IPV4,           ICE_INSET_NONE},
220         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_VXLAN_IPV4_UDP,       ICE_INSET_NONE},
221         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_VXLAN_IPV4_TCP,       ICE_INSET_NONE},
222         {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_NVGRE_IPV4,           ICE_INSET_NONE},
223         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_NVGRE_IPV4_UDP,       ICE_INSET_NONE},
224         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_NVGRE_IPV4_TCP,       ICE_INSET_NONE},
225         {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
226         {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
227         {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
228         {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
229         {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
230         {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
231         {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
232         {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
233         {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
234         {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
235         {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
236         {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
237         {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
238         {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
239         {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
240         {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
241         {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
242         {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
243         {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
244         {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
245         {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
246         {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
247         {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
248         {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
249         {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
250         {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
251         {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
252         {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE,                         ICE_INSET_NONE},
253         {pattern_eth_qinq_ipv4_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV4_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
254         {pattern_eth_qinq_ipv4_udp,                     ICE_SW_INSET_MAC_QINQ_IPV4_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
255         {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE,                         ICE_INSET_NONE},
256         {pattern_eth_qinq_ipv6_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV6_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
257         {pattern_eth_qinq_ipv6_udp,                     ICE_SW_INSET_MAC_QINQ_IPV6_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
258         {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
259         {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
260         {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
261         {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
262         {pattern_eth_ipv4_gtpu,                         ICE_SW_INSET_MAC_IPV4_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
263         {pattern_eth_ipv6_gtpu,                         ICE_SW_INSET_MAC_IPV6_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
264         {pattern_eth_ipv4_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
265         {pattern_eth_ipv4_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
266         {pattern_eth_ipv4_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
267         {pattern_eth_ipv4_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
268         {pattern_eth_ipv4_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
269         {pattern_eth_ipv4_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
270         {pattern_eth_ipv4_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
271         {pattern_eth_ipv4_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
272         {pattern_eth_ipv4_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
273         {pattern_eth_ipv4_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
274         {pattern_eth_ipv4_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
275         {pattern_eth_ipv4_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
276         {pattern_eth_ipv6_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
277         {pattern_eth_ipv6_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
278         {pattern_eth_ipv6_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
279         {pattern_eth_ipv6_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
280         {pattern_eth_ipv6_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
281         {pattern_eth_ipv6_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
282         {pattern_eth_ipv6_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
283         {pattern_eth_ipv6_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
284         {pattern_eth_ipv6_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
285         {pattern_eth_ipv6_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
286         {pattern_eth_ipv6_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
287         {pattern_eth_ipv6_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
288 };
289
290 static struct
291 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
292         {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE,                         ICE_INSET_NONE},
293         {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
294         {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
295         {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
296         {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
297         {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
298         {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
299         {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
300         {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
301         {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
302         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE},
303         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE},
304         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE},
305         {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE},
306         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE},
307         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE},
308         {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
309         {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
310         {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
311         {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
312         {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
313         {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
314         {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
315         {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
316         {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
317         {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
318         {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
319         {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
320         {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
321         {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
322         {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
323         {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
324         {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
325         {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
326         {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
327         {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
328         {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
329         {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
330         {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
331         {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
332         {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
333         {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
334         {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
335         {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE,                         ICE_INSET_NONE},
336         {pattern_eth_qinq_ipv4_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV4_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
337         {pattern_eth_qinq_ipv4_udp,                     ICE_SW_INSET_MAC_QINQ_IPV4_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
338         {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE,                         ICE_INSET_NONE},
339         {pattern_eth_qinq_ipv6_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV6_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
340         {pattern_eth_qinq_ipv6_udp,                     ICE_SW_INSET_MAC_QINQ_IPV6_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
341         {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
342         {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
343         {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
344         {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
345         {pattern_eth_ipv4_gtpu,                         ICE_SW_INSET_MAC_IPV4_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
346         {pattern_eth_ipv6_gtpu,                         ICE_SW_INSET_MAC_IPV6_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
347         {pattern_eth_ipv4_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
348         {pattern_eth_ipv4_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
349         {pattern_eth_ipv4_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
350         {pattern_eth_ipv4_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
351         {pattern_eth_ipv4_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
352         {pattern_eth_ipv4_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
353         {pattern_eth_ipv4_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
354         {pattern_eth_ipv4_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
355         {pattern_eth_ipv4_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
356         {pattern_eth_ipv4_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
357         {pattern_eth_ipv4_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
358         {pattern_eth_ipv4_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
359         {pattern_eth_ipv6_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
360         {pattern_eth_ipv6_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
361         {pattern_eth_ipv6_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
362         {pattern_eth_ipv6_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
363         {pattern_eth_ipv6_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
364         {pattern_eth_ipv6_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
365         {pattern_eth_ipv6_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
366         {pattern_eth_ipv6_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
367         {pattern_eth_ipv6_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
368         {pattern_eth_ipv6_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
369         {pattern_eth_ipv6_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
370         {pattern_eth_ipv6_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
371 };
372
373 static int
374 ice_switch_create(struct ice_adapter *ad,
375                 struct rte_flow *flow,
376                 void *meta,
377                 struct rte_flow_error *error)
378 {
379         int ret = 0;
380         struct ice_pf *pf = &ad->pf;
381         struct ice_hw *hw = ICE_PF_TO_HW(pf);
382         struct ice_rule_query_data rule_added = {0};
383         struct ice_switch_filter_conf *filter_conf_ptr;
384         struct ice_adv_lkup_elem *list =
385                 ((struct sw_meta *)meta)->list;
386         uint16_t lkups_cnt =
387                 ((struct sw_meta *)meta)->lkups_num;
388         struct ice_adv_rule_info *rule_info =
389                 &((struct sw_meta *)meta)->rule_info;
390
391         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
392                 rte_flow_error_set(error, EINVAL,
393                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
394                         "item number too large for rule");
395                 goto error;
396         }
397         if (!list) {
398                 rte_flow_error_set(error, EINVAL,
399                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
400                         "lookup list should not be NULL");
401                 goto error;
402         }
403
404         if (ice_dcf_adminq_need_retry(ad)) {
405                 rte_flow_error_set(error, EAGAIN,
406                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
407                         "DCF is not on");
408                 goto error;
409         }
410
411         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
412         if (!ret) {
413                 filter_conf_ptr = rte_zmalloc("ice_switch_filter",
414                         sizeof(struct ice_switch_filter_conf), 0);
415                 if (!filter_conf_ptr) {
416                         rte_flow_error_set(error, EINVAL,
417                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
418                                    "No memory for ice_switch_filter");
419                         goto error;
420                 }
421
422                 filter_conf_ptr->sw_query_data = rule_added;
423
424                 filter_conf_ptr->vsi_num =
425                         ice_get_hw_vsi_num(hw, rule_info->sw_act.vsi_handle);
426                 filter_conf_ptr->lkups = list;
427                 filter_conf_ptr->lkups_num = lkups_cnt;
428                 filter_conf_ptr->rule_info = *rule_info;
429
430                 filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
431
432                 flow->rule = filter_conf_ptr;
433         } else {
434                 if (ice_dcf_adminq_need_retry(ad))
435                         ret = -EAGAIN;
436                 else
437                         ret = -EINVAL;
438
439                 rte_flow_error_set(error, -ret,
440                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
441                         "switch filter create flow fail");
442                 goto error;
443         }
444
445         rte_free(meta);
446         return 0;
447
448 error:
449         rte_free(list);
450         rte_free(meta);
451
452         return -rte_errno;
453 }
454
455 static inline void
456 ice_switch_filter_rule_free(struct rte_flow *flow)
457 {
458         struct ice_switch_filter_conf *filter_conf_ptr =
459                 (struct ice_switch_filter_conf *)flow->rule;
460
461         if (filter_conf_ptr)
462                 rte_free(filter_conf_ptr->lkups);
463
464         rte_free(filter_conf_ptr);
465 }
466
467 static int
468 ice_switch_destroy(struct ice_adapter *ad,
469                 struct rte_flow *flow,
470                 struct rte_flow_error *error)
471 {
472         struct ice_hw *hw = &ad->hw;
473         int ret;
474         struct ice_switch_filter_conf *filter_conf_ptr;
475
476         filter_conf_ptr = (struct ice_switch_filter_conf *)
477                 flow->rule;
478
479         if (!filter_conf_ptr ||
480             filter_conf_ptr->fltr_status == ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT) {
481                 rte_flow_error_set(error, EINVAL,
482                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
483                         "no such flow"
484                         " create by switch filter");
485
486                 ice_switch_filter_rule_free(flow);
487
488                 return -rte_errno;
489         }
490
491         if (ice_dcf_adminq_need_retry(ad)) {
492                 rte_flow_error_set(error, EAGAIN,
493                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
494                         "DCF is not on");
495                 return -rte_errno;
496         }
497
498         ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
499         if (ret) {
500                 if (ice_dcf_adminq_need_retry(ad))
501                         ret = -EAGAIN;
502                 else
503                         ret = -EINVAL;
504
505                 rte_flow_error_set(error, -ret,
506                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
507                         "fail to destroy switch filter rule");
508                 return -rte_errno;
509         }
510
511         ice_switch_filter_rule_free(flow);
512         return ret;
513 }
514
515 static bool
516 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
517                 struct rte_flow_error *error,
518                 struct ice_adv_lkup_elem *list,
519                 uint16_t *lkups_num,
520                 enum ice_sw_tunnel_type *tun_type,
521                 const struct ice_pattern_match_item *pattern_match_item)
522 {
523         const struct rte_flow_item *item = pattern;
524         enum rte_flow_item_type item_type;
525         const struct rte_flow_item_eth *eth_spec, *eth_mask;
526         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
527         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
528         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
529         const struct rte_flow_item_udp *udp_spec, *udp_mask;
530         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
531         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
532         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
533         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
534         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
535         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
536                                 *pppoe_proto_mask;
537         const struct rte_flow_item_esp *esp_spec, *esp_mask;
538         const struct rte_flow_item_ah *ah_spec, *ah_mask;
539         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
540         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
541         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
542         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
543         uint64_t outer_input_set = ICE_INSET_NONE;
544         uint64_t inner_input_set = ICE_INSET_NONE;
545         uint64_t *input = NULL;
546         uint16_t input_set_byte = 0;
547         bool pppoe_elem_valid = 0;
548         bool pppoe_patt_valid = 0;
549         bool pppoe_prot_valid = 0;
550         bool inner_vlan_valid = 0;
551         bool outer_vlan_valid = 0;
552         bool tunnel_valid = 0;
553         bool profile_rule = 0;
554         bool nvgre_valid = 0;
555         bool vxlan_valid = 0;
556         bool qinq_valid = 0;
557         bool ipv6_valid = 0;
558         bool ipv4_valid = 0;
559         bool udp_valid = 0;
560         bool tcp_valid = 0;
561         bool gtpu_valid = 0;
562         bool gtpu_psc_valid = 0;
563         bool inner_ipv4_valid = 0;
564         bool inner_ipv6_valid = 0;
565         bool inner_tcp_valid = 0;
566         bool inner_udp_valid = 0;
567         uint16_t j, k, t = 0;
568
569         if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
570             *tun_type == ICE_NON_TUN_QINQ)
571                 qinq_valid = 1;
572
573         for (item = pattern; item->type !=
574                         RTE_FLOW_ITEM_TYPE_END; item++) {
575                 if (item->last) {
576                         rte_flow_error_set(error, EINVAL,
577                                         RTE_FLOW_ERROR_TYPE_ITEM,
578                                         item,
579                                         "Not support range");
580                         return false;
581                 }
582                 item_type = item->type;
583
584                 switch (item_type) {
585                 case RTE_FLOW_ITEM_TYPE_ETH:
586                         eth_spec = item->spec;
587                         eth_mask = item->mask;
588                         if (eth_spec && eth_mask) {
589                                 const uint8_t *a = eth_mask->src.addr_bytes;
590                                 const uint8_t *b = eth_mask->dst.addr_bytes;
591                                 if (tunnel_valid)
592                                         input = &inner_input_set;
593                                 else
594                                         input = &outer_input_set;
595                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
596                                         if (a[j]) {
597                                                 *input |= ICE_INSET_SMAC;
598                                                 break;
599                                         }
600                                 }
601                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
602                                         if (b[j]) {
603                                                 *input |= ICE_INSET_DMAC;
604                                                 break;
605                                         }
606                                 }
607                                 if (eth_mask->type)
608                                         *input |= ICE_INSET_ETHERTYPE;
609                                 list[t].type = (tunnel_valid  == 0) ?
610                                         ICE_MAC_OFOS : ICE_MAC_IL;
611                                 struct ice_ether_hdr *h;
612                                 struct ice_ether_hdr *m;
613                                 uint16_t i = 0;
614                                 h = &list[t].h_u.eth_hdr;
615                                 m = &list[t].m_u.eth_hdr;
616                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
617                                         if (eth_mask->src.addr_bytes[j]) {
618                                                 h->src_addr[j] =
619                                                 eth_spec->src.addr_bytes[j];
620                                                 m->src_addr[j] =
621                                                 eth_mask->src.addr_bytes[j];
622                                                 i = 1;
623                                                 input_set_byte++;
624                                         }
625                                         if (eth_mask->dst.addr_bytes[j]) {
626                                                 h->dst_addr[j] =
627                                                 eth_spec->dst.addr_bytes[j];
628                                                 m->dst_addr[j] =
629                                                 eth_mask->dst.addr_bytes[j];
630                                                 i = 1;
631                                                 input_set_byte++;
632                                         }
633                                 }
634                                 if (i)
635                                         t++;
636                                 if (eth_mask->type) {
637                                         list[t].type = ICE_ETYPE_OL;
638                                         list[t].h_u.ethertype.ethtype_id =
639                                                 eth_spec->type;
640                                         list[t].m_u.ethertype.ethtype_id =
641                                                 eth_mask->type;
642                                         input_set_byte += 2;
643                                         t++;
644                                 }
645                         }
646                         break;
647
648                 case RTE_FLOW_ITEM_TYPE_IPV4:
649                         ipv4_spec = item->spec;
650                         ipv4_mask = item->mask;
651                         if (tunnel_valid) {
652                                 inner_ipv4_valid = 1;
653                                 input = &inner_input_set;
654                         } else {
655                                 ipv4_valid = 1;
656                                 input = &outer_input_set;
657                         }
658
659                         if (ipv4_spec && ipv4_mask) {
660                                 /* Check IPv4 mask and update input set */
661                                 if (ipv4_mask->hdr.version_ihl ||
662                                         ipv4_mask->hdr.total_length ||
663                                         ipv4_mask->hdr.packet_id ||
664                                         ipv4_mask->hdr.hdr_checksum) {
665                                         rte_flow_error_set(error, EINVAL,
666                                                    RTE_FLOW_ERROR_TYPE_ITEM,
667                                                    item,
668                                                    "Invalid IPv4 mask.");
669                                         return false;
670                                 }
671
672                                 if (ipv4_mask->hdr.src_addr)
673                                         *input |= ICE_INSET_IPV4_SRC;
674                                 if (ipv4_mask->hdr.dst_addr)
675                                         *input |= ICE_INSET_IPV4_DST;
676                                 if (ipv4_mask->hdr.time_to_live)
677                                         *input |= ICE_INSET_IPV4_TTL;
678                                 if (ipv4_mask->hdr.next_proto_id)
679                                         *input |= ICE_INSET_IPV4_PROTO;
680                                 if (ipv4_mask->hdr.type_of_service)
681                                         *input |= ICE_INSET_IPV4_TOS;
682
683                                 list[t].type = (tunnel_valid  == 0) ?
684                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
685                                 if (ipv4_mask->hdr.src_addr) {
686                                         list[t].h_u.ipv4_hdr.src_addr =
687                                                 ipv4_spec->hdr.src_addr;
688                                         list[t].m_u.ipv4_hdr.src_addr =
689                                                 ipv4_mask->hdr.src_addr;
690                                         input_set_byte += 2;
691                                 }
692                                 if (ipv4_mask->hdr.dst_addr) {
693                                         list[t].h_u.ipv4_hdr.dst_addr =
694                                                 ipv4_spec->hdr.dst_addr;
695                                         list[t].m_u.ipv4_hdr.dst_addr =
696                                                 ipv4_mask->hdr.dst_addr;
697                                         input_set_byte += 2;
698                                 }
699                                 if (ipv4_mask->hdr.time_to_live) {
700                                         list[t].h_u.ipv4_hdr.time_to_live =
701                                                 ipv4_spec->hdr.time_to_live;
702                                         list[t].m_u.ipv4_hdr.time_to_live =
703                                                 ipv4_mask->hdr.time_to_live;
704                                         input_set_byte++;
705                                 }
706                                 if (ipv4_mask->hdr.next_proto_id) {
707                                         list[t].h_u.ipv4_hdr.protocol =
708                                                 ipv4_spec->hdr.next_proto_id;
709                                         list[t].m_u.ipv4_hdr.protocol =
710                                                 ipv4_mask->hdr.next_proto_id;
711                                         input_set_byte++;
712                                 }
713                                 if ((ipv4_spec->hdr.next_proto_id &
714                                         ipv4_mask->hdr.next_proto_id) ==
715                                         ICE_IPV4_PROTO_NVGRE)
716                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
717                                 if (ipv4_mask->hdr.type_of_service) {
718                                         list[t].h_u.ipv4_hdr.tos =
719                                                 ipv4_spec->hdr.type_of_service;
720                                         list[t].m_u.ipv4_hdr.tos =
721                                                 ipv4_mask->hdr.type_of_service;
722                                         input_set_byte++;
723                                 }
724                                 t++;
725                         }
726                         break;
727
728                 case RTE_FLOW_ITEM_TYPE_IPV6:
729                         ipv6_spec = item->spec;
730                         ipv6_mask = item->mask;
731                         if (tunnel_valid) {
732                                 inner_ipv6_valid = 1;
733                                 input = &inner_input_set;
734                         } else {
735                                 ipv6_valid = 1;
736                                 input = &outer_input_set;
737                         }
738
739                         if (ipv6_spec && ipv6_mask) {
740                                 if (ipv6_mask->hdr.payload_len) {
741                                         rte_flow_error_set(error, EINVAL,
742                                            RTE_FLOW_ERROR_TYPE_ITEM,
743                                            item,
744                                            "Invalid IPv6 mask");
745                                         return false;
746                                 }
747
748                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
749                                         if (ipv6_mask->hdr.src_addr[j]) {
750                                                 *input |= ICE_INSET_IPV6_SRC;
751                                                 break;
752                                         }
753                                 }
754                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
755                                         if (ipv6_mask->hdr.dst_addr[j]) {
756                                                 *input |= ICE_INSET_IPV6_DST;
757                                                 break;
758                                         }
759                                 }
760                                 if (ipv6_mask->hdr.proto)
761                                         *input |= ICE_INSET_IPV6_NEXT_HDR;
762                                 if (ipv6_mask->hdr.hop_limits)
763                                         *input |= ICE_INSET_IPV6_HOP_LIMIT;
764                                 if (ipv6_mask->hdr.vtc_flow &
765                                     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
766                                         *input |= ICE_INSET_IPV6_TC;
767
768                                 list[t].type = (tunnel_valid  == 0) ?
769                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
770                                 struct ice_ipv6_hdr *f;
771                                 struct ice_ipv6_hdr *s;
772                                 f = &list[t].h_u.ipv6_hdr;
773                                 s = &list[t].m_u.ipv6_hdr;
774                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
775                                         if (ipv6_mask->hdr.src_addr[j]) {
776                                                 f->src_addr[j] =
777                                                 ipv6_spec->hdr.src_addr[j];
778                                                 s->src_addr[j] =
779                                                 ipv6_mask->hdr.src_addr[j];
780                                                 input_set_byte++;
781                                         }
782                                         if (ipv6_mask->hdr.dst_addr[j]) {
783                                                 f->dst_addr[j] =
784                                                 ipv6_spec->hdr.dst_addr[j];
785                                                 s->dst_addr[j] =
786                                                 ipv6_mask->hdr.dst_addr[j];
787                                                 input_set_byte++;
788                                         }
789                                 }
790                                 if (ipv6_mask->hdr.proto) {
791                                         f->next_hdr =
792                                                 ipv6_spec->hdr.proto;
793                                         s->next_hdr =
794                                                 ipv6_mask->hdr.proto;
795                                         input_set_byte++;
796                                 }
797                                 if (ipv6_mask->hdr.hop_limits) {
798                                         f->hop_limit =
799                                                 ipv6_spec->hdr.hop_limits;
800                                         s->hop_limit =
801                                                 ipv6_mask->hdr.hop_limits;
802                                         input_set_byte++;
803                                 }
804                                 if (ipv6_mask->hdr.vtc_flow &
805                                                 rte_cpu_to_be_32
806                                                 (RTE_IPV6_HDR_TC_MASK)) {
807                                         struct ice_le_ver_tc_flow vtf;
808                                         vtf.u.fld.version = 0;
809                                         vtf.u.fld.flow_label = 0;
810                                         vtf.u.fld.tc = (rte_be_to_cpu_32
811                                                 (ipv6_spec->hdr.vtc_flow) &
812                                                         RTE_IPV6_HDR_TC_MASK) >>
813                                                         RTE_IPV6_HDR_TC_SHIFT;
814                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
815                                         vtf.u.fld.tc = (rte_be_to_cpu_32
816                                                 (ipv6_mask->hdr.vtc_flow) &
817                                                         RTE_IPV6_HDR_TC_MASK) >>
818                                                         RTE_IPV6_HDR_TC_SHIFT;
819                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
820                                         input_set_byte += 4;
821                                 }
822                                 t++;
823                         }
824                         break;
825
826                 case RTE_FLOW_ITEM_TYPE_UDP:
827                         udp_spec = item->spec;
828                         udp_mask = item->mask;
829                         if (tunnel_valid) {
830                                 inner_udp_valid = 1;
831                                 input = &inner_input_set;
832                         } else {
833                                 udp_valid = 1;
834                                 input = &outer_input_set;
835                         }
836
837                         if (udp_spec && udp_mask) {
838                                 /* Check UDP mask and update input set*/
839                                 if (udp_mask->hdr.dgram_len ||
840                                     udp_mask->hdr.dgram_cksum) {
841                                         rte_flow_error_set(error, EINVAL,
842                                                    RTE_FLOW_ERROR_TYPE_ITEM,
843                                                    item,
844                                                    "Invalid UDP mask");
845                                         return false;
846                                 }
847
848                                 if (udp_mask->hdr.src_port)
849                                         *input |= ICE_INSET_UDP_SRC_PORT;
850                                 if (udp_mask->hdr.dst_port)
851                                         *input |= ICE_INSET_UDP_DST_PORT;
852
853                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
854                                                 tunnel_valid == 0)
855                                         list[t].type = ICE_UDP_OF;
856                                 else
857                                         list[t].type = ICE_UDP_ILOS;
858                                 if (udp_mask->hdr.src_port) {
859                                         list[t].h_u.l4_hdr.src_port =
860                                                 udp_spec->hdr.src_port;
861                                         list[t].m_u.l4_hdr.src_port =
862                                                 udp_mask->hdr.src_port;
863                                         input_set_byte += 2;
864                                 }
865                                 if (udp_mask->hdr.dst_port) {
866                                         list[t].h_u.l4_hdr.dst_port =
867                                                 udp_spec->hdr.dst_port;
868                                         list[t].m_u.l4_hdr.dst_port =
869                                                 udp_mask->hdr.dst_port;
870                                         input_set_byte += 2;
871                                 }
872                                 t++;
873                         }
874                         break;
875
876                 case RTE_FLOW_ITEM_TYPE_TCP:
877                         tcp_spec = item->spec;
878                         tcp_mask = item->mask;
879                         if (tunnel_valid) {
880                                 inner_tcp_valid = 1;
881                                 input = &inner_input_set;
882                         } else {
883                                 tcp_valid = 1;
884                                 input = &outer_input_set;
885                         }
886
887                         if (tcp_spec && tcp_mask) {
888                                 /* Check TCP mask and update input set */
889                                 if (tcp_mask->hdr.sent_seq ||
890                                         tcp_mask->hdr.recv_ack ||
891                                         tcp_mask->hdr.data_off ||
892                                         tcp_mask->hdr.tcp_flags ||
893                                         tcp_mask->hdr.rx_win ||
894                                         tcp_mask->hdr.cksum ||
895                                         tcp_mask->hdr.tcp_urp) {
896                                         rte_flow_error_set(error, EINVAL,
897                                            RTE_FLOW_ERROR_TYPE_ITEM,
898                                            item,
899                                            "Invalid TCP mask");
900                                         return false;
901                                 }
902
903                                 if (tcp_mask->hdr.src_port)
904                                         *input |= ICE_INSET_TCP_SRC_PORT;
905                                 if (tcp_mask->hdr.dst_port)
906                                         *input |= ICE_INSET_TCP_DST_PORT;
907                                 list[t].type = ICE_TCP_IL;
908                                 if (tcp_mask->hdr.src_port) {
909                                         list[t].h_u.l4_hdr.src_port =
910                                                 tcp_spec->hdr.src_port;
911                                         list[t].m_u.l4_hdr.src_port =
912                                                 tcp_mask->hdr.src_port;
913                                         input_set_byte += 2;
914                                 }
915                                 if (tcp_mask->hdr.dst_port) {
916                                         list[t].h_u.l4_hdr.dst_port =
917                                                 tcp_spec->hdr.dst_port;
918                                         list[t].m_u.l4_hdr.dst_port =
919                                                 tcp_mask->hdr.dst_port;
920                                         input_set_byte += 2;
921                                 }
922                                 t++;
923                         }
924                         break;
925
926                 case RTE_FLOW_ITEM_TYPE_SCTP:
927                         sctp_spec = item->spec;
928                         sctp_mask = item->mask;
929                         if (sctp_spec && sctp_mask) {
930                                 /* Check SCTP mask and update input set */
931                                 if (sctp_mask->hdr.cksum) {
932                                         rte_flow_error_set(error, EINVAL,
933                                            RTE_FLOW_ERROR_TYPE_ITEM,
934                                            item,
935                                            "Invalid SCTP mask");
936                                         return false;
937                                 }
938                                 if (tunnel_valid)
939                                         input = &inner_input_set;
940                                 else
941                                         input = &outer_input_set;
942
943                                 if (sctp_mask->hdr.src_port)
944                                         *input |= ICE_INSET_SCTP_SRC_PORT;
945                                 if (sctp_mask->hdr.dst_port)
946                                         *input |= ICE_INSET_SCTP_DST_PORT;
947
948                                 list[t].type = ICE_SCTP_IL;
949                                 if (sctp_mask->hdr.src_port) {
950                                         list[t].h_u.sctp_hdr.src_port =
951                                                 sctp_spec->hdr.src_port;
952                                         list[t].m_u.sctp_hdr.src_port =
953                                                 sctp_mask->hdr.src_port;
954                                         input_set_byte += 2;
955                                 }
956                                 if (sctp_mask->hdr.dst_port) {
957                                         list[t].h_u.sctp_hdr.dst_port =
958                                                 sctp_spec->hdr.dst_port;
959                                         list[t].m_u.sctp_hdr.dst_port =
960                                                 sctp_mask->hdr.dst_port;
961                                         input_set_byte += 2;
962                                 }
963                                 t++;
964                         }
965                         break;
966
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return false;
			}
			/* From here on, subsequent items describe the inner
			 * (encapsulated) packet, so later fields accumulate
			 * into inner_input_set.
			 */
			vxlan_valid = 1;
			tunnel_valid = 1;
			input = &inner_input_set;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					/* Assemble the 24-bit VNI from the
					 * three wire-order bytes.
					 */
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					*input |= ICE_INSET_VXLAN_VNI;
					/* NOTE(review): VNI is 3 bytes but
					 * only 2 are counted here — confirm
					 * against MAX_INPUT_SET_BYTE sizing.
					 */
					input_set_byte += 2;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return false;
			}
			/* NVGRE also starts a tunnel: later fields belong to
			 * the inner input set.
			 */
			nvgre_valid = 1;
			tunnel_valid = 1;
			input = &inner_input_set;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					/* 24-bit TNI assembled from its three
					 * wire-order bytes, same scheme as the
					 * VXLAN VNI above.
					 */
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					*input |= ICE_INSET_NVGRE_TNI;
					/* NOTE(review): TNI is 3 bytes but
					 * only 2 are counted — see VXLAN note.
					 */
					input_set_byte += 2;
				}
				t++;
			}
			break;
1042
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VLAN item");
				return false;
			}

			/* For QinQ patterns the first VLAN item seen is the
			 * outer tag, the second one the inner tag.
			 */
			if (qinq_valid) {
				if (!outer_vlan_valid)
					outer_vlan_valid = 1;
				else
					inner_vlan_valid = 1;
			}

			input = &outer_input_set;

			if (vlan_spec && vlan_mask) {
				if (qinq_valid) {
					if (!inner_vlan_valid) {
						list[t].type = ICE_VLAN_EX;
						*input |=
							ICE_INSET_VLAN_OUTER;
					} else {
						list[t].type = ICE_VLAN_IN;
						*input |=
							ICE_INSET_VLAN_INNER;
					}
				} else {
					/* Single-tagged: outermost VLAN. */
					list[t].type = ICE_VLAN_OFOS;
					*input |= ICE_INSET_VLAN_INNER;
				}

				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set_byte += 2;
				}
				/* Matching on the inner EtherType of the VLAN
				 * item is not supported.
				 */
				if (vlan_mask->inner_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VLAN input set.");
					return false;
				}
				t++;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return false;
			}
			/* Remember the pattern contains PPPoE even when the
			 * item carries no match fields; this drives the
			 * tun_type refinement after the parse loop.
			 */
			pppoe_patt_valid = 1;
			input = &outer_input_set;
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set:
				 * only session_id matching is supported.
				 */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return false;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					*input |= ICE_INSET_PPPOE_SESSION;
					input_set_byte += 2;
				}
				t++;
				pppoe_elem_valid = 1;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
				(pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return false;
			}
			input = &outer_input_set;
			if (pppoe_proto_spec && pppoe_proto_mask) {
				/* Merge into the ICE_PPPOE element created by
				 * the preceding PPPOES item, if any, instead
				 * of emitting a second one.
				 */
				if (pppoe_elem_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					*input |= ICE_INSET_PPPOE_PROTO;
					input_set_byte += 2;
					pppoe_prot_valid = 1;
				}
				/* Payload carried over PPP: IPv4/IPv6 keep
				 * the generic PPPoE tunnel type; any other
				 * protocol ID selects the PAY variant.
				 */
				if ((pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
					(pppoe_proto_mask->proto_id &
					pppoe_proto_spec->proto_id) !=
					    CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
					*tun_type = ICE_SW_TUN_PPPOE_PAY;
				else
					*tun_type = ICE_SW_TUN_PPPOE;
				t++;
			}

			break;
1188
		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;
			if ((esp_spec && !esp_mask) ||
				(!esp_spec && esp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid esp item");
				return false;
			}
			/* Check esp mask and update input set; matching on
			 * the sequence number is not supported.
			 */
			if (esp_mask && esp_mask->hdr.seq) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid esp mask");
				return false;
			}
			input = &outer_input_set;
			/* Bare ESP item with no prior input set: use a
			 * profile-ID rule keyed only on the header stack.
			 */
			if (!esp_spec && !esp_mask && !(*input)) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
				else if (ipv4_valid)
					goto inset_check;
			} else if (esp_spec && esp_mask &&
						esp_mask->hdr.spi){
				/* ESP over UDP is NAT traversal. */
				if (udp_valid)
					list[t].type = ICE_NAT_T;
				else
					list[t].type = ICE_ESP;
				list[t].h_u.esp_hdr.spi =
					esp_spec->hdr.spi;
				list[t].m_u.esp_hdr.spi =
					esp_mask->hdr.spi;
				*input |= ICE_INSET_ESP_SPI;
				input_set_byte += 4;
				t++;
			}

			/* Non-profile rules pick the tunnel type from the
			 * surrounding L3/L4 headers.
			 */
			if (!profile_rule) {
				if (ipv6_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
				else if (ipv4_valid && udp_valid)
					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_ESP;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_ESP;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;
			if ((ah_spec && !ah_mask) ||
				(!ah_spec && ah_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid ah item");
				return false;
			}
			/* Check ah mask and update input set; only the SPI
			 * may be matched.
			 */
			if (ah_mask &&
				(ah_mask->next_hdr ||
				ah_mask->payload_len ||
				ah_mask->seq_num ||
				ah_mask->reserved)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid ah mask");
				return false;
			}

			input = &outer_input_set;
			/* Bare AH item with no prior input set: profile-ID
			 * rule keyed on the header stack only.
			 */
			if (!ah_spec && !ah_mask && !(*input)) {
				profile_rule = 1;
				if (ipv6_valid && udp_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
				else if (ipv4_valid)
					goto inset_check;
			} else if (ah_spec && ah_mask &&
						ah_mask->spi){
				list[t].type = ICE_AH;
				list[t].h_u.ah_hdr.spi =
					ah_spec->spi;
				list[t].m_u.ah_hdr.spi =
					ah_mask->spi;
				*input |= ICE_INSET_AH_SPI;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				/* AH over UDP is not a supported switch
				 * rule; fall through to the input-set check.
				 */
				if (udp_valid)
					goto inset_check;
				else if (ipv6_valid)
					*tun_type = ICE_SW_TUN_IPV6_AH;
				else if (ipv4_valid)
					*tun_type = ICE_SW_TUN_IPV4_AH;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tp_spec = item->spec;
			l2tp_mask = item->mask;
			if ((l2tp_spec && !l2tp_mask) ||
				(!l2tp_spec && l2tp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid l2tp item");
				return false;
			}

			input = &outer_input_set;
			/* Bare L2TPv3 item with no prior input set: use a
			 * profile-ID rule.
			 */
			if (!l2tp_spec && !l2tp_mask && !(*input)) {
				if (ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
				else if (ipv4_valid)
					goto inset_check;
			} else if (l2tp_spec && l2tp_mask &&
						l2tp_mask->session_id){
				list[t].type = ICE_L2TPV3;
				list[t].h_u.l2tpv3_sess_hdr.session_id =
					l2tp_spec->session_id;
				list[t].m_u.l2tpv3_sess_hdr.session_id =
					l2tp_mask->session_id;
				*input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
				input_set_byte += 4;
				t++;
			}

			if (!profile_rule) {
				if (ipv6_valid)
					*tun_type =
					ICE_SW_TUN_IPV6_L2TPV3;
				else if (ipv4_valid)
					*tun_type =
					ICE_SW_TUN_IPV4_L2TPV3;
			}
			break;
1341
		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;
			/* Check if PFCP item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pfcp_spec && pfcp_mask) ||
			    (pfcp_spec && !pfcp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid PFCP item");
				return false;
			}
			if (pfcp_spec && pfcp_mask) {
				/* Check pfcp mask and update input set:
				 * only the S field may be matched; it selects
				 * a session vs node profile-ID tunnel type.
				 */
				if (pfcp_mask->msg_type ||
					pfcp_mask->msg_len ||
					pfcp_mask->seid) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pfcp mask");
					return false;
				}
				if (pfcp_mask->s_field &&
					pfcp_spec->s_field == 0x01 &&
					ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					pfcp_spec->s_field == 0x01)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
				else if (pfcp_mask->s_field &&
					!pfcp_spec->s_field &&
					ipv6_valid)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
				else if (pfcp_mask->s_field &&
					!pfcp_spec->s_field)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
				else
					return false;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;
			if (gtp_spec && !gtp_mask) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid GTP item");
				return false;
			}
			if (gtp_spec && gtp_mask) {
				/* Only the TEID may be matched. */
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTP mask");
					return false;
				}
				input = &outer_input_set;
				if (gtp_mask->teid)
					*input |= ICE_INSET_GTPU_TEID;
				list[t].type = ICE_GTP;
				list[t].h_u.gtp_hdr.teid =
					gtp_spec->teid;
				list[t].m_u.gtp_hdr.teid =
					gtp_mask->teid;
				input_set_byte += 4;
				t++;
			}
			/* GTP-U starts a tunnel regardless of whether match
			 * fields were supplied.
			 */
			tunnel_valid = 1;
			gtpu_valid = 1;
			break;

		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;
			if (gtp_psc_spec && !gtp_psc_mask) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid GTPU_EH item");
				return false;
			}
			if (gtp_psc_spec && gtp_psc_mask) {
				/* Only the QFI may be matched in the
				 * extension header.
				 */
				if (gtp_psc_mask->hdr.type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTPU_EH mask");
					return false;
				}
				input = &outer_input_set;
				if (gtp_psc_mask->hdr.qfi)
					*input |= ICE_INSET_GTPU_QFI;
				/* QFI is folded into the same ICE_GTP lookup
				 * element type as the TEID.
				 */
				list[t].type = ICE_GTP;
				list[t].h_u.gtp_hdr.qfi =
					gtp_psc_spec->hdr.qfi;
				list[t].m_u.gtp_hdr.qfi =
					gtp_psc_mask->hdr.qfi;
				input_set_byte += 1;
				t++;
			}
			gtpu_psc_valid = 1;
			break;
1457
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
				   "Invalid pattern item.");
			return false;
		}
	}

	/* After the parse loop, refine the tunnel type from the combination
	 * of header flags collected above.  First: promote to the QinQ
	 * variant when both VLAN tags were present.
	 */
	if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
	    inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
	else if (*tun_type == ICE_SW_TUN_PPPOE &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_PPPOE_QINQ;
	else if (*tun_type == ICE_NON_TUN &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_NON_TUN_QINQ;
	else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
		 inner_vlan_valid && outer_vlan_valid)
		*tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;

	/* PPPoE pattern without an explicit PPP protocol ID: pick the most
	 * specific PPPoE tunnel type from the VLAN/L3/L4 flags.
	 */
	if (pppoe_patt_valid && !pppoe_prot_valid) {
		if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
		else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
		else if (inner_vlan_valid && outer_vlan_valid)
			*tun_type = ICE_SW_TUN_PPPOE_QINQ;
		else if (ipv6_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
		else if (ipv6_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
		else if (ipv4_valid && udp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
		else if (ipv4_valid && tcp_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
		else if (ipv6_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
		else if (ipv4_valid)
			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
		else
			*tun_type = ICE_SW_TUN_PPPOE;
	}

	/* GTP-U with a PSC extension header: select the GTPU_EH tunnel type
	 * matching the outer/inner L3 and inner L4 combination.
	 */
	if (gtpu_valid && gtpu_psc_valid) {
		if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
		else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
		else if (ipv4_valid && inner_ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
		else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
		else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
		else if (ipv4_valid && inner_ipv6_valid)
			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
		else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
		else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
		else if (ipv6_valid && inner_ipv4_valid)
			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1524                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1525                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1526                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1527                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1528                 else if (ipv6_valid && inner_ipv6_valid)
1529                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1530                 else if (ipv4_valid)
1531                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1532                 else if (ipv6_valid)
1533                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1534         } else if (gtpu_valid) {
1535                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1536                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1537                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1538                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1539                 else if (ipv4_valid && inner_ipv4_valid)
1540                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1541                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1542                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1543                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1544                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1545                 else if (ipv4_valid && inner_ipv6_valid)
1546                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1547                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1548                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1549                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1550                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1551                 else if (ipv6_valid && inner_ipv4_valid)
1552                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1553                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1554                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1555                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1556                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1557                 else if (ipv6_valid && inner_ipv6_valid)
1558                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1559                 else if (ipv4_valid)
1560                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1561                 else if (ipv6_valid)
1562                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1563         }
1564
1565         if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1566             *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1567                 for (k = 0; k < t; k++) {
1568                         if (list[k].type == ICE_GTP)
1569                                 list[k].type = ICE_GTP_NO_PAY;
1570                 }
1571         }
1572
1573         if (*tun_type == ICE_NON_TUN) {
1574                 if (vxlan_valid)
1575                         *tun_type = ICE_SW_TUN_VXLAN;
1576                 else if (nvgre_valid)
1577                         *tun_type = ICE_SW_TUN_NVGRE;
1578                 else if (ipv4_valid && tcp_valid)
1579                         *tun_type = ICE_SW_IPV4_TCP;
1580                 else if (ipv4_valid && udp_valid)
1581                         *tun_type = ICE_SW_IPV4_UDP;
1582                 else if (ipv6_valid && tcp_valid)
1583                         *tun_type = ICE_SW_IPV6_TCP;
1584                 else if (ipv6_valid && udp_valid)
1585                         *tun_type = ICE_SW_IPV6_UDP;
1586         }
1587
1588         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1589                 rte_flow_error_set(error, EINVAL,
1590                         RTE_FLOW_ERROR_TYPE_ITEM,
1591                         item,
1592                         "too much input set");
1593                 return false;
1594         }
1595
1596         *lkups_num = t;
1597
1598 inset_check:
1599         if ((!outer_input_set && !inner_input_set &&
1600             !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1601             ~pattern_match_item->input_set_mask_o) ||
1602             (inner_input_set & ~pattern_match_item->input_set_mask_i))
1603                 return false;
1604
1605         return true;
1606 }
1607
1608 static int
1609 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1610                             const struct rte_flow_action *actions,
1611                             uint32_t priority,
1612                             struct rte_flow_error *error,
1613                             struct ice_adv_rule_info *rule_info)
1614 {
1615         const struct rte_flow_action_vf *act_vf;
1616         const struct rte_flow_action *action;
1617         enum rte_flow_action_type action_type;
1618
1619         for (action = actions; action->type !=
1620                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1621                 action_type = action->type;
1622                 switch (action_type) {
1623                 case RTE_FLOW_ACTION_TYPE_VF:
1624                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1625                         act_vf = action->conf;
1626
1627                         if (act_vf->id >= ad->real_hw.num_vfs &&
1628                                 !act_vf->original) {
1629                                 rte_flow_error_set(error,
1630                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1631                                         actions,
1632                                         "Invalid vf id");
1633                                 return -rte_errno;
1634                         }
1635
1636                         if (act_vf->original)
1637                                 rule_info->sw_act.vsi_handle =
1638                                         ad->real_hw.avf.bus.func;
1639                         else
1640                                 rule_info->sw_act.vsi_handle = act_vf->id;
1641                         break;
1642
1643                 case RTE_FLOW_ACTION_TYPE_DROP:
1644                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1645                         break;
1646
1647                 default:
1648                         rte_flow_error_set(error,
1649                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1650                                            actions,
1651                                            "Invalid action type");
1652                         return -rte_errno;
1653                 }
1654         }
1655
1656         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1657         rule_info->sw_act.flag = ICE_FLTR_RX;
1658         rule_info->rx = 1;
1659         /* 0 denotes lowest priority of recipe and highest priority
1660          * of rte_flow. Change rte_flow priority into recipe priority.
1661          */
1662         rule_info->priority = ICE_SW_PRI_BASE - priority;
1663
1664         return 0;
1665 }
1666
1667 static int
1668 ice_switch_parse_action(struct ice_pf *pf,
1669                 const struct rte_flow_action *actions,
1670                 uint32_t priority,
1671                 struct rte_flow_error *error,
1672                 struct ice_adv_rule_info *rule_info)
1673 {
1674         struct ice_vsi *vsi = pf->main_vsi;
1675         struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
1676         const struct rte_flow_action_queue *act_q;
1677         const struct rte_flow_action_rss *act_qgrop;
1678         uint16_t base_queue, i;
1679         const struct rte_flow_action *action;
1680         enum rte_flow_action_type action_type;
1681         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1682                  2, 4, 8, 16, 32, 64, 128};
1683
1684         base_queue = pf->base_queue + vsi->base_queue;
1685         for (action = actions; action->type !=
1686                         RTE_FLOW_ACTION_TYPE_END; action++) {
1687                 action_type = action->type;
1688                 switch (action_type) {
1689                 case RTE_FLOW_ACTION_TYPE_RSS:
1690                         act_qgrop = action->conf;
1691                         if (act_qgrop->queue_num <= 1)
1692                                 goto error;
1693                         rule_info->sw_act.fltr_act =
1694                                 ICE_FWD_TO_QGRP;
1695                         rule_info->sw_act.fwd_id.q_id =
1696                                 base_queue + act_qgrop->queue[0];
1697                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1698                                 if (act_qgrop->queue_num ==
1699                                         valid_qgrop_number[i])
1700                                         break;
1701                         }
1702                         if (i == MAX_QGRP_NUM_TYPE)
1703                                 goto error;
1704                         if ((act_qgrop->queue[0] +
1705                                 act_qgrop->queue_num) >
1706                                 dev_data->nb_rx_queues)
1707                                 goto error1;
1708                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1709                                 if (act_qgrop->queue[i + 1] !=
1710                                         act_qgrop->queue[i] + 1)
1711                                         goto error2;
1712                         rule_info->sw_act.qgrp_size =
1713                                 act_qgrop->queue_num;
1714                         break;
1715                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1716                         act_q = action->conf;
1717                         if (act_q->index >= dev_data->nb_rx_queues)
1718                                 goto error;
1719                         rule_info->sw_act.fltr_act =
1720                                 ICE_FWD_TO_Q;
1721                         rule_info->sw_act.fwd_id.q_id =
1722                                 base_queue + act_q->index;
1723                         break;
1724
1725                 case RTE_FLOW_ACTION_TYPE_DROP:
1726                         rule_info->sw_act.fltr_act =
1727                                 ICE_DROP_PACKET;
1728                         break;
1729
1730                 case RTE_FLOW_ACTION_TYPE_VOID:
1731                         break;
1732
1733                 default:
1734                         goto error;
1735                 }
1736         }
1737
1738         rule_info->sw_act.vsi_handle = vsi->idx;
1739         rule_info->rx = 1;
1740         rule_info->sw_act.src = vsi->idx;
1741         /* 0 denotes lowest priority of recipe and highest priority
1742          * of rte_flow. Change rte_flow priority into recipe priority.
1743          */
1744         rule_info->priority = ICE_SW_PRI_BASE - priority;
1745
1746         return 0;
1747
1748 error:
1749         rte_flow_error_set(error,
1750                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1751                 actions,
1752                 "Invalid action type or queue number");
1753         return -rte_errno;
1754
1755 error1:
1756         rte_flow_error_set(error,
1757                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1758                 actions,
1759                 "Invalid queue region indexes");
1760         return -rte_errno;
1761
1762 error2:
1763         rte_flow_error_set(error,
1764                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1765                 actions,
1766                 "Discontinuous queue region");
1767         return -rte_errno;
1768 }
1769
1770 static int
1771 ice_switch_check_action(const struct rte_flow_action *actions,
1772                             struct rte_flow_error *error)
1773 {
1774         const struct rte_flow_action *action;
1775         enum rte_flow_action_type action_type;
1776         uint16_t actions_num = 0;
1777
1778         for (action = actions; action->type !=
1779                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1780                 action_type = action->type;
1781                 switch (action_type) {
1782                 case RTE_FLOW_ACTION_TYPE_VF:
1783                 case RTE_FLOW_ACTION_TYPE_RSS:
1784                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1785                 case RTE_FLOW_ACTION_TYPE_DROP:
1786                         actions_num++;
1787                         break;
1788                 case RTE_FLOW_ACTION_TYPE_VOID:
1789                         continue;
1790                 default:
1791                         rte_flow_error_set(error,
1792                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1793                                            actions,
1794                                            "Invalid action type");
1795                         return -rte_errno;
1796                 }
1797         }
1798
1799         if (actions_num != 1) {
1800                 rte_flow_error_set(error,
1801                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1802                                    actions,
1803                                    "Invalid action number");
1804                 return -rte_errno;
1805         }
1806
1807         return 0;
1808 }
1809
/*
 * Top-level parse callback for the switch filter engine: match the
 * pattern against the supported pattern array, translate the items into
 * an advanced lookup list, translate the actions into ice_adv_rule_info,
 * and hand the result back through *meta (when non-NULL).
 *
 * On success with non-NULL @meta, ownership of the allocated lookup list
 * and sw_meta structure transfers to the caller via *meta; otherwise
 * everything allocated here is freed before returning.
 *
 * Returns 0 on success, -rte_errno with @error populated on failure.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Pre-scan the pattern to size the lookup list and pick an initial
	 * tunnel type.  A fully-masked ether type means the rule should hit
	 * both tunnel and non-tunnel traffic.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN items indicate a QinQ pattern. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Fill the lookup list and refine tun_type from the actual items. */
	if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
				   &tun_type, pattern_match_item)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF mode forwards to VFs; the normal PF path forwards to local
	 * queues / queue groups.
	 */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
						  error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, priority, error,
					      &rule_info);

	if (ret)
		goto error;

	if (meta) {
		/* Hand allocations off to the caller via *meta. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1934
1935 static int
1936 ice_switch_query(struct ice_adapter *ad __rte_unused,
1937                 struct rte_flow *flow __rte_unused,
1938                 struct rte_flow_query_count *count __rte_unused,
1939                 struct rte_flow_error *error)
1940 {
1941         rte_flow_error_set(error, EINVAL,
1942                 RTE_FLOW_ERROR_TYPE_HANDLE,
1943                 NULL,
1944                 "count action not supported by switch filter");
1945
1946         return -rte_errno;
1947 }
1948
/*
 * Redirect a switch rule to a new VSI number (used when a target VF is
 * reset and its VSI mapping changes): remove the old hardware rule and
 * replay it against the updated VSI context.
 *
 * The rule's fltr_status records where a previous redirect attempt
 * stopped, so a retry can resume: ADDED removes then re-adds,
 * RMV_FAILED retries the removal (after restoring the saved VSI
 * number), ADD_FAILED skips straight to re-adding.
 *
 * Returns 0 on success (or when the rule is not affected), -EAGAIN if
 * the DCF admin queue is down and the caller should retry, negative
 * errno otherwise.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata;
	struct ice_switch_filter_conf *filter_conf_ptr =
		(struct ice_switch_filter_conf *)flow->rule;
	struct ice_rule_query_data added_rdata = { 0 };
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_ref = NULL;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	rdata = &filter_conf_ptr->sw_query_data;

	/* Rule targets a different VSI: nothing to do. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	switch (filter_conf_ptr->fltr_status) {
	case ICE_SW_FLTR_ADDED:
		/* Locate the live rule in the recipe's filter list and
		 * duplicate its lookups so we can remove and re-add it.
		 */
		list_head = &sw->recp_list[rdata->rid].filt_rules;
		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
				    list_entry) {
			rinfo = list_itr->rule_info;
			if ((rinfo.fltr_rule_id == rdata->rule_id &&
			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
			    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
			    (rinfo.fltr_rule_id == rdata->rule_id &&
			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
				lkups_cnt = list_itr->lkups_cnt;

				lkups_dp = (struct ice_adv_lkup_elem *)
					ice_memdup(hw, list_itr->lkups,
						   sizeof(*list_itr->lkups) *
						   lkups_cnt,
						   ICE_NONDMA_TO_NONDMA);
				if (!lkups_dp) {
					PMD_DRV_LOG(ERR,
						    "Failed to allocate memory.");
					return -EINVAL;
				}
				lkups_ref = lkups_dp;

				/* Collapse a VSI-list action back to a
				 * single-VSI forward for the replay.
				 */
				if (rinfo.sw_act.fltr_act ==
				    ICE_FWD_TO_VSI_LIST) {
					rinfo.sw_act.vsi_handle =
						rd->vsi_handle;
					rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
				}
				break;
			}
		}

		if (!lkups_ref)
			return -EINVAL;

		goto rmv_rule;
	case ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT:
		/* Recover VSI context */
		hw->vsi_ctx[rd->vsi_handle]->vsi_num = filter_conf_ptr->vsi_num;
		rinfo = filter_conf_ptr->rule_info;
		lkups_cnt = filter_conf_ptr->lkups_num;
		lkups_ref = filter_conf_ptr->lkups;

		if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
			rinfo.sw_act.vsi_handle = rd->vsi_handle;
			rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
		}

		goto rmv_rule;
	case ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT:
		/* Removal already succeeded on a previous attempt; only the
		 * re-add is still pending.
		 */
		rinfo = filter_conf_ptr->rule_info;
		lkups_cnt = filter_conf_ptr->lkups_num;
		lkups_ref = filter_conf_ptr->lkups;

		goto add_rule;
	default:
		return -EINVAL;
	}

rmv_rule:
	if (ice_dcf_adminq_need_retry(ad)) {
		PMD_DRV_LOG(WARNING, "DCF is not on");
		ret = -EAGAIN;
		goto out;
	}

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		filter_conf_ptr->fltr_status =
			ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT;
		ret = -EINVAL;
		goto out;
	}
	/* NOTE: intentional fall-through from removal into re-add. */

add_rule:
	if (ice_dcf_adminq_need_retry(ad)) {
		PMD_DRV_LOG(WARNING, "DCF is not on");
		ret = -EAGAIN;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_ref, lkups_cnt,
			       &rinfo, &added_rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		filter_conf_ptr->fltr_status =
			ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT;
		ret = -EINVAL;
	} else {
		filter_conf_ptr->sw_query_data = added_rdata;
		/* Save VSI number for failure recover */
		filter_conf_ptr->vsi_num = rd->new_vsi_num;
		filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
	}

out:
	/* An EINVAL from the admin queue while DCF is down really means
	 * "retry later".
	 */
	if (ret == -EINVAL)
		if (ice_dcf_adminq_need_retry(ad))
			ret = -EAGAIN;

	ice_free(hw, lkups_dp);
	return ret;
}
2094
2095 static int
2096 ice_switch_init(struct ice_adapter *ad)
2097 {
2098         int ret = 0;
2099         struct ice_flow_parser *dist_parser;
2100         struct ice_flow_parser *perm_parser;
2101
2102         if (ad->devargs.pipe_mode_support) {
2103                 perm_parser = &ice_switch_perm_parser;
2104                 ret = ice_register_parser(perm_parser, ad);
2105         } else {
2106                 dist_parser = &ice_switch_dist_parser;
2107                 ret = ice_register_parser(dist_parser, ad);
2108         }
2109         return ret;
2110 }
2111
2112 static void
2113 ice_switch_uninit(struct ice_adapter *ad)
2114 {
2115         struct ice_flow_parser *dist_parser;
2116         struct ice_flow_parser *perm_parser;
2117
2118         if (ad->devargs.pipe_mode_support) {
2119                 perm_parser = &ice_switch_perm_parser;
2120                 ice_unregister_parser(perm_parser, ad);
2121         } else {
2122                 dist_parser = &ice_switch_dist_parser;
2123                 ice_unregister_parser(dist_parser, ad);
2124         }
2125 }
2126
/* Switch filter engine descriptor: wires the switch-rule callbacks into
 * the generic ice flow framework.  Note .query_count always fails --
 * the switch engine has no flow counters.
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
2138
/* Parser registered when pipe mode is disabled: patterns are matched at
 * the distributor stage (see ice_switch_init()).
 */
static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
2147
/* Parser registered when pipe mode is enabled: patterns are matched at
 * the permission stage (see ice_switch_init()).
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
2156
2157 RTE_INIT(ice_sw_engine_init)
2158 {
2159         struct ice_flow_engine *engine = &ice_switch_engine;
2160         ice_register_flow_engine(engine);
2161 }