net/hns3: fix secondary process reference count
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/*
 * Limits and protocol constants used by the switch filter parser.
 * NOTE(review): the exact semantics of MAX_QGRP_NUM_TYPE and ICE_SW_PRI_BASE
 * come from the base switch code -- confirm against base/ice_switch.h.
 */
#define MAX_QGRP_NUM_TYPE       7
#define MAX_INPUT_SET_BYTE      32
/* PPP protocol IDs carried inside a PPPoE session payload
 * (0x0021 = IPv4, 0x0057 = IPv6; see RFC 1332 / RFC 5072).
 */
#define ICE_PPP_IPV4_PROTO      0x0021
#define ICE_PPP_IPV6_PROTO      0x0057
/* IP protocol number 47 (GRE), used to recognize NVGRE in the IPv4 header. */
#define ICE_IPV4_PROTO_NVGRE    0x002F
#define ICE_SW_PRI_BASE 6

/*
 * ICE_SW_INSET_* bitmaps: for each supported flow pattern, the set of packet
 * fields (ICE_INSET_* bits, presumably declared in ice_generic_flow.h --
 * verify) that a switch rule using that pattern may match on.
 */

/* Plain L2 / VLAN / QinQ patterns. */
#define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
        ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
#define ICE_SW_INSET_MAC_QINQ  ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
        ICE_INSET_VLAN_OUTER)

/* Non-tunnel IPv4/IPv6 and L4 patterns. */
#define ICE_SW_INSET_MAC_IPV4 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
        ICE_SW_INSET_MAC_QINQ_IPV4 | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
        ICE_SW_INSET_MAC_QINQ_IPV4 | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Note: the TCP/UDP variants deliberately drop ICE_INSET_IPV4_PROTO (the
 * L4 protocol is implied by the pattern itself).
 */
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
        ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
        ICE_SW_INSET_MAC_QINQ_IPV6 | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
        ICE_SW_INSET_MAC_QINQ_IPV6 | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)

/* Tunnel (NVGRE/VXLAN) inner input sets used by the "dist" pattern list. */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
        ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
        ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
        ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
        ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
        ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
        ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)

/* Tunnel inner input sets used by the "perm" pattern list (no DMAC/TNI). */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
        ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
        ICE_INSET_IPV4_TOS)

/* PPPoE session patterns, optionally with the PPP protocol field. */
#define ICE_SW_INSET_MAC_PPPOE  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
        ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)

/* Security / tunneling protocols: ESP, AH, L2TPv3, PFCP. */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
        ICE_SW_INSET_MAC_IPV4 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
        ICE_SW_INSET_MAC_IPV6 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)

/* GTP-U patterns: outer headers (with TEID, optionally QFI from the GTP
 * extension header) and inner IPv4/IPv6 +/- L4 ports.
 */
#define ICE_SW_INSET_MAC_IPV4_GTPU ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_IPV6_GTPU ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_OUTER ( \
        ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
        ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
#define ICE_SW_INSET_GTPU_IPV4 ( \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_GTPU_IPV6 ( \
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
#define ICE_SW_INSET_GTPU_IPV4_UDP ( \
        ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
        ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV4_TCP ( \
        ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
        ICE_INSET_TCP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_UDP ( \
        ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
        ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_TCP ( \
        ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
        ICE_INSET_TCP_DST_PORT)
176
/*
 * Parsed switch-rule data handed from pattern/action parsing to the
 * rule-creation stage: the advanced lookup element array, its valid
 * length, and the accompanying rule info.
 */
struct sw_meta {
        struct ice_adv_lkup_elem *list; /* lookup elements (heap-owned) -- TODO confirm owner frees */
        uint16_t lkups_num;             /* number of valid entries in @list */
        struct ice_adv_rule_info rule_info;
};
182
/*
 * Lifecycle status of a switch filter rule; tracks whether a remove/add
 * step failed during a rule redirect so it can be retried or reported.
 * NOTE(review): "RIDRECT" looks like a typo for "REDIRECT"; the constants
 * are referenced elsewhere, so the spelling is kept to avoid breaking
 * callers -- fix tree-wide in a dedicated change.
 */
enum ice_sw_fltr_status {
        ICE_SW_FLTR_ADDED,
        ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT,
        ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT,
};
188
/* Per-rule runtime state kept for each created switch filter. */
struct ice_switch_filter_conf {
        enum ice_sw_fltr_status fltr_status;    /* see enum above */

        /* Handle returned by the base code when the rule was added. */
        struct ice_rule_query_data sw_query_data;

        /*
         * The lookup elements and rule info are saved here when filter creation
         * succeeds -- presumably so the rule can be re-added after a redirect;
         * verify against the redirect path.
         */
        uint16_t vsi_num;       /* VSI the rule was programmed on */
        uint16_t lkups_num;     /* number of valid entries in @lkups */
        struct ice_adv_lkup_elem *lkups;
        struct ice_adv_rule_info rule_info;
};
203
/*
 * Forward declarations of the two parsers registered by this file; the
 * "dist"/"perm" split matches the two pattern lists below (presumably the
 * distributor vs. permission pipeline stages -- confirm against
 * ice_generic_flow.c).
 */
static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;
206
/*
 * Patterns supported by the switch filter in "dist" mode, each with the
 * field sets a rule may match on.
 * NOTE(review): column meaning inferred from usage -- {pattern, outer
 * input-set mask, inner input-set mask, meta}; confirm against
 * struct ice_pattern_match_item in ice_generic_flow.h.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
        /* L2 / VLAN / QinQ / ARP */
        {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
        /* Non-tunnel IPv4/IPv6 +/- L4 */
        {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        /* VXLAN/NVGRE tunnels: outer IPv4 dst + inner DIST_* set */
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
        /* PPPoE (plain, VLAN-tagged, with proto, and with inner IP/L4) */
        {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        /* ESP / AH / L2TPv3 / PFCP */
        {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
        /* QinQ with inner IP/L4 and QinQ PPPoE */
        {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
        /* GTP-U: outer-only, then every outer(v4/v6) x eh x inner(v4/v6) x L4 combo */
        {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
        {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
};
289
290 static struct
291 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
292         {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE,                         ICE_INSET_NONE},
293         {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
294         {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
295         {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
296         {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
297         {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
298         {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
299         {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
300         {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
301         {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
302         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE},
303         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE},
304         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE},
305         {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE},
306         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE},
307         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE},
308         {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
309         {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
310         {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
311         {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
312         {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
313         {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
314         {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
315         {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
316         {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
317         {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
318         {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
319         {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
320         {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
321         {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
322         {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
323         {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
324         {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
325         {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
326         {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
327         {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
328         {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
329         {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
330         {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
331         {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
332         {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
333         {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
334         {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
335         {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE,                         ICE_INSET_NONE},
336         {pattern_eth_qinq_ipv4_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV4_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
337         {pattern_eth_qinq_ipv4_udp,                     ICE_SW_INSET_MAC_QINQ_IPV4_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
338         {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE,                         ICE_INSET_NONE},
339         {pattern_eth_qinq_ipv6_tcp,                     ICE_SW_INSET_MAC_QINQ_IPV6_TCP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
340         {pattern_eth_qinq_ipv6_udp,                     ICE_SW_INSET_MAC_QINQ_IPV6_UDP,         ICE_INSET_NONE,                         ICE_INSET_NONE},
341         {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
342         {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
343         {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
344         {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
345         {pattern_eth_ipv4_gtpu,                         ICE_SW_INSET_MAC_IPV4_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
346         {pattern_eth_ipv6_gtpu,                         ICE_SW_INSET_MAC_IPV6_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
347         {pattern_eth_ipv4_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
348         {pattern_eth_ipv4_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
349         {pattern_eth_ipv4_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
350         {pattern_eth_ipv4_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
351         {pattern_eth_ipv4_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
352         {pattern_eth_ipv4_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
353         {pattern_eth_ipv4_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
354         {pattern_eth_ipv4_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
355         {pattern_eth_ipv4_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
356         {pattern_eth_ipv4_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
357         {pattern_eth_ipv4_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
358         {pattern_eth_ipv4_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
359         {pattern_eth_ipv6_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
360         {pattern_eth_ipv6_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
361         {pattern_eth_ipv6_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
362         {pattern_eth_ipv6_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
363         {pattern_eth_ipv6_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
364         {pattern_eth_ipv6_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
365         {pattern_eth_ipv6_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
366         {pattern_eth_ipv6_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
367         {pattern_eth_ipv6_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
368         {pattern_eth_ipv6_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
369         {pattern_eth_ipv6_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
370         {pattern_eth_ipv6_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
371 };
372
373 static int
374 ice_switch_create(struct ice_adapter *ad,
375                 struct rte_flow *flow,
376                 void *meta,
377                 struct rte_flow_error *error)
378 {
379         int ret = 0;
380         struct ice_pf *pf = &ad->pf;
381         struct ice_hw *hw = ICE_PF_TO_HW(pf);
382         struct ice_rule_query_data rule_added = {0};
383         struct ice_switch_filter_conf *filter_conf_ptr;
384         struct ice_adv_lkup_elem *list =
385                 ((struct sw_meta *)meta)->list;
386         uint16_t lkups_cnt =
387                 ((struct sw_meta *)meta)->lkups_num;
388         struct ice_adv_rule_info *rule_info =
389                 &((struct sw_meta *)meta)->rule_info;
390
391         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
392                 rte_flow_error_set(error, EINVAL,
393                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
394                         "item number too large for rule");
395                 goto error;
396         }
397         if (!list) {
398                 rte_flow_error_set(error, EINVAL,
399                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
400                         "lookup list should not be NULL");
401                 goto error;
402         }
403         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
404         if (!ret) {
405                 filter_conf_ptr = rte_zmalloc("ice_switch_filter",
406                         sizeof(struct ice_switch_filter_conf), 0);
407                 if (!filter_conf_ptr) {
408                         rte_flow_error_set(error, EINVAL,
409                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
410                                    "No memory for ice_switch_filter");
411                         goto error;
412                 }
413
414                 filter_conf_ptr->sw_query_data = rule_added;
415
416                 filter_conf_ptr->vsi_num =
417                         ice_get_hw_vsi_num(hw, rule_info->sw_act.vsi_handle);
418                 filter_conf_ptr->lkups = list;
419                 filter_conf_ptr->lkups_num = lkups_cnt;
420                 filter_conf_ptr->rule_info = *rule_info;
421
422                 filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
423
424                 flow->rule = filter_conf_ptr;
425         } else {
426                 rte_flow_error_set(error, EINVAL,
427                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
428                         "switch filter create flow fail");
429                 goto error;
430         }
431
432         rte_free(meta);
433         return 0;
434
435 error:
436         rte_free(list);
437         rte_free(meta);
438
439         return -rte_errno;
440 }
441
442 static inline void
443 ice_switch_filter_rule_free(struct rte_flow *flow)
444 {
445         struct ice_switch_filter_conf *filter_conf_ptr =
446                 (struct ice_switch_filter_conf *)flow->rule;
447
448         if (filter_conf_ptr)
449                 rte_free(filter_conf_ptr->lkups);
450
451         rte_free(filter_conf_ptr);
452 }
453
454 static int
455 ice_switch_destroy(struct ice_adapter *ad,
456                 struct rte_flow *flow,
457                 struct rte_flow_error *error)
458 {
459         struct ice_hw *hw = &ad->hw;
460         int ret;
461         struct ice_switch_filter_conf *filter_conf_ptr;
462
463         filter_conf_ptr = (struct ice_switch_filter_conf *)
464                 flow->rule;
465
466         if (!filter_conf_ptr ||
467             filter_conf_ptr->fltr_status == ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT) {
468                 rte_flow_error_set(error, EINVAL,
469                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
470                         "no such flow"
471                         " create by switch filter");
472
473                 ice_switch_filter_rule_free(flow);
474
475                 return -rte_errno;
476         }
477
478         ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
479         if (ret) {
480                 rte_flow_error_set(error, EINVAL,
481                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
482                         "fail to destroy switch filter rule");
483                 return -rte_errno;
484         }
485
486         ice_switch_filter_rule_free(flow);
487         return ret;
488 }
489
490 static bool
491 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
492                 struct rte_flow_error *error,
493                 struct ice_adv_lkup_elem *list,
494                 uint16_t *lkups_num,
495                 enum ice_sw_tunnel_type *tun_type,
496                 const struct ice_pattern_match_item *pattern_match_item)
497 {
498         const struct rte_flow_item *item = pattern;
499         enum rte_flow_item_type item_type;
500         const struct rte_flow_item_eth *eth_spec, *eth_mask;
501         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
502         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
503         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
504         const struct rte_flow_item_udp *udp_spec, *udp_mask;
505         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
506         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
507         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
508         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
509         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
510         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
511                                 *pppoe_proto_mask;
512         const struct rte_flow_item_esp *esp_spec, *esp_mask;
513         const struct rte_flow_item_ah *ah_spec, *ah_mask;
514         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
515         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
516         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
517         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
518         uint64_t outer_input_set = ICE_INSET_NONE;
519         uint64_t inner_input_set = ICE_INSET_NONE;
520         uint64_t *input = NULL;
521         uint16_t input_set_byte = 0;
522         bool pppoe_elem_valid = 0;
523         bool pppoe_patt_valid = 0;
524         bool pppoe_prot_valid = 0;
525         bool inner_vlan_valid = 0;
526         bool outer_vlan_valid = 0;
527         bool tunnel_valid = 0;
528         bool profile_rule = 0;
529         bool nvgre_valid = 0;
530         bool vxlan_valid = 0;
531         bool qinq_valid = 0;
532         bool ipv6_valid = 0;
533         bool ipv4_valid = 0;
534         bool udp_valid = 0;
535         bool tcp_valid = 0;
536         bool gtpu_valid = 0;
537         bool gtpu_psc_valid = 0;
538         bool inner_ipv4_valid = 0;
539         bool inner_ipv6_valid = 0;
540         bool inner_tcp_valid = 0;
541         bool inner_udp_valid = 0;
542         uint16_t j, k, t = 0;
543
544         if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
545             *tun_type == ICE_NON_TUN_QINQ)
546                 qinq_valid = 1;
547
548         for (item = pattern; item->type !=
549                         RTE_FLOW_ITEM_TYPE_END; item++) {
550                 if (item->last) {
551                         rte_flow_error_set(error, EINVAL,
552                                         RTE_FLOW_ERROR_TYPE_ITEM,
553                                         item,
554                                         "Not support range");
555                         return false;
556                 }
557                 item_type = item->type;
558
559                 switch (item_type) {
560                 case RTE_FLOW_ITEM_TYPE_ETH:
561                         eth_spec = item->spec;
562                         eth_mask = item->mask;
563                         if (eth_spec && eth_mask) {
564                                 const uint8_t *a = eth_mask->src.addr_bytes;
565                                 const uint8_t *b = eth_mask->dst.addr_bytes;
566                                 if (tunnel_valid)
567                                         input = &inner_input_set;
568                                 else
569                                         input = &outer_input_set;
570                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
571                                         if (a[j]) {
572                                                 *input |= ICE_INSET_SMAC;
573                                                 break;
574                                         }
575                                 }
576                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
577                                         if (b[j]) {
578                                                 *input |= ICE_INSET_DMAC;
579                                                 break;
580                                         }
581                                 }
582                                 if (eth_mask->type)
583                                         *input |= ICE_INSET_ETHERTYPE;
584                                 list[t].type = (tunnel_valid  == 0) ?
585                                         ICE_MAC_OFOS : ICE_MAC_IL;
586                                 struct ice_ether_hdr *h;
587                                 struct ice_ether_hdr *m;
588                                 uint16_t i = 0;
589                                 h = &list[t].h_u.eth_hdr;
590                                 m = &list[t].m_u.eth_hdr;
591                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
592                                         if (eth_mask->src.addr_bytes[j]) {
593                                                 h->src_addr[j] =
594                                                 eth_spec->src.addr_bytes[j];
595                                                 m->src_addr[j] =
596                                                 eth_mask->src.addr_bytes[j];
597                                                 i = 1;
598                                                 input_set_byte++;
599                                         }
600                                         if (eth_mask->dst.addr_bytes[j]) {
601                                                 h->dst_addr[j] =
602                                                 eth_spec->dst.addr_bytes[j];
603                                                 m->dst_addr[j] =
604                                                 eth_mask->dst.addr_bytes[j];
605                                                 i = 1;
606                                                 input_set_byte++;
607                                         }
608                                 }
609                                 if (i)
610                                         t++;
611                                 if (eth_mask->type) {
612                                         list[t].type = ICE_ETYPE_OL;
613                                         list[t].h_u.ethertype.ethtype_id =
614                                                 eth_spec->type;
615                                         list[t].m_u.ethertype.ethtype_id =
616                                                 eth_mask->type;
617                                         input_set_byte += 2;
618                                         t++;
619                                 }
620                         }
621                         break;
622
623                 case RTE_FLOW_ITEM_TYPE_IPV4:
624                         ipv4_spec = item->spec;
625                         ipv4_mask = item->mask;
626                         if (tunnel_valid) {
627                                 inner_ipv4_valid = 1;
628                                 input = &inner_input_set;
629                         } else {
630                                 ipv4_valid = 1;
631                                 input = &outer_input_set;
632                         }
633
634                         if (ipv4_spec && ipv4_mask) {
635                                 /* Check IPv4 mask and update input set */
636                                 if (ipv4_mask->hdr.version_ihl ||
637                                         ipv4_mask->hdr.total_length ||
638                                         ipv4_mask->hdr.packet_id ||
639                                         ipv4_mask->hdr.hdr_checksum) {
640                                         rte_flow_error_set(error, EINVAL,
641                                                    RTE_FLOW_ERROR_TYPE_ITEM,
642                                                    item,
643                                                    "Invalid IPv4 mask.");
644                                         return false;
645                                 }
646
647                                 if (ipv4_mask->hdr.src_addr)
648                                         *input |= ICE_INSET_IPV4_SRC;
649                                 if (ipv4_mask->hdr.dst_addr)
650                                         *input |= ICE_INSET_IPV4_DST;
651                                 if (ipv4_mask->hdr.time_to_live)
652                                         *input |= ICE_INSET_IPV4_TTL;
653                                 if (ipv4_mask->hdr.next_proto_id)
654                                         *input |= ICE_INSET_IPV4_PROTO;
655                                 if (ipv4_mask->hdr.type_of_service)
656                                         *input |= ICE_INSET_IPV4_TOS;
657
658                                 list[t].type = (tunnel_valid  == 0) ?
659                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
660                                 if (ipv4_mask->hdr.src_addr) {
661                                         list[t].h_u.ipv4_hdr.src_addr =
662                                                 ipv4_spec->hdr.src_addr;
663                                         list[t].m_u.ipv4_hdr.src_addr =
664                                                 ipv4_mask->hdr.src_addr;
665                                         input_set_byte += 2;
666                                 }
667                                 if (ipv4_mask->hdr.dst_addr) {
668                                         list[t].h_u.ipv4_hdr.dst_addr =
669                                                 ipv4_spec->hdr.dst_addr;
670                                         list[t].m_u.ipv4_hdr.dst_addr =
671                                                 ipv4_mask->hdr.dst_addr;
672                                         input_set_byte += 2;
673                                 }
674                                 if (ipv4_mask->hdr.time_to_live) {
675                                         list[t].h_u.ipv4_hdr.time_to_live =
676                                                 ipv4_spec->hdr.time_to_live;
677                                         list[t].m_u.ipv4_hdr.time_to_live =
678                                                 ipv4_mask->hdr.time_to_live;
679                                         input_set_byte++;
680                                 }
681                                 if (ipv4_mask->hdr.next_proto_id) {
682                                         list[t].h_u.ipv4_hdr.protocol =
683                                                 ipv4_spec->hdr.next_proto_id;
684                                         list[t].m_u.ipv4_hdr.protocol =
685                                                 ipv4_mask->hdr.next_proto_id;
686                                         input_set_byte++;
687                                 }
688                                 if ((ipv4_spec->hdr.next_proto_id &
689                                         ipv4_mask->hdr.next_proto_id) ==
690                                         ICE_IPV4_PROTO_NVGRE)
691                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
692                                 if (ipv4_mask->hdr.type_of_service) {
693                                         list[t].h_u.ipv4_hdr.tos =
694                                                 ipv4_spec->hdr.type_of_service;
695                                         list[t].m_u.ipv4_hdr.tos =
696                                                 ipv4_mask->hdr.type_of_service;
697                                         input_set_byte++;
698                                 }
699                                 t++;
700                         }
701                         break;
702
703                 case RTE_FLOW_ITEM_TYPE_IPV6:
704                         ipv6_spec = item->spec;
705                         ipv6_mask = item->mask;
706                         if (tunnel_valid) {
707                                 inner_ipv6_valid = 1;
708                                 input = &inner_input_set;
709                         } else {
710                                 ipv6_valid = 1;
711                                 input = &outer_input_set;
712                         }
713
714                         if (ipv6_spec && ipv6_mask) {
715                                 if (ipv6_mask->hdr.payload_len) {
716                                         rte_flow_error_set(error, EINVAL,
717                                            RTE_FLOW_ERROR_TYPE_ITEM,
718                                            item,
719                                            "Invalid IPv6 mask");
720                                         return false;
721                                 }
722
723                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
724                                         if (ipv6_mask->hdr.src_addr[j]) {
725                                                 *input |= ICE_INSET_IPV6_SRC;
726                                                 break;
727                                         }
728                                 }
729                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
730                                         if (ipv6_mask->hdr.dst_addr[j]) {
731                                                 *input |= ICE_INSET_IPV6_DST;
732                                                 break;
733                                         }
734                                 }
735                                 if (ipv6_mask->hdr.proto)
736                                         *input |= ICE_INSET_IPV6_NEXT_HDR;
737                                 if (ipv6_mask->hdr.hop_limits)
738                                         *input |= ICE_INSET_IPV6_HOP_LIMIT;
739                                 if (ipv6_mask->hdr.vtc_flow &
740                                     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
741                                         *input |= ICE_INSET_IPV6_TC;
742
743                                 list[t].type = (tunnel_valid  == 0) ?
744                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
745                                 struct ice_ipv6_hdr *f;
746                                 struct ice_ipv6_hdr *s;
747                                 f = &list[t].h_u.ipv6_hdr;
748                                 s = &list[t].m_u.ipv6_hdr;
749                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
750                                         if (ipv6_mask->hdr.src_addr[j]) {
751                                                 f->src_addr[j] =
752                                                 ipv6_spec->hdr.src_addr[j];
753                                                 s->src_addr[j] =
754                                                 ipv6_mask->hdr.src_addr[j];
755                                                 input_set_byte++;
756                                         }
757                                         if (ipv6_mask->hdr.dst_addr[j]) {
758                                                 f->dst_addr[j] =
759                                                 ipv6_spec->hdr.dst_addr[j];
760                                                 s->dst_addr[j] =
761                                                 ipv6_mask->hdr.dst_addr[j];
762                                                 input_set_byte++;
763                                         }
764                                 }
765                                 if (ipv6_mask->hdr.proto) {
766                                         f->next_hdr =
767                                                 ipv6_spec->hdr.proto;
768                                         s->next_hdr =
769                                                 ipv6_mask->hdr.proto;
770                                         input_set_byte++;
771                                 }
772                                 if (ipv6_mask->hdr.hop_limits) {
773                                         f->hop_limit =
774                                                 ipv6_spec->hdr.hop_limits;
775                                         s->hop_limit =
776                                                 ipv6_mask->hdr.hop_limits;
777                                         input_set_byte++;
778                                 }
779                                 if (ipv6_mask->hdr.vtc_flow &
780                                                 rte_cpu_to_be_32
781                                                 (RTE_IPV6_HDR_TC_MASK)) {
782                                         struct ice_le_ver_tc_flow vtf;
783                                         vtf.u.fld.version = 0;
784                                         vtf.u.fld.flow_label = 0;
785                                         vtf.u.fld.tc = (rte_be_to_cpu_32
786                                                 (ipv6_spec->hdr.vtc_flow) &
787                                                         RTE_IPV6_HDR_TC_MASK) >>
788                                                         RTE_IPV6_HDR_TC_SHIFT;
789                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
790                                         vtf.u.fld.tc = (rte_be_to_cpu_32
791                                                 (ipv6_mask->hdr.vtc_flow) &
792                                                         RTE_IPV6_HDR_TC_MASK) >>
793                                                         RTE_IPV6_HDR_TC_SHIFT;
794                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
795                                         input_set_byte += 4;
796                                 }
797                                 t++;
798                         }
799                         break;
800
801                 case RTE_FLOW_ITEM_TYPE_UDP:
802                         udp_spec = item->spec;
803                         udp_mask = item->mask;
804                         if (tunnel_valid) {
805                                 inner_udp_valid = 1;
806                                 input = &inner_input_set;
807                         } else {
808                                 udp_valid = 1;
809                                 input = &outer_input_set;
810                         }
811
812                         if (udp_spec && udp_mask) {
813                                 /* Check UDP mask and update input set*/
814                                 if (udp_mask->hdr.dgram_len ||
815                                     udp_mask->hdr.dgram_cksum) {
816                                         rte_flow_error_set(error, EINVAL,
817                                                    RTE_FLOW_ERROR_TYPE_ITEM,
818                                                    item,
819                                                    "Invalid UDP mask");
820                                         return false;
821                                 }
822
823                                 if (udp_mask->hdr.src_port)
824                                         *input |= ICE_INSET_UDP_SRC_PORT;
825                                 if (udp_mask->hdr.dst_port)
826                                         *input |= ICE_INSET_UDP_DST_PORT;
827
828                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
829                                                 tunnel_valid == 0)
830                                         list[t].type = ICE_UDP_OF;
831                                 else
832                                         list[t].type = ICE_UDP_ILOS;
833                                 if (udp_mask->hdr.src_port) {
834                                         list[t].h_u.l4_hdr.src_port =
835                                                 udp_spec->hdr.src_port;
836                                         list[t].m_u.l4_hdr.src_port =
837                                                 udp_mask->hdr.src_port;
838                                         input_set_byte += 2;
839                                 }
840                                 if (udp_mask->hdr.dst_port) {
841                                         list[t].h_u.l4_hdr.dst_port =
842                                                 udp_spec->hdr.dst_port;
843                                         list[t].m_u.l4_hdr.dst_port =
844                                                 udp_mask->hdr.dst_port;
845                                         input_set_byte += 2;
846                                 }
847                                 t++;
848                         }
849                         break;
850
851                 case RTE_FLOW_ITEM_TYPE_TCP:
852                         tcp_spec = item->spec;
853                         tcp_mask = item->mask;
854                         if (tunnel_valid) {
855                                 inner_tcp_valid = 1;
856                                 input = &inner_input_set;
857                         } else {
858                                 tcp_valid = 1;
859                                 input = &outer_input_set;
860                         }
861
862                         if (tcp_spec && tcp_mask) {
863                                 /* Check TCP mask and update input set */
864                                 if (tcp_mask->hdr.sent_seq ||
865                                         tcp_mask->hdr.recv_ack ||
866                                         tcp_mask->hdr.data_off ||
867                                         tcp_mask->hdr.tcp_flags ||
868                                         tcp_mask->hdr.rx_win ||
869                                         tcp_mask->hdr.cksum ||
870                                         tcp_mask->hdr.tcp_urp) {
871                                         rte_flow_error_set(error, EINVAL,
872                                            RTE_FLOW_ERROR_TYPE_ITEM,
873                                            item,
874                                            "Invalid TCP mask");
875                                         return false;
876                                 }
877
878                                 if (tcp_mask->hdr.src_port)
879                                         *input |= ICE_INSET_TCP_SRC_PORT;
880                                 if (tcp_mask->hdr.dst_port)
881                                         *input |= ICE_INSET_TCP_DST_PORT;
882                                 list[t].type = ICE_TCP_IL;
883                                 if (tcp_mask->hdr.src_port) {
884                                         list[t].h_u.l4_hdr.src_port =
885                                                 tcp_spec->hdr.src_port;
886                                         list[t].m_u.l4_hdr.src_port =
887                                                 tcp_mask->hdr.src_port;
888                                         input_set_byte += 2;
889                                 }
890                                 if (tcp_mask->hdr.dst_port) {
891                                         list[t].h_u.l4_hdr.dst_port =
892                                                 tcp_spec->hdr.dst_port;
893                                         list[t].m_u.l4_hdr.dst_port =
894                                                 tcp_mask->hdr.dst_port;
895                                         input_set_byte += 2;
896                                 }
897                                 t++;
898                         }
899                         break;
900
901                 case RTE_FLOW_ITEM_TYPE_SCTP:
902                         sctp_spec = item->spec;
903                         sctp_mask = item->mask;
904                         if (sctp_spec && sctp_mask) {
905                                 /* Check SCTP mask and update input set */
906                                 if (sctp_mask->hdr.cksum) {
907                                         rte_flow_error_set(error, EINVAL,
908                                            RTE_FLOW_ERROR_TYPE_ITEM,
909                                            item,
910                                            "Invalid SCTP mask");
911                                         return false;
912                                 }
913                                 if (tunnel_valid)
914                                         input = &inner_input_set;
915                                 else
916                                         input = &outer_input_set;
917
918                                 if (sctp_mask->hdr.src_port)
919                                         *input |= ICE_INSET_SCTP_SRC_PORT;
920                                 if (sctp_mask->hdr.dst_port)
921                                         *input |= ICE_INSET_SCTP_DST_PORT;
922
923                                 list[t].type = ICE_SCTP_IL;
924                                 if (sctp_mask->hdr.src_port) {
925                                         list[t].h_u.sctp_hdr.src_port =
926                                                 sctp_spec->hdr.src_port;
927                                         list[t].m_u.sctp_hdr.src_port =
928                                                 sctp_mask->hdr.src_port;
929                                         input_set_byte += 2;
930                                 }
931                                 if (sctp_mask->hdr.dst_port) {
932                                         list[t].h_u.sctp_hdr.dst_port =
933                                                 sctp_spec->hdr.dst_port;
934                                         list[t].m_u.sctp_hdr.dst_port =
935                                                 sctp_mask->hdr.dst_port;
936                                         input_set_byte += 2;
937                                 }
938                                 t++;
939                         }
940                         break;
941
942                 case RTE_FLOW_ITEM_TYPE_VXLAN:
943                         vxlan_spec = item->spec;
944                         vxlan_mask = item->mask;
945                         /* Check if VXLAN item is used to describe protocol.
946                          * If yes, both spec and mask should be NULL.
947                          * If no, both spec and mask shouldn't be NULL.
948                          */
949                         if ((!vxlan_spec && vxlan_mask) ||
950                             (vxlan_spec && !vxlan_mask)) {
951                                 rte_flow_error_set(error, EINVAL,
952                                            RTE_FLOW_ERROR_TYPE_ITEM,
953                                            item,
954                                            "Invalid VXLAN item");
955                                 return false;
956                         }
957                         vxlan_valid = 1;
958                         tunnel_valid = 1;
959                         input = &inner_input_set;
960                         if (vxlan_spec && vxlan_mask) {
961                                 list[t].type = ICE_VXLAN;
962                                 if (vxlan_mask->vni[0] ||
963                                         vxlan_mask->vni[1] ||
964                                         vxlan_mask->vni[2]) {
965                                         list[t].h_u.tnl_hdr.vni =
966                                                 (vxlan_spec->vni[2] << 16) |
967                                                 (vxlan_spec->vni[1] << 8) |
968                                                 vxlan_spec->vni[0];
969                                         list[t].m_u.tnl_hdr.vni =
970                                                 (vxlan_mask->vni[2] << 16) |
971                                                 (vxlan_mask->vni[1] << 8) |
972                                                 vxlan_mask->vni[0];
973                                         *input |= ICE_INSET_VXLAN_VNI;
974                                         input_set_byte += 2;
975                                 }
976                                 t++;
977                         }
978                         break;
979
980                 case RTE_FLOW_ITEM_TYPE_NVGRE:
981                         nvgre_spec = item->spec;
982                         nvgre_mask = item->mask;
983                         /* Check if NVGRE item is used to describe protocol.
984                          * If yes, both spec and mask should be NULL.
985                          * If no, both spec and mask shouldn't be NULL.
986                          */
987                         if ((!nvgre_spec && nvgre_mask) ||
988                             (nvgre_spec && !nvgre_mask)) {
989                                 rte_flow_error_set(error, EINVAL,
990                                            RTE_FLOW_ERROR_TYPE_ITEM,
991                                            item,
992                                            "Invalid NVGRE item");
993                                 return false;
994                         }
995                         nvgre_valid = 1;
996                         tunnel_valid = 1;
997                         input = &inner_input_set;
998                         if (nvgre_spec && nvgre_mask) {
999                                 list[t].type = ICE_NVGRE;
1000                                 if (nvgre_mask->tni[0] ||
1001                                         nvgre_mask->tni[1] ||
1002                                         nvgre_mask->tni[2]) {
1003                                         list[t].h_u.nvgre_hdr.tni_flow =
1004                                                 (nvgre_spec->tni[2] << 16) |
1005                                                 (nvgre_spec->tni[1] << 8) |
1006                                                 nvgre_spec->tni[0];
1007                                         list[t].m_u.nvgre_hdr.tni_flow =
1008                                                 (nvgre_mask->tni[2] << 16) |
1009                                                 (nvgre_mask->tni[1] << 8) |
1010                                                 nvgre_mask->tni[0];
1011                                         *input |= ICE_INSET_NVGRE_TNI;
1012                                         input_set_byte += 2;
1013                                 }
1014                                 t++;
1015                         }
1016                         break;
1017
1018                 case RTE_FLOW_ITEM_TYPE_VLAN:
1019                         vlan_spec = item->spec;
1020                         vlan_mask = item->mask;
1021                         /* Check if VLAN item is used to describe protocol.
1022                          * If yes, both spec and mask should be NULL.
1023                          * If no, both spec and mask shouldn't be NULL.
1024                          */
1025                         if ((!vlan_spec && vlan_mask) ||
1026                             (vlan_spec && !vlan_mask)) {
1027                                 rte_flow_error_set(error, EINVAL,
1028                                            RTE_FLOW_ERROR_TYPE_ITEM,
1029                                            item,
1030                                            "Invalid VLAN item");
1031                                 return false;
1032                         }
1033
1034                         if (qinq_valid) {
1035                                 if (!outer_vlan_valid)
1036                                         outer_vlan_valid = 1;
1037                                 else
1038                                         inner_vlan_valid = 1;
1039                         }
1040
1041                         input = &outer_input_set;
1042
1043                         if (vlan_spec && vlan_mask) {
1044                                 if (qinq_valid) {
1045                                         if (!inner_vlan_valid) {
1046                                                 list[t].type = ICE_VLAN_EX;
1047                                                 *input |=
1048                                                         ICE_INSET_VLAN_OUTER;
1049                                         } else {
1050                                                 list[t].type = ICE_VLAN_IN;
1051                                                 *input |=
1052                                                         ICE_INSET_VLAN_INNER;
1053                                         }
1054                                 } else {
1055                                         list[t].type = ICE_VLAN_OFOS;
1056                                         *input |= ICE_INSET_VLAN_INNER;
1057                                 }
1058
1059                                 if (vlan_mask->tci) {
1060                                         list[t].h_u.vlan_hdr.vlan =
1061                                                 vlan_spec->tci;
1062                                         list[t].m_u.vlan_hdr.vlan =
1063                                                 vlan_mask->tci;
1064                                         input_set_byte += 2;
1065                                 }
1066                                 if (vlan_mask->inner_type) {
1067                                         rte_flow_error_set(error, EINVAL,
1068                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1069                                                 item,
1070                                                 "Invalid VLAN input set.");
1071                                         return false;
1072                                 }
1073                                 t++;
1074                         }
1075                         break;
1076
1077                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1078                 case RTE_FLOW_ITEM_TYPE_PPPOES:
1079                         pppoe_spec = item->spec;
1080                         pppoe_mask = item->mask;
1081                         /* Check if PPPoE item is used to describe protocol.
1082                          * If yes, both spec and mask should be NULL.
1083                          * If no, both spec and mask shouldn't be NULL.
1084                          */
1085                         if ((!pppoe_spec && pppoe_mask) ||
1086                                 (pppoe_spec && !pppoe_mask)) {
1087                                 rte_flow_error_set(error, EINVAL,
1088                                         RTE_FLOW_ERROR_TYPE_ITEM,
1089                                         item,
1090                                         "Invalid pppoe item");
1091                                 return false;
1092                         }
1093                         pppoe_patt_valid = 1;
1094                         input = &outer_input_set;
1095                         if (pppoe_spec && pppoe_mask) {
1096                                 /* Check pppoe mask and update input set */
1097                                 if (pppoe_mask->length ||
1098                                         pppoe_mask->code ||
1099                                         pppoe_mask->version_type) {
1100                                         rte_flow_error_set(error, EINVAL,
1101                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1102                                                 item,
1103                                                 "Invalid pppoe mask");
1104                                         return false;
1105                                 }
1106                                 list[t].type = ICE_PPPOE;
1107                                 if (pppoe_mask->session_id) {
1108                                         list[t].h_u.pppoe_hdr.session_id =
1109                                                 pppoe_spec->session_id;
1110                                         list[t].m_u.pppoe_hdr.session_id =
1111                                                 pppoe_mask->session_id;
1112                                         *input |= ICE_INSET_PPPOE_SESSION;
1113                                         input_set_byte += 2;
1114                                 }
1115                                 t++;
1116                                 pppoe_elem_valid = 1;
1117                         }
1118                         break;
1119
1120                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1121                         pppoe_proto_spec = item->spec;
1122                         pppoe_proto_mask = item->mask;
1123                         /* Check if PPPoE optional proto_id item
1124                          * is used to describe protocol.
1125                          * If yes, both spec and mask should be NULL.
1126                          * If no, both spec and mask shouldn't be NULL.
1127                          */
1128                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1129                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1130                                 rte_flow_error_set(error, EINVAL,
1131                                         RTE_FLOW_ERROR_TYPE_ITEM,
1132                                         item,
1133                                         "Invalid pppoe proto item");
1134                                 return false;
1135                         }
1136                         input = &outer_input_set;
1137                         if (pppoe_proto_spec && pppoe_proto_mask) {
1138                                 if (pppoe_elem_valid)
1139                                         t--;
1140                                 list[t].type = ICE_PPPOE;
1141                                 if (pppoe_proto_mask->proto_id) {
1142                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1143                                                 pppoe_proto_spec->proto_id;
1144                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1145                                                 pppoe_proto_mask->proto_id;
1146                                         *input |= ICE_INSET_PPPOE_PROTO;
1147                                         input_set_byte += 2;
1148                                         pppoe_prot_valid = 1;
1149                                 }
1150                                 if ((pppoe_proto_mask->proto_id &
1151                                         pppoe_proto_spec->proto_id) !=
1152                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1153                                         (pppoe_proto_mask->proto_id &
1154                                         pppoe_proto_spec->proto_id) !=
1155                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1156                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1157                                 else
1158                                         *tun_type = ICE_SW_TUN_PPPOE;
1159                                 t++;
1160                         }
1161
1162                         break;
1163
1164                 case RTE_FLOW_ITEM_TYPE_ESP:
1165                         esp_spec = item->spec;
1166                         esp_mask = item->mask;
1167                         if ((esp_spec && !esp_mask) ||
1168                                 (!esp_spec && esp_mask)) {
1169                                 rte_flow_error_set(error, EINVAL,
1170                                            RTE_FLOW_ERROR_TYPE_ITEM,
1171                                            item,
1172                                            "Invalid esp item");
1173                                 return false;
1174                         }
1175                         /* Check esp mask and update input set */
1176                         if (esp_mask && esp_mask->hdr.seq) {
1177                                 rte_flow_error_set(error, EINVAL,
1178                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1179                                                 item,
1180                                                 "Invalid esp mask");
1181                                 return false;
1182                         }
1183                         input = &outer_input_set;
1184                         if (!esp_spec && !esp_mask && !(*input)) {
1185                                 profile_rule = 1;
1186                                 if (ipv6_valid && udp_valid)
1187                                         *tun_type =
1188                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1189                                 else if (ipv6_valid)
1190                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1191                                 else if (ipv4_valid)
1192                                         goto inset_check;
1193                         } else if (esp_spec && esp_mask &&
1194                                                 esp_mask->hdr.spi){
1195                                 if (udp_valid)
1196                                         list[t].type = ICE_NAT_T;
1197                                 else
1198                                         list[t].type = ICE_ESP;
1199                                 list[t].h_u.esp_hdr.spi =
1200                                         esp_spec->hdr.spi;
1201                                 list[t].m_u.esp_hdr.spi =
1202                                         esp_mask->hdr.spi;
1203                                 *input |= ICE_INSET_ESP_SPI;
1204                                 input_set_byte += 4;
1205                                 t++;
1206                         }
1207
1208                         if (!profile_rule) {
1209                                 if (ipv6_valid && udp_valid)
1210                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1211                                 else if (ipv4_valid && udp_valid)
1212                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1213                                 else if (ipv6_valid)
1214                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1215                                 else if (ipv4_valid)
1216                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1217                         }
1218                         break;
1219
1220                 case RTE_FLOW_ITEM_TYPE_AH:
1221                         ah_spec = item->spec;
1222                         ah_mask = item->mask;
1223                         if ((ah_spec && !ah_mask) ||
1224                                 (!ah_spec && ah_mask)) {
1225                                 rte_flow_error_set(error, EINVAL,
1226                                            RTE_FLOW_ERROR_TYPE_ITEM,
1227                                            item,
1228                                            "Invalid ah item");
1229                                 return false;
1230                         }
1231                         /* Check ah mask and update input set */
1232                         if (ah_mask &&
1233                                 (ah_mask->next_hdr ||
1234                                 ah_mask->payload_len ||
1235                                 ah_mask->seq_num ||
1236                                 ah_mask->reserved)) {
1237                                 rte_flow_error_set(error, EINVAL,
1238                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1239                                                 item,
1240                                                 "Invalid ah mask");
1241                                 return false;
1242                         }
1243
1244                         input = &outer_input_set;
1245                         if (!ah_spec && !ah_mask && !(*input)) {
1246                                 profile_rule = 1;
1247                                 if (ipv6_valid && udp_valid)
1248                                         *tun_type =
1249                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1250                                 else if (ipv6_valid)
1251                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1252                                 else if (ipv4_valid)
1253                                         goto inset_check;
1254                         } else if (ah_spec && ah_mask &&
1255                                                 ah_mask->spi){
1256                                 list[t].type = ICE_AH;
1257                                 list[t].h_u.ah_hdr.spi =
1258                                         ah_spec->spi;
1259                                 list[t].m_u.ah_hdr.spi =
1260                                         ah_mask->spi;
1261                                 *input |= ICE_INSET_AH_SPI;
1262                                 input_set_byte += 4;
1263                                 t++;
1264                         }
1265
1266                         if (!profile_rule) {
1267                                 if (udp_valid)
1268                                         goto inset_check;
1269                                 else if (ipv6_valid)
1270                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1271                                 else if (ipv4_valid)
1272                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1273                         }
1274                         break;
1275
1276                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1277                         l2tp_spec = item->spec;
1278                         l2tp_mask = item->mask;
1279                         if ((l2tp_spec && !l2tp_mask) ||
1280                                 (!l2tp_spec && l2tp_mask)) {
1281                                 rte_flow_error_set(error, EINVAL,
1282                                            RTE_FLOW_ERROR_TYPE_ITEM,
1283                                            item,
1284                                            "Invalid l2tp item");
1285                                 return false;
1286                         }
1287
1288                         input = &outer_input_set;
1289                         if (!l2tp_spec && !l2tp_mask && !(*input)) {
1290                                 if (ipv6_valid)
1291                                         *tun_type =
1292                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1293                                 else if (ipv4_valid)
1294                                         goto inset_check;
1295                         } else if (l2tp_spec && l2tp_mask &&
1296                                                 l2tp_mask->session_id){
1297                                 list[t].type = ICE_L2TPV3;
1298                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1299                                         l2tp_spec->session_id;
1300                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1301                                         l2tp_mask->session_id;
1302                                 *input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1303                                 input_set_byte += 4;
1304                                 t++;
1305                         }
1306
1307                         if (!profile_rule) {
1308                                 if (ipv6_valid)
1309                                         *tun_type =
1310                                         ICE_SW_TUN_IPV6_L2TPV3;
1311                                 else if (ipv4_valid)
1312                                         *tun_type =
1313                                         ICE_SW_TUN_IPV4_L2TPV3;
1314                         }
1315                         break;
1316
1317                 case RTE_FLOW_ITEM_TYPE_PFCP:
1318                         pfcp_spec = item->spec;
1319                         pfcp_mask = item->mask;
1320                         /* Check if PFCP item is used to describe protocol.
1321                          * If yes, both spec and mask should be NULL.
1322                          * If no, both spec and mask shouldn't be NULL.
1323                          */
1324                         if ((!pfcp_spec && pfcp_mask) ||
1325                             (pfcp_spec && !pfcp_mask)) {
1326                                 rte_flow_error_set(error, EINVAL,
1327                                            RTE_FLOW_ERROR_TYPE_ITEM,
1328                                            item,
1329                                            "Invalid PFCP item");
1330                                 return false;
1331                         }
1332                         if (pfcp_spec && pfcp_mask) {
1333                                 /* Check pfcp mask and update input set */
1334                                 if (pfcp_mask->msg_type ||
1335                                         pfcp_mask->msg_len ||
1336                                         pfcp_mask->seid) {
1337                                         rte_flow_error_set(error, EINVAL,
1338                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1339                                                 item,
1340                                                 "Invalid pfcp mask");
1341                                         return false;
1342                                 }
1343                                 if (pfcp_mask->s_field &&
1344                                         pfcp_spec->s_field == 0x01 &&
1345                                         ipv6_valid)
1346                                         *tun_type =
1347                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1348                                 else if (pfcp_mask->s_field &&
1349                                         pfcp_spec->s_field == 0x01)
1350                                         *tun_type =
1351                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1352                                 else if (pfcp_mask->s_field &&
1353                                         !pfcp_spec->s_field &&
1354                                         ipv6_valid)
1355                                         *tun_type =
1356                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1357                                 else if (pfcp_mask->s_field &&
1358                                         !pfcp_spec->s_field)
1359                                         *tun_type =
1360                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1361                                 else
1362                                         return false;
1363                         }
1364                         break;
1365
1366                 case RTE_FLOW_ITEM_TYPE_GTPU:
1367                         gtp_spec = item->spec;
1368                         gtp_mask = item->mask;
1369                         if (gtp_spec && !gtp_mask) {
1370                                 rte_flow_error_set(error, EINVAL,
1371                                         RTE_FLOW_ERROR_TYPE_ITEM,
1372                                         item,
1373                                         "Invalid GTP item");
1374                                 return false;
1375                         }
1376                         if (gtp_spec && gtp_mask) {
1377                                 if (gtp_mask->v_pt_rsv_flags ||
1378                                     gtp_mask->msg_type ||
1379                                     gtp_mask->msg_len) {
1380                                         rte_flow_error_set(error, EINVAL,
1381                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1382                                                 item,
1383                                                 "Invalid GTP mask");
1384                                         return false;
1385                                 }
1386                                 input = &outer_input_set;
1387                                 if (gtp_mask->teid)
1388                                         *input |= ICE_INSET_GTPU_TEID;
1389                                 list[t].type = ICE_GTP;
1390                                 list[t].h_u.gtp_hdr.teid =
1391                                         gtp_spec->teid;
1392                                 list[t].m_u.gtp_hdr.teid =
1393                                         gtp_mask->teid;
1394                                 input_set_byte += 4;
1395                                 t++;
1396                         }
1397                         tunnel_valid = 1;
1398                         gtpu_valid = 1;
1399                         break;
1400
1401                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1402                         gtp_psc_spec = item->spec;
1403                         gtp_psc_mask = item->mask;
1404                         if (gtp_psc_spec && !gtp_psc_mask) {
1405                                 rte_flow_error_set(error, EINVAL,
1406                                         RTE_FLOW_ERROR_TYPE_ITEM,
1407                                         item,
1408                                         "Invalid GTPU_EH item");
1409                                 return false;
1410                         }
1411                         if (gtp_psc_spec && gtp_psc_mask) {
1412                                 if (gtp_psc_mask->hdr.type) {
1413                                         rte_flow_error_set(error, EINVAL,
1414                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1415                                                 item,
1416                                                 "Invalid GTPU_EH mask");
1417                                         return false;
1418                                 }
1419                                 input = &outer_input_set;
1420                                 if (gtp_psc_mask->hdr.qfi)
1421                                         *input |= ICE_INSET_GTPU_QFI;
1422                                 list[t].type = ICE_GTP;
1423                                 list[t].h_u.gtp_hdr.qfi =
1424                                         gtp_psc_spec->hdr.qfi;
1425                                 list[t].m_u.gtp_hdr.qfi =
1426                                         gtp_psc_mask->hdr.qfi;
1427                                 input_set_byte += 1;
1428                                 t++;
1429                         }
1430                         gtpu_psc_valid = 1;
1431                         break;
1432
1433                 case RTE_FLOW_ITEM_TYPE_VOID:
1434                         break;
1435
1436                 default:
1437                         rte_flow_error_set(error, EINVAL,
1438                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1439                                    "Invalid pattern item.");
1440                         return false;
1441                 }
1442         }
1443
1444         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1445             inner_vlan_valid && outer_vlan_valid)
1446                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1447         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1448                  inner_vlan_valid && outer_vlan_valid)
1449                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1450         else if (*tun_type == ICE_NON_TUN &&
1451                  inner_vlan_valid && outer_vlan_valid)
1452                 *tun_type = ICE_NON_TUN_QINQ;
1453         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1454                  inner_vlan_valid && outer_vlan_valid)
1455                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1456
1457         if (pppoe_patt_valid && !pppoe_prot_valid) {
1458                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1459                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1460                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1461                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1462                 else if (inner_vlan_valid && outer_vlan_valid)
1463                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1464                 else if (ipv6_valid && udp_valid)
1465                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1466                 else if (ipv6_valid && tcp_valid)
1467                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1468                 else if (ipv4_valid && udp_valid)
1469                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1470                 else if (ipv4_valid && tcp_valid)
1471                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1472                 else if (ipv6_valid)
1473                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1474                 else if (ipv4_valid)
1475                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1476                 else
1477                         *tun_type = ICE_SW_TUN_PPPOE;
1478         }
1479
1480         if (gtpu_valid && gtpu_psc_valid) {
1481                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1482                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1483                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1484                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1485                 else if (ipv4_valid && inner_ipv4_valid)
1486                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1487                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1488                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1489                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1490                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1491                 else if (ipv4_valid && inner_ipv6_valid)
1492                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1493                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1494                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1495                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1496                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1497                 else if (ipv6_valid && inner_ipv4_valid)
1498                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1499                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1500                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1501                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1502                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1503                 else if (ipv6_valid && inner_ipv6_valid)
1504                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1505                 else if (ipv4_valid)
1506                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1507                 else if (ipv6_valid)
1508                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1509         } else if (gtpu_valid) {
1510                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1511                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1512                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1513                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1514                 else if (ipv4_valid && inner_ipv4_valid)
1515                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1516                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1517                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1518                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1519                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1520                 else if (ipv4_valid && inner_ipv6_valid)
1521                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1522                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1523                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1524                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1525                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1526                 else if (ipv6_valid && inner_ipv4_valid)
1527                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1528                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1529                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1530                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1531                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1532                 else if (ipv6_valid && inner_ipv6_valid)
1533                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1534                 else if (ipv4_valid)
1535                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1536                 else if (ipv6_valid)
1537                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1538         }
1539
1540         if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1541             *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1542                 for (k = 0; k < t; k++) {
1543                         if (list[k].type == ICE_GTP)
1544                                 list[k].type = ICE_GTP_NO_PAY;
1545                 }
1546         }
1547
1548         if (*tun_type == ICE_NON_TUN) {
1549                 if (vxlan_valid)
1550                         *tun_type = ICE_SW_TUN_VXLAN;
1551                 else if (nvgre_valid)
1552                         *tun_type = ICE_SW_TUN_NVGRE;
1553                 else if (ipv4_valid && tcp_valid)
1554                         *tun_type = ICE_SW_IPV4_TCP;
1555                 else if (ipv4_valid && udp_valid)
1556                         *tun_type = ICE_SW_IPV4_UDP;
1557                 else if (ipv6_valid && tcp_valid)
1558                         *tun_type = ICE_SW_IPV6_TCP;
1559                 else if (ipv6_valid && udp_valid)
1560                         *tun_type = ICE_SW_IPV6_UDP;
1561         }
1562
1563         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1564                 rte_flow_error_set(error, EINVAL,
1565                         RTE_FLOW_ERROR_TYPE_ITEM,
1566                         item,
1567                         "too much input set");
1568                 return false;
1569         }
1570
1571         *lkups_num = t;
1572
1573 inset_check:
1574         if ((!outer_input_set && !inner_input_set &&
1575             !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1576             ~pattern_match_item->input_set_mask_o) ||
1577             (inner_input_set & ~pattern_match_item->input_set_mask_i))
1578                 return false;
1579
1580         return true;
1581 }
1582
1583 static int
1584 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1585                             const struct rte_flow_action *actions,
1586                             uint32_t priority,
1587                             struct rte_flow_error *error,
1588                             struct ice_adv_rule_info *rule_info)
1589 {
1590         const struct rte_flow_action_vf *act_vf;
1591         const struct rte_flow_action *action;
1592         enum rte_flow_action_type action_type;
1593
1594         for (action = actions; action->type !=
1595                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1596                 action_type = action->type;
1597                 switch (action_type) {
1598                 case RTE_FLOW_ACTION_TYPE_VF:
1599                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1600                         act_vf = action->conf;
1601
1602                         if (act_vf->id >= ad->real_hw.num_vfs &&
1603                                 !act_vf->original) {
1604                                 rte_flow_error_set(error,
1605                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1606                                         actions,
1607                                         "Invalid vf id");
1608                                 return -rte_errno;
1609                         }
1610
1611                         if (act_vf->original)
1612                                 rule_info->sw_act.vsi_handle =
1613                                         ad->real_hw.avf.bus.func;
1614                         else
1615                                 rule_info->sw_act.vsi_handle = act_vf->id;
1616                         break;
1617
1618                 case RTE_FLOW_ACTION_TYPE_DROP:
1619                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1620                         break;
1621
1622                 default:
1623                         rte_flow_error_set(error,
1624                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1625                                            actions,
1626                                            "Invalid action type");
1627                         return -rte_errno;
1628                 }
1629         }
1630
1631         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1632         rule_info->sw_act.flag = ICE_FLTR_RX;
1633         rule_info->rx = 1;
1634         /* 0 denotes lowest priority of recipe and highest priority
1635          * of rte_flow. Change rte_flow priority into recipe priority.
1636          */
1637         rule_info->priority = ICE_SW_PRI_BASE - priority;
1638
1639         return 0;
1640 }
1641
/*
 * Translate the rte_flow action list into a switch rule action for the
 * PF (non-DCF) path: forward to a single queue, forward to a queue
 * group (expressed through the RSS action), or drop.  Exactly one
 * terminal action is expected; ice_switch_check_action() enforces that
 * before this is called.
 *
 * Returns 0 on success, -rte_errno (with @error filled in) on failure.
 */
static int
ice_switch_parse_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		uint32_t priority,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	/* The only queue-group sizes the switch rule supports. */
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		 2, 4, 8, 16, 32, 64, 128};

	/* Queue ids programmed into the rule are absolute (device-wide),
	 * so offset the rte_flow-relative index by the VSI's base queue.
	 */
	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* RSS action here denotes a queue group, not a
			 * hash configuration: queues must form one of the
			 * valid group sizes and be contiguous.
			 */
			act_qgrop = action->conf;
			if (act_qgrop->queue_num <= 1)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_QGRP;
			/* The group is addressed by its first queue. */
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
					break;
			}
			/* No valid size matched the requested group. */
			if (i == MAX_QGRP_NUM_TYPE)
				goto error;
			/* Group must fit inside the configured Rx queues. */
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev_data->nb_rx_queues)
				goto error1;
			/* Queues must be strictly consecutive. */
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
					goto error2;
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			if (act_q->index >= dev_data->nb_rx_queues)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
				ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			goto error;
		}
	}

	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	/* 0 denotes lowest priority of recipe and highest priority
	 * of rte_flow. Change rte_flow priority into recipe priority.
	 */
	rule_info->priority = ICE_SW_PRI_BASE - priority;

	return 0;

error:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid action type or queue number");
	return -rte_errno;

error1:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid queue region indexes");
	return -rte_errno;

error2:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Discontinuous queue region");
	return -rte_errno;
}
1744
1745 static int
1746 ice_switch_check_action(const struct rte_flow_action *actions,
1747                             struct rte_flow_error *error)
1748 {
1749         const struct rte_flow_action *action;
1750         enum rte_flow_action_type action_type;
1751         uint16_t actions_num = 0;
1752
1753         for (action = actions; action->type !=
1754                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1755                 action_type = action->type;
1756                 switch (action_type) {
1757                 case RTE_FLOW_ACTION_TYPE_VF:
1758                 case RTE_FLOW_ACTION_TYPE_RSS:
1759                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1760                 case RTE_FLOW_ACTION_TYPE_DROP:
1761                         actions_num++;
1762                         break;
1763                 case RTE_FLOW_ACTION_TYPE_VOID:
1764                         continue;
1765                 default:
1766                         rte_flow_error_set(error,
1767                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1768                                            actions,
1769                                            "Invalid action type");
1770                         return -rte_errno;
1771                 }
1772         }
1773
1774         if (actions_num != 1) {
1775                 rte_flow_error_set(error,
1776                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1777                                    actions,
1778                                    "Invalid action number");
1779                 return -rte_errno;
1780         }
1781
1782         return 0;
1783 }
1784
/*
 * Parser entry point for the switch engine: validate @pattern against
 * the supported-pattern @array, build the lookup list and rule info,
 * and (when @meta is non-NULL) hand ownership of the parsed result to
 * the caller via a freshly allocated struct sw_meta.
 *
 * On success with @meta set, *meta owns `list` and `sw_meta_ptr`; the
 * caller is responsible for freeing them.  On any failure all local
 * allocations are released and -rte_errno is returned.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Pre-scan the pattern to size the lookup list and detect
	 * fully-masked ether type / QinQ, which steer the tunnel type.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* Exact-match on ether type: rule must hit both
			 * tunnel and non-tunnel packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN layers means a QinQ variant of the tunnel type. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Fill `list`/`lkups_num` and refine `tun_type` from the items. */
	if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
				   &tun_type, pattern_match_item)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF and PF have different action semantics (VF forward vs
	 * queue/queue-group forward).
	 */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
						  error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, priority, error,
					      &rule_info);

	if (ret)
		goto error;

	if (meta) {
		/* Transfer ownership of list/sw_meta_ptr to the caller. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		/* Validation-only call: nothing to keep. */
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1909
1910 static int
1911 ice_switch_query(struct ice_adapter *ad __rte_unused,
1912                 struct rte_flow *flow __rte_unused,
1913                 struct rte_flow_query_count *count __rte_unused,
1914                 struct rte_flow_error *error)
1915 {
1916         rte_flow_error_set(error, EINVAL,
1917                 RTE_FLOW_ERROR_TYPE_HANDLE,
1918                 NULL,
1919                 "count action not supported by switch filter");
1920
1921         return -rte_errno;
1922 }
1923
/*
 * Re-target an installed switch rule at a VSI whose hardware VSI number
 * changed (e.g. after a VF reset): remove the old rule and replay it
 * against the new VSI number.
 *
 * The flow's fltr_status records how far a previous redirect got, so a
 * failed remove or add can be retried from the right step:
 *   ICE_SW_FLTR_ADDED                 - normal case: remove then re-add.
 *   ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT - earlier remove failed: restore
 *                                       the VSI context and retry both.
 *   ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT - earlier add failed: only replay.
 *
 * Returns 0 on success (or when the rule is not affected), negative
 * errno otherwise.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata;
	struct ice_switch_filter_conf *filter_conf_ptr =
		(struct ice_switch_filter_conf *)flow->rule;
	struct ice_rule_query_data added_rdata = { 0 };
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_ref = NULL;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	rdata = &filter_conf_ptr->sw_query_data;

	/* Rule targets a different VSI: nothing to redirect. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	switch (filter_conf_ptr->fltr_status) {
	case ICE_SW_FLTR_ADDED:
		/* Locate the live rule in shared code's filter list and
		 * duplicate its lookups, since removing the rule frees
		 * the originals before we replay them.
		 */
		list_head = &sw->recp_list[rdata->rid].filt_rules;
		LIST_FOR_EACH_ENTRY(list_itr, list_head,
				    ice_adv_fltr_mgmt_list_entry,
				    list_entry) {
			rinfo = list_itr->rule_info;
			if ((rinfo.fltr_rule_id == rdata->rule_id &&
			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
			    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
			    (rinfo.fltr_rule_id == rdata->rule_id &&
			    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
				lkups_cnt = list_itr->lkups_cnt;

				lkups_dp = (struct ice_adv_lkup_elem *)
					ice_memdup(hw, list_itr->lkups,
						   sizeof(*list_itr->lkups) *
						   lkups_cnt,
						   ICE_NONDMA_TO_NONDMA);
				if (!lkups_dp) {
					PMD_DRV_LOG(ERR,
						    "Failed to allocate memory.");
					return -EINVAL;
				}
				lkups_ref = lkups_dp;

				/* Collapse a VSI-list action to a plain
				 * VSI forward at the redirect target.
				 */
				if (rinfo.sw_act.fltr_act ==
				    ICE_FWD_TO_VSI_LIST) {
					rinfo.sw_act.vsi_handle =
						rd->vsi_handle;
					rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
				}
				break;
			}
		}

		/* Rule id not found in the recipe's filter list. */
		if (!lkups_ref)
			return -EINVAL;

		goto rmv_rule;
	case ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT:
		/* Recover VSI context */
		hw->vsi_ctx[rd->vsi_handle]->vsi_num = filter_conf_ptr->vsi_num;
		rinfo = filter_conf_ptr->rule_info;
		lkups_cnt = filter_conf_ptr->lkups_num;
		lkups_ref = filter_conf_ptr->lkups;

		if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
			rinfo.sw_act.vsi_handle = rd->vsi_handle;
			rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
		}

		goto rmv_rule;
	case ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT:
		/* Old rule is already gone; just replay the add. */
		rinfo = filter_conf_ptr->rule_info;
		lkups_cnt = filter_conf_ptr->lkups_num;
		lkups_ref = filter_conf_ptr->lkups;

		goto add_rule;
	default:
		return -EINVAL;
	}

rmv_rule:
	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		/* Remember the failure so a later retry restores the
		 * VSI context first.
		 */
		filter_conf_ptr->fltr_status =
			ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT;
		ret = -EINVAL;
		goto out;
	}

add_rule:
	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_ref, lkups_cnt,
			       &rinfo, &added_rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		filter_conf_ptr->fltr_status =
			ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT;
		ret = -EINVAL;
	} else {
		filter_conf_ptr->sw_query_data = added_rdata;
		/* Save VSI number for failure recover */
		filter_conf_ptr->vsi_num = rd->new_vsi_num;
		filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
	}

out:
	/* Only the duplicated lookup array is ours to free; in the
	 * recovery paths lkups_ref points at filter_conf_ptr->lkups.
	 */
	ice_free(hw, lkups_dp);
	return ret;
}
2053
2054 static int
2055 ice_switch_init(struct ice_adapter *ad)
2056 {
2057         int ret = 0;
2058         struct ice_flow_parser *dist_parser;
2059         struct ice_flow_parser *perm_parser;
2060
2061         if (ad->devargs.pipe_mode_support) {
2062                 perm_parser = &ice_switch_perm_parser;
2063                 ret = ice_register_parser(perm_parser, ad);
2064         } else {
2065                 dist_parser = &ice_switch_dist_parser;
2066                 ret = ice_register_parser(dist_parser, ad);
2067         }
2068         return ret;
2069 }
2070
2071 static void
2072 ice_switch_uninit(struct ice_adapter *ad)
2073 {
2074         struct ice_flow_parser *dist_parser;
2075         struct ice_flow_parser *perm_parser;
2076
2077         if (ad->devargs.pipe_mode_support) {
2078                 perm_parser = &ice_switch_perm_parser;
2079                 ice_unregister_parser(perm_parser, ad);
2080         } else {
2081                 dist_parser = &ice_switch_dist_parser;
2082                 ice_unregister_parser(dist_parser, ad);
2083         }
2084 }
2085
/* Switch filter engine descriptor, registered with the generic flow
 * framework at constructor time (RTE_INIT below).
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
2097
/* Parser used in the default (non-pipe) mode: switch rules act at the
 * distributor stage.
 */
static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
2106
/* Parser used when the pipe-mode devarg is set: switch rules act at the
 * permission stage and use the permission pattern list.
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
2115
2116 RTE_INIT(ice_sw_engine_init)
2117 {
2118         struct ice_flow_engine *engine = &ice_switch_engine;
2119         ice_register_flow_engine(engine);
2120 }