net/ice: fix variable initialization
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26
27
/* NOTE(review): not referenced in this chunk; presumably bounds the number
 * of queue-group types used elsewhere in the driver — confirm at use site.
 */
28 #define MAX_QGRP_NUM_TYPE 7
29
/* Input-set masks for non-tunneled patterns: which L2/L3/L4 fields a flow
 * item of the given pattern is allowed to match on.
 */
30 #define ICE_SW_INSET_ETHER ( \
31         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33                 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
34                 ICE_INSET_VLAN_OUTER)
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49         ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel (VXLAN/NVGRE) input sets for the "dist" tables: inner-frame
 * (ICE_INSET_TUN_*) fields plus the outer IPv4 destination and the tunnel
 * identifier (VNI/TNI).
 */
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Tunnel input sets for the "perm" table: inner fields only — no outer
 * IPv4 destination and no VNI/TNI, and the same mask is shared by VXLAN
 * and NVGRE patterns (see ice_switch_pattern_perm[]).
 */
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82         ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86         ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90         ICE_INSET_TUN_IPV4_TOS)
/* PPPoE discovery/session patterns, with and without the PPP protocol id. */
91 #define ICE_SW_INSET_MAC_PPPOE  ( \
92         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
95         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97         ICE_INSET_PPPOE_PROTO)
/* L3 base sets extended with an L4/tunneling protocol key:
 * ESP/AH SPI, L2TPv3 session id, PFCP S-flag + SEID.
 */
98 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
99         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
100 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
101         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
102 #define ICE_SW_INSET_MAC_IPV4_AH ( \
103         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
104 #define ICE_SW_INSET_MAC_IPV6_AH ( \
105         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
106 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
107         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
108 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
109         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
110 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
111         ICE_SW_INSET_MAC_IPV4 | \
112         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
113 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
114         ICE_SW_INSET_MAC_IPV6 | \
115         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
116
/* Parsed switch rule handed to ice_switch_create() as its opaque "meta"
 * argument. Ownership: both @list and the sw_meta allocation are freed by
 * ice_switch_create() on success and on every error path.
 */
117 struct sw_meta {
118         struct ice_adv_lkup_elem *list; /* lookup words for the adv rule */
119         uint16_t lkups_num; /* number of valid entries in @list */
120         struct ice_adv_rule_info rule_info; /* rule attributes/actions */
121 };
122
/* Forward declarations; the parser definitions and their registration are
 * not part of this chunk.
 */
123 static struct ice_flow_parser ice_switch_dist_parser_os;
124 static struct ice_flow_parser ice_switch_dist_parser_comms;
125 static struct ice_flow_parser ice_switch_perm_parser;
126
127 static struct
128 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
129         {pattern_ethertype,
130                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
131         {pattern_ethertype_vlan,
132                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
133         {pattern_eth_ipv4,
134                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
135         {pattern_eth_ipv4_udp,
136                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
137         {pattern_eth_ipv4_tcp,
138                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
139         {pattern_eth_ipv6,
140                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
141         {pattern_eth_ipv6_udp,
142                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
143         {pattern_eth_ipv6_tcp,
144                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
145         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
146                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
147         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
148                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
149         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
150                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
151         {pattern_eth_ipv4_nvgre_eth_ipv4,
152                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
153         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
154                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
155         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
156                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
157         {pattern_eth_pppoed,
158                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
159         {pattern_eth_vlan_pppoed,
160                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
161         {pattern_eth_pppoes,
162                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
163         {pattern_eth_vlan_pppoes,
164                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
165         {pattern_eth_pppoes_proto,
166                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
167         {pattern_eth_vlan_pppoes_proto,
168                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
169         {pattern_eth_ipv4_esp,
170                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
171         {pattern_eth_ipv4_udp_esp,
172                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
173         {pattern_eth_ipv6_esp,
174                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
175         {pattern_eth_ipv6_udp_esp,
176                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
177         {pattern_eth_ipv4_ah,
178                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
179         {pattern_eth_ipv6_ah,
180                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
181         {pattern_eth_ipv6_udp_ah,
182                         ICE_INSET_NONE, ICE_INSET_NONE},
183         {pattern_eth_ipv4_l2tp,
184                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
185         {pattern_eth_ipv6_l2tp,
186                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
187         {pattern_eth_ipv4_pfcp,
188                         ICE_INSET_NONE, ICE_INSET_NONE},
189         {pattern_eth_ipv6_pfcp,
190                         ICE_INSET_NONE, ICE_INSET_NONE},
191 };
192
193 static struct
194 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
195         {pattern_ethertype,
196                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
197         {pattern_ethertype_vlan,
198                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
199         {pattern_eth_arp,
200                         ICE_INSET_NONE, ICE_INSET_NONE},
201         {pattern_eth_ipv4,
202                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
203         {pattern_eth_ipv4_udp,
204                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
205         {pattern_eth_ipv4_tcp,
206                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
207         {pattern_eth_ipv6,
208                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
209         {pattern_eth_ipv6_udp,
210                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
211         {pattern_eth_ipv6_tcp,
212                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
213         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
214                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
215         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
216                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
217         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
218                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
219         {pattern_eth_ipv4_nvgre_eth_ipv4,
220                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
221         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
222                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
223         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
224                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
225 };
226
227 static struct
228 ice_pattern_match_item ice_switch_pattern_perm[] = {
229         {pattern_ethertype,
230                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
231         {pattern_ethertype_vlan,
232                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
233         {pattern_eth_ipv4,
234                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
235         {pattern_eth_ipv4_udp,
236                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
237         {pattern_eth_ipv4_tcp,
238                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
239         {pattern_eth_ipv6,
240                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
241         {pattern_eth_ipv6_udp,
242                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
243         {pattern_eth_ipv6_tcp,
244                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
245         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
246                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
247         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
248                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
249         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
250                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
251         {pattern_eth_ipv4_nvgre_eth_ipv4,
252                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
253         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
254                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
255         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
256                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
257         {pattern_eth_pppoed,
258                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
259         {pattern_eth_vlan_pppoed,
260                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
261         {pattern_eth_pppoes,
262                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
263         {pattern_eth_vlan_pppoes,
264                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
265         {pattern_eth_pppoes_proto,
266                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
267         {pattern_eth_vlan_pppoes_proto,
268                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
269         {pattern_eth_ipv4_esp,
270                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
271         {pattern_eth_ipv4_udp_esp,
272                         ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
273         {pattern_eth_ipv6_esp,
274                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
275         {pattern_eth_ipv6_udp_esp,
276                         ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
277         {pattern_eth_ipv4_ah,
278                         ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
279         {pattern_eth_ipv6_ah,
280                         ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
281         {pattern_eth_ipv6_udp_ah,
282                         ICE_INSET_NONE, ICE_INSET_NONE},
283         {pattern_eth_ipv4_l2tp,
284                         ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
285         {pattern_eth_ipv6_l2tp,
286                         ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
287         {pattern_eth_ipv4_pfcp,
288                         ICE_INSET_NONE, ICE_INSET_NONE},
289         {pattern_eth_ipv6_pfcp,
290                         ICE_INSET_NONE, ICE_INSET_NONE},
291 };
292
293 static int
294 ice_switch_create(struct ice_adapter *ad,
295                 struct rte_flow *flow,
296                 void *meta,
297                 struct rte_flow_error *error)
298 {
299         int ret = 0;
300         struct ice_pf *pf = &ad->pf;
301         struct ice_hw *hw = ICE_PF_TO_HW(pf);
302         struct ice_rule_query_data rule_added = {0};
303         struct ice_rule_query_data *filter_ptr;
304         struct ice_adv_lkup_elem *list =
305                 ((struct sw_meta *)meta)->list;
306         uint16_t lkups_cnt =
307                 ((struct sw_meta *)meta)->lkups_num;
308         struct ice_adv_rule_info *rule_info =
309                 &((struct sw_meta *)meta)->rule_info;
310
311         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
312                 rte_flow_error_set(error, EINVAL,
313                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
314                         "item number too large for rule");
315                 goto error;
316         }
317         if (!list) {
318                 rte_flow_error_set(error, EINVAL,
319                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
320                         "lookup list should not be NULL");
321                 goto error;
322         }
323         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
324         if (!ret) {
325                 filter_ptr = rte_zmalloc("ice_switch_filter",
326                         sizeof(struct ice_rule_query_data), 0);
327                 if (!filter_ptr) {
328                         rte_flow_error_set(error, EINVAL,
329                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
330                                    "No memory for ice_switch_filter");
331                         goto error;
332                 }
333                 flow->rule = filter_ptr;
334                 rte_memcpy(filter_ptr,
335                         &rule_added,
336                         sizeof(struct ice_rule_query_data));
337         } else {
338                 rte_flow_error_set(error, EINVAL,
339                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
340                         "switch filter create flow fail");
341                 goto error;
342         }
343
344         rte_free(list);
345         rte_free(meta);
346         return 0;
347
348 error:
349         rte_free(list);
350         rte_free(meta);
351
352         return -rte_errno;
353 }
354
355 static int
356 ice_switch_destroy(struct ice_adapter *ad,
357                 struct rte_flow *flow,
358                 struct rte_flow_error *error)
359 {
360         struct ice_hw *hw = &ad->hw;
361         int ret;
362         struct ice_rule_query_data *filter_ptr;
363
364         filter_ptr = (struct ice_rule_query_data *)
365                 flow->rule;
366
367         if (!filter_ptr) {
368                 rte_flow_error_set(error, EINVAL,
369                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
370                         "no such flow"
371                         " create by switch filter");
372                 return -rte_errno;
373         }
374
375         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
376         if (ret) {
377                 rte_flow_error_set(error, EINVAL,
378                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
379                         "fail to destroy switch filter rule");
380                 return -rte_errno;
381         }
382
383         rte_free(filter_ptr);
384         return ret;
385 }
386
387 static void
388 ice_switch_filter_rule_free(struct rte_flow *flow)
389 {
390         rte_free(flow->rule);
391 }
392
393 static uint64_t
394 ice_switch_inset_get(const struct rte_flow_item pattern[],
395                 struct rte_flow_error *error,
396                 struct ice_adv_lkup_elem *list,
397                 uint16_t *lkups_num,
398                 enum ice_sw_tunnel_type *tun_type)
399 {
400         const struct rte_flow_item *item = pattern;
401         enum rte_flow_item_type item_type;
402         const struct rte_flow_item_eth *eth_spec, *eth_mask;
403         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
404         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
405         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
406         const struct rte_flow_item_udp *udp_spec, *udp_mask;
407         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
408         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
409         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
410         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
411         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
412         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
413                                 *pppoe_proto_mask;
414         const struct rte_flow_item_esp *esp_spec, *esp_mask;
415         const struct rte_flow_item_ah *ah_spec, *ah_mask;
416         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
417         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
418         uint64_t input_set = ICE_INSET_NONE;
419         uint16_t j, t = 0;
420         bool profile_rule = 0;
421         bool tunnel_valid = 0;
422         bool pppoe_valid = 0;
423         bool ipv6_valiad = 0;
424         bool ipv4_valiad = 0;
425         bool udp_valiad = 0;
426
427         for (item = pattern; item->type !=
428                         RTE_FLOW_ITEM_TYPE_END; item++) {
429                 if (item->last) {
430                         rte_flow_error_set(error, EINVAL,
431                                         RTE_FLOW_ERROR_TYPE_ITEM,
432                                         item,
433                                         "Not support range");
434                         return 0;
435                 }
436                 item_type = item->type;
437
438                 switch (item_type) {
439                 case RTE_FLOW_ITEM_TYPE_ETH:
440                         eth_spec = item->spec;
441                         eth_mask = item->mask;
442                         if (eth_spec && eth_mask) {
443                                 const uint8_t *a = eth_mask->src.addr_bytes;
444                                 const uint8_t *b = eth_mask->dst.addr_bytes;
445                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
446                                         if (a[j] && tunnel_valid) {
447                                                 input_set |=
448                                                         ICE_INSET_TUN_SMAC;
449                                                 break;
450                                         } else if (a[j]) {
451                                                 input_set |=
452                                                         ICE_INSET_SMAC;
453                                                 break;
454                                         }
455                                 }
456                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
457                                         if (b[j] && tunnel_valid) {
458                                                 input_set |=
459                                                         ICE_INSET_TUN_DMAC;
460                                                 break;
461                                         } else if (b[j]) {
462                                                 input_set |=
463                                                         ICE_INSET_DMAC;
464                                                 break;
465                                         }
466                                 }
467                                 if (eth_mask->type)
468                                         input_set |= ICE_INSET_ETHERTYPE;
469                                 list[t].type = (tunnel_valid  == 0) ?
470                                         ICE_MAC_OFOS : ICE_MAC_IL;
471                                 struct ice_ether_hdr *h;
472                                 struct ice_ether_hdr *m;
473                                 uint16_t i = 0;
474                                 h = &list[t].h_u.eth_hdr;
475                                 m = &list[t].m_u.eth_hdr;
476                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
477                                         if (eth_mask->src.addr_bytes[j]) {
478                                                 h->src_addr[j] =
479                                                 eth_spec->src.addr_bytes[j];
480                                                 m->src_addr[j] =
481                                                 eth_mask->src.addr_bytes[j];
482                                                 i = 1;
483                                         }
484                                         if (eth_mask->dst.addr_bytes[j]) {
485                                                 h->dst_addr[j] =
486                                                 eth_spec->dst.addr_bytes[j];
487                                                 m->dst_addr[j] =
488                                                 eth_mask->dst.addr_bytes[j];
489                                                 i = 1;
490                                         }
491                                 }
492                                 if (i)
493                                         t++;
494                                 if (eth_mask->type) {
495                                         list[t].type = ICE_ETYPE_OL;
496                                         list[t].h_u.ethertype.ethtype_id =
497                                                 eth_spec->type;
498                                         list[t].m_u.ethertype.ethtype_id =
499                                                 eth_mask->type;
500                                         t++;
501                                 }
502                         }
503                         break;
504
505                 case RTE_FLOW_ITEM_TYPE_IPV4:
506                         ipv4_spec = item->spec;
507                         ipv4_mask = item->mask;
508                         ipv4_valiad = 1;
509                         if (ipv4_spec && ipv4_mask) {
510                                 /* Check IPv4 mask and update input set */
511                                 if (ipv4_mask->hdr.version_ihl ||
512                                         ipv4_mask->hdr.total_length ||
513                                         ipv4_mask->hdr.packet_id ||
514                                         ipv4_mask->hdr.hdr_checksum) {
515                                         rte_flow_error_set(error, EINVAL,
516                                                    RTE_FLOW_ERROR_TYPE_ITEM,
517                                                    item,
518                                                    "Invalid IPv4 mask.");
519                                         return 0;
520                                 }
521
522                                 if (tunnel_valid) {
523                                         if (ipv4_mask->hdr.type_of_service)
524                                                 input_set |=
525                                                         ICE_INSET_TUN_IPV4_TOS;
526                                         if (ipv4_mask->hdr.src_addr)
527                                                 input_set |=
528                                                         ICE_INSET_TUN_IPV4_SRC;
529                                         if (ipv4_mask->hdr.dst_addr)
530                                                 input_set |=
531                                                         ICE_INSET_TUN_IPV4_DST;
532                                         if (ipv4_mask->hdr.time_to_live)
533                                                 input_set |=
534                                                         ICE_INSET_TUN_IPV4_TTL;
535                                         if (ipv4_mask->hdr.next_proto_id)
536                                                 input_set |=
537                                                 ICE_INSET_TUN_IPV4_PROTO;
538                                 } else {
539                                         if (ipv4_mask->hdr.src_addr)
540                                                 input_set |= ICE_INSET_IPV4_SRC;
541                                         if (ipv4_mask->hdr.dst_addr)
542                                                 input_set |= ICE_INSET_IPV4_DST;
543                                         if (ipv4_mask->hdr.time_to_live)
544                                                 input_set |= ICE_INSET_IPV4_TTL;
545                                         if (ipv4_mask->hdr.next_proto_id)
546                                                 input_set |=
547                                                 ICE_INSET_IPV4_PROTO;
548                                         if (ipv4_mask->hdr.type_of_service)
549                                                 input_set |=
550                                                         ICE_INSET_IPV4_TOS;
551                                 }
552                                 list[t].type = (tunnel_valid  == 0) ?
553                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
554                                 if (ipv4_mask->hdr.src_addr) {
555                                         list[t].h_u.ipv4_hdr.src_addr =
556                                                 ipv4_spec->hdr.src_addr;
557                                         list[t].m_u.ipv4_hdr.src_addr =
558                                                 ipv4_mask->hdr.src_addr;
559                                 }
560                                 if (ipv4_mask->hdr.dst_addr) {
561                                         list[t].h_u.ipv4_hdr.dst_addr =
562                                                 ipv4_spec->hdr.dst_addr;
563                                         list[t].m_u.ipv4_hdr.dst_addr =
564                                                 ipv4_mask->hdr.dst_addr;
565                                 }
566                                 if (ipv4_mask->hdr.time_to_live) {
567                                         list[t].h_u.ipv4_hdr.time_to_live =
568                                                 ipv4_spec->hdr.time_to_live;
569                                         list[t].m_u.ipv4_hdr.time_to_live =
570                                                 ipv4_mask->hdr.time_to_live;
571                                 }
572                                 if (ipv4_mask->hdr.next_proto_id) {
573                                         list[t].h_u.ipv4_hdr.protocol =
574                                                 ipv4_spec->hdr.next_proto_id;
575                                         list[t].m_u.ipv4_hdr.protocol =
576                                                 ipv4_mask->hdr.next_proto_id;
577                                 }
578                                 if (ipv4_mask->hdr.type_of_service) {
579                                         list[t].h_u.ipv4_hdr.tos =
580                                                 ipv4_spec->hdr.type_of_service;
581                                         list[t].m_u.ipv4_hdr.tos =
582                                                 ipv4_mask->hdr.type_of_service;
583                                 }
584                                 t++;
585                         }
586                         break;
587
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			/* Remember an IPv6 header was seen: the ESP/AH/L2TPv3
			 * cases later use this flag to choose the tunnel type.
			 */
			ipv6_valiad = 1;
			if (ipv6_spec && ipv6_mask) {
				/* payload_len cannot be matched by a switch rule */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask");
					return 0;
				}

				/* Any non-zero mask byte enables the whole
				 * src/dst address in the input set;
				 * tunnel_valid selects the inner (TUN_*) vs
				 * outer flag.
				 */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_SRC;
						break;
					} else if (ipv6_mask->hdr.src_addr[j]) {
						input_set |= ICE_INSET_IPV6_SRC;
						break;
					}
				}
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.dst_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_DST;
						break;
					} else if (ipv6_mask->hdr.dst_addr[j]) {
						input_set |= ICE_INSET_IPV6_DST;
						break;
					}
				}
				if (ipv6_mask->hdr.proto &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_NEXT_HDR;
				else if (ipv6_mask->hdr.proto)
					input_set |=
						ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_HOP_LIMIT;
				else if (ipv6_mask->hdr.hop_limits)
					input_set |=
						ICE_INSET_IPV6_HOP_LIMIT;
				/* Traffic class lives inside vtc_flow; the
				 * mask is compared in big-endian form, as
				 * stored in the packet header.
				 */
				if ((ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) &&
					tunnel_valid)
					input_set |=
							ICE_INSET_TUN_IPV6_TC;
				else if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;

				/* Outer-first (OFOS) before any tunnel item
				 * was parsed, inner-last (IL) after.
				 */
				list[t].type = (tunnel_valid  == 0) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				struct ice_ipv6_hdr *f;	/* header values (spec) */
				struct ice_ipv6_hdr *s;	/* match mask */
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				/* Copy only the address bytes that are masked */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
					}
					if (ipv6_mask->hdr.dst_addr[j]) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
					}
				}
				if (ipv6_mask->hdr.proto) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr =
						ipv6_mask->hdr.proto;
				}
				if (ipv6_mask->hdr.hop_limits) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit =
						ipv6_mask->hdr.hop_limits;
				}
				if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) {
					/* Rebuild ver/tc/flow with only the TC
					 * field populated; version and flow
					 * label are zeroed so the rule matches
					 * TC alone.
					 */
					struct ice_le_ver_tc_flow vtf;
					vtf.u.fld.version = 0;
					vtf.u.fld.flow_label = 0;
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_spec->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_mask->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
				}
				t++;
			}
			break;
700
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;
			/* Flag used by ESP/AH cases to detect NAT-T (over UDP) */
			udp_valiad = 1;
			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set:
				 * only the port fields can be matched.
				 */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_UDP_DST_PORT;
				} else {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_UDP_DST_PORT;
				}
				/* Outer UDP of a VXLAN tunnel uses the
				 * dedicated ICE_UDP_OF type; otherwise this is
				 * an inner/last-of-stack UDP header.
				 */
				if (*tun_type == ICE_SW_TUN_VXLAN &&
						tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
				}
				if (udp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
				}
						t++;
			}
			break;
751
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set:
				 * only src/dst ports can be matched, every
				 * other TCP field must be unmasked.
				 */
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid TCP mask");
					return 0;
				}

				/* inner (TUN_*) vs outer input-set flags */
				if (tunnel_valid) {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_TCP_DST_PORT;
				} else {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TCP_DST_PORT;
				}
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
				}
				if (tcp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
				}
				t++;
			}
			break;
802
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set:
				 * the checksum cannot be matched.
				 */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid SCTP mask");
					return 0;
				}

				/* inner (TUN_*) vs outer input-set flags */
				if (tunnel_valid) {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_SCTP_DST_PORT;
				} else {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_SCTP_DST_PORT;
				}
				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
				}
				if (sctp_mask->hdr.dst_port) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
				}
				t++;
			}
			break;
847
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return 0;
			}

			/* From here on, subsequent L3/L4 items are inner
			 * headers (TUN_* input-set flags, *_IL list types).
			 */
			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					/* NOTE(review): VNI bytes are packed as
					 * vni[2]<<16 | vni[1]<<8 | vni[0] —
					 * confirm this matches the byte order
					 * expected by the tnl_hdr.vni field.
					 */
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					input_set |=
						ICE_INSET_TUN_VXLAN_VNI;
				}
				t++;
			}
			break;
884
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return 0;
			}
			/* Subsequent L3/L4 items are inner headers */
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					/* NOTE(review): TNI bytes packed as
					 * tni[2]<<16 | tni[1]<<8 | tni[0],
					 * mirroring the VXLAN VNI case —
					 * confirm against nvgre_hdr layout.
					 */
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					input_set |=
						ICE_INSET_TUN_NVGRE_TNI;
				}
				t++;
			}
			break;
920
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VLAN item");
				return 0;
			}
			if (vlan_spec && vlan_mask) {
				/* Only the outer VLAN (OFOS) is supported */
				list[t].type = ICE_VLAN_OFOS;
				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set |= ICE_INSET_VLAN_OUTER;
				}
				/* inner_type is the EtherType following the tag */
				if (vlan_mask->inner_type) {
					list[t].h_u.vlan_hdr.type =
						vlan_spec->inner_type;
					list[t].m_u.vlan_hdr.type =
						vlan_mask->inner_type;
					input_set |= ICE_INSET_ETHERTYPE;
				}
				t++;
			}
			break;
955
		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return 0;
			}
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set:
				 * only the session id can be matched.
				 */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return 0;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					input_set |= ICE_INSET_PPPOE_SESSION;
				}
				t++;
				/* Lets a following PPPOE_PROTO_ID item merge
				 * into this same list entry (see the t--
				 * in that case).
				 */
				pppoe_valid = 1;
			}
			break;
995
		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
				(pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return 0;
			}
			if (pppoe_proto_spec && pppoe_proto_mask) {
				/* If a PPPoE item already created a list
				 * entry, step back so the proto id is added
				 * to that same ICE_PPPOE entry rather than a
				 * new one.
				 */
				if (pppoe_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					input_set |= ICE_INSET_PPPOE_PROTO;
				}
				t++;
			}
			break;
1026
		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;
			/* spec and mask must be both present or both absent */
			if ((esp_spec && !esp_mask) ||
				(!esp_spec && esp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid esp item");
				return 0;
			}
			/* Check esp mask and update input set:
			 * only the SPI can be matched.
			 */
			if (esp_mask && esp_mask->hdr.seq) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid esp mask");
				return 0;
			}

			/* Protocol-only ESP item with no other match fields:
			 * use a profile-based rule; profile_rule = 1 keeps
			 * the fallback below from overwriting the tun_type.
			 */
			if (!esp_spec && !esp_mask && !input_set) {
				profile_rule = 1;
				if (ipv6_valiad && udp_valiad)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
				else if (ipv4_valiad)
					return 0;
			} else if (esp_spec && esp_mask &&
						esp_mask->hdr.spi){
				/* ESP over UDP is NAT traversal */
				if (udp_valiad)
					list[t].type = ICE_NAT_T;
				else
					list[t].type = ICE_ESP;
				list[t].h_u.esp_hdr.spi =
					esp_spec->hdr.spi;
				list[t].m_u.esp_hdr.spi =
					esp_mask->hdr.spi;
				input_set |= ICE_INSET_ESP_SPI;
				t++;
			}

			if (!profile_rule) {
				if (ipv6_valiad && udp_valiad)
					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
				else if (ipv4_valiad && udp_valiad)
					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_IPV6_ESP;
				else if (ipv4_valiad)
					*tun_type = ICE_SW_TUN_IPV4_ESP;
			}
			break;
1081
		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;
			/* spec and mask must be both present or both absent */
			if ((ah_spec && !ah_mask) ||
				(!ah_spec && ah_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid ah item");
				return 0;
			}
			/* Check ah mask and update input set:
			 * only the SPI can be matched.
			 */
			if (ah_mask &&
				(ah_mask->next_hdr ||
				ah_mask->payload_len ||
				ah_mask->seq_num ||
				ah_mask->reserved)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid ah mask");
				return 0;
			}

			/* Protocol-only AH item: profile-based rule;
			 * profile_rule = 1 protects tun_type from the
			 * fallback below.
			 */
			if (!ah_spec && !ah_mask && !input_set) {
				profile_rule = 1;
				if (ipv6_valiad && udp_valiad)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
				else if (ipv4_valiad)
					return 0;
			} else if (ah_spec && ah_mask &&
						ah_mask->spi){
				list[t].type = ICE_AH;
				list[t].h_u.ah_hdr.spi =
					ah_spec->spi;
				list[t].m_u.ah_hdr.spi =
					ah_mask->spi;
				input_set |= ICE_INSET_AH_SPI;
				t++;
			}

			if (!profile_rule) {
				/* AH over UDP is not supported */
				if (udp_valiad)
					return 0;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_IPV6_AH;
				else if (ipv4_valiad)
					*tun_type = ICE_SW_TUN_IPV4_AH;
			}
			break;
1135
1136                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1137                         l2tp_spec = item->spec;
1138                         l2tp_mask = item->mask;
1139                         if ((l2tp_spec && !l2tp_mask) ||
1140                                 (!l2tp_spec && l2tp_mask)) {
1141                                 rte_flow_error_set(error, EINVAL,
1142                                            RTE_FLOW_ERROR_TYPE_ITEM,
1143                                            item,
1144                                            "Invalid l2tp item");
1145                                 return 0;
1146                         }
1147
1148                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1149                                 if (ipv6_valiad)
1150                                         *tun_type =
1151                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1152                                 else if (ipv4_valiad)
1153                                         return 0;
1154                         } else if (l2tp_spec && l2tp_mask &&
1155                                                 l2tp_mask->session_id){
1156                                 list[t].type = ICE_L2TPV3;
1157                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1158                                         l2tp_spec->session_id;
1159                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1160                                         l2tp_mask->session_id;
1161                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1162                                 t++;
1163                         }
1164
1165                         if (!profile_rule) {
1166                                 if (ipv6_valiad)
1167                                         *tun_type =
1168                                         ICE_SW_TUN_IPV6_L2TPV3;
1169                                 else if (ipv4_valiad)
1170                                         *tun_type =
1171                                         ICE_SW_TUN_IPV4_L2TPV3;
1172                         }
1173                         break;
1174
1175                 case RTE_FLOW_ITEM_TYPE_PFCP:
1176                         pfcp_spec = item->spec;
1177                         pfcp_mask = item->mask;
1178                         /* Check if PFCP item is used to describe protocol.
1179                          * If yes, both spec and mask should be NULL.
1180                          * If no, both spec and mask shouldn't be NULL.
1181                          */
1182                         if ((!pfcp_spec && pfcp_mask) ||
1183                             (pfcp_spec && !pfcp_mask)) {
1184                                 rte_flow_error_set(error, EINVAL,
1185                                            RTE_FLOW_ERROR_TYPE_ITEM,
1186                                            item,
1187                                            "Invalid PFCP item");
1188                                 return -ENOTSUP;
1189                         }
1190                         if (pfcp_spec && pfcp_mask) {
1191                                 /* Check pfcp mask and update input set */
1192                                 if (pfcp_mask->msg_type ||
1193                                         pfcp_mask->msg_len ||
1194                                         pfcp_mask->seid) {
1195                                         rte_flow_error_set(error, EINVAL,
1196                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1197                                                 item,
1198                                                 "Invalid pfcp mask");
1199                                         return -ENOTSUP;
1200                                 }
1201                                 if (pfcp_mask->s_field &&
1202                                         pfcp_spec->s_field == 0x01 &&
1203                                         ipv6_valiad)
1204                                         *tun_type =
1205                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1206                                 else if (pfcp_mask->s_field &&
1207                                         pfcp_spec->s_field == 0x01)
1208                                         *tun_type =
1209                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1210                                 else if (pfcp_mask->s_field &&
1211                                         !pfcp_spec->s_field &&
1212                                         ipv6_valiad)
1213                                         *tun_type =
1214                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1215                                 else if (pfcp_mask->s_field &&
1216                                         !pfcp_spec->s_field)
1217                                         *tun_type =
1218                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1219                                 else
1220                                         return -ENOTSUP;
1221                         }
1222                         break;
1223
1224                 case RTE_FLOW_ITEM_TYPE_VOID:
1225                         break;
1226
1227                 default:
1228                         rte_flow_error_set(error, EINVAL,
1229                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1230                                    "Invalid pattern item.");
1231                         goto out;
1232                 }
1233         }
1234
1235         *lkups_num = t;
1236
1237         return input_set;
1238 out:
1239         return 0;
1240 }
1241
1242 static int
1243 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1244                             struct rte_flow_error *error,
1245                             struct ice_adv_rule_info *rule_info)
1246 {
1247         const struct rte_flow_action_vf *act_vf;
1248         const struct rte_flow_action *action;
1249         enum rte_flow_action_type action_type;
1250
1251         for (action = actions; action->type !=
1252                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1253                 action_type = action->type;
1254                 switch (action_type) {
1255                 case RTE_FLOW_ACTION_TYPE_VF:
1256                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1257                         act_vf = action->conf;
1258                         rule_info->sw_act.vsi_handle = act_vf->id;
1259                         break;
1260                 default:
1261                         rte_flow_error_set(error,
1262                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1263                                            actions,
1264                                            "Invalid action type or queue number");
1265                         return -rte_errno;
1266                 }
1267         }
1268
1269         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1270         rule_info->sw_act.flag = ICE_FLTR_RX;
1271         rule_info->rx = 1;
1272         rule_info->priority = 5;
1273
1274         return 0;
1275 }
1276
1277 static int
1278 ice_switch_parse_action(struct ice_pf *pf,
1279                 const struct rte_flow_action *actions,
1280                 struct rte_flow_error *error,
1281                 struct ice_adv_rule_info *rule_info)
1282 {
1283         struct ice_vsi *vsi = pf->main_vsi;
1284         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1285         const struct rte_flow_action_queue *act_q;
1286         const struct rte_flow_action_rss *act_qgrop;
1287         uint16_t base_queue, i;
1288         const struct rte_flow_action *action;
1289         enum rte_flow_action_type action_type;
1290         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1291                  2, 4, 8, 16, 32, 64, 128};
1292
1293         base_queue = pf->base_queue + vsi->base_queue;
1294         for (action = actions; action->type !=
1295                         RTE_FLOW_ACTION_TYPE_END; action++) {
1296                 action_type = action->type;
1297                 switch (action_type) {
1298                 case RTE_FLOW_ACTION_TYPE_RSS:
1299                         act_qgrop = action->conf;
1300                         if (act_qgrop->queue_num <= 1)
1301                                 goto error;
1302                         rule_info->sw_act.fltr_act =
1303                                 ICE_FWD_TO_QGRP;
1304                         rule_info->sw_act.fwd_id.q_id =
1305                                 base_queue + act_qgrop->queue[0];
1306                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1307                                 if (act_qgrop->queue_num ==
1308                                         valid_qgrop_number[i])
1309                                         break;
1310                         }
1311                         if (i == MAX_QGRP_NUM_TYPE)
1312                                 goto error;
1313                         if ((act_qgrop->queue[0] +
1314                                 act_qgrop->queue_num) >
1315                                 dev->data->nb_rx_queues)
1316                                 goto error;
1317                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1318                                 if (act_qgrop->queue[i + 1] !=
1319                                         act_qgrop->queue[i] + 1)
1320                                         goto error;
1321                         rule_info->sw_act.qgrp_size =
1322                                 act_qgrop->queue_num;
1323                         break;
1324                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1325                         act_q = action->conf;
1326                         if (act_q->index >= dev->data->nb_rx_queues)
1327                                 goto error;
1328                         rule_info->sw_act.fltr_act =
1329                                 ICE_FWD_TO_Q;
1330                         rule_info->sw_act.fwd_id.q_id =
1331                                 base_queue + act_q->index;
1332                         break;
1333
1334                 case RTE_FLOW_ACTION_TYPE_DROP:
1335                         rule_info->sw_act.fltr_act =
1336                                 ICE_DROP_PACKET;
1337                         break;
1338
1339                 case RTE_FLOW_ACTION_TYPE_VOID:
1340                         break;
1341
1342                 default:
1343                         goto error;
1344                 }
1345         }
1346
1347         rule_info->sw_act.vsi_handle = vsi->idx;
1348         rule_info->rx = 1;
1349         rule_info->sw_act.src = vsi->idx;
1350         rule_info->priority = 5;
1351
1352         return 0;
1353
1354 error:
1355         rte_flow_error_set(error,
1356                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1357                 actions,
1358                 "Invalid action type or queue number");
1359         return -rte_errno;
1360 }
1361
1362 static int
1363 ice_switch_check_action(const struct rte_flow_action *actions,
1364                             struct rte_flow_error *error)
1365 {
1366         const struct rte_flow_action *action;
1367         enum rte_flow_action_type action_type;
1368         uint16_t actions_num = 0;
1369
1370         for (action = actions; action->type !=
1371                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1372                 action_type = action->type;
1373                 switch (action_type) {
1374                 case RTE_FLOW_ACTION_TYPE_VF:
1375                 case RTE_FLOW_ACTION_TYPE_RSS:
1376                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1377                 case RTE_FLOW_ACTION_TYPE_DROP:
1378                         actions_num++;
1379                         break;
1380                 case RTE_FLOW_ACTION_TYPE_VOID:
1381                         continue;
1382                 default:
1383                         rte_flow_error_set(error,
1384                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1385                                            actions,
1386                                            "Invalid action type");
1387                         return -rte_errno;
1388                 }
1389         }
1390
1391         if (actions_num > 1) {
1392                 rte_flow_error_set(error,
1393                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1394                                    actions,
1395                                    "Invalid action number");
1396                 return -rte_errno;
1397         }
1398
1399         return 0;
1400 }
1401
1402 static bool
1403 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1404 {
1405         switch (tun_type) {
1406         case ICE_SW_TUN_PROFID_IPV6_ESP:
1407         case ICE_SW_TUN_PROFID_IPV6_AH:
1408         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1409         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1410         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1411         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1412         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1413         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1414                 return true;
1415         default:
1416                 break;
1417         }
1418
1419         return false;
1420 }
1421
/*
 * Parse a flow @pattern and @actions list into switch-filter rule data.
 *
 * On success, when @meta is non-NULL it receives a freshly allocated
 * struct sw_meta holding the lookup list, lookup count and advanced
 * rule info; ownership of that memory passes to the caller.  When
 * @meta is NULL (validate-only path) the intermediate allocations are
 * released before returning.
 *
 * Returns 0 on success, -rte_errno (with @error set) on failure.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type =
		ICE_SW_TUN_AND_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* First pass over the pattern: count the lookup slots needed
	 * and derive the tunnel type from the items present.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tun_type = ICE_SW_TUN_VXLAN;
		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
			tun_type = ICE_SW_TUN_NVGRE;
		if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
				item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
			tun_type = ICE_SW_TUN_PPPOE;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* A fully-masked ether type forces the rule back
			 * to the combined tunnel/non-tunnel recipe.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	/* Match the pattern against the engine's supported patterns. */
	pattern_match_item =
		ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Second pass: build the lookup list and collect the input set;
	 * ice_switch_inset_get() may also refine the tunnel type.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	/* An empty input set is only valid for profile-ID rules; any
	 * field outside the pattern's supported mask is rejected.
	 */
	if ((!inputset && !ice_is_profile_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action number");
		goto error;
	}

	/* DCF and regular ports accept different action sets. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action");
		goto error;
	}

	if (meta) {
		/* Hand the parsed rule data to the caller. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		/* Validate-only: nothing to keep. */
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1550
1551 static int
1552 ice_switch_query(struct ice_adapter *ad __rte_unused,
1553                 struct rte_flow *flow __rte_unused,
1554                 struct rte_flow_query_count *count __rte_unused,
1555                 struct rte_flow_error *error)
1556 {
1557         rte_flow_error_set(error, EINVAL,
1558                 RTE_FLOW_ERROR_TYPE_HANDLE,
1559                 NULL,
1560                 "count action not supported by switch filter");
1561
1562         return -rte_errno;
1563 }
1564
/*
 * Redirect an existing switch rule to a new VSI number (used when the
 * target VSI's hardware number changes, e.g. after a VF reset).
 *
 * The rule is located by recipe id and rule id in the switch filter
 * list; its lookup list is duplicated, the old hardware rule is
 * removed, the VSI context is updated with the new VSI number, and the
 * rule is replayed.
 *
 * Returns 0 on success or when no matching rule is found, a negative
 * errno on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is supported here. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Find the rule entry matching this flow's rule id and the
	 * redirected VSI handle; keep a private copy of its lookups
	 * because removing the rule frees the originals.
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) {
			lkups_cnt = list_itr->lkups_cnt;
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);
			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			break;
		}
	}

	/* No matching rule: nothing to redirect. */
	if (!lkups_dp)
		return 0;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1636
1637 static int
1638 ice_switch_init(struct ice_adapter *ad)
1639 {
1640         int ret = 0;
1641         struct ice_flow_parser *dist_parser;
1642         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1643
1644         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1645                 dist_parser = &ice_switch_dist_parser_comms;
1646         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1647                 dist_parser = &ice_switch_dist_parser_os;
1648         else
1649                 return -EINVAL;
1650
1651         if (ad->devargs.pipe_mode_support)
1652                 ret = ice_register_parser(perm_parser, ad);
1653         else
1654                 ret = ice_register_parser(dist_parser, ad);
1655         return ret;
1656 }
1657
1658 static void
1659 ice_switch_uninit(struct ice_adapter *ad)
1660 {
1661         struct ice_flow_parser *dist_parser;
1662         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1663
1664         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1665                 dist_parser = &ice_switch_dist_parser_comms;
1666         else
1667                 dist_parser = &ice_switch_dist_parser_os;
1668
1669         if (ad->devargs.pipe_mode_support)
1670                 ice_unregister_parser(perm_parser, ad);
1671         else
1672                 ice_unregister_parser(dist_parser, ad);
1673 }
1674
/* Switch filter flow engine: ops table registered with the generic
 * flow framework at constructor time (see RTE_INIT below in this
 * file).
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1686
/* Distributor-stage parser used with the OS-default DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1695
/* Distributor-stage parser used with the comms DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1704
/* Permission-stage parser, registered when pipeline mode is enabled
 * via devargs.
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1713
1714 RTE_INIT(ice_sw_engine_init)
1715 {
1716         struct ice_flow_engine *engine = &ice_switch_engine;
1717         ice_register_flow_engine(engine);
1718 }