cc0af23adcf4c84fc7f41ab31c9e959e71891ba5
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/* Max number of queue-group types for the RSS/queue-region action. */
#define MAX_QGRP_NUM_TYPE 7

/* Input-set bitmaps: for each supported pattern, the union of header
 * fields a flow rule may match on.  Non-tunnel sets use the outer
 * header bits; the *_DIST_* and *_PERM_TUNNEL_* sets use the
 * ICE_INSET_TUN_* (inner header) bits.
 */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
	ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Distributor-mode tunnel sets: inner 5-tuple plus the tunnel id and,
 * for steering, the outer IPv4 destination.
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Permission-mode tunnel sets: inner headers only, no outer IPv4. */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
/* L3 + security/session protocols: base IPv4/IPv6 set plus the
 * protocol's session identifier field.
 */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
117
/* Parse result handed from the pattern/action parser to
 * ice_switch_create().  Both the container and the lookup list are
 * heap-allocated; ice_switch_create() frees them on every path.
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;		/* lookup elements for the rule */
	uint16_t lkups_num;			/* valid entries in @list */
	struct ice_adv_rule_info rule_info;	/* rule action/direction info */
};
123
124 static struct ice_flow_parser ice_switch_dist_parser_os;
125 static struct ice_flow_parser ice_switch_dist_parser_comms;
126 static struct ice_flow_parser ice_switch_perm_parser;
127
/* Patterns accepted in distributor mode with the comms DDP package
 * (per the parser this array is registered with); each entry pairs a
 * flow pattern with the input-set bits that may be masked for it.
 * ICE_INSET_NONE in the input-set slot means the pattern is accepted
 * but no field matching is allowed.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,
			ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,
			ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
193
/* Patterns accepted in distributor mode with the OS default DDP
 * package (no PPPoE/ESP/AH/L2TP/PFCP support); same entry layout as
 * ice_switch_pattern_dist_comms.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};
227
/* Patterns accepted in permission (DCF) mode; tunnel patterns use the
 * ICE_SW_INSET_PERM_TUNNEL_* input sets (inner headers only) instead
 * of the distributor-mode *_DIST_* sets.
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,
			ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,
			ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,
			ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
293
294 static int
295 ice_switch_create(struct ice_adapter *ad,
296                 struct rte_flow *flow,
297                 void *meta,
298                 struct rte_flow_error *error)
299 {
300         int ret = 0;
301         struct ice_pf *pf = &ad->pf;
302         struct ice_hw *hw = ICE_PF_TO_HW(pf);
303         struct ice_rule_query_data rule_added = {0};
304         struct ice_rule_query_data *filter_ptr;
305         struct ice_adv_lkup_elem *list =
306                 ((struct sw_meta *)meta)->list;
307         uint16_t lkups_cnt =
308                 ((struct sw_meta *)meta)->lkups_num;
309         struct ice_adv_rule_info *rule_info =
310                 &((struct sw_meta *)meta)->rule_info;
311
312         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
313                 rte_flow_error_set(error, EINVAL,
314                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
315                         "item number too large for rule");
316                 goto error;
317         }
318         if (!list) {
319                 rte_flow_error_set(error, EINVAL,
320                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
321                         "lookup list should not be NULL");
322                 goto error;
323         }
324         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
325         if (!ret) {
326                 filter_ptr = rte_zmalloc("ice_switch_filter",
327                         sizeof(struct ice_rule_query_data), 0);
328                 if (!filter_ptr) {
329                         rte_flow_error_set(error, EINVAL,
330                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
331                                    "No memory for ice_switch_filter");
332                         goto error;
333                 }
334                 flow->rule = filter_ptr;
335                 rte_memcpy(filter_ptr,
336                         &rule_added,
337                         sizeof(struct ice_rule_query_data));
338         } else {
339                 rte_flow_error_set(error, EINVAL,
340                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
341                         "switch filter create flow fail");
342                 goto error;
343         }
344
345         rte_free(list);
346         rte_free(meta);
347         return 0;
348
349 error:
350         rte_free(list);
351         rte_free(meta);
352
353         return -rte_errno;
354 }
355
356 static int
357 ice_switch_destroy(struct ice_adapter *ad,
358                 struct rte_flow *flow,
359                 struct rte_flow_error *error)
360 {
361         struct ice_hw *hw = &ad->hw;
362         int ret;
363         struct ice_rule_query_data *filter_ptr;
364
365         filter_ptr = (struct ice_rule_query_data *)
366                 flow->rule;
367
368         if (!filter_ptr) {
369                 rte_flow_error_set(error, EINVAL,
370                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
371                         "no such flow"
372                         " create by switch filter");
373                 return -rte_errno;
374         }
375
376         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
377         if (ret) {
378                 rte_flow_error_set(error, EINVAL,
379                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
380                         "fail to destroy switch filter rule");
381                 return -rte_errno;
382         }
383
384         rte_free(filter_ptr);
385         return ret;
386 }
387
/* Free the rule-id bookkeeping attached to @flow without touching
 * hardware (rte_free() tolerates a NULL pointer, so a flow with no
 * rule attached is handled safely).
 */
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
}
393
394 static uint64_t
395 ice_switch_inset_get(const struct rte_flow_item pattern[],
396                 struct rte_flow_error *error,
397                 struct ice_adv_lkup_elem *list,
398                 uint16_t *lkups_num,
399                 enum ice_sw_tunnel_type *tun_type)
400 {
401         const struct rte_flow_item *item = pattern;
402         enum rte_flow_item_type item_type;
403         const struct rte_flow_item_eth *eth_spec, *eth_mask;
404         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
405         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
406         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
407         const struct rte_flow_item_udp *udp_spec, *udp_mask;
408         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
409         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
410         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
411         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
412         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
413         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
414                                 *pppoe_proto_mask;
415         const struct rte_flow_item_esp *esp_spec, *esp_mask;
416         const struct rte_flow_item_ah *ah_spec, *ah_mask;
417         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
418         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
419         uint64_t input_set = ICE_INSET_NONE;
420         uint16_t j, t = 0;
421         bool profile_rule = 0;
422         bool tunnel_valid = 0;
423         bool pppoe_valid = 0;
424         bool ipv6_valiad = 0;
425         bool ipv4_valiad = 0;
426         bool udp_valiad = 0;
427
428         for (item = pattern; item->type !=
429                         RTE_FLOW_ITEM_TYPE_END; item++) {
430                 if (item->last) {
431                         rte_flow_error_set(error, EINVAL,
432                                         RTE_FLOW_ERROR_TYPE_ITEM,
433                                         item,
434                                         "Not support range");
435                         return 0;
436                 }
437                 item_type = item->type;
438
439                 switch (item_type) {
440                 case RTE_FLOW_ITEM_TYPE_ETH:
441                         eth_spec = item->spec;
442                         eth_mask = item->mask;
443                         if (eth_spec && eth_mask) {
444                                 const uint8_t *a = eth_mask->src.addr_bytes;
445                                 const uint8_t *b = eth_mask->dst.addr_bytes;
446                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
447                                         if (a[j] && tunnel_valid) {
448                                                 input_set |=
449                                                         ICE_INSET_TUN_SMAC;
450                                                 break;
451                                         } else if (a[j]) {
452                                                 input_set |=
453                                                         ICE_INSET_SMAC;
454                                                 break;
455                                         }
456                                 }
457                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
458                                         if (b[j] && tunnel_valid) {
459                                                 input_set |=
460                                                         ICE_INSET_TUN_DMAC;
461                                                 break;
462                                         } else if (b[j]) {
463                                                 input_set |=
464                                                         ICE_INSET_DMAC;
465                                                 break;
466                                         }
467                                 }
468                                 if (eth_mask->type)
469                                         input_set |= ICE_INSET_ETHERTYPE;
470                                 list[t].type = (tunnel_valid  == 0) ?
471                                         ICE_MAC_OFOS : ICE_MAC_IL;
472                                 struct ice_ether_hdr *h;
473                                 struct ice_ether_hdr *m;
474                                 uint16_t i = 0;
475                                 h = &list[t].h_u.eth_hdr;
476                                 m = &list[t].m_u.eth_hdr;
477                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
478                                         if (eth_mask->src.addr_bytes[j]) {
479                                                 h->src_addr[j] =
480                                                 eth_spec->src.addr_bytes[j];
481                                                 m->src_addr[j] =
482                                                 eth_mask->src.addr_bytes[j];
483                                                 i = 1;
484                                         }
485                                         if (eth_mask->dst.addr_bytes[j]) {
486                                                 h->dst_addr[j] =
487                                                 eth_spec->dst.addr_bytes[j];
488                                                 m->dst_addr[j] =
489                                                 eth_mask->dst.addr_bytes[j];
490                                                 i = 1;
491                                         }
492                                 }
493                                 if (i)
494                                         t++;
495                                 if (eth_mask->type) {
496                                         list[t].type = ICE_ETYPE_OL;
497                                         list[t].h_u.ethertype.ethtype_id =
498                                                 eth_spec->type;
499                                         list[t].m_u.ethertype.ethtype_id =
500                                                 eth_mask->type;
501                                         t++;
502                                 }
503                         }
504                         break;
505
506                 case RTE_FLOW_ITEM_TYPE_IPV4:
507                         ipv4_spec = item->spec;
508                         ipv4_mask = item->mask;
509                         ipv4_valiad = 1;
510                         if (ipv4_spec && ipv4_mask) {
511                                 /* Check IPv4 mask and update input set */
512                                 if (ipv4_mask->hdr.version_ihl ||
513                                         ipv4_mask->hdr.total_length ||
514                                         ipv4_mask->hdr.packet_id ||
515                                         ipv4_mask->hdr.hdr_checksum) {
516                                         rte_flow_error_set(error, EINVAL,
517                                                    RTE_FLOW_ERROR_TYPE_ITEM,
518                                                    item,
519                                                    "Invalid IPv4 mask.");
520                                         return 0;
521                                 }
522
523                                 if (tunnel_valid) {
524                                         if (ipv4_mask->hdr.type_of_service)
525                                                 input_set |=
526                                                         ICE_INSET_TUN_IPV4_TOS;
527                                         if (ipv4_mask->hdr.src_addr)
528                                                 input_set |=
529                                                         ICE_INSET_TUN_IPV4_SRC;
530                                         if (ipv4_mask->hdr.dst_addr)
531                                                 input_set |=
532                                                         ICE_INSET_TUN_IPV4_DST;
533                                         if (ipv4_mask->hdr.time_to_live)
534                                                 input_set |=
535                                                         ICE_INSET_TUN_IPV4_TTL;
536                                         if (ipv4_mask->hdr.next_proto_id)
537                                                 input_set |=
538                                                 ICE_INSET_TUN_IPV4_PROTO;
539                                 } else {
540                                         if (ipv4_mask->hdr.src_addr)
541                                                 input_set |= ICE_INSET_IPV4_SRC;
542                                         if (ipv4_mask->hdr.dst_addr)
543                                                 input_set |= ICE_INSET_IPV4_DST;
544                                         if (ipv4_mask->hdr.time_to_live)
545                                                 input_set |= ICE_INSET_IPV4_TTL;
546                                         if (ipv4_mask->hdr.next_proto_id)
547                                                 input_set |=
548                                                 ICE_INSET_IPV4_PROTO;
549                                         if (ipv4_mask->hdr.type_of_service)
550                                                 input_set |=
551                                                         ICE_INSET_IPV4_TOS;
552                                 }
553                                 list[t].type = (tunnel_valid  == 0) ?
554                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
555                                 if (ipv4_mask->hdr.src_addr) {
556                                         list[t].h_u.ipv4_hdr.src_addr =
557                                                 ipv4_spec->hdr.src_addr;
558                                         list[t].m_u.ipv4_hdr.src_addr =
559                                                 ipv4_mask->hdr.src_addr;
560                                 }
561                                 if (ipv4_mask->hdr.dst_addr) {
562                                         list[t].h_u.ipv4_hdr.dst_addr =
563                                                 ipv4_spec->hdr.dst_addr;
564                                         list[t].m_u.ipv4_hdr.dst_addr =
565                                                 ipv4_mask->hdr.dst_addr;
566                                 }
567                                 if (ipv4_mask->hdr.time_to_live) {
568                                         list[t].h_u.ipv4_hdr.time_to_live =
569                                                 ipv4_spec->hdr.time_to_live;
570                                         list[t].m_u.ipv4_hdr.time_to_live =
571                                                 ipv4_mask->hdr.time_to_live;
572                                 }
573                                 if (ipv4_mask->hdr.next_proto_id) {
574                                         list[t].h_u.ipv4_hdr.protocol =
575                                                 ipv4_spec->hdr.next_proto_id;
576                                         list[t].m_u.ipv4_hdr.protocol =
577                                                 ipv4_mask->hdr.next_proto_id;
578                                 }
579                                 if (ipv4_mask->hdr.type_of_service) {
580                                         list[t].h_u.ipv4_hdr.tos =
581                                                 ipv4_spec->hdr.type_of_service;
582                                         list[t].m_u.ipv4_hdr.tos =
583                                                 ipv4_mask->hdr.type_of_service;
584                                 }
585                                 t++;
586                         }
587                         break;
588
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			/* Remember an IPv6 header was seen; ESP/AH/L2TPv3
			 * items use this later to pick the tunnel type.
			 */
			ipv6_valiad = 1;
			if (ipv6_spec && ipv6_mask) {
				/* The payload length cannot be matched by a
				 * switch rule; reject any mask on it.
				 */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask");
					return 0;
				}

				/* Any non-zero mask byte enables source
				 * address matching; a tunnel item seen earlier
				 * selects the inner-header input set bit.
				 */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_SRC;
						break;
					} else if (ipv6_mask->hdr.src_addr[j]) {
						input_set |= ICE_INSET_IPV6_SRC;
						break;
					}
				}
				/* Same scan for the destination address. */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.dst_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_DST;
						break;
					} else if (ipv6_mask->hdr.dst_addr[j]) {
						input_set |= ICE_INSET_IPV6_DST;
						break;
					}
				}
				if (ipv6_mask->hdr.proto &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_NEXT_HDR;
				else if (ipv6_mask->hdr.proto)
					input_set |=
						ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_HOP_LIMIT;
				else if (ipv6_mask->hdr.hop_limits)
					input_set |=
						ICE_INSET_IPV6_HOP_LIMIT;
				/* Of vtc_flow, only the traffic class bits are
				 * matchable; the flow label is ignored.
				 */
				if ((ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) &&
					tunnel_valid)
					input_set |=
							ICE_INSET_TUN_IPV6_TC;
				else if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;

				/* OFOS = outermost header, IL = inner header
				 * (a tunnel item was seen before this one).
				 */
				list[t].type = (tunnel_valid  == 0) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				struct ice_ipv6_hdr *f;	/* header values (spec) */
				struct ice_ipv6_hdr *s;	/* match mask */
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
					}
					if (ipv6_mask->hdr.dst_addr[j]) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
					}
				}
				if (ipv6_mask->hdr.proto) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr =
						ipv6_mask->hdr.proto;
				}
				if (ipv6_mask->hdr.hop_limits) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit =
						ipv6_mask->hdr.hop_limits;
				}
				if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) {
					/* Rebuild the big-endian
					 * version/TC/flow-label word carrying
					 * only the TC field (version and flow
					 * label zeroed), for spec and mask.
					 */
					struct ice_le_ver_tc_flow vtf;
					vtf.u.fld.version = 0;
					vtf.u.fld.flow_label = 0;
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_spec->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_mask->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
				}
				t++;
			}
			break;
701
702                 case RTE_FLOW_ITEM_TYPE_UDP:
703                         udp_spec = item->spec;
704                         udp_mask = item->mask;
705                         udp_valiad = 1;
706                         if (udp_spec && udp_mask) {
707                                 /* Check UDP mask and update input set*/
708                                 if (udp_mask->hdr.dgram_len ||
709                                     udp_mask->hdr.dgram_cksum) {
710                                         rte_flow_error_set(error, EINVAL,
711                                                    RTE_FLOW_ERROR_TYPE_ITEM,
712                                                    item,
713                                                    "Invalid UDP mask");
714                                         return 0;
715                                 }
716
717                                 if (tunnel_valid) {
718                                         if (udp_mask->hdr.src_port)
719                                                 input_set |=
720                                                 ICE_INSET_TUN_UDP_SRC_PORT;
721                                         if (udp_mask->hdr.dst_port)
722                                                 input_set |=
723                                                 ICE_INSET_TUN_UDP_DST_PORT;
724                                 } else {
725                                         if (udp_mask->hdr.src_port)
726                                                 input_set |=
727                                                 ICE_INSET_UDP_SRC_PORT;
728                                         if (udp_mask->hdr.dst_port)
729                                                 input_set |=
730                                                 ICE_INSET_UDP_DST_PORT;
731                                 }
732                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
733                                                 tunnel_valid == 0)
734                                         list[t].type = ICE_UDP_OF;
735                                 else
736                                         list[t].type = ICE_UDP_ILOS;
737                                 if (udp_mask->hdr.src_port) {
738                                         list[t].h_u.l4_hdr.src_port =
739                                                 udp_spec->hdr.src_port;
740                                         list[t].m_u.l4_hdr.src_port =
741                                                 udp_mask->hdr.src_port;
742                                 }
743                                 if (udp_mask->hdr.dst_port) {
744                                         list[t].h_u.l4_hdr.dst_port =
745                                                 udp_spec->hdr.dst_port;
746                                         list[t].m_u.l4_hdr.dst_port =
747                                                 udp_mask->hdr.dst_port;
748                                 }
749                                                 t++;
750                         }
751                         break;
752
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set: only
				 * the port fields can be matched by a switch
				 * rule; reject masks on any other TCP header
				 * field.
				 */
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid TCP mask");
					return 0;
				}

				/* A tunnel item seen earlier selects the
				 * inner-header input set bits.
				 */
				if (tunnel_valid) {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_TCP_DST_PORT;
				} else {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TCP_DST_PORT;
				}
				list[t].type = ICE_TCP_IL;
				/* Copy the matched ports into the lookup
				 * entry (spec into h_u, mask into m_u).
				 */
				if (tcp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
				}
				if (tcp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
				}
				t++;
			}
			break;
803
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set:
				 * matching on the checksum is not supported.
				 */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid SCTP mask");
					return 0;
				}

				/* A tunnel item seen earlier selects the
				 * inner-header input set bits.
				 */
				if (tunnel_valid) {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_SCTP_DST_PORT;
				} else {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_SCTP_DST_PORT;
				}
				list[t].type = ICE_SCTP_IL;
				/* Copy the matched ports into the lookup
				 * entry (spec into h_u, mask into m_u).
				 */
				if (sctp_mask->hdr.src_port) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
				}
				if (sctp_mask->hdr.dst_port) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
				}
				t++;
			}
			break;
848
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return 0;
			}

			/* From here on, subsequent items describe the inner
			 * (tunneled) headers.
			 */
			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					/* NOTE(review): rte_flow carries the
					 * VNI big-endian (vni[0] = MSB), yet
					 * vni[2] is placed in the high bits
					 * here — presumably the layout the
					 * base code expects; confirm the byte
					 * order against the switch base code.
					 */
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					input_set |=
						ICE_INSET_TUN_VXLAN_VNI;
				}
				t++;
			}
			break;
885
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return 0;
			}
			/* From here on, subsequent items describe the inner
			 * (tunneled) headers.
			 */
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					/* NOTE(review): tni[] is big-endian in
					 * rte_flow (tni[0] = MSB) but tni[2]
					 * lands in the high bits here, same as
					 * the VXLAN VNI handling — confirm the
					 * byte order the base code expects.
					 */
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					input_set |=
						ICE_INSET_TUN_NVGRE_TNI;
				}
				t++;
			}
			break;
921
922                 case RTE_FLOW_ITEM_TYPE_VLAN:
923                         vlan_spec = item->spec;
924                         vlan_mask = item->mask;
925                         /* Check if VLAN item is used to describe protocol.
926                          * If yes, both spec and mask should be NULL.
927                          * If no, both spec and mask shouldn't be NULL.
928                          */
929                         if ((!vlan_spec && vlan_mask) ||
930                             (vlan_spec && !vlan_mask)) {
931                                 rte_flow_error_set(error, EINVAL,
932                                            RTE_FLOW_ERROR_TYPE_ITEM,
933                                            item,
934                                            "Invalid VLAN item");
935                                 return 0;
936                         }
937                         if (vlan_spec && vlan_mask) {
938                                 list[t].type = ICE_VLAN_OFOS;
939                                 if (vlan_mask->tci) {
940                                         list[t].h_u.vlan_hdr.vlan =
941                                                 vlan_spec->tci;
942                                         list[t].m_u.vlan_hdr.vlan =
943                                                 vlan_mask->tci;
944                                         input_set |= ICE_INSET_VLAN_OUTER;
945                                 }
946                                 if (vlan_mask->inner_type) {
947                                         list[t].h_u.vlan_hdr.type =
948                                                 vlan_spec->inner_type;
949                                         list[t].m_u.vlan_hdr.type =
950                                                 vlan_mask->inner_type;
951                                         input_set |= ICE_INSET_ETHERTYPE;
952                                 }
953                                 t++;
954                         }
955                         break;
956
		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return 0;
			}
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set:
				 * only the session id is matchable.
				 */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return 0;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					input_set |= ICE_INSET_PPPOE_SESSION;
				}
				t++;
				/* Allow a following PPPOE_PROTO_ID item to be
				 * merged into this same lookup entry.
				 */
				pppoe_valid = 1;
			}
			break;
996
		case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
			pppoe_proto_spec = item->spec;
			pppoe_proto_mask = item->mask;
			/* Check if PPPoE optional proto_id item
			 * is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_proto_spec && pppoe_proto_mask) ||
				(pppoe_proto_spec && !pppoe_proto_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe proto item");
				return 0;
			}
			if (pppoe_proto_spec && pppoe_proto_mask) {
				/* If a PPPoE item preceded this one it
				 * already emitted an ICE_PPPOE entry; step
				 * back and fold the protocol id into that
				 * same entry instead of adding a new one.
				 */
				if (pppoe_valid)
					t--;
				list[t].type = ICE_PPPOE;
				if (pppoe_proto_mask->proto_id) {
					list[t].h_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_spec->proto_id;
					list[t].m_u.pppoe_hdr.ppp_prot_id =
						pppoe_proto_mask->proto_id;
					input_set |= ICE_INSET_PPPOE_PROTO;
				}
				t++;
			}
			break;
1027
		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;
			if ((esp_spec && !esp_mask) ||
				(!esp_spec && esp_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid esp item");
				return 0;
			}
			/* Check esp mask and update input set: only the SPI
			 * is matchable; reject a mask on the sequence number.
			 */
			if (esp_mask && esp_mask->hdr.seq) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid esp mask");
				return 0;
			}

			/* A bare ESP item with no other match fields selects
			 * a profile-based rule; over UDP it is NAT-T.
			 */
			if (!esp_spec && !esp_mask && !input_set) {
				profile_rule = 1;
				if (ipv6_valiad && udp_valiad)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
				else if (ipv4_valiad)
					/* no IPv4 ESP profile rule supported */
					return 0;
			} else if (esp_spec && esp_mask &&
						esp_mask->hdr.spi){
				/* SPI match: ESP over UDP is NAT-T. */
				if (udp_valiad)
					list[t].type = ICE_NAT_T;
				else
					list[t].type = ICE_ESP;
				list[t].h_u.esp_hdr.spi =
					esp_spec->hdr.spi;
				list[t].m_u.esp_hdr.spi =
					esp_mask->hdr.spi;
				input_set |= ICE_INSET_ESP_SPI;
				t++;
			}

			/* Non-profile rules derive the tunnel type from the
			 * outer headers seen so far.
			 */
			if (!profile_rule) {
				if (ipv6_valiad && udp_valiad)
					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
				else if (ipv4_valiad && udp_valiad)
					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_IPV6_ESP;
				else if (ipv4_valiad)
					*tun_type = ICE_SW_TUN_IPV4_ESP;
			}
			break;
1082
		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;
			if ((ah_spec && !ah_mask) ||
				(!ah_spec && ah_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid ah item");
				return 0;
			}
			/* Check ah mask and update input set: only the SPI
			 * is matchable; reject a mask on anything else.
			 */
			if (ah_mask &&
				(ah_mask->next_hdr ||
				ah_mask->payload_len ||
				ah_mask->seq_num ||
				ah_mask->reserved)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid ah mask");
				return 0;
			}

			/* A bare AH item with no other match fields selects
			 * a profile-based rule.
			 */
			if (!ah_spec && !ah_mask && !input_set) {
				profile_rule = 1;
				if (ipv6_valiad && udp_valiad)
					*tun_type =
					ICE_SW_TUN_PROFID_IPV6_NAT_T;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
				else if (ipv4_valiad)
					/* no IPv4 AH profile rule supported */
					return 0;
			} else if (ah_spec && ah_mask &&
						ah_mask->spi){
				list[t].type = ICE_AH;
				list[t].h_u.ah_hdr.spi =
					ah_spec->spi;
				list[t].m_u.ah_hdr.spi =
					ah_mask->spi;
				input_set |= ICE_INSET_AH_SPI;
				t++;
			}

			/* Non-profile rules: AH over UDP is not supported;
			 * otherwise derive the tunnel type from the outer
			 * IP version.
			 */
			if (!profile_rule) {
				if (udp_valiad)
					return 0;
				else if (ipv6_valiad)
					*tun_type = ICE_SW_TUN_IPV6_AH;
				else if (ipv4_valiad)
					*tun_type = ICE_SW_TUN_IPV4_AH;
			}
			break;
1136
1137                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1138                         l2tp_spec = item->spec;
1139                         l2tp_mask = item->mask;
1140                         if ((l2tp_spec && !l2tp_mask) ||
1141                                 (!l2tp_spec && l2tp_mask)) {
1142                                 rte_flow_error_set(error, EINVAL,
1143                                            RTE_FLOW_ERROR_TYPE_ITEM,
1144                                            item,
1145                                            "Invalid l2tp item");
1146                                 return 0;
1147                         }
1148
1149                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1150                                 if (ipv6_valiad)
1151                                         *tun_type =
1152                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1153                                 else if (ipv4_valiad)
1154                                         return 0;
1155                         } else if (l2tp_spec && l2tp_mask &&
1156                                                 l2tp_mask->session_id){
1157                                 list[t].type = ICE_L2TPV3;
1158                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1159                                         l2tp_spec->session_id;
1160                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1161                                         l2tp_mask->session_id;
1162                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1163                                 t++;
1164                         }
1165
1166                         if (!profile_rule) {
1167                                 if (ipv6_valiad)
1168                                         *tun_type =
1169                                         ICE_SW_TUN_IPV6_L2TPV3;
1170                                 else if (ipv4_valiad)
1171                                         *tun_type =
1172                                         ICE_SW_TUN_IPV4_L2TPV3;
1173                         }
1174                         break;
1175
1176                 case RTE_FLOW_ITEM_TYPE_PFCP:
1177                         pfcp_spec = item->spec;
1178                         pfcp_mask = item->mask;
1179                         /* Check if PFCP item is used to describe protocol.
1180                          * If yes, both spec and mask should be NULL.
1181                          * If no, both spec and mask shouldn't be NULL.
1182                          */
1183                         if ((!pfcp_spec && pfcp_mask) ||
1184                             (pfcp_spec && !pfcp_mask)) {
1185                                 rte_flow_error_set(error, EINVAL,
1186                                            RTE_FLOW_ERROR_TYPE_ITEM,
1187                                            item,
1188                                            "Invalid PFCP item");
1189                                 return -ENOTSUP;
1190                         }
1191                         if (pfcp_spec && pfcp_mask) {
1192                                 /* Check pfcp mask and update input set */
1193                                 if (pfcp_mask->msg_type ||
1194                                         pfcp_mask->msg_len ||
1195                                         pfcp_mask->seid) {
1196                                         rte_flow_error_set(error, EINVAL,
1197                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1198                                                 item,
1199                                                 "Invalid pfcp mask");
1200                                         return -ENOTSUP;
1201                                 }
1202                                 if (pfcp_mask->s_field &&
1203                                         pfcp_spec->s_field == 0x01 &&
1204                                         ipv6_valiad)
1205                                         *tun_type =
1206                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1207                                 else if (pfcp_mask->s_field &&
1208                                         pfcp_spec->s_field == 0x01)
1209                                         *tun_type =
1210                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1211                                 else if (pfcp_mask->s_field &&
1212                                         !pfcp_spec->s_field &&
1213                                         ipv6_valiad)
1214                                         *tun_type =
1215                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1216                                 else if (pfcp_mask->s_field &&
1217                                         !pfcp_spec->s_field)
1218                                         *tun_type =
1219                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1220                                 else
1221                                         return -ENOTSUP;
1222                         }
1223                         break;
1224
1225                 case RTE_FLOW_ITEM_TYPE_VOID:
1226                         break;
1227
1228                 default:
1229                         rte_flow_error_set(error, EINVAL,
1230                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1231                                    "Invalid pattern item.");
1232                         goto out;
1233                 }
1234         }
1235
1236         *lkups_num = t;
1237
1238         return input_set;
1239 out:
1240         return 0;
1241 }
1242
1243 static int
1244 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1245                             const struct rte_flow_action *actions,
1246                             struct rte_flow_error *error,
1247                             struct ice_adv_rule_info *rule_info)
1248 {
1249         const struct rte_flow_action_vf *act_vf;
1250         const struct rte_flow_action *action;
1251         enum rte_flow_action_type action_type;
1252
1253         for (action = actions; action->type !=
1254                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1255                 action_type = action->type;
1256                 switch (action_type) {
1257                 case RTE_FLOW_ACTION_TYPE_VF:
1258                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1259                         act_vf = action->conf;
1260                         if (act_vf->original)
1261                                 rule_info->sw_act.vsi_handle =
1262                                         ad->real_hw.avf.bus.func;
1263                         else
1264                                 rule_info->sw_act.vsi_handle = act_vf->id;
1265                         break;
1266                 default:
1267                         rte_flow_error_set(error,
1268                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1269                                            actions,
1270                                            "Invalid action type or queue number");
1271                         return -rte_errno;
1272                 }
1273         }
1274
1275         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1276         rule_info->sw_act.flag = ICE_FLTR_RX;
1277         rule_info->rx = 1;
1278         rule_info->priority = 5;
1279
1280         return 0;
1281 }
1282
/**
 * Parse PF flow actions into switch rule forwarding info.
 *
 * Supports RSS (interpreted as a queue group), QUEUE, and DROP; VOID
 * items are skipped. On success rule_info->sw_act is filled and the
 * rule is marked Rx with priority 5, targeting the main VSI.
 *
 * Returns 0 on success, -rte_errno on an invalid action or queue setup.
 */
static int
ice_switch_parse_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	/* Accepted queue group sizes (powers of two, 2..128). */
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		 2, 4, 8, 16, 32, 64, 128};

	/* Queue indices in the action are relative to the VSI queue base. */
	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* RSS action describes a queue group here. */
			act_qgrop = action->conf;
			if (act_qgrop->queue_num <= 1)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_QGRP;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			/* Group size must be one of the accepted sizes. */
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
					break;
			}
			if (i == MAX_QGRP_NUM_TYPE)
				goto error;
			/* Whole group must fit in the configured Rx queues. */
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev->data->nb_rx_queues)
				goto error;
			/* Queues in the group must be consecutive. */
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
					goto error;
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			if (act_q->index >= dev->data->nb_rx_queues)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
				ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			goto error;
		}
	}

	/* Rule applies to (and is sourced from) the main VSI. */
	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	rule_info->priority = 5;

	return 0;

error:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid action type or queue number");
	return -rte_errno;
}
1367
1368 static int
1369 ice_switch_check_action(const struct rte_flow_action *actions,
1370                             struct rte_flow_error *error)
1371 {
1372         const struct rte_flow_action *action;
1373         enum rte_flow_action_type action_type;
1374         uint16_t actions_num = 0;
1375
1376         for (action = actions; action->type !=
1377                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1378                 action_type = action->type;
1379                 switch (action_type) {
1380                 case RTE_FLOW_ACTION_TYPE_VF:
1381                 case RTE_FLOW_ACTION_TYPE_RSS:
1382                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1383                 case RTE_FLOW_ACTION_TYPE_DROP:
1384                         actions_num++;
1385                         break;
1386                 case RTE_FLOW_ACTION_TYPE_VOID:
1387                         continue;
1388                 default:
1389                         rte_flow_error_set(error,
1390                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1391                                            actions,
1392                                            "Invalid action type");
1393                         return -rte_errno;
1394                 }
1395         }
1396
1397         if (actions_num != 1) {
1398                 rte_flow_error_set(error,
1399                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1400                                    actions,
1401                                    "Invalid action number");
1402                 return -rte_errno;
1403         }
1404
1405         return 0;
1406 }
1407
1408 static bool
1409 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1410 {
1411         switch (tun_type) {
1412         case ICE_SW_TUN_PROFID_IPV6_ESP:
1413         case ICE_SW_TUN_PROFID_IPV6_AH:
1414         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1415         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1416         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1417         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1418         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1419         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1420                 return true;
1421         default:
1422                 break;
1423         }
1424
1425         return false;
1426 }
1427
1428 static int
1429 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1430                 struct ice_pattern_match_item *array,
1431                 uint32_t array_len,
1432                 const struct rte_flow_item pattern[],
1433                 const struct rte_flow_action actions[],
1434                 void **meta,
1435                 struct rte_flow_error *error)
1436 {
1437         struct ice_pf *pf = &ad->pf;
1438         uint64_t inputset = 0;
1439         int ret = 0;
1440         struct sw_meta *sw_meta_ptr = NULL;
1441         struct ice_adv_rule_info rule_info;
1442         struct ice_adv_lkup_elem *list = NULL;
1443         uint16_t lkups_num = 0;
1444         const struct rte_flow_item *item = pattern;
1445         uint16_t item_num = 0;
1446         enum ice_sw_tunnel_type tun_type =
1447                 ICE_SW_TUN_AND_NON_TUN;
1448         struct ice_pattern_match_item *pattern_match_item = NULL;
1449
1450         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1451                 item_num++;
1452                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1453                         tun_type = ICE_SW_TUN_VXLAN;
1454                 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1455                         tun_type = ICE_SW_TUN_NVGRE;
1456                 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1457                                 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1458                         tun_type = ICE_SW_TUN_PPPOE;
1459                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1460                         const struct rte_flow_item_eth *eth_mask;
1461                         if (item->mask)
1462                                 eth_mask = item->mask;
1463                         else
1464                                 continue;
1465                         if (eth_mask->type == UINT16_MAX)
1466                                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1467                 }
1468                 /* reserve one more memory slot for ETH which may
1469                  * consume 2 lookup items.
1470                  */
1471                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1472                         item_num++;
1473         }
1474
1475         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1476         if (!list) {
1477                 rte_flow_error_set(error, EINVAL,
1478                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1479                                    "No memory for PMD internal items");
1480                 return -rte_errno;
1481         }
1482
1483         sw_meta_ptr =
1484                 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1485         if (!sw_meta_ptr) {
1486                 rte_flow_error_set(error, EINVAL,
1487                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1488                                    "No memory for sw_pattern_meta_ptr");
1489                 goto error;
1490         }
1491
1492         pattern_match_item =
1493                 ice_search_pattern_match_item(pattern, array, array_len, error);
1494         if (!pattern_match_item) {
1495                 rte_flow_error_set(error, EINVAL,
1496                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1497                                    "Invalid input pattern");
1498                 goto error;
1499         }
1500
1501         inputset = ice_switch_inset_get
1502                 (pattern, error, list, &lkups_num, &tun_type);
1503         if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1504                 (inputset & ~pattern_match_item->input_set_mask)) {
1505                 rte_flow_error_set(error, EINVAL,
1506                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1507                                    pattern,
1508                                    "Invalid input set");
1509                 goto error;
1510         }
1511
1512         memset(&rule_info, 0, sizeof(rule_info));
1513         rule_info.tun_type = tun_type;
1514
1515         ret = ice_switch_check_action(actions, error);
1516         if (ret) {
1517                 rte_flow_error_set(error, EINVAL,
1518                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1519                                    "Invalid input action number");
1520                 goto error;
1521         }
1522
1523         if (ad->hw.dcf_enabled)
1524                 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1525                                                   &rule_info);
1526         else
1527                 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1528
1529         if (ret) {
1530                 rte_flow_error_set(error, EINVAL,
1531                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1532                                    "Invalid input action");
1533                 goto error;
1534         }
1535
1536         if (meta) {
1537                 *meta = sw_meta_ptr;
1538                 ((struct sw_meta *)*meta)->list = list;
1539                 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1540                 ((struct sw_meta *)*meta)->rule_info = rule_info;
1541         } else {
1542                 rte_free(list);
1543                 rte_free(sw_meta_ptr);
1544         }
1545
1546         rte_free(pattern_match_item);
1547
1548         return 0;
1549
1550 error:
1551         rte_free(list);
1552         rte_free(sw_meta_ptr);
1553         rte_free(pattern_match_item);
1554
1555         return -rte_errno;
1556 }
1557
1558 static int
1559 ice_switch_query(struct ice_adapter *ad __rte_unused,
1560                 struct rte_flow *flow __rte_unused,
1561                 struct rte_flow_query_count *count __rte_unused,
1562                 struct rte_flow_error *error)
1563 {
1564         rte_flow_error_set(error, EINVAL,
1565                 RTE_FLOW_ERROR_TYPE_HANDLE,
1566                 NULL,
1567                 "count action not supported by switch filter");
1568
1569         return -rte_errno;
1570 }
1571
/**
 * Redirect an existing switch rule to a new VSI number.
 *
 * Finds the advanced rule matching flow->rule in the recipe's filter
 * list, duplicates its lookups, removes the old hardware rule, updates
 * the VSI context with rd->new_vsi_num, and replays the rule.
 *
 * Returns 0 on success (including the no-op case where the rule does
 * not target rd->vsi_handle), negative errno on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Nothing to do if the rule belongs to a different VSI. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is supported here. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Locate the rule entry: either a direct VSI forward to the
	 * redirected VSI, or a VSI-list forward (rewritten below to a
	 * single-VSI forward).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* NOTE(review): lookups are duplicated because
			 * ice_rem_adv_rule presumably frees the entry's
			 * originals — confirm against base code.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule entry was found. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1653
1654 static int
1655 ice_switch_init(struct ice_adapter *ad)
1656 {
1657         int ret = 0;
1658         struct ice_flow_parser *dist_parser;
1659         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1660
1661         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1662                 dist_parser = &ice_switch_dist_parser_comms;
1663         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1664                 dist_parser = &ice_switch_dist_parser_os;
1665         else
1666                 return -EINVAL;
1667
1668         if (ad->devargs.pipe_mode_support)
1669                 ret = ice_register_parser(perm_parser, ad);
1670         else
1671                 ret = ice_register_parser(dist_parser, ad);
1672         return ret;
1673 }
1674
1675 static void
1676 ice_switch_uninit(struct ice_adapter *ad)
1677 {
1678         struct ice_flow_parser *dist_parser;
1679         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1680
1681         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1682                 dist_parser = &ice_switch_dist_parser_comms;
1683         else
1684                 dist_parser = &ice_switch_dist_parser_os;
1685
1686         if (ad->devargs.pipe_mode_support)
1687                 ice_unregister_parser(perm_parser, ad);
1688         else
1689                 ice_unregister_parser(dist_parser, ad);
1690 }
1691
/* Switch filter engine ops registered with the generic flow framework. */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1703
/* Distributor-stage parser used with the OS-default DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1712
/* Distributor-stage parser used with the COMMS DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1721
/* Permission-stage parser, selected when pipe_mode_support is set
 * (see ice_switch_init).
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1730
1731 RTE_INIT(ice_sw_engine_init)
1732 {
1733         struct ice_flow_engine *engine = &ice_switch_engine;
1734         ice_register_flow_engine(engine);
1735 }