55a5618a71e9baca46e31508b9ec064a4de0ee06
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26
27
/* Maximum number of queue-group types supported per queue-group action. */
#define MAX_QGRP_NUM_TYPE 7

/*
 * Input-set bitmaps for non-tunnel (outer) patterns: which header fields a
 * given pattern is allowed to match on when programmed as a switch rule.
 */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
		ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
		ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Input-set bitmaps for tunneled patterns in the "distributor" profiles:
 * inner (TUN_*) fields plus the outer IPv4 destination.
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/*
 * Input-set bitmaps for tunneled patterns in the "permission" profile:
 * inner-tunnel fields only, no outer-header match.
 */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
/* PPPoE patterns: L2 fields plus PPPoE session id (and optional protocol). */
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
98
/*
 * Parser output handed from pattern parsing to rule creation
 * (consumed and freed by ice_switch_create()).
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;		/* lookup elements for the rule */
	uint16_t lkups_num;			/* number of valid entries in @list */
	struct ice_adv_rule_info rule_info;	/* rule action/direction info */
};
104
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
108
/* Patterns supported by the distributor parser with the comms package. */
static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	/* Protocol-only matches: no header field input set is taken. */
	{pattern_eth_ipv6_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
166
/* Patterns supported by the distributor parser with the OS default package. */
static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};
200
/* Patterns supported by the permission-stage parser. */
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	/* Tunnel patterns use the inner-only PERM input sets here. */
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	/* Protocol-only matches: no header field input set is taken. */
	{pattern_eth_ipv6_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
258
259 static int
260 ice_switch_create(struct ice_adapter *ad,
261                 struct rte_flow *flow,
262                 void *meta,
263                 struct rte_flow_error *error)
264 {
265         int ret = 0;
266         struct ice_pf *pf = &ad->pf;
267         struct ice_hw *hw = ICE_PF_TO_HW(pf);
268         struct ice_rule_query_data rule_added = {0};
269         struct ice_rule_query_data *filter_ptr;
270         struct ice_adv_lkup_elem *list =
271                 ((struct sw_meta *)meta)->list;
272         uint16_t lkups_cnt =
273                 ((struct sw_meta *)meta)->lkups_num;
274         struct ice_adv_rule_info *rule_info =
275                 &((struct sw_meta *)meta)->rule_info;
276
277         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
278                 rte_flow_error_set(error, EINVAL,
279                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
280                         "item number too large for rule");
281                 goto error;
282         }
283         if (!list) {
284                 rte_flow_error_set(error, EINVAL,
285                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
286                         "lookup list should not be NULL");
287                 goto error;
288         }
289         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
290         if (!ret) {
291                 filter_ptr = rte_zmalloc("ice_switch_filter",
292                         sizeof(struct ice_rule_query_data), 0);
293                 if (!filter_ptr) {
294                         rte_flow_error_set(error, EINVAL,
295                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
296                                    "No memory for ice_switch_filter");
297                         goto error;
298                 }
299                 flow->rule = filter_ptr;
300                 rte_memcpy(filter_ptr,
301                         &rule_added,
302                         sizeof(struct ice_rule_query_data));
303         } else {
304                 rte_flow_error_set(error, EINVAL,
305                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
306                         "switch filter create flow fail");
307                 goto error;
308         }
309
310         rte_free(list);
311         rte_free(meta);
312         return 0;
313
314 error:
315         rte_free(list);
316         rte_free(meta);
317
318         return -rte_errno;
319 }
320
321 static int
322 ice_switch_destroy(struct ice_adapter *ad,
323                 struct rte_flow *flow,
324                 struct rte_flow_error *error)
325 {
326         struct ice_hw *hw = &ad->hw;
327         int ret;
328         struct ice_rule_query_data *filter_ptr;
329
330         filter_ptr = (struct ice_rule_query_data *)
331                 flow->rule;
332
333         if (!filter_ptr) {
334                 rte_flow_error_set(error, EINVAL,
335                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
336                         "no such flow"
337                         " create by switch filter");
338                 return -rte_errno;
339         }
340
341         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
342         if (ret) {
343                 rte_flow_error_set(error, EINVAL,
344                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
345                         "fail to destroy switch filter rule");
346                 return -rte_errno;
347         }
348
349         rte_free(filter_ptr);
350         return ret;
351 }
352
/* Free the per-flow rule handle allocated by ice_switch_create(). */
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
}
358
359 static uint64_t
360 ice_switch_inset_get(const struct rte_flow_item pattern[],
361                 struct rte_flow_error *error,
362                 struct ice_adv_lkup_elem *list,
363                 uint16_t *lkups_num,
364                 enum ice_sw_tunnel_type *tun_type)
365 {
366         const struct rte_flow_item *item = pattern;
367         enum rte_flow_item_type item_type;
368         const struct rte_flow_item_eth *eth_spec, *eth_mask;
369         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
370         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
371         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
372         const struct rte_flow_item_udp *udp_spec, *udp_mask;
373         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
374         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
375         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
376         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
377         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
378         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
379                                 *pppoe_proto_mask;
380         const struct rte_flow_item_esp *esp_spec, *esp_mask;
381         const struct rte_flow_item_ah *ah_spec, *ah_mask;
382         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
383         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
384         uint64_t input_set = ICE_INSET_NONE;
385         uint16_t j, t = 0;
386         uint16_t tunnel_valid = 0;
387         uint16_t pppoe_valid = 0;
388         uint16_t ipv6_valiad = 0;
389         uint16_t udp_valiad = 0;
390
391
392         for (item = pattern; item->type !=
393                         RTE_FLOW_ITEM_TYPE_END; item++) {
394                 if (item->last) {
395                         rte_flow_error_set(error, EINVAL,
396                                         RTE_FLOW_ERROR_TYPE_ITEM,
397                                         item,
398                                         "Not support range");
399                         return 0;
400                 }
401                 item_type = item->type;
402
403                 switch (item_type) {
404                 case RTE_FLOW_ITEM_TYPE_ETH:
405                         eth_spec = item->spec;
406                         eth_mask = item->mask;
407                         if (eth_spec && eth_mask) {
408                                 const uint8_t *a = eth_mask->src.addr_bytes;
409                                 const uint8_t *b = eth_mask->dst.addr_bytes;
410                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
411                                         if (a[j] && tunnel_valid) {
412                                                 input_set |=
413                                                         ICE_INSET_TUN_SMAC;
414                                                 break;
415                                         } else if (a[j]) {
416                                                 input_set |=
417                                                         ICE_INSET_SMAC;
418                                                 break;
419                                         }
420                                 }
421                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
422                                         if (b[j] && tunnel_valid) {
423                                                 input_set |=
424                                                         ICE_INSET_TUN_DMAC;
425                                                 break;
426                                         } else if (b[j]) {
427                                                 input_set |=
428                                                         ICE_INSET_DMAC;
429                                                 break;
430                                         }
431                                 }
432                                 if (eth_mask->type)
433                                         input_set |= ICE_INSET_ETHERTYPE;
434                                 list[t].type = (tunnel_valid  == 0) ?
435                                         ICE_MAC_OFOS : ICE_MAC_IL;
436                                 struct ice_ether_hdr *h;
437                                 struct ice_ether_hdr *m;
438                                 uint16_t i = 0;
439                                 h = &list[t].h_u.eth_hdr;
440                                 m = &list[t].m_u.eth_hdr;
441                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
442                                         if (eth_mask->src.addr_bytes[j]) {
443                                                 h->src_addr[j] =
444                                                 eth_spec->src.addr_bytes[j];
445                                                 m->src_addr[j] =
446                                                 eth_mask->src.addr_bytes[j];
447                                                 i = 1;
448                                         }
449                                         if (eth_mask->dst.addr_bytes[j]) {
450                                                 h->dst_addr[j] =
451                                                 eth_spec->dst.addr_bytes[j];
452                                                 m->dst_addr[j] =
453                                                 eth_mask->dst.addr_bytes[j];
454                                                 i = 1;
455                                         }
456                                 }
457                                 if (i)
458                                         t++;
459                                 if (eth_mask->type) {
460                                         list[t].type = ICE_ETYPE_OL;
461                                         list[t].h_u.ethertype.ethtype_id =
462                                                 eth_spec->type;
463                                         list[t].m_u.ethertype.ethtype_id =
464                                                 eth_mask->type;
465                                         t++;
466                                 }
467                         }
468                         break;
469
470                 case RTE_FLOW_ITEM_TYPE_IPV4:
471                         ipv4_spec = item->spec;
472                         ipv4_mask = item->mask;
473                         if (ipv4_spec && ipv4_mask) {
474                                 /* Check IPv4 mask and update input set */
475                                 if (ipv4_mask->hdr.version_ihl ||
476                                         ipv4_mask->hdr.total_length ||
477                                         ipv4_mask->hdr.packet_id ||
478                                         ipv4_mask->hdr.hdr_checksum) {
479                                         rte_flow_error_set(error, EINVAL,
480                                                    RTE_FLOW_ERROR_TYPE_ITEM,
481                                                    item,
482                                                    "Invalid IPv4 mask.");
483                                         return 0;
484                                 }
485
486                                 if (tunnel_valid) {
487                                         if (ipv4_mask->hdr.type_of_service)
488                                                 input_set |=
489                                                         ICE_INSET_TUN_IPV4_TOS;
490                                         if (ipv4_mask->hdr.src_addr)
491                                                 input_set |=
492                                                         ICE_INSET_TUN_IPV4_SRC;
493                                         if (ipv4_mask->hdr.dst_addr)
494                                                 input_set |=
495                                                         ICE_INSET_TUN_IPV4_DST;
496                                         if (ipv4_mask->hdr.time_to_live)
497                                                 input_set |=
498                                                         ICE_INSET_TUN_IPV4_TTL;
499                                         if (ipv4_mask->hdr.next_proto_id)
500                                                 input_set |=
501                                                 ICE_INSET_TUN_IPV4_PROTO;
502                                 } else {
503                                         if (ipv4_mask->hdr.src_addr)
504                                                 input_set |= ICE_INSET_IPV4_SRC;
505                                         if (ipv4_mask->hdr.dst_addr)
506                                                 input_set |= ICE_INSET_IPV4_DST;
507                                         if (ipv4_mask->hdr.time_to_live)
508                                                 input_set |= ICE_INSET_IPV4_TTL;
509                                         if (ipv4_mask->hdr.next_proto_id)
510                                                 input_set |=
511                                                 ICE_INSET_IPV4_PROTO;
512                                         if (ipv4_mask->hdr.type_of_service)
513                                                 input_set |=
514                                                         ICE_INSET_IPV4_TOS;
515                                 }
516                                 list[t].type = (tunnel_valid  == 0) ?
517                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
518                                 if (ipv4_mask->hdr.src_addr) {
519                                         list[t].h_u.ipv4_hdr.src_addr =
520                                                 ipv4_spec->hdr.src_addr;
521                                         list[t].m_u.ipv4_hdr.src_addr =
522                                                 ipv4_mask->hdr.src_addr;
523                                 }
524                                 if (ipv4_mask->hdr.dst_addr) {
525                                         list[t].h_u.ipv4_hdr.dst_addr =
526                                                 ipv4_spec->hdr.dst_addr;
527                                         list[t].m_u.ipv4_hdr.dst_addr =
528                                                 ipv4_mask->hdr.dst_addr;
529                                 }
530                                 if (ipv4_mask->hdr.time_to_live) {
531                                         list[t].h_u.ipv4_hdr.time_to_live =
532                                                 ipv4_spec->hdr.time_to_live;
533                                         list[t].m_u.ipv4_hdr.time_to_live =
534                                                 ipv4_mask->hdr.time_to_live;
535                                 }
536                                 if (ipv4_mask->hdr.next_proto_id) {
537                                         list[t].h_u.ipv4_hdr.protocol =
538                                                 ipv4_spec->hdr.next_proto_id;
539                                         list[t].m_u.ipv4_hdr.protocol =
540                                                 ipv4_mask->hdr.next_proto_id;
541                                 }
542                                 if (ipv4_mask->hdr.type_of_service) {
543                                         list[t].h_u.ipv4_hdr.tos =
544                                                 ipv4_spec->hdr.type_of_service;
545                                         list[t].m_u.ipv4_hdr.tos =
546                                                 ipv4_mask->hdr.type_of_service;
547                                 }
548                                 t++;
549                         }
550                         break;
551
552                 case RTE_FLOW_ITEM_TYPE_IPV6:
553                         ipv6_spec = item->spec;
554                         ipv6_mask = item->mask;
555                         ipv6_valiad = 1;
556                         if (ipv6_spec && ipv6_mask) {
557                                 if (ipv6_mask->hdr.payload_len) {
558                                         rte_flow_error_set(error, EINVAL,
559                                            RTE_FLOW_ERROR_TYPE_ITEM,
560                                            item,
561                                            "Invalid IPv6 mask");
562                                         return 0;
563                                 }
564
565                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
566                                         if (ipv6_mask->hdr.src_addr[j] &&
567                                                 tunnel_valid) {
568                                                 input_set |=
569                                                 ICE_INSET_TUN_IPV6_SRC;
570                                                 break;
571                                         } else if (ipv6_mask->hdr.src_addr[j]) {
572                                                 input_set |= ICE_INSET_IPV6_SRC;
573                                                 break;
574                                         }
575                                 }
576                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
577                                         if (ipv6_mask->hdr.dst_addr[j] &&
578                                                 tunnel_valid) {
579                                                 input_set |=
580                                                 ICE_INSET_TUN_IPV6_DST;
581                                                 break;
582                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
583                                                 input_set |= ICE_INSET_IPV6_DST;
584                                                 break;
585                                         }
586                                 }
587                                 if (ipv6_mask->hdr.proto &&
588                                         tunnel_valid)
589                                         input_set |=
590                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
591                                 else if (ipv6_mask->hdr.proto)
592                                         input_set |=
593                                                 ICE_INSET_IPV6_NEXT_HDR;
594                                 if (ipv6_mask->hdr.hop_limits &&
595                                         tunnel_valid)
596                                         input_set |=
597                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
598                                 else if (ipv6_mask->hdr.hop_limits)
599                                         input_set |=
600                                                 ICE_INSET_IPV6_HOP_LIMIT;
601                                 if ((ipv6_mask->hdr.vtc_flow &
602                                                 rte_cpu_to_be_32
603                                                 (RTE_IPV6_HDR_TC_MASK)) &&
604                                         tunnel_valid)
605                                         input_set |=
606                                                         ICE_INSET_TUN_IPV6_TC;
607                                 else if (ipv6_mask->hdr.vtc_flow &
608                                                 rte_cpu_to_be_32
609                                                 (RTE_IPV6_HDR_TC_MASK))
610                                         input_set |= ICE_INSET_IPV6_TC;
611
612                                 list[t].type = (tunnel_valid  == 0) ?
613                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
614                                 struct ice_ipv6_hdr *f;
615                                 struct ice_ipv6_hdr *s;
616                                 f = &list[t].h_u.ipv6_hdr;
617                                 s = &list[t].m_u.ipv6_hdr;
618                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
619                                         if (ipv6_mask->hdr.src_addr[j]) {
620                                                 f->src_addr[j] =
621                                                 ipv6_spec->hdr.src_addr[j];
622                                                 s->src_addr[j] =
623                                                 ipv6_mask->hdr.src_addr[j];
624                                         }
625                                         if (ipv6_mask->hdr.dst_addr[j]) {
626                                                 f->dst_addr[j] =
627                                                 ipv6_spec->hdr.dst_addr[j];
628                                                 s->dst_addr[j] =
629                                                 ipv6_mask->hdr.dst_addr[j];
630                                         }
631                                 }
632                                 if (ipv6_mask->hdr.proto) {
633                                         f->next_hdr =
634                                                 ipv6_spec->hdr.proto;
635                                         s->next_hdr =
636                                                 ipv6_mask->hdr.proto;
637                                 }
638                                 if (ipv6_mask->hdr.hop_limits) {
639                                         f->hop_limit =
640                                                 ipv6_spec->hdr.hop_limits;
641                                         s->hop_limit =
642                                                 ipv6_mask->hdr.hop_limits;
643                                 }
644                                 if (ipv6_mask->hdr.vtc_flow &
645                                                 rte_cpu_to_be_32
646                                                 (RTE_IPV6_HDR_TC_MASK)) {
647                                         struct ice_le_ver_tc_flow vtf;
648                                         vtf.u.fld.version = 0;
649                                         vtf.u.fld.flow_label = 0;
650                                         vtf.u.fld.tc = (rte_be_to_cpu_32
651                                                 (ipv6_spec->hdr.vtc_flow) &
652                                                         RTE_IPV6_HDR_TC_MASK) >>
653                                                         RTE_IPV6_HDR_TC_SHIFT;
654                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
655                                         vtf.u.fld.tc = (rte_be_to_cpu_32
656                                                 (ipv6_mask->hdr.vtc_flow) &
657                                                         RTE_IPV6_HDR_TC_MASK) >>
658                                                         RTE_IPV6_HDR_TC_SHIFT;
659                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
660                                 }
661                                 t++;
662                         }
663                         break;
664
665                 case RTE_FLOW_ITEM_TYPE_UDP:
666                         udp_spec = item->spec;
667                         udp_mask = item->mask;
668                         udp_valiad = 1;
669                         if (udp_spec && udp_mask) {
670                                 /* Check UDP mask and update input set*/
671                                 if (udp_mask->hdr.dgram_len ||
672                                     udp_mask->hdr.dgram_cksum) {
673                                         rte_flow_error_set(error, EINVAL,
674                                                    RTE_FLOW_ERROR_TYPE_ITEM,
675                                                    item,
676                                                    "Invalid UDP mask");
677                                         return 0;
678                                 }
679
680                                 if (tunnel_valid) {
681                                         if (udp_mask->hdr.src_port)
682                                                 input_set |=
683                                                 ICE_INSET_TUN_UDP_SRC_PORT;
684                                         if (udp_mask->hdr.dst_port)
685                                                 input_set |=
686                                                 ICE_INSET_TUN_UDP_DST_PORT;
687                                 } else {
688                                         if (udp_mask->hdr.src_port)
689                                                 input_set |=
690                                                 ICE_INSET_UDP_SRC_PORT;
691                                         if (udp_mask->hdr.dst_port)
692                                                 input_set |=
693                                                 ICE_INSET_UDP_DST_PORT;
694                                 }
695                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
696                                                 tunnel_valid == 0)
697                                         list[t].type = ICE_UDP_OF;
698                                 else
699                                         list[t].type = ICE_UDP_ILOS;
700                                 if (udp_mask->hdr.src_port) {
701                                         list[t].h_u.l4_hdr.src_port =
702                                                 udp_spec->hdr.src_port;
703                                         list[t].m_u.l4_hdr.src_port =
704                                                 udp_mask->hdr.src_port;
705                                 }
706                                 if (udp_mask->hdr.dst_port) {
707                                         list[t].h_u.l4_hdr.dst_port =
708                                                 udp_spec->hdr.dst_port;
709                                         list[t].m_u.l4_hdr.dst_port =
710                                                 udp_mask->hdr.dst_port;
711                                 }
712                                                 t++;
713                         }
714                         break;
715
716                 case RTE_FLOW_ITEM_TYPE_TCP:
717                         tcp_spec = item->spec;
718                         tcp_mask = item->mask;
719                         if (tcp_spec && tcp_mask) {
720                                 /* Check TCP mask and update input set */
721                                 if (tcp_mask->hdr.sent_seq ||
722                                         tcp_mask->hdr.recv_ack ||
723                                         tcp_mask->hdr.data_off ||
724                                         tcp_mask->hdr.tcp_flags ||
725                                         tcp_mask->hdr.rx_win ||
726                                         tcp_mask->hdr.cksum ||
727                                         tcp_mask->hdr.tcp_urp) {
728                                         rte_flow_error_set(error, EINVAL,
729                                            RTE_FLOW_ERROR_TYPE_ITEM,
730                                            item,
731                                            "Invalid TCP mask");
732                                         return 0;
733                                 }
734
735                                 if (tunnel_valid) {
736                                         if (tcp_mask->hdr.src_port)
737                                                 input_set |=
738                                                 ICE_INSET_TUN_TCP_SRC_PORT;
739                                         if (tcp_mask->hdr.dst_port)
740                                                 input_set |=
741                                                 ICE_INSET_TUN_TCP_DST_PORT;
742                                 } else {
743                                         if (tcp_mask->hdr.src_port)
744                                                 input_set |=
745                                                 ICE_INSET_TCP_SRC_PORT;
746                                         if (tcp_mask->hdr.dst_port)
747                                                 input_set |=
748                                                 ICE_INSET_TCP_DST_PORT;
749                                 }
750                                 list[t].type = ICE_TCP_IL;
751                                 if (tcp_mask->hdr.src_port) {
752                                         list[t].h_u.l4_hdr.src_port =
753                                                 tcp_spec->hdr.src_port;
754                                         list[t].m_u.l4_hdr.src_port =
755                                                 tcp_mask->hdr.src_port;
756                                 }
757                                 if (tcp_mask->hdr.dst_port) {
758                                         list[t].h_u.l4_hdr.dst_port =
759                                                 tcp_spec->hdr.dst_port;
760                                         list[t].m_u.l4_hdr.dst_port =
761                                                 tcp_mask->hdr.dst_port;
762                                 }
763                                 t++;
764                         }
765                         break;
766
767                 case RTE_FLOW_ITEM_TYPE_SCTP:
768                         sctp_spec = item->spec;
769                         sctp_mask = item->mask;
770                         if (sctp_spec && sctp_mask) {
771                                 /* Check SCTP mask and update input set */
772                                 if (sctp_mask->hdr.cksum) {
773                                         rte_flow_error_set(error, EINVAL,
774                                            RTE_FLOW_ERROR_TYPE_ITEM,
775                                            item,
776                                            "Invalid SCTP mask");
777                                         return 0;
778                                 }
779
780                                 if (tunnel_valid) {
781                                         if (sctp_mask->hdr.src_port)
782                                                 input_set |=
783                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
784                                         if (sctp_mask->hdr.dst_port)
785                                                 input_set |=
786                                                 ICE_INSET_TUN_SCTP_DST_PORT;
787                                 } else {
788                                         if (sctp_mask->hdr.src_port)
789                                                 input_set |=
790                                                 ICE_INSET_SCTP_SRC_PORT;
791                                         if (sctp_mask->hdr.dst_port)
792                                                 input_set |=
793                                                 ICE_INSET_SCTP_DST_PORT;
794                                 }
795                                 list[t].type = ICE_SCTP_IL;
796                                 if (sctp_mask->hdr.src_port) {
797                                         list[t].h_u.sctp_hdr.src_port =
798                                                 sctp_spec->hdr.src_port;
799                                         list[t].m_u.sctp_hdr.src_port =
800                                                 sctp_mask->hdr.src_port;
801                                 }
802                                 if (sctp_mask->hdr.dst_port) {
803                                         list[t].h_u.sctp_hdr.dst_port =
804                                                 sctp_spec->hdr.dst_port;
805                                         list[t].m_u.sctp_hdr.dst_port =
806                                                 sctp_mask->hdr.dst_port;
807                                 }
808                                 t++;
809                         }
810                         break;
811
812                 case RTE_FLOW_ITEM_TYPE_VXLAN:
813                         vxlan_spec = item->spec;
814                         vxlan_mask = item->mask;
815                         /* Check if VXLAN item is used to describe protocol.
816                          * If yes, both spec and mask should be NULL.
817                          * If no, both spec and mask shouldn't be NULL.
818                          */
819                         if ((!vxlan_spec && vxlan_mask) ||
820                             (vxlan_spec && !vxlan_mask)) {
821                                 rte_flow_error_set(error, EINVAL,
822                                            RTE_FLOW_ERROR_TYPE_ITEM,
823                                            item,
824                                            "Invalid VXLAN item");
825                                 return 0;
826                         }
827
828                         tunnel_valid = 1;
829                         if (vxlan_spec && vxlan_mask) {
830                                 list[t].type = ICE_VXLAN;
831                                 if (vxlan_mask->vni[0] ||
832                                         vxlan_mask->vni[1] ||
833                                         vxlan_mask->vni[2]) {
834                                         list[t].h_u.tnl_hdr.vni =
835                                                 (vxlan_spec->vni[2] << 16) |
836                                                 (vxlan_spec->vni[1] << 8) |
837                                                 vxlan_spec->vni[0];
838                                         list[t].m_u.tnl_hdr.vni =
839                                                 (vxlan_mask->vni[2] << 16) |
840                                                 (vxlan_mask->vni[1] << 8) |
841                                                 vxlan_mask->vni[0];
842                                         input_set |=
843                                                 ICE_INSET_TUN_VXLAN_VNI;
844                                 }
845                                 t++;
846                         }
847                         break;
848
849                 case RTE_FLOW_ITEM_TYPE_NVGRE:
850                         nvgre_spec = item->spec;
851                         nvgre_mask = item->mask;
852                         /* Check if NVGRE item is used to describe protocol.
853                          * If yes, both spec and mask should be NULL.
854                          * If no, both spec and mask shouldn't be NULL.
855                          */
856                         if ((!nvgre_spec && nvgre_mask) ||
857                             (nvgre_spec && !nvgre_mask)) {
858                                 rte_flow_error_set(error, EINVAL,
859                                            RTE_FLOW_ERROR_TYPE_ITEM,
860                                            item,
861                                            "Invalid NVGRE item");
862                                 return 0;
863                         }
864                         tunnel_valid = 1;
865                         if (nvgre_spec && nvgre_mask) {
866                                 list[t].type = ICE_NVGRE;
867                                 if (nvgre_mask->tni[0] ||
868                                         nvgre_mask->tni[1] ||
869                                         nvgre_mask->tni[2]) {
870                                         list[t].h_u.nvgre_hdr.tni_flow =
871                                                 (nvgre_spec->tni[2] << 16) |
872                                                 (nvgre_spec->tni[1] << 8) |
873                                                 nvgre_spec->tni[0];
874                                         list[t].m_u.nvgre_hdr.tni_flow =
875                                                 (nvgre_mask->tni[2] << 16) |
876                                                 (nvgre_mask->tni[1] << 8) |
877                                                 nvgre_mask->tni[0];
878                                         input_set |=
879                                                 ICE_INSET_TUN_NVGRE_TNI;
880                                 }
881                                 t++;
882                         }
883                         break;
884
885                 case RTE_FLOW_ITEM_TYPE_VLAN:
886                         vlan_spec = item->spec;
887                         vlan_mask = item->mask;
888                         /* Check if VLAN item is used to describe protocol.
889                          * If yes, both spec and mask should be NULL.
890                          * If no, both spec and mask shouldn't be NULL.
891                          */
892                         if ((!vlan_spec && vlan_mask) ||
893                             (vlan_spec && !vlan_mask)) {
894                                 rte_flow_error_set(error, EINVAL,
895                                            RTE_FLOW_ERROR_TYPE_ITEM,
896                                            item,
897                                            "Invalid VLAN item");
898                                 return 0;
899                         }
900                         if (vlan_spec && vlan_mask) {
901                                 list[t].type = ICE_VLAN_OFOS;
902                                 if (vlan_mask->tci) {
903                                         list[t].h_u.vlan_hdr.vlan =
904                                                 vlan_spec->tci;
905                                         list[t].m_u.vlan_hdr.vlan =
906                                                 vlan_mask->tci;
907                                         input_set |= ICE_INSET_VLAN_OUTER;
908                                 }
909                                 if (vlan_mask->inner_type) {
910                                         list[t].h_u.vlan_hdr.type =
911                                                 vlan_spec->inner_type;
912                                         list[t].m_u.vlan_hdr.type =
913                                                 vlan_mask->inner_type;
914                                         input_set |= ICE_INSET_ETHERTYPE;
915                                 }
916                                 t++;
917                         }
918                         break;
919
920                 case RTE_FLOW_ITEM_TYPE_PPPOED:
921                 case RTE_FLOW_ITEM_TYPE_PPPOES:
922                         pppoe_spec = item->spec;
923                         pppoe_mask = item->mask;
924                         /* Check if PPPoE item is used to describe protocol.
925                          * If yes, both spec and mask should be NULL.
926                          * If no, both spec and mask shouldn't be NULL.
927                          */
928                         if ((!pppoe_spec && pppoe_mask) ||
929                                 (pppoe_spec && !pppoe_mask)) {
930                                 rte_flow_error_set(error, EINVAL,
931                                         RTE_FLOW_ERROR_TYPE_ITEM,
932                                         item,
933                                         "Invalid pppoe item");
934                                 return 0;
935                         }
936                         if (pppoe_spec && pppoe_mask) {
937                                 /* Check pppoe mask and update input set */
938                                 if (pppoe_mask->length ||
939                                         pppoe_mask->code ||
940                                         pppoe_mask->version_type) {
941                                         rte_flow_error_set(error, EINVAL,
942                                                 RTE_FLOW_ERROR_TYPE_ITEM,
943                                                 item,
944                                                 "Invalid pppoe mask");
945                                         return 0;
946                                 }
947                                 list[t].type = ICE_PPPOE;
948                                 if (pppoe_mask->session_id) {
949                                         list[t].h_u.pppoe_hdr.session_id =
950                                                 pppoe_spec->session_id;
951                                         list[t].m_u.pppoe_hdr.session_id =
952                                                 pppoe_mask->session_id;
953                                         input_set |= ICE_INSET_PPPOE_SESSION;
954                                 }
955                                 t++;
956                                 pppoe_valid = 1;
957                         }
958                         break;
959
960                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
961                         pppoe_proto_spec = item->spec;
962                         pppoe_proto_mask = item->mask;
963                         /* Check if PPPoE optional proto_id item
964                          * is used to describe protocol.
965                          * If yes, both spec and mask should be NULL.
966                          * If no, both spec and mask shouldn't be NULL.
967                          */
968                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
969                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
970                                 rte_flow_error_set(error, EINVAL,
971                                         RTE_FLOW_ERROR_TYPE_ITEM,
972                                         item,
973                                         "Invalid pppoe proto item");
974                                 return 0;
975                         }
976                         if (pppoe_proto_spec && pppoe_proto_mask) {
977                                 if (pppoe_valid)
978                                         t--;
979                                 list[t].type = ICE_PPPOE;
980                                 if (pppoe_proto_mask->proto_id) {
981                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
982                                                 pppoe_proto_spec->proto_id;
983                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
984                                                 pppoe_proto_mask->proto_id;
985                                         input_set |= ICE_INSET_PPPOE_PROTO;
986                                 }
987                                 t++;
988                         }
989                         break;
990
991                 case RTE_FLOW_ITEM_TYPE_ESP:
992                         esp_spec = item->spec;
993                         esp_mask = item->mask;
994                         if (esp_spec || esp_mask) {
995                                 rte_flow_error_set(error, EINVAL,
996                                            RTE_FLOW_ERROR_TYPE_ITEM,
997                                            item,
998                                            "Invalid esp item");
999                                 return -ENOTSUP;
1000                         }
1001                         if (ipv6_valiad && udp_valiad)
1002                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
1003                         else if (ipv6_valiad)
1004                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1005                         break;
1006
1007                 case RTE_FLOW_ITEM_TYPE_AH:
1008                         ah_spec = item->spec;
1009                         ah_mask = item->mask;
1010                         if (ah_spec || ah_mask) {
1011                                 rte_flow_error_set(error, EINVAL,
1012                                            RTE_FLOW_ERROR_TYPE_ITEM,
1013                                            item,
1014                                            "Invalid ah item");
1015                                 return -ENOTSUP;
1016                         }
1017                         if (ipv6_valiad && udp_valiad)
1018                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
1019                         else if (ipv6_valiad)
1020                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1021                         break;
1022
1023                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1024                         l2tp_spec = item->spec;
1025                         l2tp_mask = item->mask;
1026                         if (l2tp_spec || l2tp_mask) {
1027                                 rte_flow_error_set(error, EINVAL,
1028                                            RTE_FLOW_ERROR_TYPE_ITEM,
1029                                            item,
1030                                            "Invalid l2tp item");
1031                                 return -ENOTSUP;
1032                         }
1033                         if (ipv6_valiad)
1034                                 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1035                         break;
1036                 case RTE_FLOW_ITEM_TYPE_PFCP:
1037                         pfcp_spec = item->spec;
1038                         pfcp_mask = item->mask;
1039                         /* Check if PFCP item is used to describe protocol.
1040                          * If yes, both spec and mask should be NULL.
1041                          * If no, both spec and mask shouldn't be NULL.
1042                          */
1043                         if ((!pfcp_spec && pfcp_mask) ||
1044                             (pfcp_spec && !pfcp_mask)) {
1045                                 rte_flow_error_set(error, EINVAL,
1046                                            RTE_FLOW_ERROR_TYPE_ITEM,
1047                                            item,
1048                                            "Invalid PFCP item");
1049                                 return -ENOTSUP;
1050                         }
1051                         if (pfcp_spec && pfcp_mask) {
1052                                 /* Check pfcp mask and update input set */
1053                                 if (pfcp_mask->msg_type ||
1054                                         pfcp_mask->msg_len ||
1055                                         pfcp_mask->seid) {
1056                                         rte_flow_error_set(error, EINVAL,
1057                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1058                                                 item,
1059                                                 "Invalid pfcp mask");
1060                                         return -ENOTSUP;
1061                                 }
1062                                 if (pfcp_mask->s_field &&
1063                                         pfcp_spec->s_field == 0x01 &&
1064                                         ipv6_valiad)
1065                                         *tun_type =
1066                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1067                                 else if (pfcp_mask->s_field &&
1068                                         pfcp_spec->s_field == 0x01)
1069                                         *tun_type =
1070                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1071                                 else if (pfcp_mask->s_field &&
1072                                         !pfcp_spec->s_field &&
1073                                         ipv6_valiad)
1074                                         *tun_type =
1075                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1076                                 else if (pfcp_mask->s_field &&
1077                                         !pfcp_spec->s_field)
1078                                         *tun_type =
1079                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1080                                 else
1081                                         return -ENOTSUP;
1082                         }
1083                         break;
1084
1085
1086                 case RTE_FLOW_ITEM_TYPE_VOID:
1087                         break;
1088
1089                 default:
1090                         rte_flow_error_set(error, EINVAL,
1091                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1092                                    "Invalid pattern item.");
1093                         goto out;
1094                 }
1095         }
1096
1097         *lkups_num = t;
1098
1099         return input_set;
1100 out:
1101         return 0;
1102 }
1103
1104 static int
1105 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1106                             struct rte_flow_error *error,
1107                             struct ice_adv_rule_info *rule_info)
1108 {
1109         const struct rte_flow_action_vf *act_vf;
1110         const struct rte_flow_action *action;
1111         enum rte_flow_action_type action_type;
1112
1113         for (action = actions; action->type !=
1114                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1115                 action_type = action->type;
1116                 switch (action_type) {
1117                 case RTE_FLOW_ACTION_TYPE_VF:
1118                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1119                         act_vf = action->conf;
1120                         rule_info->sw_act.vsi_handle = act_vf->id;
1121                         break;
1122                 default:
1123                         rte_flow_error_set(error,
1124                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1125                                            actions,
1126                                            "Invalid action type or queue number");
1127                         return -rte_errno;
1128                 }
1129         }
1130
1131         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1132         rule_info->rx = 1;
1133         rule_info->priority = 5;
1134
1135         return 0;
1136 }
1137
1138 static int
1139 ice_switch_parse_action(struct ice_pf *pf,
1140                 const struct rte_flow_action *actions,
1141                 struct rte_flow_error *error,
1142                 struct ice_adv_rule_info *rule_info)
1143 {
1144         struct ice_vsi *vsi = pf->main_vsi;
1145         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1146         const struct rte_flow_action_queue *act_q;
1147         const struct rte_flow_action_rss *act_qgrop;
1148         uint16_t base_queue, i;
1149         const struct rte_flow_action *action;
1150         enum rte_flow_action_type action_type;
1151         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1152                  2, 4, 8, 16, 32, 64, 128};
1153
1154         base_queue = pf->base_queue + vsi->base_queue;
1155         for (action = actions; action->type !=
1156                         RTE_FLOW_ACTION_TYPE_END; action++) {
1157                 action_type = action->type;
1158                 switch (action_type) {
1159                 case RTE_FLOW_ACTION_TYPE_RSS:
1160                         act_qgrop = action->conf;
1161                         rule_info->sw_act.fltr_act =
1162                                 ICE_FWD_TO_QGRP;
1163                         rule_info->sw_act.fwd_id.q_id =
1164                                 base_queue + act_qgrop->queue[0];
1165                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1166                                 if (act_qgrop->queue_num ==
1167                                         valid_qgrop_number[i])
1168                                         break;
1169                         }
1170                         if (i == MAX_QGRP_NUM_TYPE)
1171                                 goto error;
1172                         if ((act_qgrop->queue[0] +
1173                                 act_qgrop->queue_num) >
1174                                 dev->data->nb_rx_queues)
1175                                 goto error;
1176                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1177                                 if (act_qgrop->queue[i + 1] !=
1178                                         act_qgrop->queue[i] + 1)
1179                                         goto error;
1180                         rule_info->sw_act.qgrp_size =
1181                                 act_qgrop->queue_num;
1182                         break;
1183                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1184                         act_q = action->conf;
1185                         if (act_q->index >= dev->data->nb_rx_queues)
1186                                 goto error;
1187                         rule_info->sw_act.fltr_act =
1188                                 ICE_FWD_TO_Q;
1189                         rule_info->sw_act.fwd_id.q_id =
1190                                 base_queue + act_q->index;
1191                         break;
1192
1193                 case RTE_FLOW_ACTION_TYPE_DROP:
1194                         rule_info->sw_act.fltr_act =
1195                                 ICE_DROP_PACKET;
1196                         break;
1197
1198                 case RTE_FLOW_ACTION_TYPE_VOID:
1199                         break;
1200
1201                 default:
1202                         goto error;
1203                 }
1204         }
1205
1206         rule_info->sw_act.vsi_handle = vsi->idx;
1207         rule_info->rx = 1;
1208         rule_info->sw_act.src = vsi->idx;
1209         rule_info->priority = 5;
1210
1211         return 0;
1212
1213 error:
1214         rte_flow_error_set(error,
1215                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1216                 actions,
1217                 "Invalid action type or queue number");
1218         return -rte_errno;
1219 }
1220
1221 static int
1222 ice_switch_check_action(const struct rte_flow_action *actions,
1223                             struct rte_flow_error *error)
1224 {
1225         const struct rte_flow_action *action;
1226         enum rte_flow_action_type action_type;
1227         uint16_t actions_num = 0;
1228
1229         for (action = actions; action->type !=
1230                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1231                 action_type = action->type;
1232                 switch (action_type) {
1233                 case RTE_FLOW_ACTION_TYPE_VF:
1234                 case RTE_FLOW_ACTION_TYPE_RSS:
1235                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1236                 case RTE_FLOW_ACTION_TYPE_DROP:
1237                         actions_num++;
1238                         break;
1239                 case RTE_FLOW_ACTION_TYPE_VOID:
1240                         continue;
1241                 default:
1242                         rte_flow_error_set(error,
1243                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1244                                            actions,
1245                                            "Invalid action type");
1246                         return -rte_errno;
1247                 }
1248         }
1249
1250         if (actions_num > 1) {
1251                 rte_flow_error_set(error,
1252                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1253                                    actions,
1254                                    "Invalid action number");
1255                 return -rte_errno;
1256         }
1257
1258         return 0;
1259 }
1260
1261 static bool
1262 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1263 {
1264         switch (tun_type) {
1265         case ICE_SW_TUN_PROFID_IPV6_ESP:
1266         case ICE_SW_TUN_PROFID_IPV6_AH:
1267         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1268         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1269         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1270         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1271         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1272         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1273                 return true;
1274         default:
1275                 break;
1276         }
1277
1278         return false;
1279 }
1280
/**
 * Entry point of the switch engine's parser: validate @pattern and
 * @actions, build the advanced-rule lookup list, and (when @meta is
 * non-NULL) hand the allocated sw_meta back to the caller for rule
 * creation.  On any failure every allocation made here is released and
 * -rte_errno is returned with @error set.
 *
 * Ownership: on success with @meta != NULL, *meta (and the lookup list it
 * references) is owned by the caller; with @meta == NULL the results are
 * discarded (validation-only mode).
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type =
		ICE_SW_TUN_AND_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Pass 1: count items (to size the lookup array) and derive the
	 * tunnel type from the pattern.  The last tunnel-ish item wins.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tun_type = ICE_SW_TUN_VXLAN;
		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
			tun_type = ICE_SW_TUN_NVGRE;
		if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
				item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
			tun_type = ICE_SW_TUN_PPPOE;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* A fully-masked ether type forces the rule to hit
			 * both tunnel and non-tunnel packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 * NOTE(review): an ETH item with a NULL mask `continue`s
		 * above and skips this reservation — verify that
		 * ice_switch_inset_get emits no lookup for a maskless ETH.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	/* Match the pattern against this parser's supported table. */
	pattern_match_item =
		ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Pass 2: extract the input set and fill the lookup list; may also
	 * refine tun_type.  Profile rules legitimately have no input set.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	if ((!inputset && !ice_is_profile_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret) {
		/* NOTE(review): this overwrites the more specific error
		 * already set by ice_switch_check_action().
		 */
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action number");
		goto error;
	}

	/* DCF mode forwards to a VF VSI; PF mode to local queues/VSI. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input action");
		goto error;
	}

	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		/* Validation-only call: nothing is handed back. */
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	/* rte_free(NULL) is a no-op, so partial allocation is safe here. */
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1408
1409 static int
1410 ice_switch_query(struct ice_adapter *ad __rte_unused,
1411                 struct rte_flow *flow __rte_unused,
1412                 struct rte_flow_query_count *count __rte_unused,
1413                 struct rte_flow_error *error)
1414 {
1415         rte_flow_error_set(error, EINVAL,
1416                 RTE_FLOW_ERROR_TYPE_HANDLE,
1417                 NULL,
1418                 "count action not supported by switch filter");
1419
1420         return -rte_errno;
1421 }
1422
/**
 * Redirect an existing FWD_TO_VSI switch rule to a new VSI number by
 * removing the HW rule and replaying it after the VSI context has been
 * updated.  Used when a VF VSI is reset and gets a new VSI number.
 *
 * @return 0 on success or when no matching rule is installed,
 *         -EINVAL/-ENOTSUP on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	sw = hw->switch_info;
	/* Without a created recipe there can be no rule to redirect. */
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is implemented here. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Locate our rule in the recipe's filter list and duplicate its
	 * lookups, because removing the rule frees the originals.
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) {
			lkups_cnt = list_itr->lkups_cnt;
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);
			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			break;
		}
	}

	/* Rule not found (or not a matching FWD_TO_VSI rule): nothing to
	 * redirect, treat as success.
	 */
	if (!lkups_dp)
		return 0;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context
	 * NOTE(review): assumes hw->vsi_ctx[rd->vsi_handle] is non-NULL for
	 * a handle that had an installed rule — confirm against base code.
	 */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1494
1495 static int
1496 ice_switch_init(struct ice_adapter *ad)
1497 {
1498         int ret = 0;
1499         struct ice_flow_parser *dist_parser;
1500         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1501
1502         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1503                 dist_parser = &ice_switch_dist_parser_comms;
1504         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1505                 dist_parser = &ice_switch_dist_parser_os;
1506         else
1507                 return -EINVAL;
1508
1509         if (ad->devargs.pipe_mode_support)
1510                 ret = ice_register_parser(perm_parser, ad);
1511         else
1512                 ret = ice_register_parser(dist_parser, ad);
1513         return ret;
1514 }
1515
1516 static void
1517 ice_switch_uninit(struct ice_adapter *ad)
1518 {
1519         struct ice_flow_parser *dist_parser;
1520         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1521
1522         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1523                 dist_parser = &ice_switch_dist_parser_comms;
1524         else
1525                 dist_parser = &ice_switch_dist_parser_os;
1526
1527         if (ad->devargs.pipe_mode_support)
1528                 ice_unregister_parser(perm_parser, ad);
1529         else
1530                 ice_unregister_parser(dist_parser, ad);
1531 }
1532
/* Switch-filter flow engine: ops table plugged into the generic flow
 * framework (ice_generic_flow.c).  No .query_count support — the stub
 * always rejects count queries.
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1544
/* Distributor-stage parser used when the OS-default DDP package is active. */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1553
/* Distributor-stage parser used when the COMMS DDP package is active
 * (wider pattern support than the OS-default table).
 */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1562
/* Permission-stage parser, registered instead of a distributor parser
 * when pipeline mode is enabled via devargs.
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1571
1572 RTE_INIT(ice_sw_engine_init)
1573 {
1574         struct ice_flow_engine *engine = &ice_switch_engine;
1575         ice_register_flow_engine(engine);
1576 }