net/ice: support IPv6 NAT-T
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26
27
/* Max number of queue-group types for a queue-group RSS action. */
#define MAX_QGRP_NUM_TYPE 7

/*
 * Input-set bitmaps: for each supported flow pattern, the union of all
 * header fields a switch rule is allowed to match on.  A user-supplied
 * rte_flow pattern is validated against these masks before programming.
 *
 * Non-tunnel (outer header) field sets.
 */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
		ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
		ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Tunnel field sets used by the "dist" (distributor) parsers: inner
 * (TUN_*) fields plus the outer IPv4 destination.
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* Tunnel field sets used by the "perm" (permission) parser: inner only. */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
	ICE_INSET_TUN_IPV4_TOS)
/* PPPoE field sets. */
#define ICE_SW_INSET_MAC_PPPOE  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
98
/*
 * Parsed-rule metadata handed from the pattern parser to
 * ice_switch_create(): the lookup list consumed by ice_add_adv_rule(),
 * its element count, and the rule's action/direction info.
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;	/* lookup elements (heap-allocated) */
	uint16_t lkups_num;		/* number of valid entries in @list */
	struct ice_adv_rule_info rule_info;	/* action/priority/tunnel info */
};
104
/* Forward declarations: parser instances registered at init time
 * (selected by DDP package type and pipeline mode).
 */
static struct ice_flow_parser ice_switch_dist_parser_os;
static struct ice_flow_parser ice_switch_dist_parser_comms;
static struct ice_flow_parser ice_switch_perm_parser;
108
/*
 * Supported patterns for the distributor pipeline with the comms DDP
 * package.  Each entry pairs a pattern with the input-set mask the
 * user's match fields are validated against (second column is the
 * unused meta input set).
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoed,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,
			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,
			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
	/* ESP/AH/L2TP/PFCP: pattern-only match, no user input set. */
	{pattern_eth_ipv6_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
166
/*
 * Supported patterns for the distributor pipeline with the OS-default
 * DDP package: like the comms table but without PPPoE/ESP/AH/L2TP/PFCP,
 * plus ARP.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
	{pattern_ethertype,
			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_arp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
};
200
/*
 * Supported patterns for the permission (pipeline) mode: tunnel
 * patterns use the inner-only PERM_TUNNEL input sets.
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
	{pattern_ethertype_vlan,
			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
	{pattern_eth_ipv4,
			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,
			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,
			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6,
			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,
			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,
			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,
			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
	/* ESP/AH/L2TP/PFCP: pattern-only match, no user input set. */
	{pattern_eth_ipv6_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,
			ICE_INSET_NONE, ICE_INSET_NONE},
};
244
245 static int
246 ice_switch_create(struct ice_adapter *ad,
247                 struct rte_flow *flow,
248                 void *meta,
249                 struct rte_flow_error *error)
250 {
251         int ret = 0;
252         struct ice_pf *pf = &ad->pf;
253         struct ice_hw *hw = ICE_PF_TO_HW(pf);
254         struct ice_rule_query_data rule_added = {0};
255         struct ice_rule_query_data *filter_ptr;
256         struct ice_adv_lkup_elem *list =
257                 ((struct sw_meta *)meta)->list;
258         uint16_t lkups_cnt =
259                 ((struct sw_meta *)meta)->lkups_num;
260         struct ice_adv_rule_info *rule_info =
261                 &((struct sw_meta *)meta)->rule_info;
262
263         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
264                 rte_flow_error_set(error, EINVAL,
265                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
266                         "item number too large for rule");
267                 goto error;
268         }
269         if (!list) {
270                 rte_flow_error_set(error, EINVAL,
271                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
272                         "lookup list should not be NULL");
273                 goto error;
274         }
275         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
276         if (!ret) {
277                 filter_ptr = rte_zmalloc("ice_switch_filter",
278                         sizeof(struct ice_rule_query_data), 0);
279                 if (!filter_ptr) {
280                         rte_flow_error_set(error, EINVAL,
281                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
282                                    "No memory for ice_switch_filter");
283                         goto error;
284                 }
285                 flow->rule = filter_ptr;
286                 rte_memcpy(filter_ptr,
287                         &rule_added,
288                         sizeof(struct ice_rule_query_data));
289         } else {
290                 rte_flow_error_set(error, EINVAL,
291                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
292                         "switch filter create flow fail");
293                 goto error;
294         }
295
296         rte_free(list);
297         rte_free(meta);
298         return 0;
299
300 error:
301         rte_free(list);
302         rte_free(meta);
303
304         return -rte_errno;
305 }
306
307 static int
308 ice_switch_destroy(struct ice_adapter *ad,
309                 struct rte_flow *flow,
310                 struct rte_flow_error *error)
311 {
312         struct ice_hw *hw = &ad->hw;
313         int ret;
314         struct ice_rule_query_data *filter_ptr;
315
316         filter_ptr = (struct ice_rule_query_data *)
317                 flow->rule;
318
319         if (!filter_ptr) {
320                 rte_flow_error_set(error, EINVAL,
321                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
322                         "no such flow"
323                         " create by switch filter");
324                 return -rte_errno;
325         }
326
327         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
328         if (ret) {
329                 rte_flow_error_set(error, EINVAL,
330                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
331                         "fail to destroy switch filter rule");
332                 return -rte_errno;
333         }
334
335         rte_free(filter_ptr);
336         return ret;
337 }
338
/* Release the per-flow rule data saved by ice_switch_create()
 * (rte_free(NULL) is a no-op, so an unset rule is fine).
 */
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
}
344
345 static uint64_t
346 ice_switch_inset_get(const struct rte_flow_item pattern[],
347                 struct rte_flow_error *error,
348                 struct ice_adv_lkup_elem *list,
349                 uint16_t *lkups_num,
350                 enum ice_sw_tunnel_type *tun_type)
351 {
352         const struct rte_flow_item *item = pattern;
353         enum rte_flow_item_type item_type;
354         const struct rte_flow_item_eth *eth_spec, *eth_mask;
355         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
356         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
357         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
358         const struct rte_flow_item_udp *udp_spec, *udp_mask;
359         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
360         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
361         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
362         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
363         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
364         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
365                                 *pppoe_proto_mask;
366         const struct rte_flow_item_esp *esp_spec, *esp_mask;
367         const struct rte_flow_item_ah *ah_spec, *ah_mask;
368         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
369         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
370         uint64_t input_set = ICE_INSET_NONE;
371         uint16_t j, t = 0;
372         uint16_t tunnel_valid = 0;
373         uint16_t pppoe_valid = 0;
374         uint16_t ipv6_valiad = 0;
375         uint16_t udp_valiad = 0;
376
377
378         for (item = pattern; item->type !=
379                         RTE_FLOW_ITEM_TYPE_END; item++) {
380                 if (item->last) {
381                         rte_flow_error_set(error, EINVAL,
382                                         RTE_FLOW_ERROR_TYPE_ITEM,
383                                         item,
384                                         "Not support range");
385                         return 0;
386                 }
387                 item_type = item->type;
388
389                 switch (item_type) {
390                 case RTE_FLOW_ITEM_TYPE_ETH:
391                         eth_spec = item->spec;
392                         eth_mask = item->mask;
393                         if (eth_spec && eth_mask) {
394                                 const uint8_t *a = eth_mask->src.addr_bytes;
395                                 const uint8_t *b = eth_mask->dst.addr_bytes;
396                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
397                                         if (a[j] && tunnel_valid) {
398                                                 input_set |=
399                                                         ICE_INSET_TUN_SMAC;
400                                                 break;
401                                         } else if (a[j]) {
402                                                 input_set |=
403                                                         ICE_INSET_SMAC;
404                                                 break;
405                                         }
406                                 }
407                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
408                                         if (b[j] && tunnel_valid) {
409                                                 input_set |=
410                                                         ICE_INSET_TUN_DMAC;
411                                                 break;
412                                         } else if (b[j]) {
413                                                 input_set |=
414                                                         ICE_INSET_DMAC;
415                                                 break;
416                                         }
417                                 }
418                                 if (eth_mask->type)
419                                         input_set |= ICE_INSET_ETHERTYPE;
420                                 list[t].type = (tunnel_valid  == 0) ?
421                                         ICE_MAC_OFOS : ICE_MAC_IL;
422                                 struct ice_ether_hdr *h;
423                                 struct ice_ether_hdr *m;
424                                 uint16_t i = 0;
425                                 h = &list[t].h_u.eth_hdr;
426                                 m = &list[t].m_u.eth_hdr;
427                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
428                                         if (eth_mask->src.addr_bytes[j]) {
429                                                 h->src_addr[j] =
430                                                 eth_spec->src.addr_bytes[j];
431                                                 m->src_addr[j] =
432                                                 eth_mask->src.addr_bytes[j];
433                                                 i = 1;
434                                         }
435                                         if (eth_mask->dst.addr_bytes[j]) {
436                                                 h->dst_addr[j] =
437                                                 eth_spec->dst.addr_bytes[j];
438                                                 m->dst_addr[j] =
439                                                 eth_mask->dst.addr_bytes[j];
440                                                 i = 1;
441                                         }
442                                 }
443                                 if (i)
444                                         t++;
445                                 if (eth_mask->type) {
446                                         list[t].type = ICE_ETYPE_OL;
447                                         list[t].h_u.ethertype.ethtype_id =
448                                                 eth_spec->type;
449                                         list[t].m_u.ethertype.ethtype_id =
450                                                 eth_mask->type;
451                                         t++;
452                                 }
453                         }
454                         break;
455
456                 case RTE_FLOW_ITEM_TYPE_IPV4:
457                         ipv4_spec = item->spec;
458                         ipv4_mask = item->mask;
459                         if (ipv4_spec && ipv4_mask) {
460                                 /* Check IPv4 mask and update input set */
461                                 if (ipv4_mask->hdr.version_ihl ||
462                                         ipv4_mask->hdr.total_length ||
463                                         ipv4_mask->hdr.packet_id ||
464                                         ipv4_mask->hdr.hdr_checksum) {
465                                         rte_flow_error_set(error, EINVAL,
466                                                    RTE_FLOW_ERROR_TYPE_ITEM,
467                                                    item,
468                                                    "Invalid IPv4 mask.");
469                                         return 0;
470                                 }
471
472                                 if (tunnel_valid) {
473                                         if (ipv4_mask->hdr.type_of_service)
474                                                 input_set |=
475                                                         ICE_INSET_TUN_IPV4_TOS;
476                                         if (ipv4_mask->hdr.src_addr)
477                                                 input_set |=
478                                                         ICE_INSET_TUN_IPV4_SRC;
479                                         if (ipv4_mask->hdr.dst_addr)
480                                                 input_set |=
481                                                         ICE_INSET_TUN_IPV4_DST;
482                                         if (ipv4_mask->hdr.time_to_live)
483                                                 input_set |=
484                                                         ICE_INSET_TUN_IPV4_TTL;
485                                         if (ipv4_mask->hdr.next_proto_id)
486                                                 input_set |=
487                                                 ICE_INSET_TUN_IPV4_PROTO;
488                                 } else {
489                                         if (ipv4_mask->hdr.src_addr)
490                                                 input_set |= ICE_INSET_IPV4_SRC;
491                                         if (ipv4_mask->hdr.dst_addr)
492                                                 input_set |= ICE_INSET_IPV4_DST;
493                                         if (ipv4_mask->hdr.time_to_live)
494                                                 input_set |= ICE_INSET_IPV4_TTL;
495                                         if (ipv4_mask->hdr.next_proto_id)
496                                                 input_set |=
497                                                 ICE_INSET_IPV4_PROTO;
498                                         if (ipv4_mask->hdr.type_of_service)
499                                                 input_set |=
500                                                         ICE_INSET_IPV4_TOS;
501                                 }
502                                 list[t].type = (tunnel_valid  == 0) ?
503                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
504                                 if (ipv4_mask->hdr.src_addr) {
505                                         list[t].h_u.ipv4_hdr.src_addr =
506                                                 ipv4_spec->hdr.src_addr;
507                                         list[t].m_u.ipv4_hdr.src_addr =
508                                                 ipv4_mask->hdr.src_addr;
509                                 }
510                                 if (ipv4_mask->hdr.dst_addr) {
511                                         list[t].h_u.ipv4_hdr.dst_addr =
512                                                 ipv4_spec->hdr.dst_addr;
513                                         list[t].m_u.ipv4_hdr.dst_addr =
514                                                 ipv4_mask->hdr.dst_addr;
515                                 }
516                                 if (ipv4_mask->hdr.time_to_live) {
517                                         list[t].h_u.ipv4_hdr.time_to_live =
518                                                 ipv4_spec->hdr.time_to_live;
519                                         list[t].m_u.ipv4_hdr.time_to_live =
520                                                 ipv4_mask->hdr.time_to_live;
521                                 }
522                                 if (ipv4_mask->hdr.next_proto_id) {
523                                         list[t].h_u.ipv4_hdr.protocol =
524                                                 ipv4_spec->hdr.next_proto_id;
525                                         list[t].m_u.ipv4_hdr.protocol =
526                                                 ipv4_mask->hdr.next_proto_id;
527                                 }
528                                 if (ipv4_mask->hdr.type_of_service) {
529                                         list[t].h_u.ipv4_hdr.tos =
530                                                 ipv4_spec->hdr.type_of_service;
531                                         list[t].m_u.ipv4_hdr.tos =
532                                                 ipv4_mask->hdr.type_of_service;
533                                 }
534                                 t++;
535                         }
536                         break;
537
538                 case RTE_FLOW_ITEM_TYPE_IPV6:
539                         ipv6_spec = item->spec;
540                         ipv6_mask = item->mask;
541                         ipv6_valiad = 1;
542                         if (ipv6_spec && ipv6_mask) {
543                                 if (ipv6_mask->hdr.payload_len) {
544                                         rte_flow_error_set(error, EINVAL,
545                                            RTE_FLOW_ERROR_TYPE_ITEM,
546                                            item,
547                                            "Invalid IPv6 mask");
548                                         return 0;
549                                 }
550
551                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
552                                         if (ipv6_mask->hdr.src_addr[j] &&
553                                                 tunnel_valid) {
554                                                 input_set |=
555                                                 ICE_INSET_TUN_IPV6_SRC;
556                                                 break;
557                                         } else if (ipv6_mask->hdr.src_addr[j]) {
558                                                 input_set |= ICE_INSET_IPV6_SRC;
559                                                 break;
560                                         }
561                                 }
562                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
563                                         if (ipv6_mask->hdr.dst_addr[j] &&
564                                                 tunnel_valid) {
565                                                 input_set |=
566                                                 ICE_INSET_TUN_IPV6_DST;
567                                                 break;
568                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
569                                                 input_set |= ICE_INSET_IPV6_DST;
570                                                 break;
571                                         }
572                                 }
573                                 if (ipv6_mask->hdr.proto &&
574                                         tunnel_valid)
575                                         input_set |=
576                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
577                                 else if (ipv6_mask->hdr.proto)
578                                         input_set |=
579                                                 ICE_INSET_IPV6_NEXT_HDR;
580                                 if (ipv6_mask->hdr.hop_limits &&
581                                         tunnel_valid)
582                                         input_set |=
583                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
584                                 else if (ipv6_mask->hdr.hop_limits)
585                                         input_set |=
586                                                 ICE_INSET_IPV6_HOP_LIMIT;
587                                 if ((ipv6_mask->hdr.vtc_flow &
588                                                 rte_cpu_to_be_32
589                                                 (RTE_IPV6_HDR_TC_MASK)) &&
590                                         tunnel_valid)
591                                         input_set |=
592                                                         ICE_INSET_TUN_IPV6_TC;
593                                 else if (ipv6_mask->hdr.vtc_flow &
594                                                 rte_cpu_to_be_32
595                                                 (RTE_IPV6_HDR_TC_MASK))
596                                         input_set |= ICE_INSET_IPV6_TC;
597
598                                 list[t].type = (tunnel_valid  == 0) ?
599                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
600                                 struct ice_ipv6_hdr *f;
601                                 struct ice_ipv6_hdr *s;
602                                 f = &list[t].h_u.ipv6_hdr;
603                                 s = &list[t].m_u.ipv6_hdr;
604                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
605                                         if (ipv6_mask->hdr.src_addr[j]) {
606                                                 f->src_addr[j] =
607                                                 ipv6_spec->hdr.src_addr[j];
608                                                 s->src_addr[j] =
609                                                 ipv6_mask->hdr.src_addr[j];
610                                         }
611                                         if (ipv6_mask->hdr.dst_addr[j]) {
612                                                 f->dst_addr[j] =
613                                                 ipv6_spec->hdr.dst_addr[j];
614                                                 s->dst_addr[j] =
615                                                 ipv6_mask->hdr.dst_addr[j];
616                                         }
617                                 }
618                                 if (ipv6_mask->hdr.proto) {
619                                         f->next_hdr =
620                                                 ipv6_spec->hdr.proto;
621                                         s->next_hdr =
622                                                 ipv6_mask->hdr.proto;
623                                 }
624                                 if (ipv6_mask->hdr.hop_limits) {
625                                         f->hop_limit =
626                                                 ipv6_spec->hdr.hop_limits;
627                                         s->hop_limit =
628                                                 ipv6_mask->hdr.hop_limits;
629                                 }
630                                 if (ipv6_mask->hdr.vtc_flow &
631                                                 rte_cpu_to_be_32
632                                                 (RTE_IPV6_HDR_TC_MASK)) {
633                                         struct ice_le_ver_tc_flow vtf;
634                                         vtf.u.fld.version = 0;
635                                         vtf.u.fld.flow_label = 0;
636                                         vtf.u.fld.tc = (rte_be_to_cpu_32
637                                                 (ipv6_spec->hdr.vtc_flow) &
638                                                         RTE_IPV6_HDR_TC_MASK) >>
639                                                         RTE_IPV6_HDR_TC_SHIFT;
640                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
641                                         vtf.u.fld.tc = (rte_be_to_cpu_32
642                                                 (ipv6_mask->hdr.vtc_flow) &
643                                                         RTE_IPV6_HDR_TC_MASK) >>
644                                                         RTE_IPV6_HDR_TC_SHIFT;
645                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
646                                 }
647                                 t++;
648                         }
649                         break;
650
651                 case RTE_FLOW_ITEM_TYPE_UDP:
652                         udp_spec = item->spec;
653                         udp_mask = item->mask;
654                         udp_valiad = 1;
655                         if (udp_spec && udp_mask) {
656                                 /* Check UDP mask and update input set*/
657                                 if (udp_mask->hdr.dgram_len ||
658                                     udp_mask->hdr.dgram_cksum) {
659                                         rte_flow_error_set(error, EINVAL,
660                                                    RTE_FLOW_ERROR_TYPE_ITEM,
661                                                    item,
662                                                    "Invalid UDP mask");
663                                         return 0;
664                                 }
665
666                                 if (tunnel_valid) {
667                                         if (udp_mask->hdr.src_port)
668                                                 input_set |=
669                                                 ICE_INSET_TUN_UDP_SRC_PORT;
670                                         if (udp_mask->hdr.dst_port)
671                                                 input_set |=
672                                                 ICE_INSET_TUN_UDP_DST_PORT;
673                                 } else {
674                                         if (udp_mask->hdr.src_port)
675                                                 input_set |=
676                                                 ICE_INSET_UDP_SRC_PORT;
677                                         if (udp_mask->hdr.dst_port)
678                                                 input_set |=
679                                                 ICE_INSET_UDP_DST_PORT;
680                                 }
681                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
682                                                 tunnel_valid == 0)
683                                         list[t].type = ICE_UDP_OF;
684                                 else
685                                         list[t].type = ICE_UDP_ILOS;
686                                 if (udp_mask->hdr.src_port) {
687                                         list[t].h_u.l4_hdr.src_port =
688                                                 udp_spec->hdr.src_port;
689                                         list[t].m_u.l4_hdr.src_port =
690                                                 udp_mask->hdr.src_port;
691                                 }
692                                 if (udp_mask->hdr.dst_port) {
693                                         list[t].h_u.l4_hdr.dst_port =
694                                                 udp_spec->hdr.dst_port;
695                                         list[t].m_u.l4_hdr.dst_port =
696                                                 udp_mask->hdr.dst_port;
697                                 }
698                                                 t++;
699                         }
700                         break;
701
702                 case RTE_FLOW_ITEM_TYPE_TCP:
703                         tcp_spec = item->spec;
704                         tcp_mask = item->mask;
705                         if (tcp_spec && tcp_mask) {
706                                 /* Check TCP mask and update input set */
707                                 if (tcp_mask->hdr.sent_seq ||
708                                         tcp_mask->hdr.recv_ack ||
709                                         tcp_mask->hdr.data_off ||
710                                         tcp_mask->hdr.tcp_flags ||
711                                         tcp_mask->hdr.rx_win ||
712                                         tcp_mask->hdr.cksum ||
713                                         tcp_mask->hdr.tcp_urp) {
714                                         rte_flow_error_set(error, EINVAL,
715                                            RTE_FLOW_ERROR_TYPE_ITEM,
716                                            item,
717                                            "Invalid TCP mask");
718                                         return 0;
719                                 }
720
721                                 if (tunnel_valid) {
722                                         if (tcp_mask->hdr.src_port)
723                                                 input_set |=
724                                                 ICE_INSET_TUN_TCP_SRC_PORT;
725                                         if (tcp_mask->hdr.dst_port)
726                                                 input_set |=
727                                                 ICE_INSET_TUN_TCP_DST_PORT;
728                                 } else {
729                                         if (tcp_mask->hdr.src_port)
730                                                 input_set |=
731                                                 ICE_INSET_TCP_SRC_PORT;
732                                         if (tcp_mask->hdr.dst_port)
733                                                 input_set |=
734                                                 ICE_INSET_TCP_DST_PORT;
735                                 }
736                                 list[t].type = ICE_TCP_IL;
737                                 if (tcp_mask->hdr.src_port) {
738                                         list[t].h_u.l4_hdr.src_port =
739                                                 tcp_spec->hdr.src_port;
740                                         list[t].m_u.l4_hdr.src_port =
741                                                 tcp_mask->hdr.src_port;
742                                 }
743                                 if (tcp_mask->hdr.dst_port) {
744                                         list[t].h_u.l4_hdr.dst_port =
745                                                 tcp_spec->hdr.dst_port;
746                                         list[t].m_u.l4_hdr.dst_port =
747                                                 tcp_mask->hdr.dst_port;
748                                 }
749                                 t++;
750                         }
751                         break;
752
753                 case RTE_FLOW_ITEM_TYPE_SCTP:
754                         sctp_spec = item->spec;
755                         sctp_mask = item->mask;
756                         if (sctp_spec && sctp_mask) {
757                                 /* Check SCTP mask and update input set */
758                                 if (sctp_mask->hdr.cksum) {
759                                         rte_flow_error_set(error, EINVAL,
760                                            RTE_FLOW_ERROR_TYPE_ITEM,
761                                            item,
762                                            "Invalid SCTP mask");
763                                         return 0;
764                                 }
765
766                                 if (tunnel_valid) {
767                                         if (sctp_mask->hdr.src_port)
768                                                 input_set |=
769                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
770                                         if (sctp_mask->hdr.dst_port)
771                                                 input_set |=
772                                                 ICE_INSET_TUN_SCTP_DST_PORT;
773                                 } else {
774                                         if (sctp_mask->hdr.src_port)
775                                                 input_set |=
776                                                 ICE_INSET_SCTP_SRC_PORT;
777                                         if (sctp_mask->hdr.dst_port)
778                                                 input_set |=
779                                                 ICE_INSET_SCTP_DST_PORT;
780                                 }
781                                 list[t].type = ICE_SCTP_IL;
782                                 if (sctp_mask->hdr.src_port) {
783                                         list[t].h_u.sctp_hdr.src_port =
784                                                 sctp_spec->hdr.src_port;
785                                         list[t].m_u.sctp_hdr.src_port =
786                                                 sctp_mask->hdr.src_port;
787                                 }
788                                 if (sctp_mask->hdr.dst_port) {
789                                         list[t].h_u.sctp_hdr.dst_port =
790                                                 sctp_spec->hdr.dst_port;
791                                         list[t].m_u.sctp_hdr.dst_port =
792                                                 sctp_mask->hdr.dst_port;
793                                 }
794                                 t++;
795                         }
796                         break;
797
798                 case RTE_FLOW_ITEM_TYPE_VXLAN:
799                         vxlan_spec = item->spec;
800                         vxlan_mask = item->mask;
801                         /* Check if VXLAN item is used to describe protocol.
802                          * If yes, both spec and mask should be NULL.
803                          * If no, both spec and mask shouldn't be NULL.
804                          */
805                         if ((!vxlan_spec && vxlan_mask) ||
806                             (vxlan_spec && !vxlan_mask)) {
807                                 rte_flow_error_set(error, EINVAL,
808                                            RTE_FLOW_ERROR_TYPE_ITEM,
809                                            item,
810                                            "Invalid VXLAN item");
811                                 return 0;
812                         }
813
814                         tunnel_valid = 1;
815                         if (vxlan_spec && vxlan_mask) {
816                                 list[t].type = ICE_VXLAN;
817                                 if (vxlan_mask->vni[0] ||
818                                         vxlan_mask->vni[1] ||
819                                         vxlan_mask->vni[2]) {
820                                         list[t].h_u.tnl_hdr.vni =
821                                                 (vxlan_spec->vni[2] << 16) |
822                                                 (vxlan_spec->vni[1] << 8) |
823                                                 vxlan_spec->vni[0];
824                                         list[t].m_u.tnl_hdr.vni =
825                                                 (vxlan_mask->vni[2] << 16) |
826                                                 (vxlan_mask->vni[1] << 8) |
827                                                 vxlan_mask->vni[0];
828                                         input_set |=
829                                                 ICE_INSET_TUN_VXLAN_VNI;
830                                 }
831                                 t++;
832                         }
833                         break;
834
835                 case RTE_FLOW_ITEM_TYPE_NVGRE:
836                         nvgre_spec = item->spec;
837                         nvgre_mask = item->mask;
838                         /* Check if NVGRE item is used to describe protocol.
839                          * If yes, both spec and mask should be NULL.
840                          * If no, both spec and mask shouldn't be NULL.
841                          */
842                         if ((!nvgre_spec && nvgre_mask) ||
843                             (nvgre_spec && !nvgre_mask)) {
844                                 rte_flow_error_set(error, EINVAL,
845                                            RTE_FLOW_ERROR_TYPE_ITEM,
846                                            item,
847                                            "Invalid NVGRE item");
848                                 return 0;
849                         }
850                         tunnel_valid = 1;
851                         if (nvgre_spec && nvgre_mask) {
852                                 list[t].type = ICE_NVGRE;
853                                 if (nvgre_mask->tni[0] ||
854                                         nvgre_mask->tni[1] ||
855                                         nvgre_mask->tni[2]) {
856                                         list[t].h_u.nvgre_hdr.tni_flow =
857                                                 (nvgre_spec->tni[2] << 16) |
858                                                 (nvgre_spec->tni[1] << 8) |
859                                                 nvgre_spec->tni[0];
860                                         list[t].m_u.nvgre_hdr.tni_flow =
861                                                 (nvgre_mask->tni[2] << 16) |
862                                                 (nvgre_mask->tni[1] << 8) |
863                                                 nvgre_mask->tni[0];
864                                         input_set |=
865                                                 ICE_INSET_TUN_NVGRE_TNI;
866                                 }
867                                 t++;
868                         }
869                         break;
870
871                 case RTE_FLOW_ITEM_TYPE_VLAN:
872                         vlan_spec = item->spec;
873                         vlan_mask = item->mask;
874                         /* Check if VLAN item is used to describe protocol.
875                          * If yes, both spec and mask should be NULL.
876                          * If no, both spec and mask shouldn't be NULL.
877                          */
878                         if ((!vlan_spec && vlan_mask) ||
879                             (vlan_spec && !vlan_mask)) {
880                                 rte_flow_error_set(error, EINVAL,
881                                            RTE_FLOW_ERROR_TYPE_ITEM,
882                                            item,
883                                            "Invalid VLAN item");
884                                 return 0;
885                         }
886                         if (vlan_spec && vlan_mask) {
887                                 list[t].type = ICE_VLAN_OFOS;
888                                 if (vlan_mask->tci) {
889                                         list[t].h_u.vlan_hdr.vlan =
890                                                 vlan_spec->tci;
891                                         list[t].m_u.vlan_hdr.vlan =
892                                                 vlan_mask->tci;
893                                         input_set |= ICE_INSET_VLAN_OUTER;
894                                 }
895                                 if (vlan_mask->inner_type) {
896                                         list[t].h_u.vlan_hdr.type =
897                                                 vlan_spec->inner_type;
898                                         list[t].m_u.vlan_hdr.type =
899                                                 vlan_mask->inner_type;
900                                         input_set |= ICE_INSET_VLAN_OUTER;
901                                 }
902                                 t++;
903                         }
904                         break;
905
906                 case RTE_FLOW_ITEM_TYPE_PPPOED:
907                 case RTE_FLOW_ITEM_TYPE_PPPOES:
908                         pppoe_spec = item->spec;
909                         pppoe_mask = item->mask;
910                         /* Check if PPPoE item is used to describe protocol.
911                          * If yes, both spec and mask should be NULL.
912                          * If no, both spec and mask shouldn't be NULL.
913                          */
914                         if ((!pppoe_spec && pppoe_mask) ||
915                                 (pppoe_spec && !pppoe_mask)) {
916                                 rte_flow_error_set(error, EINVAL,
917                                         RTE_FLOW_ERROR_TYPE_ITEM,
918                                         item,
919                                         "Invalid pppoe item");
920                                 return 0;
921                         }
922                         if (pppoe_spec && pppoe_mask) {
923                                 /* Check pppoe mask and update input set */
924                                 if (pppoe_mask->length ||
925                                         pppoe_mask->code ||
926                                         pppoe_mask->version_type) {
927                                         rte_flow_error_set(error, EINVAL,
928                                                 RTE_FLOW_ERROR_TYPE_ITEM,
929                                                 item,
930                                                 "Invalid pppoe mask");
931                                         return 0;
932                                 }
933                                 list[t].type = ICE_PPPOE;
934                                 if (pppoe_mask->session_id) {
935                                         list[t].h_u.pppoe_hdr.session_id =
936                                                 pppoe_spec->session_id;
937                                         list[t].m_u.pppoe_hdr.session_id =
938                                                 pppoe_mask->session_id;
939                                         input_set |= ICE_INSET_PPPOE_SESSION;
940                                 }
941                                 t++;
942                                 pppoe_valid = 1;
943                         }
944                         break;
945
946                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
947                         pppoe_proto_spec = item->spec;
948                         pppoe_proto_mask = item->mask;
949                         /* Check if PPPoE optional proto_id item
950                          * is used to describe protocol.
951                          * If yes, both spec and mask should be NULL.
952                          * If no, both spec and mask shouldn't be NULL.
953                          */
954                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
955                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
956                                 rte_flow_error_set(error, EINVAL,
957                                         RTE_FLOW_ERROR_TYPE_ITEM,
958                                         item,
959                                         "Invalid pppoe proto item");
960                                 return 0;
961                         }
962                         if (pppoe_proto_spec && pppoe_proto_mask) {
963                                 if (pppoe_valid)
964                                         t--;
965                                 list[t].type = ICE_PPPOE;
966                                 if (pppoe_proto_mask->proto_id) {
967                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
968                                                 pppoe_proto_spec->proto_id;
969                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
970                                                 pppoe_proto_mask->proto_id;
971                                         input_set |= ICE_INSET_PPPOE_PROTO;
972                                 }
973                                 t++;
974                         }
975                         break;
976
977                 case RTE_FLOW_ITEM_TYPE_ESP:
978                         esp_spec = item->spec;
979                         esp_mask = item->mask;
980                         if (esp_spec || esp_mask) {
981                                 rte_flow_error_set(error, EINVAL,
982                                            RTE_FLOW_ERROR_TYPE_ITEM,
983                                            item,
984                                            "Invalid esp item");
985                                 return -ENOTSUP;
986                         }
987                         if (ipv6_valiad && udp_valiad)
988                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
989                         else if (ipv6_valiad)
990                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
991                         break;
992
993                 case RTE_FLOW_ITEM_TYPE_AH:
994                         ah_spec = item->spec;
995                         ah_mask = item->mask;
996                         if (ah_spec || ah_mask) {
997                                 rte_flow_error_set(error, EINVAL,
998                                            RTE_FLOW_ERROR_TYPE_ITEM,
999                                            item,
1000                                            "Invalid ah item");
1001                                 return -ENOTSUP;
1002                         }
1003                         if (ipv6_valiad && udp_valiad)
1004                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
1005                         else if (ipv6_valiad)
1006                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1007                         break;
1008
1009                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1010                         l2tp_spec = item->spec;
1011                         l2tp_mask = item->mask;
1012                         if (l2tp_spec || l2tp_mask) {
1013                                 rte_flow_error_set(error, EINVAL,
1014                                            RTE_FLOW_ERROR_TYPE_ITEM,
1015                                            item,
1016                                            "Invalid l2tp item");
1017                                 return -ENOTSUP;
1018                         }
1019                         if (ipv6_valiad)
1020                                 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1021                         break;
1022                 case RTE_FLOW_ITEM_TYPE_PFCP:
1023                         pfcp_spec = item->spec;
1024                         pfcp_mask = item->mask;
1025                         /* Check if PFCP item is used to describe protocol.
1026                          * If yes, both spec and mask should be NULL.
1027                          * If no, both spec and mask shouldn't be NULL.
1028                          */
1029                         if ((!pfcp_spec && pfcp_mask) ||
1030                             (pfcp_spec && !pfcp_mask)) {
1031                                 rte_flow_error_set(error, EINVAL,
1032                                            RTE_FLOW_ERROR_TYPE_ITEM,
1033                                            item,
1034                                            "Invalid PFCP item");
1035                                 return -ENOTSUP;
1036                         }
1037                         if (pfcp_spec && pfcp_mask) {
1038                                 /* Check pfcp mask and update input set */
1039                                 if (pfcp_mask->msg_type ||
1040                                         pfcp_mask->msg_len ||
1041                                         pfcp_mask->seid) {
1042                                         rte_flow_error_set(error, EINVAL,
1043                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1044                                                 item,
1045                                                 "Invalid pfcp mask");
1046                                         return -ENOTSUP;
1047                                 }
1048                                 if (pfcp_mask->s_field &&
1049                                         pfcp_spec->s_field == 0x01 &&
1050                                         ipv6_valiad)
1051                                         *tun_type =
1052                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1053                                 else if (pfcp_mask->s_field &&
1054                                         pfcp_spec->s_field == 0x01)
1055                                         *tun_type =
1056                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1057                                 else if (pfcp_mask->s_field &&
1058                                         !pfcp_spec->s_field &&
1059                                         ipv6_valiad)
1060                                         *tun_type =
1061                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1062                                 else if (pfcp_mask->s_field &&
1063                                         !pfcp_spec->s_field)
1064                                         *tun_type =
1065                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1066                                 else
1067                                         return -ENOTSUP;
1068                         }
1069                         break;
1070
1071
1072                 case RTE_FLOW_ITEM_TYPE_VOID:
1073                         break;
1074
1075                 default:
1076                         rte_flow_error_set(error, EINVAL,
1077                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1078                                    "Invalid pattern item.");
1079                         goto out;
1080                 }
1081         }
1082
1083         *lkups_num = t;
1084
1085         return input_set;
1086 out:
1087         return 0;
1088 }
1089
1090 static int
1091 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1092                             struct rte_flow_error *error,
1093                             struct ice_adv_rule_info *rule_info)
1094 {
1095         const struct rte_flow_action_vf *act_vf;
1096         const struct rte_flow_action *action;
1097         enum rte_flow_action_type action_type;
1098
1099         for (action = actions; action->type !=
1100                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1101                 action_type = action->type;
1102                 switch (action_type) {
1103                 case RTE_FLOW_ACTION_TYPE_VF:
1104                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1105                         act_vf = action->conf;
1106                         rule_info->sw_act.vsi_handle = act_vf->id;
1107                         break;
1108                 default:
1109                         rte_flow_error_set(error,
1110                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1111                                            actions,
1112                                            "Invalid action type or queue number");
1113                         return -rte_errno;
1114                 }
1115         }
1116
1117         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1118         rule_info->rx = 1;
1119         rule_info->priority = 5;
1120
1121         return 0;
1122 }
1123
1124 static int
1125 ice_switch_parse_action(struct ice_pf *pf,
1126                 const struct rte_flow_action *actions,
1127                 struct rte_flow_error *error,
1128                 struct ice_adv_rule_info *rule_info)
1129 {
1130         struct ice_vsi *vsi = pf->main_vsi;
1131         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1132         const struct rte_flow_action_queue *act_q;
1133         const struct rte_flow_action_rss *act_qgrop;
1134         uint16_t base_queue, i;
1135         const struct rte_flow_action *action;
1136         enum rte_flow_action_type action_type;
1137         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1138                  2, 4, 8, 16, 32, 64, 128};
1139
1140         base_queue = pf->base_queue + vsi->base_queue;
1141         for (action = actions; action->type !=
1142                         RTE_FLOW_ACTION_TYPE_END; action++) {
1143                 action_type = action->type;
1144                 switch (action_type) {
1145                 case RTE_FLOW_ACTION_TYPE_RSS:
1146                         act_qgrop = action->conf;
1147                         rule_info->sw_act.fltr_act =
1148                                 ICE_FWD_TO_QGRP;
1149                         rule_info->sw_act.fwd_id.q_id =
1150                                 base_queue + act_qgrop->queue[0];
1151                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1152                                 if (act_qgrop->queue_num ==
1153                                         valid_qgrop_number[i])
1154                                         break;
1155                         }
1156                         if (i == MAX_QGRP_NUM_TYPE)
1157                                 goto error;
1158                         if ((act_qgrop->queue[0] +
1159                                 act_qgrop->queue_num) >
1160                                 dev->data->nb_rx_queues)
1161                                 goto error;
1162                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1163                                 if (act_qgrop->queue[i + 1] !=
1164                                         act_qgrop->queue[i] + 1)
1165                                         goto error;
1166                         rule_info->sw_act.qgrp_size =
1167                                 act_qgrop->queue_num;
1168                         break;
1169                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1170                         act_q = action->conf;
1171                         if (act_q->index >= dev->data->nb_rx_queues)
1172                                 goto error;
1173                         rule_info->sw_act.fltr_act =
1174                                 ICE_FWD_TO_Q;
1175                         rule_info->sw_act.fwd_id.q_id =
1176                                 base_queue + act_q->index;
1177                         break;
1178
1179                 case RTE_FLOW_ACTION_TYPE_DROP:
1180                         rule_info->sw_act.fltr_act =
1181                                 ICE_DROP_PACKET;
1182                         break;
1183
1184                 case RTE_FLOW_ACTION_TYPE_VOID:
1185                         break;
1186
1187                 default:
1188                         goto error;
1189                 }
1190         }
1191
1192         rule_info->sw_act.vsi_handle = vsi->idx;
1193         rule_info->rx = 1;
1194         rule_info->sw_act.src = vsi->idx;
1195         rule_info->priority = 5;
1196
1197         return 0;
1198
1199 error:
1200         rte_flow_error_set(error,
1201                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1202                 actions,
1203                 "Invalid action type or queue number");
1204         return -rte_errno;
1205 }
1206
1207 static int
1208 ice_switch_check_action(const struct rte_flow_action *actions,
1209                             struct rte_flow_error *error)
1210 {
1211         const struct rte_flow_action *action;
1212         enum rte_flow_action_type action_type;
1213         uint16_t actions_num = 0;
1214
1215         for (action = actions; action->type !=
1216                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1217                 action_type = action->type;
1218                 switch (action_type) {
1219                 case RTE_FLOW_ACTION_TYPE_VF:
1220                 case RTE_FLOW_ACTION_TYPE_RSS:
1221                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1222                 case RTE_FLOW_ACTION_TYPE_DROP:
1223                         actions_num++;
1224                         break;
1225                 case RTE_FLOW_ACTION_TYPE_VOID:
1226                         continue;
1227                 default:
1228                         rte_flow_error_set(error,
1229                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1230                                            actions,
1231                                            "Invalid action type");
1232                         return -rte_errno;
1233                 }
1234         }
1235
1236         if (actions_num > 1) {
1237                 rte_flow_error_set(error,
1238                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1239                                    actions,
1240                                    "Invalid action number");
1241                 return -rte_errno;
1242         }
1243
1244         return 0;
1245 }
1246
1247 static bool
1248 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1249 {
1250         switch (tun_type) {
1251         case ICE_SW_TUN_PROFID_IPV6_ESP:
1252         case ICE_SW_TUN_PROFID_IPV6_AH:
1253         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1254         case ICE_SW_TUN_PROFID_IPV6_NAT_T:
1255         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1256         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1257         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1258         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1259                 return true;
1260         default:
1261                 break;
1262         }
1263
1264         return false;
1265 }
1266
1267 static int
1268 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1269                 struct ice_pattern_match_item *array,
1270                 uint32_t array_len,
1271                 const struct rte_flow_item pattern[],
1272                 const struct rte_flow_action actions[],
1273                 void **meta,
1274                 struct rte_flow_error *error)
1275 {
1276         struct ice_pf *pf = &ad->pf;
1277         uint64_t inputset = 0;
1278         int ret = 0;
1279         struct sw_meta *sw_meta_ptr = NULL;
1280         struct ice_adv_rule_info rule_info;
1281         struct ice_adv_lkup_elem *list = NULL;
1282         uint16_t lkups_num = 0;
1283         const struct rte_flow_item *item = pattern;
1284         uint16_t item_num = 0;
1285         enum ice_sw_tunnel_type tun_type =
1286                 ICE_SW_TUN_AND_NON_TUN;
1287         struct ice_pattern_match_item *pattern_match_item = NULL;
1288
1289         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1290                 item_num++;
1291                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1292                         tun_type = ICE_SW_TUN_VXLAN;
1293                 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1294                         tun_type = ICE_SW_TUN_NVGRE;
1295                 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1296                                 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1297                         tun_type = ICE_SW_TUN_PPPOE;
1298                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1299                         const struct rte_flow_item_eth *eth_mask;
1300                         if (item->mask)
1301                                 eth_mask = item->mask;
1302                         else
1303                                 continue;
1304                         if (eth_mask->type == UINT16_MAX)
1305                                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1306                 }
1307                 /* reserve one more memory slot for ETH which may
1308                  * consume 2 lookup items.
1309                  */
1310                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1311                         item_num++;
1312         }
1313
1314         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1315         if (!list) {
1316                 rte_flow_error_set(error, EINVAL,
1317                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1318                                    "No memory for PMD internal items");
1319                 return -rte_errno;
1320         }
1321
1322         sw_meta_ptr =
1323                 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1324         if (!sw_meta_ptr) {
1325                 rte_flow_error_set(error, EINVAL,
1326                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1327                                    "No memory for sw_pattern_meta_ptr");
1328                 goto error;
1329         }
1330
1331         pattern_match_item =
1332                 ice_search_pattern_match_item(pattern, array, array_len, error);
1333         if (!pattern_match_item) {
1334                 rte_flow_error_set(error, EINVAL,
1335                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1336                                    "Invalid input pattern");
1337                 goto error;
1338         }
1339
1340         inputset = ice_switch_inset_get
1341                 (pattern, error, list, &lkups_num, &tun_type);
1342         if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1343                 (inputset & ~pattern_match_item->input_set_mask)) {
1344                 rte_flow_error_set(error, EINVAL,
1345                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1346                                    pattern,
1347                                    "Invalid input set");
1348                 goto error;
1349         }
1350
1351         rule_info.tun_type = tun_type;
1352
1353         ret = ice_switch_check_action(actions, error);
1354         if (ret) {
1355                 rte_flow_error_set(error, EINVAL,
1356                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1357                                    "Invalid input action number");
1358                 goto error;
1359         }
1360
1361         if (ad->hw.dcf_enabled)
1362                 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1363         else
1364                 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1365
1366         if (ret) {
1367                 rte_flow_error_set(error, EINVAL,
1368                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1369                                    "Invalid input action");
1370                 goto error;
1371         }
1372
1373         if (meta) {
1374                 *meta = sw_meta_ptr;
1375                 ((struct sw_meta *)*meta)->list = list;
1376                 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1377                 ((struct sw_meta *)*meta)->rule_info = rule_info;
1378         } else {
1379                 rte_free(list);
1380                 rte_free(sw_meta_ptr);
1381         }
1382
1383         rte_free(pattern_match_item);
1384
1385         return 0;
1386
1387 error:
1388         rte_free(list);
1389         rte_free(sw_meta_ptr);
1390         rte_free(pattern_match_item);
1391
1392         return -rte_errno;
1393 }
1394
1395 static int
1396 ice_switch_query(struct ice_adapter *ad __rte_unused,
1397                 struct rte_flow *flow __rte_unused,
1398                 struct rte_flow_query_count *count __rte_unused,
1399                 struct rte_flow_error *error)
1400 {
1401         rte_flow_error_set(error, EINVAL,
1402                 RTE_FLOW_ERROR_TYPE_HANDLE,
1403                 NULL,
1404                 "count action not supported by switch filter");
1405
1406         return -rte_errno;
1407 }
1408
1409 static int
1410 ice_switch_init(struct ice_adapter *ad)
1411 {
1412         int ret = 0;
1413         struct ice_flow_parser *dist_parser;
1414         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1415
1416         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1417                 dist_parser = &ice_switch_dist_parser_comms;
1418         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1419                 dist_parser = &ice_switch_dist_parser_os;
1420         else
1421                 return -EINVAL;
1422
1423         if (ad->devargs.pipe_mode_support)
1424                 ret = ice_register_parser(perm_parser, ad);
1425         else
1426                 ret = ice_register_parser(dist_parser, ad);
1427         return ret;
1428 }
1429
1430 static void
1431 ice_switch_uninit(struct ice_adapter *ad)
1432 {
1433         struct ice_flow_parser *dist_parser;
1434         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1435
1436         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1437                 dist_parser = &ice_switch_dist_parser_comms;
1438         else
1439                 dist_parser = &ice_switch_dist_parser_os;
1440
1441         if (ad->devargs.pipe_mode_support)
1442                 ice_unregister_parser(perm_parser, ad);
1443         else
1444                 ice_unregister_parser(dist_parser, ad);
1445 }
1446
/* Switch filter engine: plugs rule create/destroy/query into the
 * generic flow framework.  Counter queries are unsupported --
 * .query_count always fails with EINVAL (see ice_switch_query()).
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1457
/* Distributor-stage parser used when the OS-default DDP package is
 * loaded; its pattern table is defined elsewhere in this file.
 */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1466
/* Distributor-stage parser used when the comms DDP package is loaded;
 * supports a wider pattern set than the OS-default variant.
 */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1475
/* Permission-stage parser, selected when the pipe_mode_support devarg
 * is set (see ice_switch_init()).
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1484
1485 RTE_INIT(ice_sw_engine_init)
1486 {
1487         struct ice_flow_engine *engine = &ice_switch_engine;
1488         ice_register_flow_engine(engine);
1489 }