9c87a16dd175652a1572e9bbc020fa837eeaec93
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26
27
28 #define MAX_QGRP_NUM_TYPE 7
29
30 #define ICE_SW_INSET_ETHER ( \
31         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33                 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
34                 ICE_INSET_VLAN_OUTER)
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49         ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82         ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86         ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90         ICE_INSET_TUN_IPV4_TOS)
91 #define ICE_SW_INSET_MAC_PPPOE  ( \
92         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
95         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97         ICE_INSET_PPPOE_PROTO)
98
99 struct sw_meta {
100         struct ice_adv_lkup_elem *list;
101         uint16_t lkups_num;
102         struct ice_adv_rule_info rule_info;
103 };
104
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
108
109 static struct
110 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
111         {pattern_ethertype,
112                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
113         {pattern_ethertype_vlan,
114                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
115         {pattern_eth_ipv4,
116                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
117         {pattern_eth_ipv4_udp,
118                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
119         {pattern_eth_ipv4_tcp,
120                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
121         {pattern_eth_ipv6,
122                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
123         {pattern_eth_ipv6_udp,
124                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
125         {pattern_eth_ipv6_tcp,
126                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
127         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
128                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
129         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
130                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
131         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
132                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
133         {pattern_eth_ipv4_nvgre_eth_ipv4,
134                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
135         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
136                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
137         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
138                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
139         {pattern_eth_pppoed,
140                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
141         {pattern_eth_vlan_pppoed,
142                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
143         {pattern_eth_pppoes,
144                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
145         {pattern_eth_vlan_pppoes,
146                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
147         {pattern_eth_pppoes_proto,
148                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
149         {pattern_eth_vlan_pppoes_proto,
150                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
151         {pattern_eth_ipv6_esp,
152                         ICE_INSET_NONE, ICE_INSET_NONE},
153         {pattern_eth_ipv6_ah,
154                         ICE_INSET_NONE, ICE_INSET_NONE},
155         {pattern_eth_ipv6_l2tp,
156                         ICE_INSET_NONE, ICE_INSET_NONE},
157 };
158
159 static struct
160 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
161         {pattern_ethertype,
162                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
163         {pattern_ethertype_vlan,
164                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
165         {pattern_eth_arp,
166                         ICE_INSET_NONE, ICE_INSET_NONE},
167         {pattern_eth_ipv4,
168                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
169         {pattern_eth_ipv4_udp,
170                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
171         {pattern_eth_ipv4_tcp,
172                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
173         {pattern_eth_ipv6,
174                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
175         {pattern_eth_ipv6_udp,
176                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
177         {pattern_eth_ipv6_tcp,
178                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
179         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
180                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
181         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
182                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
183         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
184                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
185         {pattern_eth_ipv4_nvgre_eth_ipv4,
186                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
187         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
188                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
189         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
190                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
191 };
192
193 static struct
194 ice_pattern_match_item ice_switch_pattern_perm[] = {
195         {pattern_ethertype_vlan,
196                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
197         {pattern_eth_ipv4,
198                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
199         {pattern_eth_ipv4_udp,
200                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
201         {pattern_eth_ipv4_tcp,
202                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
203         {pattern_eth_ipv6,
204                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
205         {pattern_eth_ipv6_udp,
206                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
207         {pattern_eth_ipv6_tcp,
208                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
209         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
210                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
211         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
212                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
213         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
214                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
215         {pattern_eth_ipv4_nvgre_eth_ipv4,
216                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
217         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
218                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
219         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
220                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
221         {pattern_eth_ipv6_esp,
222                         ICE_INSET_NONE, ICE_INSET_NONE},
223         {pattern_eth_ipv6_ah,
224                         ICE_INSET_NONE, ICE_INSET_NONE},
225         {pattern_eth_ipv6_l2tp,
226                         ICE_INSET_NONE, ICE_INSET_NONE},
227 };
228
229 static int
230 ice_switch_create(struct ice_adapter *ad,
231                 struct rte_flow *flow,
232                 void *meta,
233                 struct rte_flow_error *error)
234 {
235         int ret = 0;
236         struct ice_pf *pf = &ad->pf;
237         struct ice_hw *hw = ICE_PF_TO_HW(pf);
238         struct ice_rule_query_data rule_added = {0};
239         struct ice_rule_query_data *filter_ptr;
240         struct ice_adv_lkup_elem *list =
241                 ((struct sw_meta *)meta)->list;
242         uint16_t lkups_cnt =
243                 ((struct sw_meta *)meta)->lkups_num;
244         struct ice_adv_rule_info *rule_info =
245                 &((struct sw_meta *)meta)->rule_info;
246
247         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
248                 rte_flow_error_set(error, EINVAL,
249                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
250                         "item number too large for rule");
251                 goto error;
252         }
253         if (!list) {
254                 rte_flow_error_set(error, EINVAL,
255                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
256                         "lookup list should not be NULL");
257                 goto error;
258         }
259         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
260         if (!ret) {
261                 filter_ptr = rte_zmalloc("ice_switch_filter",
262                         sizeof(struct ice_rule_query_data), 0);
263                 if (!filter_ptr) {
264                         rte_flow_error_set(error, EINVAL,
265                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
266                                    "No memory for ice_switch_filter");
267                         goto error;
268                 }
269                 flow->rule = filter_ptr;
270                 rte_memcpy(filter_ptr,
271                         &rule_added,
272                         sizeof(struct ice_rule_query_data));
273         } else {
274                 rte_flow_error_set(error, EINVAL,
275                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
276                         "switch filter create flow fail");
277                 goto error;
278         }
279
280         rte_free(list);
281         rte_free(meta);
282         return 0;
283
284 error:
285         rte_free(list);
286         rte_free(meta);
287
288         return -rte_errno;
289 }
290
291 static int
292 ice_switch_destroy(struct ice_adapter *ad,
293                 struct rte_flow *flow,
294                 struct rte_flow_error *error)
295 {
296         struct ice_hw *hw = &ad->hw;
297         int ret;
298         struct ice_rule_query_data *filter_ptr;
299
300         filter_ptr = (struct ice_rule_query_data *)
301                 flow->rule;
302
303         if (!filter_ptr) {
304                 rte_flow_error_set(error, EINVAL,
305                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
306                         "no such flow"
307                         " create by switch filter");
308                 return -rte_errno;
309         }
310
311         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
312         if (ret) {
313                 rte_flow_error_set(error, EINVAL,
314                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
315                         "fail to destroy switch filter rule");
316                 return -rte_errno;
317         }
318
319         rte_free(filter_ptr);
320         return ret;
321 }
322
323 static void
324 ice_switch_filter_rule_free(struct rte_flow *flow)
325 {
326         rte_free(flow->rule);
327 }
328
329 static uint64_t
330 ice_switch_inset_get(const struct rte_flow_item pattern[],
331                 struct rte_flow_error *error,
332                 struct ice_adv_lkup_elem *list,
333                 uint16_t *lkups_num,
334                 enum ice_sw_tunnel_type *tun_type)
335 {
336         const struct rte_flow_item *item = pattern;
337         enum rte_flow_item_type item_type;
338         const struct rte_flow_item_eth *eth_spec, *eth_mask;
339         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
340         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
341         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
342         const struct rte_flow_item_udp *udp_spec, *udp_mask;
343         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
344         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
345         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
346         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
347         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
348         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
349                                 *pppoe_proto_mask;
350         const struct rte_flow_item_esp *esp_spec, *esp_mask;
351         const struct rte_flow_item_ah *ah_spec, *ah_mask;
352         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
353         uint64_t input_set = ICE_INSET_NONE;
354         uint16_t j, t = 0;
355         uint16_t tunnel_valid = 0;
356         uint16_t pppoe_valid = 0;
357         uint16_t ipv6_valiad = 0;
358
359
360         for (item = pattern; item->type !=
361                         RTE_FLOW_ITEM_TYPE_END; item++) {
362                 if (item->last) {
363                         rte_flow_error_set(error, EINVAL,
364                                         RTE_FLOW_ERROR_TYPE_ITEM,
365                                         item,
366                                         "Not support range");
367                         return 0;
368                 }
369                 item_type = item->type;
370
371                 switch (item_type) {
372                 case RTE_FLOW_ITEM_TYPE_ETH:
373                         eth_spec = item->spec;
374                         eth_mask = item->mask;
375                         if (eth_spec && eth_mask) {
376                                 const uint8_t *a = eth_mask->src.addr_bytes;
377                                 const uint8_t *b = eth_mask->dst.addr_bytes;
378                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
379                                         if (a[j] && tunnel_valid) {
380                                                 input_set |=
381                                                         ICE_INSET_TUN_SMAC;
382                                                 break;
383                                         } else if (a[j]) {
384                                                 input_set |=
385                                                         ICE_INSET_SMAC;
386                                                 break;
387                                         }
388                                 }
389                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
390                                         if (b[j] && tunnel_valid) {
391                                                 input_set |=
392                                                         ICE_INSET_TUN_DMAC;
393                                                 break;
394                                         } else if (b[j]) {
395                                                 input_set |=
396                                                         ICE_INSET_DMAC;
397                                                 break;
398                                         }
399                                 }
400                                 if (eth_mask->type)
401                                         input_set |= ICE_INSET_ETHERTYPE;
402                                 list[t].type = (tunnel_valid  == 0) ?
403                                         ICE_MAC_OFOS : ICE_MAC_IL;
404                                 struct ice_ether_hdr *h;
405                                 struct ice_ether_hdr *m;
406                                 uint16_t i = 0;
407                                 h = &list[t].h_u.eth_hdr;
408                                 m = &list[t].m_u.eth_hdr;
409                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
410                                         if (eth_mask->src.addr_bytes[j]) {
411                                                 h->src_addr[j] =
412                                                 eth_spec->src.addr_bytes[j];
413                                                 m->src_addr[j] =
414                                                 eth_mask->src.addr_bytes[j];
415                                                 i = 1;
416                                         }
417                                         if (eth_mask->dst.addr_bytes[j]) {
418                                                 h->dst_addr[j] =
419                                                 eth_spec->dst.addr_bytes[j];
420                                                 m->dst_addr[j] =
421                                                 eth_mask->dst.addr_bytes[j];
422                                                 i = 1;
423                                         }
424                                 }
425                                 if (i)
426                                         t++;
427                                 if (eth_mask->type) {
428                                         list[t].type = ICE_ETYPE_OL;
429                                         list[t].h_u.ethertype.ethtype_id =
430                                                 eth_spec->type;
431                                         list[t].m_u.ethertype.ethtype_id =
432                                                 eth_mask->type;
433                                         t++;
434                                 }
435                         }
436                         break;
437
438                 case RTE_FLOW_ITEM_TYPE_IPV4:
439                         ipv4_spec = item->spec;
440                         ipv4_mask = item->mask;
441                         if (ipv4_spec && ipv4_mask) {
442                                 /* Check IPv4 mask and update input set */
443                                 if (ipv4_mask->hdr.version_ihl ||
444                                         ipv4_mask->hdr.total_length ||
445                                         ipv4_mask->hdr.packet_id ||
446                                         ipv4_mask->hdr.hdr_checksum) {
447                                         rte_flow_error_set(error, EINVAL,
448                                                    RTE_FLOW_ERROR_TYPE_ITEM,
449                                                    item,
450                                                    "Invalid IPv4 mask.");
451                                         return 0;
452                                 }
453
454                                 if (tunnel_valid) {
455                                         if (ipv4_mask->hdr.type_of_service)
456                                                 input_set |=
457                                                         ICE_INSET_TUN_IPV4_TOS;
458                                         if (ipv4_mask->hdr.src_addr)
459                                                 input_set |=
460                                                         ICE_INSET_TUN_IPV4_SRC;
461                                         if (ipv4_mask->hdr.dst_addr)
462                                                 input_set |=
463                                                         ICE_INSET_TUN_IPV4_DST;
464                                         if (ipv4_mask->hdr.time_to_live)
465                                                 input_set |=
466                                                         ICE_INSET_TUN_IPV4_TTL;
467                                         if (ipv4_mask->hdr.next_proto_id)
468                                                 input_set |=
469                                                 ICE_INSET_TUN_IPV4_PROTO;
470                                 } else {
471                                         if (ipv4_mask->hdr.src_addr)
472                                                 input_set |= ICE_INSET_IPV4_SRC;
473                                         if (ipv4_mask->hdr.dst_addr)
474                                                 input_set |= ICE_INSET_IPV4_DST;
475                                         if (ipv4_mask->hdr.time_to_live)
476                                                 input_set |= ICE_INSET_IPV4_TTL;
477                                         if (ipv4_mask->hdr.next_proto_id)
478                                                 input_set |=
479                                                 ICE_INSET_IPV4_PROTO;
480                                         if (ipv4_mask->hdr.type_of_service)
481                                                 input_set |=
482                                                         ICE_INSET_IPV4_TOS;
483                                 }
484                                 list[t].type = (tunnel_valid  == 0) ?
485                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
486                                 if (ipv4_mask->hdr.src_addr) {
487                                         list[t].h_u.ipv4_hdr.src_addr =
488                                                 ipv4_spec->hdr.src_addr;
489                                         list[t].m_u.ipv4_hdr.src_addr =
490                                                 ipv4_mask->hdr.src_addr;
491                                 }
492                                 if (ipv4_mask->hdr.dst_addr) {
493                                         list[t].h_u.ipv4_hdr.dst_addr =
494                                                 ipv4_spec->hdr.dst_addr;
495                                         list[t].m_u.ipv4_hdr.dst_addr =
496                                                 ipv4_mask->hdr.dst_addr;
497                                 }
498                                 if (ipv4_mask->hdr.time_to_live) {
499                                         list[t].h_u.ipv4_hdr.time_to_live =
500                                                 ipv4_spec->hdr.time_to_live;
501                                         list[t].m_u.ipv4_hdr.time_to_live =
502                                                 ipv4_mask->hdr.time_to_live;
503                                 }
504                                 if (ipv4_mask->hdr.next_proto_id) {
505                                         list[t].h_u.ipv4_hdr.protocol =
506                                                 ipv4_spec->hdr.next_proto_id;
507                                         list[t].m_u.ipv4_hdr.protocol =
508                                                 ipv4_mask->hdr.next_proto_id;
509                                 }
510                                 if (ipv4_mask->hdr.type_of_service) {
511                                         list[t].h_u.ipv4_hdr.tos =
512                                                 ipv4_spec->hdr.type_of_service;
513                                         list[t].m_u.ipv4_hdr.tos =
514                                                 ipv4_mask->hdr.type_of_service;
515                                 }
516                                 t++;
517                         }
518                         break;
519
520                 case RTE_FLOW_ITEM_TYPE_IPV6:
521                         ipv6_spec = item->spec;
522                         ipv6_mask = item->mask;
523                         ipv6_valiad = 1;
524                         if (ipv6_spec && ipv6_mask) {
525                                 if (ipv6_mask->hdr.payload_len) {
526                                         rte_flow_error_set(error, EINVAL,
527                                            RTE_FLOW_ERROR_TYPE_ITEM,
528                                            item,
529                                            "Invalid IPv6 mask");
530                                         return 0;
531                                 }
532
533                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
534                                         if (ipv6_mask->hdr.src_addr[j] &&
535                                                 tunnel_valid) {
536                                                 input_set |=
537                                                 ICE_INSET_TUN_IPV6_SRC;
538                                                 break;
539                                         } else if (ipv6_mask->hdr.src_addr[j]) {
540                                                 input_set |= ICE_INSET_IPV6_SRC;
541                                                 break;
542                                         }
543                                 }
544                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
545                                         if (ipv6_mask->hdr.dst_addr[j] &&
546                                                 tunnel_valid) {
547                                                 input_set |=
548                                                 ICE_INSET_TUN_IPV6_DST;
549                                                 break;
550                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
551                                                 input_set |= ICE_INSET_IPV6_DST;
552                                                 break;
553                                         }
554                                 }
555                                 if (ipv6_mask->hdr.proto &&
556                                         tunnel_valid)
557                                         input_set |=
558                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
559                                 else if (ipv6_mask->hdr.proto)
560                                         input_set |=
561                                                 ICE_INSET_IPV6_NEXT_HDR;
562                                 if (ipv6_mask->hdr.hop_limits &&
563                                         tunnel_valid)
564                                         input_set |=
565                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
566                                 else if (ipv6_mask->hdr.hop_limits)
567                                         input_set |=
568                                                 ICE_INSET_IPV6_HOP_LIMIT;
569                                 if ((ipv6_mask->hdr.vtc_flow &
570                                                 rte_cpu_to_be_32
571                                                 (RTE_IPV6_HDR_TC_MASK)) &&
572                                         tunnel_valid)
573                                         input_set |=
574                                                         ICE_INSET_TUN_IPV6_TC;
575                                 else if (ipv6_mask->hdr.vtc_flow &
576                                                 rte_cpu_to_be_32
577                                                 (RTE_IPV6_HDR_TC_MASK))
578                                         input_set |= ICE_INSET_IPV6_TC;
579
580                                 list[t].type = (tunnel_valid  == 0) ?
581                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
582                                 struct ice_ipv6_hdr *f;
583                                 struct ice_ipv6_hdr *s;
584                                 f = &list[t].h_u.ipv6_hdr;
585                                 s = &list[t].m_u.ipv6_hdr;
586                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
587                                         if (ipv6_mask->hdr.src_addr[j]) {
588                                                 f->src_addr[j] =
589                                                 ipv6_spec->hdr.src_addr[j];
590                                                 s->src_addr[j] =
591                                                 ipv6_mask->hdr.src_addr[j];
592                                         }
593                                         if (ipv6_mask->hdr.dst_addr[j]) {
594                                                 f->dst_addr[j] =
595                                                 ipv6_spec->hdr.dst_addr[j];
596                                                 s->dst_addr[j] =
597                                                 ipv6_mask->hdr.dst_addr[j];
598                                         }
599                                 }
600                                 if (ipv6_mask->hdr.proto) {
601                                         f->next_hdr =
602                                                 ipv6_spec->hdr.proto;
603                                         s->next_hdr =
604                                                 ipv6_mask->hdr.proto;
605                                 }
606                                 if (ipv6_mask->hdr.hop_limits) {
607                                         f->hop_limit =
608                                                 ipv6_spec->hdr.hop_limits;
609                                         s->hop_limit =
610                                                 ipv6_mask->hdr.hop_limits;
611                                 }
612                                 if (ipv6_mask->hdr.vtc_flow &
613                                                 rte_cpu_to_be_32
614                                                 (RTE_IPV6_HDR_TC_MASK)) {
615                                         struct ice_le_ver_tc_flow vtf;
616                                         vtf.u.fld.version = 0;
617                                         vtf.u.fld.flow_label = 0;
618                                         vtf.u.fld.tc = (rte_be_to_cpu_32
619                                                 (ipv6_spec->hdr.vtc_flow) &
620                                                         RTE_IPV6_HDR_TC_MASK) >>
621                                                         RTE_IPV6_HDR_TC_SHIFT;
622                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
623                                         vtf.u.fld.tc = (rte_be_to_cpu_32
624                                                 (ipv6_mask->hdr.vtc_flow) &
625                                                         RTE_IPV6_HDR_TC_MASK) >>
626                                                         RTE_IPV6_HDR_TC_SHIFT;
627                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
628                                 }
629                                 t++;
630                         }
631                         break;
632
633                 case RTE_FLOW_ITEM_TYPE_UDP:
634                         udp_spec = item->spec;
635                         udp_mask = item->mask;
636                         if (udp_spec && udp_mask) {
637                                 /* Check UDP mask and update input set*/
638                                 if (udp_mask->hdr.dgram_len ||
639                                     udp_mask->hdr.dgram_cksum) {
640                                         rte_flow_error_set(error, EINVAL,
641                                                    RTE_FLOW_ERROR_TYPE_ITEM,
642                                                    item,
643                                                    "Invalid UDP mask");
644                                         return 0;
645                                 }
646
647                                 if (tunnel_valid) {
648                                         if (udp_mask->hdr.src_port)
649                                                 input_set |=
650                                                 ICE_INSET_TUN_UDP_SRC_PORT;
651                                         if (udp_mask->hdr.dst_port)
652                                                 input_set |=
653                                                 ICE_INSET_TUN_UDP_DST_PORT;
654                                 } else {
655                                         if (udp_mask->hdr.src_port)
656                                                 input_set |=
657                                                 ICE_INSET_UDP_SRC_PORT;
658                                         if (udp_mask->hdr.dst_port)
659                                                 input_set |=
660                                                 ICE_INSET_UDP_DST_PORT;
661                                 }
662                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
663                                                 tunnel_valid == 0)
664                                         list[t].type = ICE_UDP_OF;
665                                 else
666                                         list[t].type = ICE_UDP_ILOS;
667                                 if (udp_mask->hdr.src_port) {
668                                         list[t].h_u.l4_hdr.src_port =
669                                                 udp_spec->hdr.src_port;
670                                         list[t].m_u.l4_hdr.src_port =
671                                                 udp_mask->hdr.src_port;
672                                 }
673                                 if (udp_mask->hdr.dst_port) {
674                                         list[t].h_u.l4_hdr.dst_port =
675                                                 udp_spec->hdr.dst_port;
676                                         list[t].m_u.l4_hdr.dst_port =
677                                                 udp_mask->hdr.dst_port;
678                                 }
679                                                 t++;
680                         }
681                         break;
682
683                 case RTE_FLOW_ITEM_TYPE_TCP:
684                         tcp_spec = item->spec;
685                         tcp_mask = item->mask;
686                         if (tcp_spec && tcp_mask) {
687                                 /* Check TCP mask and update input set */
688                                 if (tcp_mask->hdr.sent_seq ||
689                                         tcp_mask->hdr.recv_ack ||
690                                         tcp_mask->hdr.data_off ||
691                                         tcp_mask->hdr.tcp_flags ||
692                                         tcp_mask->hdr.rx_win ||
693                                         tcp_mask->hdr.cksum ||
694                                         tcp_mask->hdr.tcp_urp) {
695                                         rte_flow_error_set(error, EINVAL,
696                                            RTE_FLOW_ERROR_TYPE_ITEM,
697                                            item,
698                                            "Invalid TCP mask");
699                                         return 0;
700                                 }
701
702                                 if (tunnel_valid) {
703                                         if (tcp_mask->hdr.src_port)
704                                                 input_set |=
705                                                 ICE_INSET_TUN_TCP_SRC_PORT;
706                                         if (tcp_mask->hdr.dst_port)
707                                                 input_set |=
708                                                 ICE_INSET_TUN_TCP_DST_PORT;
709                                 } else {
710                                         if (tcp_mask->hdr.src_port)
711                                                 input_set |=
712                                                 ICE_INSET_TCP_SRC_PORT;
713                                         if (tcp_mask->hdr.dst_port)
714                                                 input_set |=
715                                                 ICE_INSET_TCP_DST_PORT;
716                                 }
717                                 list[t].type = ICE_TCP_IL;
718                                 if (tcp_mask->hdr.src_port) {
719                                         list[t].h_u.l4_hdr.src_port =
720                                                 tcp_spec->hdr.src_port;
721                                         list[t].m_u.l4_hdr.src_port =
722                                                 tcp_mask->hdr.src_port;
723                                 }
724                                 if (tcp_mask->hdr.dst_port) {
725                                         list[t].h_u.l4_hdr.dst_port =
726                                                 tcp_spec->hdr.dst_port;
727                                         list[t].m_u.l4_hdr.dst_port =
728                                                 tcp_mask->hdr.dst_port;
729                                 }
730                                 t++;
731                         }
732                         break;
733
734                 case RTE_FLOW_ITEM_TYPE_SCTP:
735                         sctp_spec = item->spec;
736                         sctp_mask = item->mask;
737                         if (sctp_spec && sctp_mask) {
738                                 /* Check SCTP mask and update input set */
739                                 if (sctp_mask->hdr.cksum) {
740                                         rte_flow_error_set(error, EINVAL,
741                                            RTE_FLOW_ERROR_TYPE_ITEM,
742                                            item,
743                                            "Invalid SCTP mask");
744                                         return 0;
745                                 }
746
747                                 if (tunnel_valid) {
748                                         if (sctp_mask->hdr.src_port)
749                                                 input_set |=
750                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
751                                         if (sctp_mask->hdr.dst_port)
752                                                 input_set |=
753                                                 ICE_INSET_TUN_SCTP_DST_PORT;
754                                 } else {
755                                         if (sctp_mask->hdr.src_port)
756                                                 input_set |=
757                                                 ICE_INSET_SCTP_SRC_PORT;
758                                         if (sctp_mask->hdr.dst_port)
759                                                 input_set |=
760                                                 ICE_INSET_SCTP_DST_PORT;
761                                 }
762                                 list[t].type = ICE_SCTP_IL;
763                                 if (sctp_mask->hdr.src_port) {
764                                         list[t].h_u.sctp_hdr.src_port =
765                                                 sctp_spec->hdr.src_port;
766                                         list[t].m_u.sctp_hdr.src_port =
767                                                 sctp_mask->hdr.src_port;
768                                 }
769                                 if (sctp_mask->hdr.dst_port) {
770                                         list[t].h_u.sctp_hdr.dst_port =
771                                                 sctp_spec->hdr.dst_port;
772                                         list[t].m_u.sctp_hdr.dst_port =
773                                                 sctp_mask->hdr.dst_port;
774                                 }
775                                 t++;
776                         }
777                         break;
778
779                 case RTE_FLOW_ITEM_TYPE_VXLAN:
780                         vxlan_spec = item->spec;
781                         vxlan_mask = item->mask;
782                         /* Check if VXLAN item is used to describe protocol.
783                          * If yes, both spec and mask should be NULL.
784                          * If no, both spec and mask shouldn't be NULL.
785                          */
786                         if ((!vxlan_spec && vxlan_mask) ||
787                             (vxlan_spec && !vxlan_mask)) {
788                                 rte_flow_error_set(error, EINVAL,
789                                            RTE_FLOW_ERROR_TYPE_ITEM,
790                                            item,
791                                            "Invalid VXLAN item");
792                                 return 0;
793                         }
794
795                         tunnel_valid = 1;
796                         if (vxlan_spec && vxlan_mask) {
797                                 list[t].type = ICE_VXLAN;
798                                 if (vxlan_mask->vni[0] ||
799                                         vxlan_mask->vni[1] ||
800                                         vxlan_mask->vni[2]) {
801                                         list[t].h_u.tnl_hdr.vni =
802                                                 (vxlan_spec->vni[2] << 16) |
803                                                 (vxlan_spec->vni[1] << 8) |
804                                                 vxlan_spec->vni[0];
805                                         list[t].m_u.tnl_hdr.vni =
806                                                 (vxlan_mask->vni[2] << 16) |
807                                                 (vxlan_mask->vni[1] << 8) |
808                                                 vxlan_mask->vni[0];
809                                         input_set |=
810                                                 ICE_INSET_TUN_VXLAN_VNI;
811                                 }
812                                 t++;
813                         }
814                         break;
815
816                 case RTE_FLOW_ITEM_TYPE_NVGRE:
817                         nvgre_spec = item->spec;
818                         nvgre_mask = item->mask;
819                         /* Check if NVGRE item is used to describe protocol.
820                          * If yes, both spec and mask should be NULL.
821                          * If no, both spec and mask shouldn't be NULL.
822                          */
823                         if ((!nvgre_spec && nvgre_mask) ||
824                             (nvgre_spec && !nvgre_mask)) {
825                                 rte_flow_error_set(error, EINVAL,
826                                            RTE_FLOW_ERROR_TYPE_ITEM,
827                                            item,
828                                            "Invalid NVGRE item");
829                                 return 0;
830                         }
831                         tunnel_valid = 1;
832                         if (nvgre_spec && nvgre_mask) {
833                                 list[t].type = ICE_NVGRE;
834                                 if (nvgre_mask->tni[0] ||
835                                         nvgre_mask->tni[1] ||
836                                         nvgre_mask->tni[2]) {
837                                         list[t].h_u.nvgre_hdr.tni_flow =
838                                                 (nvgre_spec->tni[2] << 16) |
839                                                 (nvgre_spec->tni[1] << 8) |
840                                                 nvgre_spec->tni[0];
841                                         list[t].m_u.nvgre_hdr.tni_flow =
842                                                 (nvgre_mask->tni[2] << 16) |
843                                                 (nvgre_mask->tni[1] << 8) |
844                                                 nvgre_mask->tni[0];
845                                         input_set |=
846                                                 ICE_INSET_TUN_NVGRE_TNI;
847                                 }
848                                 t++;
849                         }
850                         break;
851
852                 case RTE_FLOW_ITEM_TYPE_VLAN:
853                         vlan_spec = item->spec;
854                         vlan_mask = item->mask;
855                         /* Check if VLAN item is used to describe protocol.
856                          * If yes, both spec and mask should be NULL.
857                          * If no, both spec and mask shouldn't be NULL.
858                          */
859                         if ((!vlan_spec && vlan_mask) ||
860                             (vlan_spec && !vlan_mask)) {
861                                 rte_flow_error_set(error, EINVAL,
862                                            RTE_FLOW_ERROR_TYPE_ITEM,
863                                            item,
864                                            "Invalid VLAN item");
865                                 return 0;
866                         }
867                         if (vlan_spec && vlan_mask) {
868                                 list[t].type = ICE_VLAN_OFOS;
869                                 if (vlan_mask->tci) {
870                                         list[t].h_u.vlan_hdr.vlan =
871                                                 vlan_spec->tci;
872                                         list[t].m_u.vlan_hdr.vlan =
873                                                 vlan_mask->tci;
874                                         input_set |= ICE_INSET_VLAN_OUTER;
875                                 }
876                                 if (vlan_mask->inner_type) {
877                                         list[t].h_u.vlan_hdr.type =
878                                                 vlan_spec->inner_type;
879                                         list[t].m_u.vlan_hdr.type =
880                                                 vlan_mask->inner_type;
881                                         input_set |= ICE_INSET_VLAN_OUTER;
882                                 }
883                                 t++;
884                         }
885                         break;
886
887                 case RTE_FLOW_ITEM_TYPE_PPPOED:
888                 case RTE_FLOW_ITEM_TYPE_PPPOES:
889                         pppoe_spec = item->spec;
890                         pppoe_mask = item->mask;
891                         /* Check if PPPoE item is used to describe protocol.
892                          * If yes, both spec and mask should be NULL.
893                          * If no, both spec and mask shouldn't be NULL.
894                          */
895                         if ((!pppoe_spec && pppoe_mask) ||
896                                 (pppoe_spec && !pppoe_mask)) {
897                                 rte_flow_error_set(error, EINVAL,
898                                         RTE_FLOW_ERROR_TYPE_ITEM,
899                                         item,
900                                         "Invalid pppoe item");
901                                 return 0;
902                         }
903                         if (pppoe_spec && pppoe_mask) {
904                                 /* Check pppoe mask and update input set */
905                                 if (pppoe_mask->length ||
906                                         pppoe_mask->code ||
907                                         pppoe_mask->version_type) {
908                                         rte_flow_error_set(error, EINVAL,
909                                                 RTE_FLOW_ERROR_TYPE_ITEM,
910                                                 item,
911                                                 "Invalid pppoe mask");
912                                         return 0;
913                                 }
914                                 list[t].type = ICE_PPPOE;
915                                 if (pppoe_mask->session_id) {
916                                         list[t].h_u.pppoe_hdr.session_id =
917                                                 pppoe_spec->session_id;
918                                         list[t].m_u.pppoe_hdr.session_id =
919                                                 pppoe_mask->session_id;
920                                         input_set |= ICE_INSET_PPPOE_SESSION;
921                                 }
922                                 t++;
923                                 pppoe_valid = 1;
924                         }
925                         break;
926
927                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
928                         pppoe_proto_spec = item->spec;
929                         pppoe_proto_mask = item->mask;
930                         /* Check if PPPoE optional proto_id item
931                          * is used to describe protocol.
932                          * If yes, both spec and mask should be NULL.
933                          * If no, both spec and mask shouldn't be NULL.
934                          */
935                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
936                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
937                                 rte_flow_error_set(error, EINVAL,
938                                         RTE_FLOW_ERROR_TYPE_ITEM,
939                                         item,
940                                         "Invalid pppoe proto item");
941                                 return 0;
942                         }
943                         if (pppoe_proto_spec && pppoe_proto_mask) {
944                                 if (pppoe_valid)
945                                         t--;
946                                 list[t].type = ICE_PPPOE;
947                                 if (pppoe_proto_mask->proto_id) {
948                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
949                                                 pppoe_proto_spec->proto_id;
950                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
951                                                 pppoe_proto_mask->proto_id;
952                                         input_set |= ICE_INSET_PPPOE_PROTO;
953                                 }
954                                 t++;
955                         }
956                         break;
957
958                 case RTE_FLOW_ITEM_TYPE_ESP:
959                         esp_spec = item->spec;
960                         esp_mask = item->mask;
961                         if (esp_spec || esp_mask) {
962                                 rte_flow_error_set(error, EINVAL,
963                                            RTE_FLOW_ERROR_TYPE_ITEM,
964                                            item,
965                                            "Invalid esp item");
966                                 return -ENOTSUP;
967                         }
968                         if (ipv6_valiad)
969                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
970                         break;
971
972                 case RTE_FLOW_ITEM_TYPE_AH:
973                         ah_spec = item->spec;
974                         ah_mask = item->mask;
975                         if (ah_spec || ah_mask) {
976                                 rte_flow_error_set(error, EINVAL,
977                                            RTE_FLOW_ERROR_TYPE_ITEM,
978                                            item,
979                                            "Invalid ah item");
980                                 return -ENOTSUP;
981                         }
982                         if (ipv6_valiad)
983                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
984                         break;
985
986                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
987                         l2tp_spec = item->spec;
988                         l2tp_mask = item->mask;
989                         if (l2tp_spec || l2tp_mask) {
990                                 rte_flow_error_set(error, EINVAL,
991                                            RTE_FLOW_ERROR_TYPE_ITEM,
992                                            item,
993                                            "Invalid l2tp item");
994                                 return -ENOTSUP;
995                         }
996                         if (ipv6_valiad)
997                                 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
998                         break;
999
1000                 case RTE_FLOW_ITEM_TYPE_VOID:
1001                         break;
1002
1003                 default:
1004                         rte_flow_error_set(error, EINVAL,
1005                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1006                                    "Invalid pattern item.");
1007                         goto out;
1008                 }
1009         }
1010
1011         *lkups_num = t;
1012
1013         return input_set;
1014 out:
1015         return 0;
1016 }
1017
1018 static int
1019 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1020                             struct rte_flow_error *error,
1021                             struct ice_adv_rule_info *rule_info)
1022 {
1023         const struct rte_flow_action_vf *act_vf;
1024         const struct rte_flow_action *action;
1025         enum rte_flow_action_type action_type;
1026
1027         for (action = actions; action->type !=
1028                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1029                 action_type = action->type;
1030                 switch (action_type) {
1031                 case RTE_FLOW_ACTION_TYPE_VF:
1032                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1033                         act_vf = action->conf;
1034                         rule_info->sw_act.vsi_handle = act_vf->id;
1035                         break;
1036                 default:
1037                         rte_flow_error_set(error,
1038                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1039                                            actions,
1040                                            "Invalid action type or queue number");
1041                         return -rte_errno;
1042                 }
1043         }
1044
1045         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1046         rule_info->rx = 1;
1047         rule_info->priority = 5;
1048
1049         return 0;
1050 }
1051
1052 static int
1053 ice_switch_parse_action(struct ice_pf *pf,
1054                 const struct rte_flow_action *actions,
1055                 struct rte_flow_error *error,
1056                 struct ice_adv_rule_info *rule_info)
1057 {
1058         struct ice_vsi *vsi = pf->main_vsi;
1059         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1060         const struct rte_flow_action_queue *act_q;
1061         const struct rte_flow_action_rss *act_qgrop;
1062         uint16_t base_queue, i;
1063         const struct rte_flow_action *action;
1064         enum rte_flow_action_type action_type;
1065         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1066                  2, 4, 8, 16, 32, 64, 128};
1067
1068         base_queue = pf->base_queue + vsi->base_queue;
1069         for (action = actions; action->type !=
1070                         RTE_FLOW_ACTION_TYPE_END; action++) {
1071                 action_type = action->type;
1072                 switch (action_type) {
1073                 case RTE_FLOW_ACTION_TYPE_RSS:
1074                         act_qgrop = action->conf;
1075                         rule_info->sw_act.fltr_act =
1076                                 ICE_FWD_TO_QGRP;
1077                         rule_info->sw_act.fwd_id.q_id =
1078                                 base_queue + act_qgrop->queue[0];
1079                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1080                                 if (act_qgrop->queue_num ==
1081                                         valid_qgrop_number[i])
1082                                         break;
1083                         }
1084                         if (i == MAX_QGRP_NUM_TYPE)
1085                                 goto error;
1086                         if ((act_qgrop->queue[0] +
1087                                 act_qgrop->queue_num) >
1088                                 dev->data->nb_rx_queues)
1089                                 goto error;
1090                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1091                                 if (act_qgrop->queue[i + 1] !=
1092                                         act_qgrop->queue[i] + 1)
1093                                         goto error;
1094                         rule_info->sw_act.qgrp_size =
1095                                 act_qgrop->queue_num;
1096                         break;
1097                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1098                         act_q = action->conf;
1099                         if (act_q->index >= dev->data->nb_rx_queues)
1100                                 goto error;
1101                         rule_info->sw_act.fltr_act =
1102                                 ICE_FWD_TO_Q;
1103                         rule_info->sw_act.fwd_id.q_id =
1104                                 base_queue + act_q->index;
1105                         break;
1106
1107                 case RTE_FLOW_ACTION_TYPE_DROP:
1108                         rule_info->sw_act.fltr_act =
1109                                 ICE_DROP_PACKET;
1110                         break;
1111
1112                 case RTE_FLOW_ACTION_TYPE_VOID:
1113                         break;
1114
1115                 default:
1116                         goto error;
1117                 }
1118         }
1119
1120         rule_info->sw_act.vsi_handle = vsi->idx;
1121         rule_info->rx = 1;
1122         rule_info->sw_act.src = vsi->idx;
1123         rule_info->priority = 5;
1124
1125         return 0;
1126
1127 error:
1128         rte_flow_error_set(error,
1129                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1130                 actions,
1131                 "Invalid action type or queue number");
1132         return -rte_errno;
1133 }
1134
1135 static int
1136 ice_switch_check_action(const struct rte_flow_action *actions,
1137                             struct rte_flow_error *error)
1138 {
1139         const struct rte_flow_action *action;
1140         enum rte_flow_action_type action_type;
1141         uint16_t actions_num = 0;
1142
1143         for (action = actions; action->type !=
1144                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1145                 action_type = action->type;
1146                 switch (action_type) {
1147                 case RTE_FLOW_ACTION_TYPE_VF:
1148                 case RTE_FLOW_ACTION_TYPE_RSS:
1149                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1150                 case RTE_FLOW_ACTION_TYPE_DROP:
1151                         actions_num++;
1152                         break;
1153                 case RTE_FLOW_ACTION_TYPE_VOID:
1154                         continue;
1155                 default:
1156                         rte_flow_error_set(error,
1157                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1158                                            actions,
1159                                            "Invalid action type");
1160                         return -rte_errno;
1161                 }
1162         }
1163
1164         if (actions_num > 1) {
1165                 rte_flow_error_set(error,
1166                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1167                                    actions,
1168                                    "Invalid action number");
1169                 return -rte_errno;
1170         }
1171
1172         return 0;
1173 }
1174
1175 static bool
1176 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1177 {
1178         switch (tun_type) {
1179         case ICE_SW_TUN_PROFID_IPV6_ESP:
1180         case ICE_SW_TUN_PROFID_IPV6_AH:
1181         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1182                 return true;
1183         default:
1184                 break;
1185         }
1186
1187         return false;
1188 }
1189
1190 static int
1191 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1192                 struct ice_pattern_match_item *array,
1193                 uint32_t array_len,
1194                 const struct rte_flow_item pattern[],
1195                 const struct rte_flow_action actions[],
1196                 void **meta,
1197                 struct rte_flow_error *error)
1198 {
1199         struct ice_pf *pf = &ad->pf;
1200         uint64_t inputset = 0;
1201         int ret = 0;
1202         struct sw_meta *sw_meta_ptr = NULL;
1203         struct ice_adv_rule_info rule_info;
1204         struct ice_adv_lkup_elem *list = NULL;
1205         uint16_t lkups_num = 0;
1206         const struct rte_flow_item *item = pattern;
1207         uint16_t item_num = 0;
1208         enum ice_sw_tunnel_type tun_type =
1209                 ICE_SW_TUN_AND_NON_TUN;
1210         struct ice_pattern_match_item *pattern_match_item = NULL;
1211
1212         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1213                 item_num++;
1214                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1215                         tun_type = ICE_SW_TUN_VXLAN;
1216                 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1217                         tun_type = ICE_SW_TUN_NVGRE;
1218                 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1219                                 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1220                         tun_type = ICE_SW_TUN_PPPOE;
1221                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1222                         const struct rte_flow_item_eth *eth_mask;
1223                         if (item->mask)
1224                                 eth_mask = item->mask;
1225                         else
1226                                 continue;
1227                         if (eth_mask->type == UINT16_MAX)
1228                                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1229                 }
1230                 /* reserve one more memory slot for ETH which may
1231                  * consume 2 lookup items.
1232                  */
1233                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1234                         item_num++;
1235         }
1236
1237         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1238         if (!list) {
1239                 rte_flow_error_set(error, EINVAL,
1240                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1241                                    "No memory for PMD internal items");
1242                 return -rte_errno;
1243         }
1244
1245         sw_meta_ptr =
1246                 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1247         if (!sw_meta_ptr) {
1248                 rte_flow_error_set(error, EINVAL,
1249                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1250                                    "No memory for sw_pattern_meta_ptr");
1251                 goto error;
1252         }
1253
1254         pattern_match_item =
1255                 ice_search_pattern_match_item(pattern, array, array_len, error);
1256         if (!pattern_match_item) {
1257                 rte_flow_error_set(error, EINVAL,
1258                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1259                                    "Invalid input pattern");
1260                 goto error;
1261         }
1262
1263         inputset = ice_switch_inset_get
1264                 (pattern, error, list, &lkups_num, &tun_type);
1265         if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1266                 (inputset & ~pattern_match_item->input_set_mask)) {
1267                 rte_flow_error_set(error, EINVAL,
1268                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1269                                    pattern,
1270                                    "Invalid input set");
1271                 goto error;
1272         }
1273
1274         rule_info.tun_type = tun_type;
1275
1276         ret = ice_switch_check_action(actions, error);
1277         if (ret) {
1278                 rte_flow_error_set(error, EINVAL,
1279                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1280                                    "Invalid input action number");
1281                 goto error;
1282         }
1283
1284         if (ad->hw.dcf_enabled)
1285                 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1286         else
1287                 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1288
1289         if (ret) {
1290                 rte_flow_error_set(error, EINVAL,
1291                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1292                                    "Invalid input action");
1293                 goto error;
1294         }
1295
1296         if (meta) {
1297                 *meta = sw_meta_ptr;
1298                 ((struct sw_meta *)*meta)->list = list;
1299                 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1300                 ((struct sw_meta *)*meta)->rule_info = rule_info;
1301         } else {
1302                 rte_free(list);
1303                 rte_free(sw_meta_ptr);
1304         }
1305
1306         rte_free(pattern_match_item);
1307
1308         return 0;
1309
1310 error:
1311         rte_free(list);
1312         rte_free(sw_meta_ptr);
1313         rte_free(pattern_match_item);
1314
1315         return -rte_errno;
1316 }
1317
1318 static int
1319 ice_switch_query(struct ice_adapter *ad __rte_unused,
1320                 struct rte_flow *flow __rte_unused,
1321                 struct rte_flow_query_count *count __rte_unused,
1322                 struct rte_flow_error *error)
1323 {
1324         rte_flow_error_set(error, EINVAL,
1325                 RTE_FLOW_ERROR_TYPE_HANDLE,
1326                 NULL,
1327                 "count action not supported by switch filter");
1328
1329         return -rte_errno;
1330 }
1331
1332 static int
1333 ice_switch_init(struct ice_adapter *ad)
1334 {
1335         int ret = 0;
1336         struct ice_flow_parser *dist_parser;
1337         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1338
1339         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1340                 dist_parser = &ice_switch_dist_parser_comms;
1341         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1342                 dist_parser = &ice_switch_dist_parser_os;
1343         else
1344                 return -EINVAL;
1345
1346         if (ad->devargs.pipe_mode_support)
1347                 ret = ice_register_parser(perm_parser, ad);
1348         else
1349                 ret = ice_register_parser(dist_parser, ad);
1350         return ret;
1351 }
1352
1353 static void
1354 ice_switch_uninit(struct ice_adapter *ad)
1355 {
1356         struct ice_flow_parser *dist_parser;
1357         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1358
1359         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1360                 dist_parser = &ice_switch_dist_parser_comms;
1361         else
1362                 dist_parser = &ice_switch_dist_parser_os;
1363
1364         if (ad->devargs.pipe_mode_support)
1365                 ice_unregister_parser(perm_parser, ad);
1366         else
1367                 ice_unregister_parser(dist_parser, ad);
1368 }
1369
/* Switch filter flow engine: hooks this file's create/destroy/query
 * handlers into the generic flow framework (ice_generic_flow.c).
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1380
/* Distributor-stage parser used when the OS default DDP package is
 * loaded (registered by ice_switch_init unless pipeline mode is set).
 */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_os,
	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1389
/* Distributor-stage parser used when the comms DDP package is loaded
 * (registered by ice_switch_init unless pipeline mode is set).
 */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_comms,
	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1398
/* Permission-stage parser registered instead of the distributor
 * parsers when the pipe_mode_support devarg is enabled.
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm,
	.array_len = RTE_DIM(ice_switch_pattern_perm),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
1407
1408 RTE_INIT(ice_sw_engine_init)
1409 {
1410         struct ice_flow_engine *engine = &ice_switch_engine;
1411         ice_register_flow_engine(engine);
1412 }