net/ice: support PFCP
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26
27
28 #define MAX_QGRP_NUM_TYPE 7
29
30 #define ICE_SW_INSET_ETHER ( \
31         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
32 #define ICE_SW_INSET_MAC_VLAN ( \
33                 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
34                 ICE_INSET_VLAN_OUTER)
35 #define ICE_SW_INSET_MAC_IPV4 ( \
36         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
37         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
38 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
39         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
40         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
41         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
42 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
43         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
45         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
46 #define ICE_SW_INSET_MAC_IPV6 ( \
47         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
48         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
49         ICE_INSET_IPV6_NEXT_HDR)
50 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
51         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
52         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
53         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
54 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
55         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
56         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
57         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
58 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
59         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
60         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
61 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
62         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
63         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
64 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
65         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
66         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
67         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
68 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
69         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
70         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
71         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
73         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
74         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
75         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
77         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
79         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
81         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82         ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
83 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
84         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
85         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
86         ICE_INSET_TUN_IPV4_TOS)
87 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
88         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
89         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
90         ICE_INSET_TUN_IPV4_TOS)
91 #define ICE_SW_INSET_MAC_PPPOE  ( \
92         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
93         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
94 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
95         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
96         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
97         ICE_INSET_PPPOE_PROTO)
98
99 struct sw_meta {
100         struct ice_adv_lkup_elem *list;
101         uint16_t lkups_num;
102         struct ice_adv_rule_info rule_info;
103 };
104
105 static struct ice_flow_parser ice_switch_dist_parser_os;
106 static struct ice_flow_parser ice_switch_dist_parser_comms;
107 static struct ice_flow_parser ice_switch_perm_parser;
108
109 static struct
110 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
111         {pattern_ethertype,
112                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
113         {pattern_ethertype_vlan,
114                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
115         {pattern_eth_ipv4,
116                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
117         {pattern_eth_ipv4_udp,
118                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
119         {pattern_eth_ipv4_tcp,
120                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
121         {pattern_eth_ipv6,
122                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
123         {pattern_eth_ipv6_udp,
124                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
125         {pattern_eth_ipv6_tcp,
126                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
127         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
128                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
129         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
130                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
131         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
132                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
133         {pattern_eth_ipv4_nvgre_eth_ipv4,
134                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
135         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
136                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
137         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
138                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
139         {pattern_eth_pppoed,
140                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
141         {pattern_eth_vlan_pppoed,
142                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
143         {pattern_eth_pppoes,
144                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
145         {pattern_eth_vlan_pppoes,
146                         ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
147         {pattern_eth_pppoes_proto,
148                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
149         {pattern_eth_vlan_pppoes_proto,
150                         ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
151         {pattern_eth_ipv6_esp,
152                         ICE_INSET_NONE, ICE_INSET_NONE},
153         {pattern_eth_ipv6_ah,
154                         ICE_INSET_NONE, ICE_INSET_NONE},
155         {pattern_eth_ipv6_l2tp,
156                         ICE_INSET_NONE, ICE_INSET_NONE},
157         {pattern_eth_ipv4_pfcp,
158                         ICE_INSET_NONE, ICE_INSET_NONE},
159         {pattern_eth_ipv6_pfcp,
160                         ICE_INSET_NONE, ICE_INSET_NONE},
161 };
162
163 static struct
164 ice_pattern_match_item ice_switch_pattern_dist_os[] = {
165         {pattern_ethertype,
166                         ICE_SW_INSET_ETHER, ICE_INSET_NONE},
167         {pattern_ethertype_vlan,
168                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
169         {pattern_eth_arp,
170                         ICE_INSET_NONE, ICE_INSET_NONE},
171         {pattern_eth_ipv4,
172                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
173         {pattern_eth_ipv4_udp,
174                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
175         {pattern_eth_ipv4_tcp,
176                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
177         {pattern_eth_ipv6,
178                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
179         {pattern_eth_ipv6_udp,
180                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
181         {pattern_eth_ipv6_tcp,
182                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
183         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
184                         ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
185         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
186                         ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
187         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
188                         ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
189         {pattern_eth_ipv4_nvgre_eth_ipv4,
190                         ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
191         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
192                         ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
193         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
194                         ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
195 };
196
197 static struct
198 ice_pattern_match_item ice_switch_pattern_perm[] = {
199         {pattern_ethertype_vlan,
200                         ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
201         {pattern_eth_ipv4,
202                         ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
203         {pattern_eth_ipv4_udp,
204                         ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
205         {pattern_eth_ipv4_tcp,
206                         ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
207         {pattern_eth_ipv6,
208                         ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
209         {pattern_eth_ipv6_udp,
210                         ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
211         {pattern_eth_ipv6_tcp,
212                         ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
213         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
214                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
215         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
216                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
217         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
218                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
219         {pattern_eth_ipv4_nvgre_eth_ipv4,
220                         ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
221         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,
222                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
223         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
224                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
225         {pattern_eth_ipv6_esp,
226                         ICE_INSET_NONE, ICE_INSET_NONE},
227         {pattern_eth_ipv6_ah,
228                         ICE_INSET_NONE, ICE_INSET_NONE},
229         {pattern_eth_ipv6_l2tp,
230                         ICE_INSET_NONE, ICE_INSET_NONE},
231         {pattern_eth_ipv4_pfcp,
232                         ICE_INSET_NONE, ICE_INSET_NONE},
233         {pattern_eth_ipv6_pfcp,
234                         ICE_INSET_NONE, ICE_INSET_NONE},
235 };
236
237 static int
238 ice_switch_create(struct ice_adapter *ad,
239                 struct rte_flow *flow,
240                 void *meta,
241                 struct rte_flow_error *error)
242 {
243         int ret = 0;
244         struct ice_pf *pf = &ad->pf;
245         struct ice_hw *hw = ICE_PF_TO_HW(pf);
246         struct ice_rule_query_data rule_added = {0};
247         struct ice_rule_query_data *filter_ptr;
248         struct ice_adv_lkup_elem *list =
249                 ((struct sw_meta *)meta)->list;
250         uint16_t lkups_cnt =
251                 ((struct sw_meta *)meta)->lkups_num;
252         struct ice_adv_rule_info *rule_info =
253                 &((struct sw_meta *)meta)->rule_info;
254
255         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
256                 rte_flow_error_set(error, EINVAL,
257                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
258                         "item number too large for rule");
259                 goto error;
260         }
261         if (!list) {
262                 rte_flow_error_set(error, EINVAL,
263                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
264                         "lookup list should not be NULL");
265                 goto error;
266         }
267         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
268         if (!ret) {
269                 filter_ptr = rte_zmalloc("ice_switch_filter",
270                         sizeof(struct ice_rule_query_data), 0);
271                 if (!filter_ptr) {
272                         rte_flow_error_set(error, EINVAL,
273                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
274                                    "No memory for ice_switch_filter");
275                         goto error;
276                 }
277                 flow->rule = filter_ptr;
278                 rte_memcpy(filter_ptr,
279                         &rule_added,
280                         sizeof(struct ice_rule_query_data));
281         } else {
282                 rte_flow_error_set(error, EINVAL,
283                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
284                         "switch filter create flow fail");
285                 goto error;
286         }
287
288         rte_free(list);
289         rte_free(meta);
290         return 0;
291
292 error:
293         rte_free(list);
294         rte_free(meta);
295
296         return -rte_errno;
297 }
298
299 static int
300 ice_switch_destroy(struct ice_adapter *ad,
301                 struct rte_flow *flow,
302                 struct rte_flow_error *error)
303 {
304         struct ice_hw *hw = &ad->hw;
305         int ret;
306         struct ice_rule_query_data *filter_ptr;
307
308         filter_ptr = (struct ice_rule_query_data *)
309                 flow->rule;
310
311         if (!filter_ptr) {
312                 rte_flow_error_set(error, EINVAL,
313                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
314                         "no such flow"
315                         " create by switch filter");
316                 return -rte_errno;
317         }
318
319         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
320         if (ret) {
321                 rte_flow_error_set(error, EINVAL,
322                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
323                         "fail to destroy switch filter rule");
324                 return -rte_errno;
325         }
326
327         rte_free(filter_ptr);
328         return ret;
329 }
330
331 static void
332 ice_switch_filter_rule_free(struct rte_flow *flow)
333 {
334         rte_free(flow->rule);
335 }
336
337 static uint64_t
338 ice_switch_inset_get(const struct rte_flow_item pattern[],
339                 struct rte_flow_error *error,
340                 struct ice_adv_lkup_elem *list,
341                 uint16_t *lkups_num,
342                 enum ice_sw_tunnel_type *tun_type)
343 {
344         const struct rte_flow_item *item = pattern;
345         enum rte_flow_item_type item_type;
346         const struct rte_flow_item_eth *eth_spec, *eth_mask;
347         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
348         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
349         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
350         const struct rte_flow_item_udp *udp_spec, *udp_mask;
351         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
352         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
353         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
354         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
355         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
356         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
357                                 *pppoe_proto_mask;
358         const struct rte_flow_item_esp *esp_spec, *esp_mask;
359         const struct rte_flow_item_ah *ah_spec, *ah_mask;
360         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
361         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
362         uint64_t input_set = ICE_INSET_NONE;
363         uint16_t j, t = 0;
364         uint16_t tunnel_valid = 0;
365         uint16_t pppoe_valid = 0;
366         uint16_t ipv6_valiad = 0;
367
368
369         for (item = pattern; item->type !=
370                         RTE_FLOW_ITEM_TYPE_END; item++) {
371                 if (item->last) {
372                         rte_flow_error_set(error, EINVAL,
373                                         RTE_FLOW_ERROR_TYPE_ITEM,
374                                         item,
375                                         "Not support range");
376                         return 0;
377                 }
378                 item_type = item->type;
379
380                 switch (item_type) {
381                 case RTE_FLOW_ITEM_TYPE_ETH:
382                         eth_spec = item->spec;
383                         eth_mask = item->mask;
384                         if (eth_spec && eth_mask) {
385                                 const uint8_t *a = eth_mask->src.addr_bytes;
386                                 const uint8_t *b = eth_mask->dst.addr_bytes;
387                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
388                                         if (a[j] && tunnel_valid) {
389                                                 input_set |=
390                                                         ICE_INSET_TUN_SMAC;
391                                                 break;
392                                         } else if (a[j]) {
393                                                 input_set |=
394                                                         ICE_INSET_SMAC;
395                                                 break;
396                                         }
397                                 }
398                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
399                                         if (b[j] && tunnel_valid) {
400                                                 input_set |=
401                                                         ICE_INSET_TUN_DMAC;
402                                                 break;
403                                         } else if (b[j]) {
404                                                 input_set |=
405                                                         ICE_INSET_DMAC;
406                                                 break;
407                                         }
408                                 }
409                                 if (eth_mask->type)
410                                         input_set |= ICE_INSET_ETHERTYPE;
411                                 list[t].type = (tunnel_valid  == 0) ?
412                                         ICE_MAC_OFOS : ICE_MAC_IL;
413                                 struct ice_ether_hdr *h;
414                                 struct ice_ether_hdr *m;
415                                 uint16_t i = 0;
416                                 h = &list[t].h_u.eth_hdr;
417                                 m = &list[t].m_u.eth_hdr;
418                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
419                                         if (eth_mask->src.addr_bytes[j]) {
420                                                 h->src_addr[j] =
421                                                 eth_spec->src.addr_bytes[j];
422                                                 m->src_addr[j] =
423                                                 eth_mask->src.addr_bytes[j];
424                                                 i = 1;
425                                         }
426                                         if (eth_mask->dst.addr_bytes[j]) {
427                                                 h->dst_addr[j] =
428                                                 eth_spec->dst.addr_bytes[j];
429                                                 m->dst_addr[j] =
430                                                 eth_mask->dst.addr_bytes[j];
431                                                 i = 1;
432                                         }
433                                 }
434                                 if (i)
435                                         t++;
436                                 if (eth_mask->type) {
437                                         list[t].type = ICE_ETYPE_OL;
438                                         list[t].h_u.ethertype.ethtype_id =
439                                                 eth_spec->type;
440                                         list[t].m_u.ethertype.ethtype_id =
441                                                 eth_mask->type;
442                                         t++;
443                                 }
444                         }
445                         break;
446
447                 case RTE_FLOW_ITEM_TYPE_IPV4:
448                         ipv4_spec = item->spec;
449                         ipv4_mask = item->mask;
450                         if (ipv4_spec && ipv4_mask) {
451                                 /* Check IPv4 mask and update input set */
452                                 if (ipv4_mask->hdr.version_ihl ||
453                                         ipv4_mask->hdr.total_length ||
454                                         ipv4_mask->hdr.packet_id ||
455                                         ipv4_mask->hdr.hdr_checksum) {
456                                         rte_flow_error_set(error, EINVAL,
457                                                    RTE_FLOW_ERROR_TYPE_ITEM,
458                                                    item,
459                                                    "Invalid IPv4 mask.");
460                                         return 0;
461                                 }
462
463                                 if (tunnel_valid) {
464                                         if (ipv4_mask->hdr.type_of_service)
465                                                 input_set |=
466                                                         ICE_INSET_TUN_IPV4_TOS;
467                                         if (ipv4_mask->hdr.src_addr)
468                                                 input_set |=
469                                                         ICE_INSET_TUN_IPV4_SRC;
470                                         if (ipv4_mask->hdr.dst_addr)
471                                                 input_set |=
472                                                         ICE_INSET_TUN_IPV4_DST;
473                                         if (ipv4_mask->hdr.time_to_live)
474                                                 input_set |=
475                                                         ICE_INSET_TUN_IPV4_TTL;
476                                         if (ipv4_mask->hdr.next_proto_id)
477                                                 input_set |=
478                                                 ICE_INSET_TUN_IPV4_PROTO;
479                                 } else {
480                                         if (ipv4_mask->hdr.src_addr)
481                                                 input_set |= ICE_INSET_IPV4_SRC;
482                                         if (ipv4_mask->hdr.dst_addr)
483                                                 input_set |= ICE_INSET_IPV4_DST;
484                                         if (ipv4_mask->hdr.time_to_live)
485                                                 input_set |= ICE_INSET_IPV4_TTL;
486                                         if (ipv4_mask->hdr.next_proto_id)
487                                                 input_set |=
488                                                 ICE_INSET_IPV4_PROTO;
489                                         if (ipv4_mask->hdr.type_of_service)
490                                                 input_set |=
491                                                         ICE_INSET_IPV4_TOS;
492                                 }
493                                 list[t].type = (tunnel_valid  == 0) ?
494                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
495                                 if (ipv4_mask->hdr.src_addr) {
496                                         list[t].h_u.ipv4_hdr.src_addr =
497                                                 ipv4_spec->hdr.src_addr;
498                                         list[t].m_u.ipv4_hdr.src_addr =
499                                                 ipv4_mask->hdr.src_addr;
500                                 }
501                                 if (ipv4_mask->hdr.dst_addr) {
502                                         list[t].h_u.ipv4_hdr.dst_addr =
503                                                 ipv4_spec->hdr.dst_addr;
504                                         list[t].m_u.ipv4_hdr.dst_addr =
505                                                 ipv4_mask->hdr.dst_addr;
506                                 }
507                                 if (ipv4_mask->hdr.time_to_live) {
508                                         list[t].h_u.ipv4_hdr.time_to_live =
509                                                 ipv4_spec->hdr.time_to_live;
510                                         list[t].m_u.ipv4_hdr.time_to_live =
511                                                 ipv4_mask->hdr.time_to_live;
512                                 }
513                                 if (ipv4_mask->hdr.next_proto_id) {
514                                         list[t].h_u.ipv4_hdr.protocol =
515                                                 ipv4_spec->hdr.next_proto_id;
516                                         list[t].m_u.ipv4_hdr.protocol =
517                                                 ipv4_mask->hdr.next_proto_id;
518                                 }
519                                 if (ipv4_mask->hdr.type_of_service) {
520                                         list[t].h_u.ipv4_hdr.tos =
521                                                 ipv4_spec->hdr.type_of_service;
522                                         list[t].m_u.ipv4_hdr.tos =
523                                                 ipv4_mask->hdr.type_of_service;
524                                 }
525                                 t++;
526                         }
527                         break;
528
529                 case RTE_FLOW_ITEM_TYPE_IPV6:
530                         ipv6_spec = item->spec;
531                         ipv6_mask = item->mask;
532                         ipv6_valiad = 1;
533                         if (ipv6_spec && ipv6_mask) {
534                                 if (ipv6_mask->hdr.payload_len) {
535                                         rte_flow_error_set(error, EINVAL,
536                                            RTE_FLOW_ERROR_TYPE_ITEM,
537                                            item,
538                                            "Invalid IPv6 mask");
539                                         return 0;
540                                 }
541
542                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
543                                         if (ipv6_mask->hdr.src_addr[j] &&
544                                                 tunnel_valid) {
545                                                 input_set |=
546                                                 ICE_INSET_TUN_IPV6_SRC;
547                                                 break;
548                                         } else if (ipv6_mask->hdr.src_addr[j]) {
549                                                 input_set |= ICE_INSET_IPV6_SRC;
550                                                 break;
551                                         }
552                                 }
553                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
554                                         if (ipv6_mask->hdr.dst_addr[j] &&
555                                                 tunnel_valid) {
556                                                 input_set |=
557                                                 ICE_INSET_TUN_IPV6_DST;
558                                                 break;
559                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
560                                                 input_set |= ICE_INSET_IPV6_DST;
561                                                 break;
562                                         }
563                                 }
564                                 if (ipv6_mask->hdr.proto &&
565                                         tunnel_valid)
566                                         input_set |=
567                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
568                                 else if (ipv6_mask->hdr.proto)
569                                         input_set |=
570                                                 ICE_INSET_IPV6_NEXT_HDR;
571                                 if (ipv6_mask->hdr.hop_limits &&
572                                         tunnel_valid)
573                                         input_set |=
574                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
575                                 else if (ipv6_mask->hdr.hop_limits)
576                                         input_set |=
577                                                 ICE_INSET_IPV6_HOP_LIMIT;
578                                 if ((ipv6_mask->hdr.vtc_flow &
579                                                 rte_cpu_to_be_32
580                                                 (RTE_IPV6_HDR_TC_MASK)) &&
581                                         tunnel_valid)
582                                         input_set |=
583                                                         ICE_INSET_TUN_IPV6_TC;
584                                 else if (ipv6_mask->hdr.vtc_flow &
585                                                 rte_cpu_to_be_32
586                                                 (RTE_IPV6_HDR_TC_MASK))
587                                         input_set |= ICE_INSET_IPV6_TC;
588
589                                 list[t].type = (tunnel_valid  == 0) ?
590                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
591                                 struct ice_ipv6_hdr *f;
592                                 struct ice_ipv6_hdr *s;
593                                 f = &list[t].h_u.ipv6_hdr;
594                                 s = &list[t].m_u.ipv6_hdr;
595                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
596                                         if (ipv6_mask->hdr.src_addr[j]) {
597                                                 f->src_addr[j] =
598                                                 ipv6_spec->hdr.src_addr[j];
599                                                 s->src_addr[j] =
600                                                 ipv6_mask->hdr.src_addr[j];
601                                         }
602                                         if (ipv6_mask->hdr.dst_addr[j]) {
603                                                 f->dst_addr[j] =
604                                                 ipv6_spec->hdr.dst_addr[j];
605                                                 s->dst_addr[j] =
606                                                 ipv6_mask->hdr.dst_addr[j];
607                                         }
608                                 }
609                                 if (ipv6_mask->hdr.proto) {
610                                         f->next_hdr =
611                                                 ipv6_spec->hdr.proto;
612                                         s->next_hdr =
613                                                 ipv6_mask->hdr.proto;
614                                 }
615                                 if (ipv6_mask->hdr.hop_limits) {
616                                         f->hop_limit =
617                                                 ipv6_spec->hdr.hop_limits;
618                                         s->hop_limit =
619                                                 ipv6_mask->hdr.hop_limits;
620                                 }
621                                 if (ipv6_mask->hdr.vtc_flow &
622                                                 rte_cpu_to_be_32
623                                                 (RTE_IPV6_HDR_TC_MASK)) {
624                                         struct ice_le_ver_tc_flow vtf;
625                                         vtf.u.fld.version = 0;
626                                         vtf.u.fld.flow_label = 0;
627                                         vtf.u.fld.tc = (rte_be_to_cpu_32
628                                                 (ipv6_spec->hdr.vtc_flow) &
629                                                         RTE_IPV6_HDR_TC_MASK) >>
630                                                         RTE_IPV6_HDR_TC_SHIFT;
631                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
632                                         vtf.u.fld.tc = (rte_be_to_cpu_32
633                                                 (ipv6_mask->hdr.vtc_flow) &
634                                                         RTE_IPV6_HDR_TC_MASK) >>
635                                                         RTE_IPV6_HDR_TC_SHIFT;
636                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
637                                 }
638                                 t++;
639                         }
640                         break;
641
642                 case RTE_FLOW_ITEM_TYPE_UDP:
643                         udp_spec = item->spec;
644                         udp_mask = item->mask;
645                         if (udp_spec && udp_mask) {
646                                 /* Check UDP mask and update input set*/
647                                 if (udp_mask->hdr.dgram_len ||
648                                     udp_mask->hdr.dgram_cksum) {
649                                         rte_flow_error_set(error, EINVAL,
650                                                    RTE_FLOW_ERROR_TYPE_ITEM,
651                                                    item,
652                                                    "Invalid UDP mask");
653                                         return 0;
654                                 }
655
656                                 if (tunnel_valid) {
657                                         if (udp_mask->hdr.src_port)
658                                                 input_set |=
659                                                 ICE_INSET_TUN_UDP_SRC_PORT;
660                                         if (udp_mask->hdr.dst_port)
661                                                 input_set |=
662                                                 ICE_INSET_TUN_UDP_DST_PORT;
663                                 } else {
664                                         if (udp_mask->hdr.src_port)
665                                                 input_set |=
666                                                 ICE_INSET_UDP_SRC_PORT;
667                                         if (udp_mask->hdr.dst_port)
668                                                 input_set |=
669                                                 ICE_INSET_UDP_DST_PORT;
670                                 }
671                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
672                                                 tunnel_valid == 0)
673                                         list[t].type = ICE_UDP_OF;
674                                 else
675                                         list[t].type = ICE_UDP_ILOS;
676                                 if (udp_mask->hdr.src_port) {
677                                         list[t].h_u.l4_hdr.src_port =
678                                                 udp_spec->hdr.src_port;
679                                         list[t].m_u.l4_hdr.src_port =
680                                                 udp_mask->hdr.src_port;
681                                 }
682                                 if (udp_mask->hdr.dst_port) {
683                                         list[t].h_u.l4_hdr.dst_port =
684                                                 udp_spec->hdr.dst_port;
685                                         list[t].m_u.l4_hdr.dst_port =
686                                                 udp_mask->hdr.dst_port;
687                                 }
688                                                 t++;
689                         }
690                         break;
691
692                 case RTE_FLOW_ITEM_TYPE_TCP:
693                         tcp_spec = item->spec;
694                         tcp_mask = item->mask;
695                         if (tcp_spec && tcp_mask) {
696                                 /* Check TCP mask and update input set */
697                                 if (tcp_mask->hdr.sent_seq ||
698                                         tcp_mask->hdr.recv_ack ||
699                                         tcp_mask->hdr.data_off ||
700                                         tcp_mask->hdr.tcp_flags ||
701                                         tcp_mask->hdr.rx_win ||
702                                         tcp_mask->hdr.cksum ||
703                                         tcp_mask->hdr.tcp_urp) {
704                                         rte_flow_error_set(error, EINVAL,
705                                            RTE_FLOW_ERROR_TYPE_ITEM,
706                                            item,
707                                            "Invalid TCP mask");
708                                         return 0;
709                                 }
710
711                                 if (tunnel_valid) {
712                                         if (tcp_mask->hdr.src_port)
713                                                 input_set |=
714                                                 ICE_INSET_TUN_TCP_SRC_PORT;
715                                         if (tcp_mask->hdr.dst_port)
716                                                 input_set |=
717                                                 ICE_INSET_TUN_TCP_DST_PORT;
718                                 } else {
719                                         if (tcp_mask->hdr.src_port)
720                                                 input_set |=
721                                                 ICE_INSET_TCP_SRC_PORT;
722                                         if (tcp_mask->hdr.dst_port)
723                                                 input_set |=
724                                                 ICE_INSET_TCP_DST_PORT;
725                                 }
726                                 list[t].type = ICE_TCP_IL;
727                                 if (tcp_mask->hdr.src_port) {
728                                         list[t].h_u.l4_hdr.src_port =
729                                                 tcp_spec->hdr.src_port;
730                                         list[t].m_u.l4_hdr.src_port =
731                                                 tcp_mask->hdr.src_port;
732                                 }
733                                 if (tcp_mask->hdr.dst_port) {
734                                         list[t].h_u.l4_hdr.dst_port =
735                                                 tcp_spec->hdr.dst_port;
736                                         list[t].m_u.l4_hdr.dst_port =
737                                                 tcp_mask->hdr.dst_port;
738                                 }
739                                 t++;
740                         }
741                         break;
742
743                 case RTE_FLOW_ITEM_TYPE_SCTP:
744                         sctp_spec = item->spec;
745                         sctp_mask = item->mask;
746                         if (sctp_spec && sctp_mask) {
747                                 /* Check SCTP mask and update input set */
748                                 if (sctp_mask->hdr.cksum) {
749                                         rte_flow_error_set(error, EINVAL,
750                                            RTE_FLOW_ERROR_TYPE_ITEM,
751                                            item,
752                                            "Invalid SCTP mask");
753                                         return 0;
754                                 }
755
756                                 if (tunnel_valid) {
757                                         if (sctp_mask->hdr.src_port)
758                                                 input_set |=
759                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
760                                         if (sctp_mask->hdr.dst_port)
761                                                 input_set |=
762                                                 ICE_INSET_TUN_SCTP_DST_PORT;
763                                 } else {
764                                         if (sctp_mask->hdr.src_port)
765                                                 input_set |=
766                                                 ICE_INSET_SCTP_SRC_PORT;
767                                         if (sctp_mask->hdr.dst_port)
768                                                 input_set |=
769                                                 ICE_INSET_SCTP_DST_PORT;
770                                 }
771                                 list[t].type = ICE_SCTP_IL;
772                                 if (sctp_mask->hdr.src_port) {
773                                         list[t].h_u.sctp_hdr.src_port =
774                                                 sctp_spec->hdr.src_port;
775                                         list[t].m_u.sctp_hdr.src_port =
776                                                 sctp_mask->hdr.src_port;
777                                 }
778                                 if (sctp_mask->hdr.dst_port) {
779                                         list[t].h_u.sctp_hdr.dst_port =
780                                                 sctp_spec->hdr.dst_port;
781                                         list[t].m_u.sctp_hdr.dst_port =
782                                                 sctp_mask->hdr.dst_port;
783                                 }
784                                 t++;
785                         }
786                         break;
787
788                 case RTE_FLOW_ITEM_TYPE_VXLAN:
789                         vxlan_spec = item->spec;
790                         vxlan_mask = item->mask;
791                         /* Check if VXLAN item is used to describe protocol.
792                          * If yes, both spec and mask should be NULL.
793                          * If no, both spec and mask shouldn't be NULL.
794                          */
795                         if ((!vxlan_spec && vxlan_mask) ||
796                             (vxlan_spec && !vxlan_mask)) {
797                                 rte_flow_error_set(error, EINVAL,
798                                            RTE_FLOW_ERROR_TYPE_ITEM,
799                                            item,
800                                            "Invalid VXLAN item");
801                                 return 0;
802                         }
803
804                         tunnel_valid = 1;
805                         if (vxlan_spec && vxlan_mask) {
806                                 list[t].type = ICE_VXLAN;
807                                 if (vxlan_mask->vni[0] ||
808                                         vxlan_mask->vni[1] ||
809                                         vxlan_mask->vni[2]) {
810                                         list[t].h_u.tnl_hdr.vni =
811                                                 (vxlan_spec->vni[2] << 16) |
812                                                 (vxlan_spec->vni[1] << 8) |
813                                                 vxlan_spec->vni[0];
814                                         list[t].m_u.tnl_hdr.vni =
815                                                 (vxlan_mask->vni[2] << 16) |
816                                                 (vxlan_mask->vni[1] << 8) |
817                                                 vxlan_mask->vni[0];
818                                         input_set |=
819                                                 ICE_INSET_TUN_VXLAN_VNI;
820                                 }
821                                 t++;
822                         }
823                         break;
824
825                 case RTE_FLOW_ITEM_TYPE_NVGRE:
826                         nvgre_spec = item->spec;
827                         nvgre_mask = item->mask;
828                         /* Check if NVGRE item is used to describe protocol.
829                          * If yes, both spec and mask should be NULL.
830                          * If no, both spec and mask shouldn't be NULL.
831                          */
832                         if ((!nvgre_spec && nvgre_mask) ||
833                             (nvgre_spec && !nvgre_mask)) {
834                                 rte_flow_error_set(error, EINVAL,
835                                            RTE_FLOW_ERROR_TYPE_ITEM,
836                                            item,
837                                            "Invalid NVGRE item");
838                                 return 0;
839                         }
840                         tunnel_valid = 1;
841                         if (nvgre_spec && nvgre_mask) {
842                                 list[t].type = ICE_NVGRE;
843                                 if (nvgre_mask->tni[0] ||
844                                         nvgre_mask->tni[1] ||
845                                         nvgre_mask->tni[2]) {
846                                         list[t].h_u.nvgre_hdr.tni_flow =
847                                                 (nvgre_spec->tni[2] << 16) |
848                                                 (nvgre_spec->tni[1] << 8) |
849                                                 nvgre_spec->tni[0];
850                                         list[t].m_u.nvgre_hdr.tni_flow =
851                                                 (nvgre_mask->tni[2] << 16) |
852                                                 (nvgre_mask->tni[1] << 8) |
853                                                 nvgre_mask->tni[0];
854                                         input_set |=
855                                                 ICE_INSET_TUN_NVGRE_TNI;
856                                 }
857                                 t++;
858                         }
859                         break;
860
861                 case RTE_FLOW_ITEM_TYPE_VLAN:
862                         vlan_spec = item->spec;
863                         vlan_mask = item->mask;
864                         /* Check if VLAN item is used to describe protocol.
865                          * If yes, both spec and mask should be NULL.
866                          * If no, both spec and mask shouldn't be NULL.
867                          */
868                         if ((!vlan_spec && vlan_mask) ||
869                             (vlan_spec && !vlan_mask)) {
870                                 rte_flow_error_set(error, EINVAL,
871                                            RTE_FLOW_ERROR_TYPE_ITEM,
872                                            item,
873                                            "Invalid VLAN item");
874                                 return 0;
875                         }
876                         if (vlan_spec && vlan_mask) {
877                                 list[t].type = ICE_VLAN_OFOS;
878                                 if (vlan_mask->tci) {
879                                         list[t].h_u.vlan_hdr.vlan =
880                                                 vlan_spec->tci;
881                                         list[t].m_u.vlan_hdr.vlan =
882                                                 vlan_mask->tci;
883                                         input_set |= ICE_INSET_VLAN_OUTER;
884                                 }
885                                 if (vlan_mask->inner_type) {
886                                         list[t].h_u.vlan_hdr.type =
887                                                 vlan_spec->inner_type;
888                                         list[t].m_u.vlan_hdr.type =
889                                                 vlan_mask->inner_type;
890                                         input_set |= ICE_INSET_VLAN_OUTER;
891                                 }
892                                 t++;
893                         }
894                         break;
895
896                 case RTE_FLOW_ITEM_TYPE_PPPOED:
897                 case RTE_FLOW_ITEM_TYPE_PPPOES:
898                         pppoe_spec = item->spec;
899                         pppoe_mask = item->mask;
900                         /* Check if PPPoE item is used to describe protocol.
901                          * If yes, both spec and mask should be NULL.
902                          * If no, both spec and mask shouldn't be NULL.
903                          */
904                         if ((!pppoe_spec && pppoe_mask) ||
905                                 (pppoe_spec && !pppoe_mask)) {
906                                 rte_flow_error_set(error, EINVAL,
907                                         RTE_FLOW_ERROR_TYPE_ITEM,
908                                         item,
909                                         "Invalid pppoe item");
910                                 return 0;
911                         }
912                         if (pppoe_spec && pppoe_mask) {
913                                 /* Check pppoe mask and update input set */
914                                 if (pppoe_mask->length ||
915                                         pppoe_mask->code ||
916                                         pppoe_mask->version_type) {
917                                         rte_flow_error_set(error, EINVAL,
918                                                 RTE_FLOW_ERROR_TYPE_ITEM,
919                                                 item,
920                                                 "Invalid pppoe mask");
921                                         return 0;
922                                 }
923                                 list[t].type = ICE_PPPOE;
924                                 if (pppoe_mask->session_id) {
925                                         list[t].h_u.pppoe_hdr.session_id =
926                                                 pppoe_spec->session_id;
927                                         list[t].m_u.pppoe_hdr.session_id =
928                                                 pppoe_mask->session_id;
929                                         input_set |= ICE_INSET_PPPOE_SESSION;
930                                 }
931                                 t++;
932                                 pppoe_valid = 1;
933                         }
934                         break;
935
936                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
937                         pppoe_proto_spec = item->spec;
938                         pppoe_proto_mask = item->mask;
939                         /* Check if PPPoE optional proto_id item
940                          * is used to describe protocol.
941                          * If yes, both spec and mask should be NULL.
942                          * If no, both spec and mask shouldn't be NULL.
943                          */
944                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
945                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
946                                 rte_flow_error_set(error, EINVAL,
947                                         RTE_FLOW_ERROR_TYPE_ITEM,
948                                         item,
949                                         "Invalid pppoe proto item");
950                                 return 0;
951                         }
952                         if (pppoe_proto_spec && pppoe_proto_mask) {
953                                 if (pppoe_valid)
954                                         t--;
955                                 list[t].type = ICE_PPPOE;
956                                 if (pppoe_proto_mask->proto_id) {
957                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
958                                                 pppoe_proto_spec->proto_id;
959                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
960                                                 pppoe_proto_mask->proto_id;
961                                         input_set |= ICE_INSET_PPPOE_PROTO;
962                                 }
963                                 t++;
964                         }
965                         break;
966
967                 case RTE_FLOW_ITEM_TYPE_ESP:
968                         esp_spec = item->spec;
969                         esp_mask = item->mask;
970                         if (esp_spec || esp_mask) {
971                                 rte_flow_error_set(error, EINVAL,
972                                            RTE_FLOW_ERROR_TYPE_ITEM,
973                                            item,
974                                            "Invalid esp item");
975                                 return -ENOTSUP;
976                         }
977                         if (ipv6_valiad)
978                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
979                         break;
980
981                 case RTE_FLOW_ITEM_TYPE_AH:
982                         ah_spec = item->spec;
983                         ah_mask = item->mask;
984                         if (ah_spec || ah_mask) {
985                                 rte_flow_error_set(error, EINVAL,
986                                            RTE_FLOW_ERROR_TYPE_ITEM,
987                                            item,
988                                            "Invalid ah item");
989                                 return -ENOTSUP;
990                         }
991                         if (ipv6_valiad)
992                                 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
993                         break;
994
995                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
996                         l2tp_spec = item->spec;
997                         l2tp_mask = item->mask;
998                         if (l2tp_spec || l2tp_mask) {
999                                 rte_flow_error_set(error, EINVAL,
1000                                            RTE_FLOW_ERROR_TYPE_ITEM,
1001                                            item,
1002                                            "Invalid l2tp item");
1003                                 return -ENOTSUP;
1004                         }
1005                         if (ipv6_valiad)
1006                                 *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1007                         break;
1008                 case RTE_FLOW_ITEM_TYPE_PFCP:
1009                         pfcp_spec = item->spec;
1010                         pfcp_mask = item->mask;
1011                         /* Check if PFCP item is used to describe protocol.
1012                          * If yes, both spec and mask should be NULL.
1013                          * If no, both spec and mask shouldn't be NULL.
1014                          */
1015                         if ((!pfcp_spec && pfcp_mask) ||
1016                             (pfcp_spec && !pfcp_mask)) {
1017                                 rte_flow_error_set(error, EINVAL,
1018                                            RTE_FLOW_ERROR_TYPE_ITEM,
1019                                            item,
1020                                            "Invalid PFCP item");
1021                                 return -ENOTSUP;
1022                         }
1023                         if (pfcp_spec && pfcp_mask) {
1024                                 /* Check pfcp mask and update input set */
1025                                 if (pfcp_mask->msg_type ||
1026                                         pfcp_mask->msg_len ||
1027                                         pfcp_mask->seid) {
1028                                         rte_flow_error_set(error, EINVAL,
1029                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1030                                                 item,
1031                                                 "Invalid pfcp mask");
1032                                         return -ENOTSUP;
1033                                 }
1034                                 if (pfcp_mask->s_field &&
1035                                         pfcp_spec->s_field == 0x01 &&
1036                                         ipv6_valiad)
1037                                         *tun_type =
1038                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1039                                 else if (pfcp_mask->s_field &&
1040                                         pfcp_spec->s_field == 0x01)
1041                                         *tun_type =
1042                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1043                                 else if (pfcp_mask->s_field &&
1044                                         !pfcp_spec->s_field &&
1045                                         ipv6_valiad)
1046                                         *tun_type =
1047                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1048                                 else if (pfcp_mask->s_field &&
1049                                         !pfcp_spec->s_field)
1050                                         *tun_type =
1051                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1052                                 else
1053                                         return -ENOTSUP;
1054                         }
1055                         break;
1056
1057
1058                 case RTE_FLOW_ITEM_TYPE_VOID:
1059                         break;
1060
1061                 default:
1062                         rte_flow_error_set(error, EINVAL,
1063                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1064                                    "Invalid pattern item.");
1065                         goto out;
1066                 }
1067         }
1068
1069         *lkups_num = t;
1070
1071         return input_set;
1072 out:
1073         return 0;
1074 }
1075
1076 static int
1077 ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
1078                             struct rte_flow_error *error,
1079                             struct ice_adv_rule_info *rule_info)
1080 {
1081         const struct rte_flow_action_vf *act_vf;
1082         const struct rte_flow_action *action;
1083         enum rte_flow_action_type action_type;
1084
1085         for (action = actions; action->type !=
1086                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1087                 action_type = action->type;
1088                 switch (action_type) {
1089                 case RTE_FLOW_ACTION_TYPE_VF:
1090                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1091                         act_vf = action->conf;
1092                         rule_info->sw_act.vsi_handle = act_vf->id;
1093                         break;
1094                 default:
1095                         rte_flow_error_set(error,
1096                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1097                                            actions,
1098                                            "Invalid action type or queue number");
1099                         return -rte_errno;
1100                 }
1101         }
1102
1103         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1104         rule_info->rx = 1;
1105         rule_info->priority = 5;
1106
1107         return 0;
1108 }
1109
1110 static int
1111 ice_switch_parse_action(struct ice_pf *pf,
1112                 const struct rte_flow_action *actions,
1113                 struct rte_flow_error *error,
1114                 struct ice_adv_rule_info *rule_info)
1115 {
1116         struct ice_vsi *vsi = pf->main_vsi;
1117         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1118         const struct rte_flow_action_queue *act_q;
1119         const struct rte_flow_action_rss *act_qgrop;
1120         uint16_t base_queue, i;
1121         const struct rte_flow_action *action;
1122         enum rte_flow_action_type action_type;
1123         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1124                  2, 4, 8, 16, 32, 64, 128};
1125
1126         base_queue = pf->base_queue + vsi->base_queue;
1127         for (action = actions; action->type !=
1128                         RTE_FLOW_ACTION_TYPE_END; action++) {
1129                 action_type = action->type;
1130                 switch (action_type) {
1131                 case RTE_FLOW_ACTION_TYPE_RSS:
1132                         act_qgrop = action->conf;
1133                         rule_info->sw_act.fltr_act =
1134                                 ICE_FWD_TO_QGRP;
1135                         rule_info->sw_act.fwd_id.q_id =
1136                                 base_queue + act_qgrop->queue[0];
1137                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1138                                 if (act_qgrop->queue_num ==
1139                                         valid_qgrop_number[i])
1140                                         break;
1141                         }
1142                         if (i == MAX_QGRP_NUM_TYPE)
1143                                 goto error;
1144                         if ((act_qgrop->queue[0] +
1145                                 act_qgrop->queue_num) >
1146                                 dev->data->nb_rx_queues)
1147                                 goto error;
1148                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1149                                 if (act_qgrop->queue[i + 1] !=
1150                                         act_qgrop->queue[i] + 1)
1151                                         goto error;
1152                         rule_info->sw_act.qgrp_size =
1153                                 act_qgrop->queue_num;
1154                         break;
1155                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1156                         act_q = action->conf;
1157                         if (act_q->index >= dev->data->nb_rx_queues)
1158                                 goto error;
1159                         rule_info->sw_act.fltr_act =
1160                                 ICE_FWD_TO_Q;
1161                         rule_info->sw_act.fwd_id.q_id =
1162                                 base_queue + act_q->index;
1163                         break;
1164
1165                 case RTE_FLOW_ACTION_TYPE_DROP:
1166                         rule_info->sw_act.fltr_act =
1167                                 ICE_DROP_PACKET;
1168                         break;
1169
1170                 case RTE_FLOW_ACTION_TYPE_VOID:
1171                         break;
1172
1173                 default:
1174                         goto error;
1175                 }
1176         }
1177
1178         rule_info->sw_act.vsi_handle = vsi->idx;
1179         rule_info->rx = 1;
1180         rule_info->sw_act.src = vsi->idx;
1181         rule_info->priority = 5;
1182
1183         return 0;
1184
1185 error:
1186         rte_flow_error_set(error,
1187                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1188                 actions,
1189                 "Invalid action type or queue number");
1190         return -rte_errno;
1191 }
1192
1193 static int
1194 ice_switch_check_action(const struct rte_flow_action *actions,
1195                             struct rte_flow_error *error)
1196 {
1197         const struct rte_flow_action *action;
1198         enum rte_flow_action_type action_type;
1199         uint16_t actions_num = 0;
1200
1201         for (action = actions; action->type !=
1202                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1203                 action_type = action->type;
1204                 switch (action_type) {
1205                 case RTE_FLOW_ACTION_TYPE_VF:
1206                 case RTE_FLOW_ACTION_TYPE_RSS:
1207                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1208                 case RTE_FLOW_ACTION_TYPE_DROP:
1209                         actions_num++;
1210                         break;
1211                 case RTE_FLOW_ACTION_TYPE_VOID:
1212                         continue;
1213                 default:
1214                         rte_flow_error_set(error,
1215                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1216                                            actions,
1217                                            "Invalid action type");
1218                         return -rte_errno;
1219                 }
1220         }
1221
1222         if (actions_num > 1) {
1223                 rte_flow_error_set(error,
1224                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1225                                    actions,
1226                                    "Invalid action number");
1227                 return -rte_errno;
1228         }
1229
1230         return 0;
1231 }
1232
1233 static bool
1234 ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
1235 {
1236         switch (tun_type) {
1237         case ICE_SW_TUN_PROFID_IPV6_ESP:
1238         case ICE_SW_TUN_PROFID_IPV6_AH:
1239         case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
1240         case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
1241         case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
1242         case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
1243         case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
1244                 return true;
1245         default:
1246                 break;
1247         }
1248
1249         return false;
1250 }
1251
1252 static int
1253 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1254                 struct ice_pattern_match_item *array,
1255                 uint32_t array_len,
1256                 const struct rte_flow_item pattern[],
1257                 const struct rte_flow_action actions[],
1258                 void **meta,
1259                 struct rte_flow_error *error)
1260 {
1261         struct ice_pf *pf = &ad->pf;
1262         uint64_t inputset = 0;
1263         int ret = 0;
1264         struct sw_meta *sw_meta_ptr = NULL;
1265         struct ice_adv_rule_info rule_info;
1266         struct ice_adv_lkup_elem *list = NULL;
1267         uint16_t lkups_num = 0;
1268         const struct rte_flow_item *item = pattern;
1269         uint16_t item_num = 0;
1270         enum ice_sw_tunnel_type tun_type =
1271                 ICE_SW_TUN_AND_NON_TUN;
1272         struct ice_pattern_match_item *pattern_match_item = NULL;
1273
1274         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1275                 item_num++;
1276                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
1277                         tun_type = ICE_SW_TUN_VXLAN;
1278                 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
1279                         tun_type = ICE_SW_TUN_NVGRE;
1280                 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
1281                                 item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
1282                         tun_type = ICE_SW_TUN_PPPOE;
1283                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1284                         const struct rte_flow_item_eth *eth_mask;
1285                         if (item->mask)
1286                                 eth_mask = item->mask;
1287                         else
1288                                 continue;
1289                         if (eth_mask->type == UINT16_MAX)
1290                                 tun_type = ICE_SW_TUN_AND_NON_TUN;
1291                 }
1292                 /* reserve one more memory slot for ETH which may
1293                  * consume 2 lookup items.
1294                  */
1295                 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
1296                         item_num++;
1297         }
1298
1299         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1300         if (!list) {
1301                 rte_flow_error_set(error, EINVAL,
1302                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1303                                    "No memory for PMD internal items");
1304                 return -rte_errno;
1305         }
1306
1307         sw_meta_ptr =
1308                 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1309         if (!sw_meta_ptr) {
1310                 rte_flow_error_set(error, EINVAL,
1311                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1312                                    "No memory for sw_pattern_meta_ptr");
1313                 goto error;
1314         }
1315
1316         pattern_match_item =
1317                 ice_search_pattern_match_item(pattern, array, array_len, error);
1318         if (!pattern_match_item) {
1319                 rte_flow_error_set(error, EINVAL,
1320                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1321                                    "Invalid input pattern");
1322                 goto error;
1323         }
1324
1325         inputset = ice_switch_inset_get
1326                 (pattern, error, list, &lkups_num, &tun_type);
1327         if ((!inputset && !ice_is_profile_rule(tun_type)) ||
1328                 (inputset & ~pattern_match_item->input_set_mask)) {
1329                 rte_flow_error_set(error, EINVAL,
1330                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1331                                    pattern,
1332                                    "Invalid input set");
1333                 goto error;
1334         }
1335
1336         rule_info.tun_type = tun_type;
1337
1338         ret = ice_switch_check_action(actions, error);
1339         if (ret) {
1340                 rte_flow_error_set(error, EINVAL,
1341                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1342                                    "Invalid input action number");
1343                 goto error;
1344         }
1345
1346         if (ad->hw.dcf_enabled)
1347                 ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
1348         else
1349                 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
1350
1351         if (ret) {
1352                 rte_flow_error_set(error, EINVAL,
1353                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1354                                    "Invalid input action");
1355                 goto error;
1356         }
1357
1358         if (meta) {
1359                 *meta = sw_meta_ptr;
1360                 ((struct sw_meta *)*meta)->list = list;
1361                 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1362                 ((struct sw_meta *)*meta)->rule_info = rule_info;
1363         } else {
1364                 rte_free(list);
1365                 rte_free(sw_meta_ptr);
1366         }
1367
1368         rte_free(pattern_match_item);
1369
1370         return 0;
1371
1372 error:
1373         rte_free(list);
1374         rte_free(sw_meta_ptr);
1375         rte_free(pattern_match_item);
1376
1377         return -rte_errno;
1378 }
1379
1380 static int
1381 ice_switch_query(struct ice_adapter *ad __rte_unused,
1382                 struct rte_flow *flow __rte_unused,
1383                 struct rte_flow_query_count *count __rte_unused,
1384                 struct rte_flow_error *error)
1385 {
1386         rte_flow_error_set(error, EINVAL,
1387                 RTE_FLOW_ERROR_TYPE_HANDLE,
1388                 NULL,
1389                 "count action not supported by switch filter");
1390
1391         return -rte_errno;
1392 }
1393
1394 static int
1395 ice_switch_init(struct ice_adapter *ad)
1396 {
1397         int ret = 0;
1398         struct ice_flow_parser *dist_parser;
1399         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1400
1401         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1402                 dist_parser = &ice_switch_dist_parser_comms;
1403         else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1404                 dist_parser = &ice_switch_dist_parser_os;
1405         else
1406                 return -EINVAL;
1407
1408         if (ad->devargs.pipe_mode_support)
1409                 ret = ice_register_parser(perm_parser, ad);
1410         else
1411                 ret = ice_register_parser(dist_parser, ad);
1412         return ret;
1413 }
1414
1415 static void
1416 ice_switch_uninit(struct ice_adapter *ad)
1417 {
1418         struct ice_flow_parser *dist_parser;
1419         struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
1420
1421         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1422                 dist_parser = &ice_switch_dist_parser_comms;
1423         else
1424                 dist_parser = &ice_switch_dist_parser_os;
1425
1426         if (ad->devargs.pipe_mode_support)
1427                 ice_unregister_parser(perm_parser, ad);
1428         else
1429                 ice_unregister_parser(dist_parser, ad);
1430 }
1431
/* Switch filter engine descriptor: wires the lifecycle, rule CRUD and
 * query callbacks into the generic flow framework.
 */
static struct
ice_flow_engine ice_switch_engine = {
        .init = ice_switch_init,
        .uninit = ice_switch_uninit,
        .create = ice_switch_create,
        .destroy = ice_switch_destroy,
        .query_count = ice_switch_query,
        .free = ice_switch_filter_rule_free,
        .type = ICE_FLOW_ENGINE_SWITCH,
};
1442
/* Distributor-stage parser used with the OS-default DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_os = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_dist_os,
        .array_len = RTE_DIM(ice_switch_pattern_dist_os),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1451
/* Distributor-stage parser used with the comms DDP package. */
static struct
ice_flow_parser ice_switch_dist_parser_comms = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_dist_comms,
        .array_len = RTE_DIM(ice_switch_pattern_dist_comms),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1460
/* Permission-stage parser, selected when pipeline mode is enabled. */
static struct
ice_flow_parser ice_switch_perm_parser = {
        .engine = &ice_switch_engine,
        .array = ice_switch_pattern_perm,
        .array_len = RTE_DIM(ice_switch_pattern_perm),
        .parse_pattern_action = ice_switch_parse_pattern_action,
        .stage = ICE_FLOW_STAGE_PERMISSION,
};
1469
1470 RTE_INIT(ice_sw_engine_init)
1471 {
1472         struct ice_flow_engine *engine = &ice_switch_engine;
1473         ice_register_flow_engine(engine);
1474 }