net/ice: update QinQ switch filter handling
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/* Max number of queue-group node types in a queue-group action chain. */
#define MAX_QGRP_NUM_TYPE       7
/* Upper bound on the total number of matched bytes in one rule's input set. */
#define MAX_INPUT_SET_BYTE      32
/* PPP protocol field values carried inside a PPPoE session (RFC 1332/5072). */
#define ICE_PPP_IPV4_PROTO      0x0021
#define ICE_PPP_IPV6_PROTO      0x0057
/* IPv4 protocol number for GRE, used by the NVGRE tunnel patterns. */
#define ICE_IPV4_PROTO_NVGRE    0x002F

/*
 * ICE_SW_INSET_* below are bitmaps of the fields (input sets) a given
 * rte_flow pattern is allowed to match on in the switch filter engine.
 * They are composed from the ICE_INSET_* field bits; the *_TUN_* variants
 * refer to the outer/tunnel header of an encapsulated packet.
 */

/* Plain L2 matching. */
#define ICE_SW_INSET_ETHER ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
        ICE_INSET_VLAN_INNER)
/* QinQ: both VLAN tags are matchable; no ethertype bit here. */
#define ICE_SW_INSET_MAC_QINQ  ( \
        ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
        ICE_INSET_VLAN_OUTER)

/* Non-tunneled IPv4/IPv6 and their L4 refinements. */
#define ICE_SW_INSET_MAC_IPV4 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6 ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
        ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
        ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
        ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
        ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)

/* Tunnel input sets used in distributor (DIST) mode: outer headers plus
 * inner DMAC/tunnel ID can be matched.
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)

/* Tunnel input sets used in permission (PERM) mode: only tunnel-side
 * fields are matchable (no inner DMAC / tunnel ID bits).
 */
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
        ICE_INSET_TUN_IPV4_TOS)

/* PPPoE session matching; VLAN bits included so eth/vlan/qinq + pppoes
 * patterns can share these sets.
 */
#define ICE_SW_INSET_MAC_PPPOE  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
        ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
        ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
        ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
        ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)

/* IPsec (ESP/AH), L2TPv3 and PFCP refinements of the plain IP sets. */
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
        ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
        ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
        ICE_SW_INSET_MAC_IPV4 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
        ICE_SW_INSET_MAC_IPV6 | \
        ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
140
/* Parsed rule handed from the pattern/action parser to ice_switch_create():
 * the lookup list plus the rule attributes needed by ice_add_adv_rule().
 */
struct sw_meta {
        struct ice_adv_lkup_elem *list; /* protocol lookup elements (hdr + mask) */
        uint16_t lkups_num;             /* number of valid entries in @list */
        struct ice_adv_rule_info rule_info; /* rule action/priority/tunnel info */
};
146
147 static struct ice_flow_parser ice_switch_dist_parser;
148 static struct ice_flow_parser ice_switch_perm_parser;
149
/* Patterns supported by the switch filter in distributor mode.
 * Each row: {pattern, allowed non-tunnel input set, outer set, inner set}.
 * ICE_INSET_NONE in the input-set column means the pattern is accepted but
 * no field of it may be matched (e.g. ARP, PFCP beyond its own fields).
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
        {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_SW_INSET_DIST_VXLAN_IPV4,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_SW_INSET_DIST_VXLAN_IPV4_UDP,       ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_SW_INSET_DIST_VXLAN_IPV4_TCP,       ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_SW_INSET_DIST_NVGRE_IPV4,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_SW_INSET_DIST_NVGRE_IPV4_UDP,       ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_SW_INSET_DIST_NVGRE_IPV4_TCP,       ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
};
202
/* Patterns supported by the switch filter in permission mode.
 * Identical to the distributor list except that VXLAN/NVGRE tunnel rows
 * use the ICE_SW_INSET_PERM_TUNNEL_* input sets (tunnel-side fields only).
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm_list[] = {
        {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE, ICE_INSET_NONE},
        {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE, ICE_INSET_NONE},
};
255
256 static int
257 ice_switch_create(struct ice_adapter *ad,
258                 struct rte_flow *flow,
259                 void *meta,
260                 struct rte_flow_error *error)
261 {
262         int ret = 0;
263         struct ice_pf *pf = &ad->pf;
264         struct ice_hw *hw = ICE_PF_TO_HW(pf);
265         struct ice_rule_query_data rule_added = {0};
266         struct ice_rule_query_data *filter_ptr;
267         struct ice_adv_lkup_elem *list =
268                 ((struct sw_meta *)meta)->list;
269         uint16_t lkups_cnt =
270                 ((struct sw_meta *)meta)->lkups_num;
271         struct ice_adv_rule_info *rule_info =
272                 &((struct sw_meta *)meta)->rule_info;
273
274         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
275                 rte_flow_error_set(error, EINVAL,
276                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
277                         "item number too large for rule");
278                 goto error;
279         }
280         if (!list) {
281                 rte_flow_error_set(error, EINVAL,
282                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
283                         "lookup list should not be NULL");
284                 goto error;
285         }
286         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
287         if (!ret) {
288                 filter_ptr = rte_zmalloc("ice_switch_filter",
289                         sizeof(struct ice_rule_query_data), 0);
290                 if (!filter_ptr) {
291                         rte_flow_error_set(error, EINVAL,
292                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
293                                    "No memory for ice_switch_filter");
294                         goto error;
295                 }
296                 flow->rule = filter_ptr;
297                 rte_memcpy(filter_ptr,
298                         &rule_added,
299                         sizeof(struct ice_rule_query_data));
300         } else {
301                 rte_flow_error_set(error, EINVAL,
302                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
303                         "switch filter create flow fail");
304                 goto error;
305         }
306
307         rte_free(list);
308         rte_free(meta);
309         return 0;
310
311 error:
312         rte_free(list);
313         rte_free(meta);
314
315         return -rte_errno;
316 }
317
318 static int
319 ice_switch_destroy(struct ice_adapter *ad,
320                 struct rte_flow *flow,
321                 struct rte_flow_error *error)
322 {
323         struct ice_hw *hw = &ad->hw;
324         int ret;
325         struct ice_rule_query_data *filter_ptr;
326
327         filter_ptr = (struct ice_rule_query_data *)
328                 flow->rule;
329
330         if (!filter_ptr) {
331                 rte_flow_error_set(error, EINVAL,
332                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
333                         "no such flow"
334                         " create by switch filter");
335                 return -rte_errno;
336         }
337
338         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
339         if (ret) {
340                 rte_flow_error_set(error, EINVAL,
341                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
342                         "fail to destroy switch filter rule");
343                 return -rte_errno;
344         }
345
346         rte_free(filter_ptr);
347         return ret;
348 }
349
350 static void
351 ice_switch_filter_rule_free(struct rte_flow *flow)
352 {
353         rte_free(flow->rule);
354 }
355
356 static uint64_t
357 ice_switch_inset_get(const struct rte_flow_item pattern[],
358                 struct rte_flow_error *error,
359                 struct ice_adv_lkup_elem *list,
360                 uint16_t *lkups_num,
361                 enum ice_sw_tunnel_type *tun_type)
362 {
363         const struct rte_flow_item *item = pattern;
364         enum rte_flow_item_type item_type;
365         const struct rte_flow_item_eth *eth_spec, *eth_mask;
366         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
367         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
368         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
369         const struct rte_flow_item_udp *udp_spec, *udp_mask;
370         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
371         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
372         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
373         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
374         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
375         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
376                                 *pppoe_proto_mask;
377         const struct rte_flow_item_esp *esp_spec, *esp_mask;
378         const struct rte_flow_item_ah *ah_spec, *ah_mask;
379         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
380         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
381         uint64_t input_set = ICE_INSET_NONE;
382         uint16_t input_set_byte = 0;
383         bool pppoe_elem_valid = 0;
384         bool pppoe_patt_valid = 0;
385         bool pppoe_prot_valid = 0;
386         bool inner_vlan_valid = 0;
387         bool outer_vlan_valid = 0;
388         bool tunnel_valid = 0;
389         bool profile_rule = 0;
390         bool nvgre_valid = 0;
391         bool vxlan_valid = 0;
392         bool qinq_valid = 0;
393         bool ipv6_valid = 0;
394         bool ipv4_valid = 0;
395         bool udp_valid = 0;
396         bool tcp_valid = 0;
397         uint16_t j, t = 0;
398
399         if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
400             *tun_type == ICE_NON_TUN_QINQ)
401                 qinq_valid = 1;
402
403         for (item = pattern; item->type !=
404                         RTE_FLOW_ITEM_TYPE_END; item++) {
405                 if (item->last) {
406                         rte_flow_error_set(error, EINVAL,
407                                         RTE_FLOW_ERROR_TYPE_ITEM,
408                                         item,
409                                         "Not support range");
410                         return 0;
411                 }
412                 item_type = item->type;
413
414                 switch (item_type) {
415                 case RTE_FLOW_ITEM_TYPE_ETH:
416                         eth_spec = item->spec;
417                         eth_mask = item->mask;
418                         if (eth_spec && eth_mask) {
419                                 const uint8_t *a = eth_mask->src.addr_bytes;
420                                 const uint8_t *b = eth_mask->dst.addr_bytes;
421                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
422                                         if (a[j] && tunnel_valid) {
423                                                 input_set |=
424                                                         ICE_INSET_TUN_SMAC;
425                                                 break;
426                                         } else if (a[j]) {
427                                                 input_set |=
428                                                         ICE_INSET_SMAC;
429                                                 break;
430                                         }
431                                 }
432                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
433                                         if (b[j] && tunnel_valid) {
434                                                 input_set |=
435                                                         ICE_INSET_TUN_DMAC;
436                                                 break;
437                                         } else if (b[j]) {
438                                                 input_set |=
439                                                         ICE_INSET_DMAC;
440                                                 break;
441                                         }
442                                 }
443                                 if (eth_mask->type)
444                                         input_set |= ICE_INSET_ETHERTYPE;
445                                 list[t].type = (tunnel_valid  == 0) ?
446                                         ICE_MAC_OFOS : ICE_MAC_IL;
447                                 struct ice_ether_hdr *h;
448                                 struct ice_ether_hdr *m;
449                                 uint16_t i = 0;
450                                 h = &list[t].h_u.eth_hdr;
451                                 m = &list[t].m_u.eth_hdr;
452                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
453                                         if (eth_mask->src.addr_bytes[j]) {
454                                                 h->src_addr[j] =
455                                                 eth_spec->src.addr_bytes[j];
456                                                 m->src_addr[j] =
457                                                 eth_mask->src.addr_bytes[j];
458                                                 i = 1;
459                                                 input_set_byte++;
460                                         }
461                                         if (eth_mask->dst.addr_bytes[j]) {
462                                                 h->dst_addr[j] =
463                                                 eth_spec->dst.addr_bytes[j];
464                                                 m->dst_addr[j] =
465                                                 eth_mask->dst.addr_bytes[j];
466                                                 i = 1;
467                                                 input_set_byte++;
468                                         }
469                                 }
470                                 if (i)
471                                         t++;
472                                 if (eth_mask->type) {
473                                         list[t].type = ICE_ETYPE_OL;
474                                         list[t].h_u.ethertype.ethtype_id =
475                                                 eth_spec->type;
476                                         list[t].m_u.ethertype.ethtype_id =
477                                                 eth_mask->type;
478                                         input_set_byte += 2;
479                                         t++;
480                                 }
481                         }
482                         break;
483
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			ipv4_valid = 1;
			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set.
				 * Only TOS, src/dst address, TTL and protocol are
				 * matchable by the switch filter; reject a mask on
				 * any other IPv4 header field.
				 */
				if (ipv4_mask->hdr.version_ihl ||
					ipv4_mask->hdr.total_length ||
					ipv4_mask->hdr.packet_id ||
					ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return 0;
				}

				/* Inside a tunnel the inner IPv4 fields map to
				 * the TUN_* input-set bits; otherwise use the
				 * plain bits.
				 */
				if (tunnel_valid) {
					if (ipv4_mask->hdr.type_of_service)
						input_set |=
							ICE_INSET_TUN_IPV4_TOS;
					if (ipv4_mask->hdr.src_addr)
						input_set |=
							ICE_INSET_TUN_IPV4_SRC;
					if (ipv4_mask->hdr.dst_addr)
						input_set |=
							ICE_INSET_TUN_IPV4_DST;
					if (ipv4_mask->hdr.time_to_live)
						input_set |=
							ICE_INSET_TUN_IPV4_TTL;
					if (ipv4_mask->hdr.next_proto_id)
						input_set |=
						ICE_INSET_TUN_IPV4_PROTO;
				} else {
					if (ipv4_mask->hdr.src_addr)
						input_set |= ICE_INSET_IPV4_SRC;
					if (ipv4_mask->hdr.dst_addr)
						input_set |= ICE_INSET_IPV4_DST;
					if (ipv4_mask->hdr.time_to_live)
						input_set |= ICE_INSET_IPV4_TTL;
					if (ipv4_mask->hdr.next_proto_id)
						input_set |=
						ICE_INSET_IPV4_PROTO;
					if (ipv4_mask->hdr.type_of_service)
						input_set |=
							ICE_INSET_IPV4_TOS;
				}
				/* Outer-first (OFOS) lookup element for plain
				 * rules, inner (IL) when a tunnel item precedes.
				 */
				list[t].type = (tunnel_valid  == 0) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
				if (ipv4_mask->hdr.src_addr) {
					list[t].h_u.ipv4_hdr.src_addr =
						ipv4_spec->hdr.src_addr;
					list[t].m_u.ipv4_hdr.src_addr =
						ipv4_mask->hdr.src_addr;
					/* NOTE(review): counts 2 bytes for a
					 * 4-byte address - verify against the
					 * MAX_INPUT_SET_BYTE accounting.
					 */
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.dst_addr) {
					list[t].h_u.ipv4_hdr.dst_addr =
						ipv4_spec->hdr.dst_addr;
					list[t].m_u.ipv4_hdr.dst_addr =
						ipv4_mask->hdr.dst_addr;
					input_set_byte += 2;
				}
				if (ipv4_mask->hdr.time_to_live) {
					list[t].h_u.ipv4_hdr.time_to_live =
						ipv4_spec->hdr.time_to_live;
					list[t].m_u.ipv4_hdr.time_to_live =
						ipv4_mask->hdr.time_to_live;
					input_set_byte++;
				}
				if (ipv4_mask->hdr.next_proto_id) {
					list[t].h_u.ipv4_hdr.protocol =
						ipv4_spec->hdr.next_proto_id;
					list[t].m_u.ipv4_hdr.protocol =
						ipv4_mask->hdr.next_proto_id;
					input_set_byte++;
				}
				/* Rule matches IP proto 0x2F (GRE, used by
				 * NVGRE): switch to a tunnel type that matches
				 * both tunneled and non-tunneled packets.
				 */
				if ((ipv4_spec->hdr.next_proto_id &
					ipv4_mask->hdr.next_proto_id) ==
					ICE_IPV4_PROTO_NVGRE)
					*tun_type = ICE_SW_TUN_AND_NON_TUN;
				if (ipv4_mask->hdr.type_of_service) {
					list[t].h_u.ipv4_hdr.tos =
						ipv4_spec->hdr.type_of_service;
					list[t].m_u.ipv4_hdr.tos =
						ipv4_mask->hdr.type_of_service;
					input_set_byte++;
				}
				t++;
			}
			break;
575
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			ipv6_valid = 1;
			if (ipv6_spec && ipv6_mask) {
				/* payload_len cannot be matched by the switch
				 * filter; reject a mask on it.
				 */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask");
					return 0;
				}

				/* Addresses are 16-byte arrays: scan byte by
				 * byte, any non-zero mask byte enables the
				 * corresponding (tunneled or plain) input-set
				 * bit.
				 */
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_SRC;
						break;
					} else if (ipv6_mask->hdr.src_addr[j]) {
						input_set |= ICE_INSET_IPV6_SRC;
						break;
					}
				}
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.dst_addr[j] &&
						tunnel_valid) {
						input_set |=
						ICE_INSET_TUN_IPV6_DST;
						break;
					} else if (ipv6_mask->hdr.dst_addr[j]) {
						input_set |= ICE_INSET_IPV6_DST;
						break;
					}
				}
				if (ipv6_mask->hdr.proto &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_NEXT_HDR;
				else if (ipv6_mask->hdr.proto)
					input_set |=
						ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits &&
					tunnel_valid)
					input_set |=
						ICE_INSET_TUN_IPV6_HOP_LIMIT;
				else if (ipv6_mask->hdr.hop_limits)
					input_set |=
						ICE_INSET_IPV6_HOP_LIMIT;
				/* Traffic class lives inside the 32-bit
				 * version/TC/flow-label word; isolate it with
				 * RTE_IPV6_HDR_TC_MASK (big-endian compare).
				 */
				if ((ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) &&
					tunnel_valid)
					input_set |=
							ICE_INSET_TUN_IPV6_TC;
				else if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;

				list[t].type = (tunnel_valid  == 0) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				/* f/s alias the hw lookup element's spec and
				 * mask IPv6 headers to shorten the copies
				 * below.
				 */
				struct ice_ipv6_hdr *f;
				struct ice_ipv6_hdr *s;
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j]) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
						input_set_byte++;
					}
					if (ipv6_mask->hdr.dst_addr[j]) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
						input_set_byte++;
					}
				}
				if (ipv6_mask->hdr.proto) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr =
						ipv6_mask->hdr.proto;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.hop_limits) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit =
						ipv6_mask->hdr.hop_limits;
					input_set_byte++;
				}
				if (ipv6_mask->hdr.vtc_flow &
						rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK)) {
					/* Rebuild the ver/TC/flow word with
					 * only the TC field populated, for
					 * spec and mask respectively, then
					 * store it back in big-endian form.
					 */
					struct ice_le_ver_tc_flow vtf;
					vtf.u.fld.version = 0;
					vtf.u.fld.flow_label = 0;
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_spec->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					vtf.u.fld.tc = (rte_be_to_cpu_32
						(ipv6_mask->hdr.vtc_flow) &
							RTE_IPV6_HDR_TC_MASK) >>
							RTE_IPV6_HDR_TC_SHIFT;
					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
					input_set_byte += 4;
				}
				t++;
			}
			break;
693
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;
			udp_valid = 1;
			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set:
				 * only src/dst ports are matchable; length
				 * and checksum masks are rejected.
				 */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_UDP_DST_PORT;
				} else {
					if (udp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_UDP_SRC_PORT;
					if (udp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_UDP_DST_PORT;
				}
				/* For a VXLAN rule, the UDP item that appears
				 * before the tunnel item is the outer UDP
				 * (ICE_UDP_OF); every other UDP is an inner /
				 * standalone L4 (ICE_UDP_ILOS).
				 */
				if (*tun_type == ICE_SW_TUN_VXLAN &&
						tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (udp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;
746
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			tcp_valid = 1;
			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set:
				 * only src/dst ports are matchable; any mask
				 * on seq/ack/flags/window/etc. is rejected.
				 */
				if (tcp_mask->hdr.sent_seq ||
					tcp_mask->hdr.recv_ack ||
					tcp_mask->hdr.data_off ||
					tcp_mask->hdr.tcp_flags ||
					tcp_mask->hdr.rx_win ||
					tcp_mask->hdr.cksum ||
					tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid TCP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_TCP_DST_PORT;
				} else {
					if (tcp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TCP_SRC_PORT;
					if (tcp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TCP_DST_PORT;
				}
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (tcp_mask->hdr.dst_port) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;
800
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set:
				 * only src/dst ports are matchable; a
				 * checksum mask is rejected.
				 */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid SCTP mask");
					return 0;
				}

				if (tunnel_valid) {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_TUN_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_TUN_SCTP_DST_PORT;
				} else {
					if (sctp_mask->hdr.src_port)
						input_set |=
						ICE_INSET_SCTP_SRC_PORT;
					if (sctp_mask->hdr.dst_port)
						input_set |=
						ICE_INSET_SCTP_DST_PORT;
				}
				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
					input_set_byte += 2;
				}
				if (sctp_mask->hdr.dst_port) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
					input_set_byte += 2;
				}
				t++;
			}
			break;
847
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return 0;
			}
			/* From here on, subsequent L3/L4 items are the
			 * tunnel's inner headers.
			 */
			vxlan_valid = 1;
			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				/* VNI is carried as three separate bytes in
				 * the rte_flow item; assemble them into the
				 * 24-bit value the hw lookup element expects.
				 */
				if (vxlan_mask->vni[0] ||
					vxlan_mask->vni[1] ||
					vxlan_mask->vni[2]) {
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						(vxlan_mask->vni[2] << 16) |
						(vxlan_mask->vni[1] << 8) |
						vxlan_mask->vni[0];
					input_set |=
						ICE_INSET_TUN_VXLAN_VNI;
					/* NOTE(review): counts 2 bytes for a
					 * 3-byte VNI - verify the intended
					 * accounting.
					 */
					input_set_byte += 2;
				}
				t++;
			}
			break;
885
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return 0;
			}
			/* From here on, subsequent L3/L4 items are the
			 * tunnel's inner headers.
			 */
			nvgre_valid = 1;
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				/* TNI is carried as three separate bytes;
				 * assemble them into the 24-bit tni_flow
				 * value of the hw lookup element.
				 */
				if (nvgre_mask->tni[0] ||
					nvgre_mask->tni[1] ||
					nvgre_mask->tni[2]) {
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						(nvgre_mask->tni[2] << 16) |
						(nvgre_mask->tni[1] << 8) |
						nvgre_mask->tni[0];
					input_set |=
						ICE_INSET_TUN_NVGRE_TNI;
					input_set_byte += 2;
				}
				t++;
			}
			break;
923
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Check if VLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vlan_spec && vlan_mask) ||
			    (vlan_spec && !vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VLAN item");
				return 0;
			}

			/* For a QinQ pattern the first VLAN item seen is the
			 * outer tag, the second is the inner tag.
			 */
			if (qinq_valid) {
				if (!outer_vlan_valid)
					outer_vlan_valid = 1;
				else
					inner_vlan_valid = 1;
			}

			if (vlan_spec && vlan_mask) {
				if (qinq_valid) {
					/* QinQ: outer tag uses the VLAN_EX
					 * lookup element, inner tag VLAN_IN.
					 */
					if (!inner_vlan_valid) {
						list[t].type = ICE_VLAN_EX;
						input_set |=
							ICE_INSET_VLAN_OUTER;
					} else {
						list[t].type = ICE_VLAN_IN;
						input_set |=
							ICE_INSET_VLAN_INNER;
					}
				} else {
					/* Single VLAN: outer-first-of-stack
					 * element, tracked as the inner bit.
					 */
					list[t].type = ICE_VLAN_OFOS;
					input_set |= ICE_INSET_VLAN_INNER;
				}

				if (vlan_mask->tci) {
					list[t].h_u.vlan_hdr.vlan =
						vlan_spec->tci;
					list[t].m_u.vlan_hdr.vlan =
						vlan_mask->tci;
					input_set_byte += 2;
				}
				/* Matching on the encapsulated EtherType of
				 * the VLAN item is not supported.
				 */
				if (vlan_mask->inner_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VLAN input set.");
					return 0;
				}
				t++;
			}
			break;
980
		/* PPPoE discovery and session stage items share the same
		 * handling: only the session id is matchable.
		 */
		case RTE_FLOW_ITEM_TYPE_PPPOED:
		case RTE_FLOW_ITEM_TYPE_PPPOES:
			pppoe_spec = item->spec;
			pppoe_mask = item->mask;
			/* Check if PPPoE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!pppoe_spec && pppoe_mask) ||
				(pppoe_spec && !pppoe_mask)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Invalid pppoe item");
				return 0;
			}
			pppoe_patt_valid = 1;
			if (pppoe_spec && pppoe_mask) {
				/* Check pppoe mask and update input set:
				 * length, code and version/type masks are
				 * rejected.
				 */
				if (pppoe_mask->length ||
					pppoe_mask->code ||
					pppoe_mask->version_type) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid pppoe mask");
					return 0;
				}
				list[t].type = ICE_PPPOE;
				if (pppoe_mask->session_id) {
					list[t].h_u.pppoe_hdr.session_id =
						pppoe_spec->session_id;
					list[t].m_u.pppoe_hdr.session_id =
						pppoe_mask->session_id;
					input_set |= ICE_INSET_PPPOE_SESSION;
					input_set_byte += 2;
				}
				t++;
				/* Remembered so a following PPPOE_PROTO_ID
				 * item can fold its proto into this same
				 * lookup element.
				 */
				pppoe_elem_valid = 1;
			}
			break;
1022
1023                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1024                         pppoe_proto_spec = item->spec;
1025                         pppoe_proto_mask = item->mask;
1026                         /* Check if PPPoE optional proto_id item
1027                          * is used to describe protocol.
1028                          * If yes, both spec and mask should be NULL.
1029                          * If no, both spec and mask shouldn't be NULL.
1030                          */
1031                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1032                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1033                                 rte_flow_error_set(error, EINVAL,
1034                                         RTE_FLOW_ERROR_TYPE_ITEM,
1035                                         item,
1036                                         "Invalid pppoe proto item");
1037                                 return 0;
1038                         }
1039                         if (pppoe_proto_spec && pppoe_proto_mask) {
1040                                 if (pppoe_elem_valid)
1041                                         t--;
1042                                 list[t].type = ICE_PPPOE;
1043                                 if (pppoe_proto_mask->proto_id) {
1044                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1045                                                 pppoe_proto_spec->proto_id;
1046                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1047                                                 pppoe_proto_mask->proto_id;
1048                                         input_set |= ICE_INSET_PPPOE_PROTO;
1049                                         input_set_byte += 2;
1050                                         pppoe_prot_valid = 1;
1051                                 }
1052                                 if ((pppoe_proto_mask->proto_id &
1053                                         pppoe_proto_spec->proto_id) !=
1054                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1055                                         (pppoe_proto_mask->proto_id &
1056                                         pppoe_proto_spec->proto_id) !=
1057                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1058                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1059                                 else
1060                                         *tun_type = ICE_SW_TUN_PPPOE;
1061                                 t++;
1062                         }
1063
1064                         break;
1065
1066                 case RTE_FLOW_ITEM_TYPE_ESP:
1067                         esp_spec = item->spec;
1068                         esp_mask = item->mask;
1069                         if ((esp_spec && !esp_mask) ||
1070                                 (!esp_spec && esp_mask)) {
1071                                 rte_flow_error_set(error, EINVAL,
1072                                            RTE_FLOW_ERROR_TYPE_ITEM,
1073                                            item,
1074                                            "Invalid esp item");
1075                                 return 0;
1076                         }
1077                         /* Check esp mask and update input set */
1078                         if (esp_mask && esp_mask->hdr.seq) {
1079                                 rte_flow_error_set(error, EINVAL,
1080                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1081                                                 item,
1082                                                 "Invalid esp mask");
1083                                 return 0;
1084                         }
1085
1086                         if (!esp_spec && !esp_mask && !input_set) {
1087                                 profile_rule = 1;
1088                                 if (ipv6_valid && udp_valid)
1089                                         *tun_type =
1090                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1091                                 else if (ipv6_valid)
1092                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1093                                 else if (ipv4_valid)
1094                                         return 0;
1095                         } else if (esp_spec && esp_mask &&
1096                                                 esp_mask->hdr.spi){
1097                                 if (udp_valid)
1098                                         list[t].type = ICE_NAT_T;
1099                                 else
1100                                         list[t].type = ICE_ESP;
1101                                 list[t].h_u.esp_hdr.spi =
1102                                         esp_spec->hdr.spi;
1103                                 list[t].m_u.esp_hdr.spi =
1104                                         esp_mask->hdr.spi;
1105                                 input_set |= ICE_INSET_ESP_SPI;
1106                                 input_set_byte += 4;
1107                                 t++;
1108                         }
1109
1110                         if (!profile_rule) {
1111                                 if (ipv6_valid && udp_valid)
1112                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1113                                 else if (ipv4_valid && udp_valid)
1114                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1115                                 else if (ipv6_valid)
1116                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1117                                 else if (ipv4_valid)
1118                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1119                         }
1120                         break;
1121
1122                 case RTE_FLOW_ITEM_TYPE_AH:
1123                         ah_spec = item->spec;
1124                         ah_mask = item->mask;
1125                         if ((ah_spec && !ah_mask) ||
1126                                 (!ah_spec && ah_mask)) {
1127                                 rte_flow_error_set(error, EINVAL,
1128                                            RTE_FLOW_ERROR_TYPE_ITEM,
1129                                            item,
1130                                            "Invalid ah item");
1131                                 return 0;
1132                         }
1133                         /* Check ah mask and update input set */
1134                         if (ah_mask &&
1135                                 (ah_mask->next_hdr ||
1136                                 ah_mask->payload_len ||
1137                                 ah_mask->seq_num ||
1138                                 ah_mask->reserved)) {
1139                                 rte_flow_error_set(error, EINVAL,
1140                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1141                                                 item,
1142                                                 "Invalid ah mask");
1143                                 return 0;
1144                         }
1145
1146                         if (!ah_spec && !ah_mask && !input_set) {
1147                                 profile_rule = 1;
1148                                 if (ipv6_valid && udp_valid)
1149                                         *tun_type =
1150                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1151                                 else if (ipv6_valid)
1152                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1153                                 else if (ipv4_valid)
1154                                         return 0;
1155                         } else if (ah_spec && ah_mask &&
1156                                                 ah_mask->spi){
1157                                 list[t].type = ICE_AH;
1158                                 list[t].h_u.ah_hdr.spi =
1159                                         ah_spec->spi;
1160                                 list[t].m_u.ah_hdr.spi =
1161                                         ah_mask->spi;
1162                                 input_set |= ICE_INSET_AH_SPI;
1163                                 input_set_byte += 4;
1164                                 t++;
1165                         }
1166
1167                         if (!profile_rule) {
1168                                 if (udp_valid)
1169                                         return 0;
1170                                 else if (ipv6_valid)
1171                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1172                                 else if (ipv4_valid)
1173                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1174                         }
1175                         break;
1176
1177                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1178                         l2tp_spec = item->spec;
1179                         l2tp_mask = item->mask;
1180                         if ((l2tp_spec && !l2tp_mask) ||
1181                                 (!l2tp_spec && l2tp_mask)) {
1182                                 rte_flow_error_set(error, EINVAL,
1183                                            RTE_FLOW_ERROR_TYPE_ITEM,
1184                                            item,
1185                                            "Invalid l2tp item");
1186                                 return 0;
1187                         }
1188
1189                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1190                                 if (ipv6_valid)
1191                                         *tun_type =
1192                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1193                                 else if (ipv4_valid)
1194                                         return 0;
1195                         } else if (l2tp_spec && l2tp_mask &&
1196                                                 l2tp_mask->session_id){
1197                                 list[t].type = ICE_L2TPV3;
1198                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1199                                         l2tp_spec->session_id;
1200                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1201                                         l2tp_mask->session_id;
1202                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1203                                 input_set_byte += 4;
1204                                 t++;
1205                         }
1206
1207                         if (!profile_rule) {
1208                                 if (ipv6_valid)
1209                                         *tun_type =
1210                                         ICE_SW_TUN_IPV6_L2TPV3;
1211                                 else if (ipv4_valid)
1212                                         *tun_type =
1213                                         ICE_SW_TUN_IPV4_L2TPV3;
1214                         }
1215                         break;
1216
1217                 case RTE_FLOW_ITEM_TYPE_PFCP:
1218                         pfcp_spec = item->spec;
1219                         pfcp_mask = item->mask;
1220                         /* Check if PFCP item is used to describe protocol.
1221                          * If yes, both spec and mask should be NULL.
1222                          * If no, both spec and mask shouldn't be NULL.
1223                          */
1224                         if ((!pfcp_spec && pfcp_mask) ||
1225                             (pfcp_spec && !pfcp_mask)) {
1226                                 rte_flow_error_set(error, EINVAL,
1227                                            RTE_FLOW_ERROR_TYPE_ITEM,
1228                                            item,
1229                                            "Invalid PFCP item");
1230                                 return -ENOTSUP;
1231                         }
1232                         if (pfcp_spec && pfcp_mask) {
1233                                 /* Check pfcp mask and update input set */
1234                                 if (pfcp_mask->msg_type ||
1235                                         pfcp_mask->msg_len ||
1236                                         pfcp_mask->seid) {
1237                                         rte_flow_error_set(error, EINVAL,
1238                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1239                                                 item,
1240                                                 "Invalid pfcp mask");
1241                                         return -ENOTSUP;
1242                                 }
1243                                 if (pfcp_mask->s_field &&
1244                                         pfcp_spec->s_field == 0x01 &&
1245                                         ipv6_valid)
1246                                         *tun_type =
1247                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1248                                 else if (pfcp_mask->s_field &&
1249                                         pfcp_spec->s_field == 0x01)
1250                                         *tun_type =
1251                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1252                                 else if (pfcp_mask->s_field &&
1253                                         !pfcp_spec->s_field &&
1254                                         ipv6_valid)
1255                                         *tun_type =
1256                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1257                                 else if (pfcp_mask->s_field &&
1258                                         !pfcp_spec->s_field)
1259                                         *tun_type =
1260                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1261                                 else
1262                                         return -ENOTSUP;
1263                         }
1264                         break;
1265
1266                 case RTE_FLOW_ITEM_TYPE_VOID:
1267                         break;
1268
1269                 default:
1270                         rte_flow_error_set(error, EINVAL,
1271                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1272                                    "Invalid pattern item.");
1273                         goto out;
1274                 }
1275         }
1276
1277         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1278             inner_vlan_valid && outer_vlan_valid)
1279                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1280         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1281                  inner_vlan_valid && outer_vlan_valid)
1282                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1283         else if (*tun_type == ICE_NON_TUN &&
1284                  inner_vlan_valid && outer_vlan_valid)
1285                 *tun_type = ICE_NON_TUN_QINQ;
1286         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1287                  inner_vlan_valid && outer_vlan_valid)
1288                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1289
1290         if (pppoe_patt_valid && !pppoe_prot_valid) {
1291                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1292                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1293                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1294                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1295                 else if (inner_vlan_valid && outer_vlan_valid)
1296                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1297                 else if (ipv6_valid && udp_valid)
1298                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1299                 else if (ipv6_valid && tcp_valid)
1300                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1301                 else if (ipv4_valid && udp_valid)
1302                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1303                 else if (ipv4_valid && tcp_valid)
1304                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1305                 else if (ipv6_valid)
1306                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1307                 else if (ipv4_valid)
1308                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1309                 else
1310                         *tun_type = ICE_SW_TUN_PPPOE;
1311         }
1312
1313         if (*tun_type == ICE_NON_TUN) {
1314                 if (vxlan_valid)
1315                         *tun_type = ICE_SW_TUN_VXLAN;
1316                 else if (nvgre_valid)
1317                         *tun_type = ICE_SW_TUN_NVGRE;
1318                 else if (ipv4_valid && tcp_valid)
1319                         *tun_type = ICE_SW_IPV4_TCP;
1320                 else if (ipv4_valid && udp_valid)
1321                         *tun_type = ICE_SW_IPV4_UDP;
1322                 else if (ipv6_valid && tcp_valid)
1323                         *tun_type = ICE_SW_IPV6_TCP;
1324                 else if (ipv6_valid && udp_valid)
1325                         *tun_type = ICE_SW_IPV6_UDP;
1326         }
1327
1328         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1329                 rte_flow_error_set(error, EINVAL,
1330                         RTE_FLOW_ERROR_TYPE_ITEM,
1331                         item,
1332                         "too much input set");
1333                 return -ENOTSUP;
1334         }
1335
1336         *lkups_num = t;
1337
1338         return input_set;
1339 out:
1340         return 0;
1341 }
1342
1343 static int
1344 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1345                             const struct rte_flow_action *actions,
1346                             struct rte_flow_error *error,
1347                             struct ice_adv_rule_info *rule_info)
1348 {
1349         const struct rte_flow_action_vf *act_vf;
1350         const struct rte_flow_action *action;
1351         enum rte_flow_action_type action_type;
1352
1353         for (action = actions; action->type !=
1354                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1355                 action_type = action->type;
1356                 switch (action_type) {
1357                 case RTE_FLOW_ACTION_TYPE_VF:
1358                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1359                         act_vf = action->conf;
1360
1361                         if (act_vf->id >= ad->real_hw.num_vfs &&
1362                                 !act_vf->original) {
1363                                 rte_flow_error_set(error,
1364                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1365                                         actions,
1366                                         "Invalid vf id");
1367                                 return -rte_errno;
1368                         }
1369
1370                         if (act_vf->original)
1371                                 rule_info->sw_act.vsi_handle =
1372                                         ad->real_hw.avf.bus.func;
1373                         else
1374                                 rule_info->sw_act.vsi_handle = act_vf->id;
1375                         break;
1376
1377                 case RTE_FLOW_ACTION_TYPE_DROP:
1378                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1379                         break;
1380
1381                 default:
1382                         rte_flow_error_set(error,
1383                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1384                                            actions,
1385                                            "Invalid action type");
1386                         return -rte_errno;
1387                 }
1388         }
1389
1390         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1391         rule_info->sw_act.flag = ICE_FLTR_RX;
1392         rule_info->rx = 1;
1393         rule_info->priority = 5;
1394
1395         return 0;
1396 }
1397
1398 static int
1399 ice_switch_parse_action(struct ice_pf *pf,
1400                 const struct rte_flow_action *actions,
1401                 struct rte_flow_error *error,
1402                 struct ice_adv_rule_info *rule_info)
1403 {
1404         struct ice_vsi *vsi = pf->main_vsi;
1405         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1406         const struct rte_flow_action_queue *act_q;
1407         const struct rte_flow_action_rss *act_qgrop;
1408         uint16_t base_queue, i;
1409         const struct rte_flow_action *action;
1410         enum rte_flow_action_type action_type;
1411         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1412                  2, 4, 8, 16, 32, 64, 128};
1413
1414         base_queue = pf->base_queue + vsi->base_queue;
1415         for (action = actions; action->type !=
1416                         RTE_FLOW_ACTION_TYPE_END; action++) {
1417                 action_type = action->type;
1418                 switch (action_type) {
1419                 case RTE_FLOW_ACTION_TYPE_RSS:
1420                         act_qgrop = action->conf;
1421                         if (act_qgrop->queue_num <= 1)
1422                                 goto error;
1423                         rule_info->sw_act.fltr_act =
1424                                 ICE_FWD_TO_QGRP;
1425                         rule_info->sw_act.fwd_id.q_id =
1426                                 base_queue + act_qgrop->queue[0];
1427                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1428                                 if (act_qgrop->queue_num ==
1429                                         valid_qgrop_number[i])
1430                                         break;
1431                         }
1432                         if (i == MAX_QGRP_NUM_TYPE)
1433                                 goto error;
1434                         if ((act_qgrop->queue[0] +
1435                                 act_qgrop->queue_num) >
1436                                 dev->data->nb_rx_queues)
1437                                 goto error1;
1438                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1439                                 if (act_qgrop->queue[i + 1] !=
1440                                         act_qgrop->queue[i] + 1)
1441                                         goto error2;
1442                         rule_info->sw_act.qgrp_size =
1443                                 act_qgrop->queue_num;
1444                         break;
1445                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1446                         act_q = action->conf;
1447                         if (act_q->index >= dev->data->nb_rx_queues)
1448                                 goto error;
1449                         rule_info->sw_act.fltr_act =
1450                                 ICE_FWD_TO_Q;
1451                         rule_info->sw_act.fwd_id.q_id =
1452                                 base_queue + act_q->index;
1453                         break;
1454
1455                 case RTE_FLOW_ACTION_TYPE_DROP:
1456                         rule_info->sw_act.fltr_act =
1457                                 ICE_DROP_PACKET;
1458                         break;
1459
1460                 case RTE_FLOW_ACTION_TYPE_VOID:
1461                         break;
1462
1463                 default:
1464                         goto error;
1465                 }
1466         }
1467
1468         rule_info->sw_act.vsi_handle = vsi->idx;
1469         rule_info->rx = 1;
1470         rule_info->sw_act.src = vsi->idx;
1471         rule_info->priority = 5;
1472
1473         return 0;
1474
1475 error:
1476         rte_flow_error_set(error,
1477                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1478                 actions,
1479                 "Invalid action type or queue number");
1480         return -rte_errno;
1481
1482 error1:
1483         rte_flow_error_set(error,
1484                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1485                 actions,
1486                 "Invalid queue region indexes");
1487         return -rte_errno;
1488
1489 error2:
1490         rte_flow_error_set(error,
1491                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1492                 actions,
1493                 "Discontinuous queue region");
1494         return -rte_errno;
1495 }
1496
1497 static int
1498 ice_switch_check_action(const struct rte_flow_action *actions,
1499                             struct rte_flow_error *error)
1500 {
1501         const struct rte_flow_action *action;
1502         enum rte_flow_action_type action_type;
1503         uint16_t actions_num = 0;
1504
1505         for (action = actions; action->type !=
1506                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1507                 action_type = action->type;
1508                 switch (action_type) {
1509                 case RTE_FLOW_ACTION_TYPE_VF:
1510                 case RTE_FLOW_ACTION_TYPE_RSS:
1511                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1512                 case RTE_FLOW_ACTION_TYPE_DROP:
1513                         actions_num++;
1514                         break;
1515                 case RTE_FLOW_ACTION_TYPE_VOID:
1516                         continue;
1517                 default:
1518                         rte_flow_error_set(error,
1519                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1520                                            actions,
1521                                            "Invalid action type");
1522                         return -rte_errno;
1523                 }
1524         }
1525
1526         if (actions_num != 1) {
1527                 rte_flow_error_set(error,
1528                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1529                                    actions,
1530                                    "Invalid action number");
1531                 return -rte_errno;
1532         }
1533
1534         return 0;
1535 }
1536
/* Parse a flow pattern + action list into a switch-filter rule.
 * On success, ownership of the lookup list and sw_meta is transferred
 * to *meta (when meta is non-NULL); on failure everything allocated
 * here is freed and -rte_errno is returned.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* First pass over the pattern: count items (to size the lookup
	 * array) and pre-classify the tunnel type from ETH/VLAN items.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* Fully-masked ether type: rule must hit both
			 * tunneled and non-tunneled packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN items make this a QinQ pattern. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Second pass: build the lookup list and the input set; may also
	 * refine tun_type based on the parsed items.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	/* An empty input set is only legal for profile rules; the input
	 * set must also be a subset of what the matched pattern allows.
	 */
	if ((!inputset && !ice_is_prof_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask_o)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF mode uses the VF-forwarding action parser. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
						  &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret)
		goto error;

	if (meta) {
		/* Hand off list and sw_meta_ptr to the caller via *meta. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		/* Validation-only call: nothing to keep. */
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1662
1663 static int
1664 ice_switch_query(struct ice_adapter *ad __rte_unused,
1665                 struct rte_flow *flow __rte_unused,
1666                 struct rte_flow_query_count *count __rte_unused,
1667                 struct rte_flow_error *error)
1668 {
1669         rte_flow_error_set(error, EINVAL,
1670                 RTE_FLOW_ERROR_TYPE_HANDLE,
1671                 NULL,
1672                 "count action not supported by switch filter");
1673
1674         return -rte_errno;
1675 }
1676
/*
 * Re-target an existing switch rule at a new VSI number (rd->new_vsi_num).
 * The matching advanced rule is located in the recipe's filter list, a copy
 * of its lookup elements is taken, the old rule is removed from hardware,
 * the cached VSI context is updated, and the rule is replayed with the same
 * lookups.  NOTE(review): presumably invoked on VSI renumbering events such
 * as a DCF/VF reset — confirm against the callers of the redirect op.
 *
 * Returns 0 on success (or when the rule does not belong to the redirected
 * VSI), -EINVAL on lookup/allocation/HW failure, -ENOTSUP for redirect
 * types other than ICE_FLOW_REDIRECT_VSI.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Rule belongs to a different VSI: nothing to do, not an error. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	/* The recipe this rule was added under must still exist. */
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is implemented. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/*
	 * Walk the recipe's filter list for the entry matching this flow's
	 * rule ID.  Two cases match: a plain FWD_TO_VSI rule pointing at the
	 * redirected VSI, or a FWD_TO_VSI_LIST rule (which is rewritten below
	 * into a single-VSI forward before replay).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/*
			 * Duplicate the lookup elements now: the originals are
			 * freed when the old rule is removed below.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			/* Collapse a VSI-list forward to a single-VSI forward. */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule found in the filter list. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1758
1759 static int
1760 ice_switch_init(struct ice_adapter *ad)
1761 {
1762         int ret = 0;
1763         struct ice_flow_parser *dist_parser;
1764         struct ice_flow_parser *perm_parser;
1765
1766         if (ad->devargs.pipe_mode_support) {
1767                 perm_parser = &ice_switch_perm_parser;
1768                 ret = ice_register_parser(perm_parser, ad);
1769         } else {
1770                 dist_parser = &ice_switch_dist_parser;
1771                 ret = ice_register_parser(dist_parser, ad);
1772         }
1773         return ret;
1774 }
1775
1776 static void
1777 ice_switch_uninit(struct ice_adapter *ad)
1778 {
1779         struct ice_flow_parser *dist_parser;
1780         struct ice_flow_parser *perm_parser;
1781
1782         if (ad->devargs.pipe_mode_support) {
1783                 perm_parser = &ice_switch_perm_parser;
1784                 ice_unregister_parser(perm_parser, ad);
1785         } else {
1786                 dist_parser = &ice_switch_dist_parser;
1787                 ice_unregister_parser(dist_parser, ad);
1788         }
1789 }
1790
1791 static struct
1792 ice_flow_engine ice_switch_engine = {
1793         .init = ice_switch_init,
1794         .uninit = ice_switch_uninit,
1795         .create = ice_switch_create,
1796         .destroy = ice_switch_destroy,
1797         .query_count = ice_switch_query,
1798         .redirect = ice_switch_redirect,
1799         .free = ice_switch_filter_rule_free,
1800         .type = ICE_FLOW_ENGINE_SWITCH,
1801 };
1802
1803 static struct
1804 ice_flow_parser ice_switch_dist_parser = {
1805         .engine = &ice_switch_engine,
1806         .array = ice_switch_pattern_dist_list,
1807         .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1808         .parse_pattern_action = ice_switch_parse_pattern_action,
1809         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1810 };
1811
1812 static struct
1813 ice_flow_parser ice_switch_perm_parser = {
1814         .engine = &ice_switch_engine,
1815         .array = ice_switch_pattern_perm_list,
1816         .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1817         .parse_pattern_action = ice_switch_parse_pattern_action,
1818         .stage = ICE_FLOW_STAGE_PERMISSION,
1819 };
1820
1821 RTE_INIT(ice_sw_engine_init)
1822 {
1823         struct ice_flow_engine *engine = &ice_switch_engine;
1824         ice_register_flow_engine(engine);
1825 }