net/ice: remove redundant function
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
29 #define MAX_QGRP_NUM_TYPE       7
30 #define MAX_INPUT_SET_BYTE      32
31 #define ICE_PPP_IPV4_PROTO      0x0021
32 #define ICE_PPP_IPV6_PROTO      0x0057
33 #define ICE_IPV4_PROTO_NVGRE    0x002F
34
35 #define ICE_SW_INSET_ETHER ( \
36         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
39         ICE_INSET_VLAN_INNER)
40 #define ICE_SW_INSET_MAC_QINQ  ( \
41         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
42         ICE_INSET_VLAN_OUTER)
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
48 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
49         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
50         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
51         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
52 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
53         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
54         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
55         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
56 #define ICE_SW_INSET_MAC_IPV6 ( \
57         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
58         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
59         ICE_INSET_IPV6_NEXT_HDR)
60 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
61         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
62 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
63         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
65         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
66 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
67         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
68         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
69         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
70 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
71         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
72         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
74         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
77         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
79         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
81         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
83         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
84 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
85         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
87         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
88 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
89         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
91         ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
93         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94         ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
95 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
96         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
97         ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
98         ICE_INSET_TUN_IPV4_TOS)
99 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
100         ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
101         ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
102         ICE_INSET_TUN_IPV4_TOS)
103 #define ICE_SW_INSET_MAC_PPPOE  ( \
104         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
105         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
106 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
107         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
108         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
109         ICE_INSET_PPPOE_PROTO)
110 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
111         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
112 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
113         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
114 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
115         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
116 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
117         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
118 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
119         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
120 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
121         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
122 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
123         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
124 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
125         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
126 #define ICE_SW_INSET_MAC_IPV4_AH ( \
127         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
128 #define ICE_SW_INSET_MAC_IPV6_AH ( \
129         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
130 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
131         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
132 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
133         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
134 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
135         ICE_SW_INSET_MAC_IPV4 | \
136         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
137 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
138         ICE_SW_INSET_MAC_IPV6 | \
139         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
140
/* Parser output handed from the pattern/action parse stage to the rule
 * create stage: the advanced-rule lookup list, its element count and the
 * rule metadata consumed by ice_add_adv_rule().
 */
struct sw_meta {
	struct ice_adv_lkup_elem *list;	/* heap-allocated lookup elements; freed by the create stage */
	uint16_t lkups_num;		/* number of valid entries in list */
	struct ice_adv_rule_info rule_info;
};

/* Switch-filter parsers for the distributor and permission pipeline
 * stages; presumably initialized/registered later in this file — the
 * definitions are not visible in this chunk.
 */
static struct ice_flow_parser ice_switch_dist_parser;
static struct ice_flow_parser ice_switch_perm_parser;
149
/* Supported pattern/input-set table for the distributor stage.
 * Each row: {pattern list, outer input-set mask, and two further mask
 * fields of struct ice_pattern_match_item (ICE_INSET_NONE here) —
 * NOTE(review): field names/semantics of the last two columns are not
 * visible in this chunk; confirm against ice_generic_flow.h.
 */
static struct
ice_pattern_match_item ice_switch_pattern_dist_list[] = {
	{pattern_ethertype,				ICE_SW_INSET_ETHER,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_vlan,			ICE_SW_INSET_MAC_VLAN,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_qinq,			ICE_SW_INSET_MAC_QINQ,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_arp,				ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4,				ICE_SW_INSET_MAC_IPV4,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,				ICE_SW_INSET_MAC_IPV4_UDP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,				ICE_SW_INSET_MAC_IPV4_TCP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6,				ICE_SW_INSET_MAC_IPV6,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,				ICE_SW_INSET_MAC_IPV6_UDP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,				ICE_SW_INSET_MAC_IPV6_TCP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,		ICE_SW_INSET_DIST_VXLAN_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_SW_INSET_DIST_VXLAN_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_SW_INSET_DIST_VXLAN_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,		ICE_SW_INSET_DIST_NVGRE_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,		ICE_SW_INSET_DIST_NVGRE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,		ICE_SW_INSET_DIST_NVGRE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes,				ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,		ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,		ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,				ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,			ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,				ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,			ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,				ICE_SW_INSET_MAC_IPV4_AH,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,				ICE_SW_INSET_MAC_IPV6_AH,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,			ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,				ICE_SW_INSET_MAC_IPV4_L2TP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,				ICE_SW_INSET_MAC_IPV6_L2TP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4,				ICE_SW_INSET_MAC_QINQ_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6,				ICE_SW_INSET_MAC_QINQ_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
};
202
/* Supported pattern/input-set table for the permission stage.
 * Differs from the distributor table only in the tunnel rows, which use
 * the ICE_SW_INSET_PERM_TUNNEL_* input sets (no outer-DMAC/VNI matching).
 * Row layout mirrors ice_switch_pattern_dist_list above.
 */
static struct
ice_pattern_match_item ice_switch_pattern_perm_list[] = {
	{pattern_ethertype,				ICE_SW_INSET_ETHER,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_vlan,			ICE_SW_INSET_MAC_VLAN,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_ethertype_qinq,			ICE_SW_INSET_MAC_QINQ,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_arp,				ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4,				ICE_SW_INSET_MAC_IPV4,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,				ICE_SW_INSET_MAC_IPV4_UDP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,				ICE_SW_INSET_MAC_IPV4_TCP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6,				ICE_SW_INSET_MAC_IPV6,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,				ICE_SW_INSET_MAC_IPV6_UDP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,				ICE_SW_INSET_MAC_IPV6_TCP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,		ICE_SW_INSET_PERM_TUNNEL_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,	ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,	ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4,		ICE_SW_INSET_PERM_TUNNEL_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,		ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,		ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes,				ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv4_udp,			ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_tcp,			ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_pppoes_ipv6_udp,			ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv4_udp,		ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_tcp,		ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_vlan_pppoes_ipv6_udp,		ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_esp,				ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp,			ICE_SW_INSET_MAC_IPV4_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_esp,				ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp,			ICE_SW_INSET_MAC_IPV6_ESP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_ah,				ICE_SW_INSET_MAC_IPV4_AH,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_ah,				ICE_SW_INSET_MAC_IPV6_AH,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_ah,			ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_l2tp,				ICE_SW_INSET_MAC_IPV4_L2TP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_l2tp,				ICE_SW_INSET_MAC_IPV6_L2TP,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv6_pfcp,				ICE_INSET_NONE,				ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv4,				ICE_SW_INSET_MAC_QINQ_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_ipv6,				ICE_SW_INSET_MAC_QINQ_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes,			ICE_SW_INSET_MAC_PPPOE,			ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_proto,			ICE_SW_INSET_MAC_PPPOE_PROTO,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv4,			ICE_SW_INSET_MAC_PPPOE_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_qinq_pppoes_ipv6,			ICE_SW_INSET_MAC_PPPOE_IPV6,		ICE_INSET_NONE,	ICE_INSET_NONE},
};
255
256 static int
257 ice_switch_create(struct ice_adapter *ad,
258                 struct rte_flow *flow,
259                 void *meta,
260                 struct rte_flow_error *error)
261 {
262         int ret = 0;
263         struct ice_pf *pf = &ad->pf;
264         struct ice_hw *hw = ICE_PF_TO_HW(pf);
265         struct ice_rule_query_data rule_added = {0};
266         struct ice_rule_query_data *filter_ptr;
267         struct ice_adv_lkup_elem *list =
268                 ((struct sw_meta *)meta)->list;
269         uint16_t lkups_cnt =
270                 ((struct sw_meta *)meta)->lkups_num;
271         struct ice_adv_rule_info *rule_info =
272                 &((struct sw_meta *)meta)->rule_info;
273
274         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
275                 rte_flow_error_set(error, EINVAL,
276                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
277                         "item number too large for rule");
278                 goto error;
279         }
280         if (!list) {
281                 rte_flow_error_set(error, EINVAL,
282                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
283                         "lookup list should not be NULL");
284                 goto error;
285         }
286         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
287         if (!ret) {
288                 filter_ptr = rte_zmalloc("ice_switch_filter",
289                         sizeof(struct ice_rule_query_data), 0);
290                 if (!filter_ptr) {
291                         rte_flow_error_set(error, EINVAL,
292                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
293                                    "No memory for ice_switch_filter");
294                         goto error;
295                 }
296                 flow->rule = filter_ptr;
297                 rte_memcpy(filter_ptr,
298                         &rule_added,
299                         sizeof(struct ice_rule_query_data));
300         } else {
301                 rte_flow_error_set(error, EINVAL,
302                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
303                         "switch filter create flow fail");
304                 goto error;
305         }
306
307         rte_free(list);
308         rte_free(meta);
309         return 0;
310
311 error:
312         rte_free(list);
313         rte_free(meta);
314
315         return -rte_errno;
316 }
317
318 static int
319 ice_switch_destroy(struct ice_adapter *ad,
320                 struct rte_flow *flow,
321                 struct rte_flow_error *error)
322 {
323         struct ice_hw *hw = &ad->hw;
324         int ret;
325         struct ice_rule_query_data *filter_ptr;
326
327         filter_ptr = (struct ice_rule_query_data *)
328                 flow->rule;
329
330         if (!filter_ptr) {
331                 rte_flow_error_set(error, EINVAL,
332                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
333                         "no such flow"
334                         " create by switch filter");
335                 return -rte_errno;
336         }
337
338         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
339         if (ret) {
340                 rte_flow_error_set(error, EINVAL,
341                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
342                         "fail to destroy switch filter rule");
343                 return -rte_errno;
344         }
345
346         rte_free(filter_ptr);
347         return ret;
348 }
349
350 static void
351 ice_switch_filter_rule_free(struct rte_flow *flow)
352 {
353         rte_free(flow->rule);
354 }
355
356 static uint64_t
357 ice_switch_inset_get(const struct rte_flow_item pattern[],
358                 struct rte_flow_error *error,
359                 struct ice_adv_lkup_elem *list,
360                 uint16_t *lkups_num,
361                 enum ice_sw_tunnel_type *tun_type)
362 {
363         const struct rte_flow_item *item = pattern;
364         enum rte_flow_item_type item_type;
365         const struct rte_flow_item_eth *eth_spec, *eth_mask;
366         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
367         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
368         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
369         const struct rte_flow_item_udp *udp_spec, *udp_mask;
370         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
371         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
372         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
373         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
374         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
375         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
376                                 *pppoe_proto_mask;
377         const struct rte_flow_item_esp *esp_spec, *esp_mask;
378         const struct rte_flow_item_ah *ah_spec, *ah_mask;
379         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
380         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
381         uint64_t input_set = ICE_INSET_NONE;
382         uint16_t input_set_byte = 0;
383         bool pppoe_elem_valid = 0;
384         bool pppoe_patt_valid = 0;
385         bool pppoe_prot_valid = 0;
386         bool inner_vlan_valid = 0;
387         bool outer_vlan_valid = 0;
388         bool tunnel_valid = 0;
389         bool profile_rule = 0;
390         bool nvgre_valid = 0;
391         bool vxlan_valid = 0;
392         bool ipv6_valid = 0;
393         bool ipv4_valid = 0;
394         bool udp_valid = 0;
395         bool tcp_valid = 0;
396         uint16_t j, t = 0;
397
398         for (item = pattern; item->type !=
399                         RTE_FLOW_ITEM_TYPE_END; item++) {
400                 if (item->last) {
401                         rte_flow_error_set(error, EINVAL,
402                                         RTE_FLOW_ERROR_TYPE_ITEM,
403                                         item,
404                                         "Not support range");
405                         return 0;
406                 }
407                 item_type = item->type;
408
409                 switch (item_type) {
410                 case RTE_FLOW_ITEM_TYPE_ETH:
411                         eth_spec = item->spec;
412                         eth_mask = item->mask;
413                         if (eth_spec && eth_mask) {
414                                 const uint8_t *a = eth_mask->src.addr_bytes;
415                                 const uint8_t *b = eth_mask->dst.addr_bytes;
416                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
417                                         if (a[j] && tunnel_valid) {
418                                                 input_set |=
419                                                         ICE_INSET_TUN_SMAC;
420                                                 break;
421                                         } else if (a[j]) {
422                                                 input_set |=
423                                                         ICE_INSET_SMAC;
424                                                 break;
425                                         }
426                                 }
427                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
428                                         if (b[j] && tunnel_valid) {
429                                                 input_set |=
430                                                         ICE_INSET_TUN_DMAC;
431                                                 break;
432                                         } else if (b[j]) {
433                                                 input_set |=
434                                                         ICE_INSET_DMAC;
435                                                 break;
436                                         }
437                                 }
438                                 if (eth_mask->type)
439                                         input_set |= ICE_INSET_ETHERTYPE;
440                                 list[t].type = (tunnel_valid  == 0) ?
441                                         ICE_MAC_OFOS : ICE_MAC_IL;
442                                 struct ice_ether_hdr *h;
443                                 struct ice_ether_hdr *m;
444                                 uint16_t i = 0;
445                                 h = &list[t].h_u.eth_hdr;
446                                 m = &list[t].m_u.eth_hdr;
447                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
448                                         if (eth_mask->src.addr_bytes[j]) {
449                                                 h->src_addr[j] =
450                                                 eth_spec->src.addr_bytes[j];
451                                                 m->src_addr[j] =
452                                                 eth_mask->src.addr_bytes[j];
453                                                 i = 1;
454                                                 input_set_byte++;
455                                         }
456                                         if (eth_mask->dst.addr_bytes[j]) {
457                                                 h->dst_addr[j] =
458                                                 eth_spec->dst.addr_bytes[j];
459                                                 m->dst_addr[j] =
460                                                 eth_mask->dst.addr_bytes[j];
461                                                 i = 1;
462                                                 input_set_byte++;
463                                         }
464                                 }
465                                 if (i)
466                                         t++;
467                                 if (eth_mask->type) {
468                                         list[t].type = ICE_ETYPE_OL;
469                                         list[t].h_u.ethertype.ethtype_id =
470                                                 eth_spec->type;
471                                         list[t].m_u.ethertype.ethtype_id =
472                                                 eth_mask->type;
473                                         input_set_byte += 2;
474                                         t++;
475                                 }
476                         }
477                         break;
478
479                 case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* IPv4 item: reject masks on fields the switch filter
                         * cannot match (version/IHL, total length, packet id,
                         * checksum), accumulate the matched fields into
                         * input_set, and fill one lookup element — outer
                         * header before any tunnel item was seen, inner
                         * header after.
                         */
480                         ipv4_spec = item->spec;
481                         ipv4_mask = item->mask;
482                         ipv4_valid = 1;
483                         if (ipv4_spec && ipv4_mask) {
484                                 /* Check IPv4 mask and update input set */
485                                 if (ipv4_mask->hdr.version_ihl ||
486                                         ipv4_mask->hdr.total_length ||
487                                         ipv4_mask->hdr.packet_id ||
488                                         ipv4_mask->hdr.hdr_checksum) {
489                                         rte_flow_error_set(error, EINVAL,
490                                                    RTE_FLOW_ERROR_TYPE_ITEM,
491                                                    item,
492                                                    "Invalid IPv4 mask.");
493                                         return 0;
494                                 }
495
                                /* Tunnel inner vs. outer fields use distinct
                                 * input-set bits.
                                 */
496                                 if (tunnel_valid) {
497                                         if (ipv4_mask->hdr.type_of_service)
498                                                 input_set |=
499                                                         ICE_INSET_TUN_IPV4_TOS;
500                                         if (ipv4_mask->hdr.src_addr)
501                                                 input_set |=
502                                                         ICE_INSET_TUN_IPV4_SRC;
503                                         if (ipv4_mask->hdr.dst_addr)
504                                                 input_set |=
505                                                         ICE_INSET_TUN_IPV4_DST;
506                                         if (ipv4_mask->hdr.time_to_live)
507                                                 input_set |=
508                                                         ICE_INSET_TUN_IPV4_TTL;
509                                         if (ipv4_mask->hdr.next_proto_id)
510                                                 input_set |=
511                                                 ICE_INSET_TUN_IPV4_PROTO;
512                                 } else {
513                                         if (ipv4_mask->hdr.src_addr)
514                                                 input_set |= ICE_INSET_IPV4_SRC;
515                                         if (ipv4_mask->hdr.dst_addr)
516                                                 input_set |= ICE_INSET_IPV4_DST;
517                                         if (ipv4_mask->hdr.time_to_live)
518                                                 input_set |= ICE_INSET_IPV4_TTL;
519                                         if (ipv4_mask->hdr.next_proto_id)
520                                                 input_set |=
521                                                 ICE_INSET_IPV4_PROTO;
522                                         if (ipv4_mask->hdr.type_of_service)
523                                                 input_set |=
524                                                         ICE_INSET_IPV4_TOS;
525                                 }
526                                 list[t].type = (tunnel_valid  == 0) ?
527                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
528                                 if (ipv4_mask->hdr.src_addr) {
529                                         list[t].h_u.ipv4_hdr.src_addr =
530                                                 ipv4_spec->hdr.src_addr;
531                                         list[t].m_u.ipv4_hdr.src_addr =
532                                                 ipv4_mask->hdr.src_addr;
                                        /* NOTE(review): a 4-byte address is
                                         * counted as 2 input-set bytes here
                                         * and for dst_addr below — confirm
                                         * this is the intended accounting
                                         * against MAX_INPUT_SET_BYTE.
                                         */
533                                         input_set_byte += 2;
534                                 }
535                                 if (ipv4_mask->hdr.dst_addr) {
536                                         list[t].h_u.ipv4_hdr.dst_addr =
537                                                 ipv4_spec->hdr.dst_addr;
538                                         list[t].m_u.ipv4_hdr.dst_addr =
539                                                 ipv4_mask->hdr.dst_addr;
540                                         input_set_byte += 2;
541                                 }
542                                 if (ipv4_mask->hdr.time_to_live) {
543                                         list[t].h_u.ipv4_hdr.time_to_live =
544                                                 ipv4_spec->hdr.time_to_live;
545                                         list[t].m_u.ipv4_hdr.time_to_live =
546                                                 ipv4_mask->hdr.time_to_live;
547                                         input_set_byte++;
548                                 }
549                                 if (ipv4_mask->hdr.next_proto_id) {
550                                         list[t].h_u.ipv4_hdr.protocol =
551                                                 ipv4_spec->hdr.next_proto_id;
552                                         list[t].m_u.ipv4_hdr.protocol =
553                                                 ipv4_mask->hdr.next_proto_id;
554                                         input_set_byte++;
555                                 }
                                /* Masked L4 protocol equal to 0x2F (GRE):
                                 * widen the tunnel type so the rule can hit
                                 * both tunneled and plain packets.
                                 */
556                                 if ((ipv4_spec->hdr.next_proto_id &
557                                         ipv4_mask->hdr.next_proto_id) ==
558                                         ICE_IPV4_PROTO_NVGRE)
559                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
560                                 if (ipv4_mask->hdr.type_of_service) {
561                                         list[t].h_u.ipv4_hdr.tos =
562                                                 ipv4_spec->hdr.type_of_service;
563                                         list[t].m_u.ipv4_hdr.tos =
564                                                 ipv4_mask->hdr.type_of_service;
565                                         input_set_byte++;
566                                 }
567                                 t++;
568                         }
569                         break;
570
571                 case RTE_FLOW_ITEM_TYPE_IPV6:
                        /* IPv6 item: only payload_len is unmatchable. The
                         * 16-byte addresses are handled byte-by-byte; the
                         * Traffic Class is carved out of vtc_flow with
                         * RTE_IPV6_HDR_TC_MASK.
                         */
572                         ipv6_spec = item->spec;
573                         ipv6_mask = item->mask;
574                         ipv6_valid = 1;
575                         if (ipv6_spec && ipv6_mask) {
576                                 if (ipv6_mask->hdr.payload_len) {
577                                         rte_flow_error_set(error, EINVAL,
578                                            RTE_FLOW_ERROR_TYPE_ITEM,
579                                            item,
580                                            "Invalid IPv6 mask");
581                                         return 0;
582                                 }
583
                                /* Any nonzero mask byte selects the whole
                                 * src/dst address as an input-set field.
                                 */
584                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
585                                         if (ipv6_mask->hdr.src_addr[j] &&
586                                                 tunnel_valid) {
587                                                 input_set |=
588                                                 ICE_INSET_TUN_IPV6_SRC;
589                                                 break;
590                                         } else if (ipv6_mask->hdr.src_addr[j]) {
591                                                 input_set |= ICE_INSET_IPV6_SRC;
592                                                 break;
593                                         }
594                                 }
595                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
596                                         if (ipv6_mask->hdr.dst_addr[j] &&
597                                                 tunnel_valid) {
598                                                 input_set |=
599                                                 ICE_INSET_TUN_IPV6_DST;
600                                                 break;
601                                         } else if (ipv6_mask->hdr.dst_addr[j]) {
602                                                 input_set |= ICE_INSET_IPV6_DST;
603                                                 break;
604                                         }
605                                 }
606                                 if (ipv6_mask->hdr.proto &&
607                                         tunnel_valid)
608                                         input_set |=
609                                                 ICE_INSET_TUN_IPV6_NEXT_HDR;
610                                 else if (ipv6_mask->hdr.proto)
611                                         input_set |=
612                                                 ICE_INSET_IPV6_NEXT_HDR;
613                                 if (ipv6_mask->hdr.hop_limits &&
614                                         tunnel_valid)
615                                         input_set |=
616                                                 ICE_INSET_TUN_IPV6_HOP_LIMIT;
617                                 else if (ipv6_mask->hdr.hop_limits)
618                                         input_set |=
619                                                 ICE_INSET_IPV6_HOP_LIMIT;
620                                 if ((ipv6_mask->hdr.vtc_flow &
621                                                 rte_cpu_to_be_32
622                                                 (RTE_IPV6_HDR_TC_MASK)) &&
623                                         tunnel_valid)
624                                         input_set |=
625                                                         ICE_INSET_TUN_IPV6_TC;
626                                 else if (ipv6_mask->hdr.vtc_flow &
627                                                 rte_cpu_to_be_32
628                                                 (RTE_IPV6_HDR_TC_MASK))
629                                         input_set |= ICE_INSET_IPV6_TC;
630
631                                 list[t].type = (tunnel_valid  == 0) ?
632                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
                                /* f = header values, s = header mask of the
                                 * lookup element being filled.
                                 */
633                                 struct ice_ipv6_hdr *f;
634                                 struct ice_ipv6_hdr *s;
635                                 f = &list[t].h_u.ipv6_hdr;
636                                 s = &list[t].m_u.ipv6_hdr;
637                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
638                                         if (ipv6_mask->hdr.src_addr[j]) {
639                                                 f->src_addr[j] =
640                                                 ipv6_spec->hdr.src_addr[j];
641                                                 s->src_addr[j] =
642                                                 ipv6_mask->hdr.src_addr[j];
643                                                 input_set_byte++;
644                                         }
645                                         if (ipv6_mask->hdr.dst_addr[j]) {
646                                                 f->dst_addr[j] =
647                                                 ipv6_spec->hdr.dst_addr[j];
648                                                 s->dst_addr[j] =
649                                                 ipv6_mask->hdr.dst_addr[j];
650                                                 input_set_byte++;
651                                         }
652                                 }
653                                 if (ipv6_mask->hdr.proto) {
654                                         f->next_hdr =
655                                                 ipv6_spec->hdr.proto;
656                                         s->next_hdr =
657                                                 ipv6_mask->hdr.proto;
658                                         input_set_byte++;
659                                 }
660                                 if (ipv6_mask->hdr.hop_limits) {
661                                         f->hop_limit =
662                                                 ipv6_spec->hdr.hop_limits;
663                                         s->hop_limit =
664                                                 ipv6_mask->hdr.hop_limits;
665                                         input_set_byte++;
666                                 }
667                                 if (ipv6_mask->hdr.vtc_flow &
668                                                 rte_cpu_to_be_32
669                                                 (RTE_IPV6_HDR_TC_MASK)) {
                                        /* Rebuild version/tc/flow-label word:
                                         * keep only the TC bits, zero the
                                         * rest, and store big-endian.
                                         */
670                                         struct ice_le_ver_tc_flow vtf;
671                                         vtf.u.fld.version = 0;
672                                         vtf.u.fld.flow_label = 0;
673                                         vtf.u.fld.tc = (rte_be_to_cpu_32
674                                                 (ipv6_spec->hdr.vtc_flow) &
675                                                         RTE_IPV6_HDR_TC_MASK) >>
676                                                         RTE_IPV6_HDR_TC_SHIFT;
677                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
678                                         vtf.u.fld.tc = (rte_be_to_cpu_32
679                                                 (ipv6_mask->hdr.vtc_flow) &
680                                                         RTE_IPV6_HDR_TC_MASK) >>
681                                                         RTE_IPV6_HDR_TC_SHIFT;
682                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
683                                         input_set_byte += 4;
684                                 }
685                                 t++;
686                         }
687                         break;
688
689                 case RTE_FLOW_ITEM_TYPE_UDP:
                        /* UDP item: only src/dst port are matchable; length
                         * and checksum masks are rejected.
                         */
690                         udp_spec = item->spec;
691                         udp_mask = item->mask;
692                         udp_valid = 1;
693                         if (udp_spec && udp_mask) {
694                                 /* Check UDP mask and update input set*/
695                                 if (udp_mask->hdr.dgram_len ||
696                                     udp_mask->hdr.dgram_cksum) {
697                                         rte_flow_error_set(error, EINVAL,
698                                                    RTE_FLOW_ERROR_TYPE_ITEM,
699                                                    item,
700                                                    "Invalid UDP mask");
701                                         return 0;
702                                 }
703
704                                 if (tunnel_valid) {
705                                         if (udp_mask->hdr.src_port)
706                                                 input_set |=
707                                                 ICE_INSET_TUN_UDP_SRC_PORT;
708                                         if (udp_mask->hdr.dst_port)
709                                                 input_set |=
710                                                 ICE_INSET_TUN_UDP_DST_PORT;
711                                 } else {
712                                         if (udp_mask->hdr.src_port)
713                                                 input_set |=
714                                                 ICE_INSET_UDP_SRC_PORT;
715                                         if (udp_mask->hdr.dst_port)
716                                                 input_set |=
717                                                 ICE_INSET_UDP_DST_PORT;
718                                 }
                                /* The outer UDP header of a VXLAN tunnel is a
                                 * distinct lookup type (ICE_UDP_OF); any other
                                 * UDP header uses ICE_UDP_ILOS.
                                 */
719                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
720                                                 tunnel_valid == 0)
721                                         list[t].type = ICE_UDP_OF;
722                                 else
723                                         list[t].type = ICE_UDP_ILOS;
724                                 if (udp_mask->hdr.src_port) {
725                                         list[t].h_u.l4_hdr.src_port =
726                                                 udp_spec->hdr.src_port;
727                                         list[t].m_u.l4_hdr.src_port =
728                                                 udp_mask->hdr.src_port;
729                                         input_set_byte += 2;
730                                 }
731                                 if (udp_mask->hdr.dst_port) {
732                                         list[t].h_u.l4_hdr.dst_port =
733                                                 udp_spec->hdr.dst_port;
734                                         list[t].m_u.l4_hdr.dst_port =
735                                                 udp_mask->hdr.dst_port;
736                                         input_set_byte += 2;
737                                 }
738                                 t++;
739                         }
740                         break;
741
742                 case RTE_FLOW_ITEM_TYPE_TCP:
                        /* TCP item: only src/dst port are matchable; every
                         * other header field's mask is rejected.
                         */
743                         tcp_spec = item->spec;
744                         tcp_mask = item->mask;
745                         tcp_valid = 1;
746                         if (tcp_spec && tcp_mask) {
747                                 /* Check TCP mask and update input set */
748                                 if (tcp_mask->hdr.sent_seq ||
749                                         tcp_mask->hdr.recv_ack ||
750                                         tcp_mask->hdr.data_off ||
751                                         tcp_mask->hdr.tcp_flags ||
752                                         tcp_mask->hdr.rx_win ||
753                                         tcp_mask->hdr.cksum ||
754                                         tcp_mask->hdr.tcp_urp) {
755                                         rte_flow_error_set(error, EINVAL,
756                                            RTE_FLOW_ERROR_TYPE_ITEM,
757                                            item,
758                                            "Invalid TCP mask");
759                                         return 0;
760                                 }
761
762                                 if (tunnel_valid) {
763                                         if (tcp_mask->hdr.src_port)
764                                                 input_set |=
765                                                 ICE_INSET_TUN_TCP_SRC_PORT;
766                                         if (tcp_mask->hdr.dst_port)
767                                                 input_set |=
768                                                 ICE_INSET_TUN_TCP_DST_PORT;
769                                 } else {
770                                         if (tcp_mask->hdr.src_port)
771                                                 input_set |=
772                                                 ICE_INSET_TCP_SRC_PORT;
773                                         if (tcp_mask->hdr.dst_port)
774                                                 input_set |=
775                                                 ICE_INSET_TCP_DST_PORT;
776                                 }
                                /* Unlike UDP there is no outer-TCP lookup
                                 * type; TCP always uses ICE_TCP_IL.
                                 */
777                                 list[t].type = ICE_TCP_IL;
778                                 if (tcp_mask->hdr.src_port) {
779                                         list[t].h_u.l4_hdr.src_port =
780                                                 tcp_spec->hdr.src_port;
781                                         list[t].m_u.l4_hdr.src_port =
782                                                 tcp_mask->hdr.src_port;
783                                         input_set_byte += 2;
784                                 }
785                                 if (tcp_mask->hdr.dst_port) {
786                                         list[t].h_u.l4_hdr.dst_port =
787                                                 tcp_spec->hdr.dst_port;
788                                         list[t].m_u.l4_hdr.dst_port =
789                                                 tcp_mask->hdr.dst_port;
790                                         input_set_byte += 2;
791                                 }
792                                 t++;
793                         }
794                         break;
795
796                 case RTE_FLOW_ITEM_TYPE_SCTP:
                        /* SCTP item: only src/dst port are matchable; a
                         * checksum mask is rejected. Note there is no
                         * sctp_valid flag, unlike the UDP/TCP cases.
                         */
797                         sctp_spec = item->spec;
798                         sctp_mask = item->mask;
799                         if (sctp_spec && sctp_mask) {
800                                 /* Check SCTP mask and update input set */
801                                 if (sctp_mask->hdr.cksum) {
802                                         rte_flow_error_set(error, EINVAL,
803                                            RTE_FLOW_ERROR_TYPE_ITEM,
804                                            item,
805                                            "Invalid SCTP mask");
806                                         return 0;
807                                 }
808
809                                 if (tunnel_valid) {
810                                         if (sctp_mask->hdr.src_port)
811                                                 input_set |=
812                                                 ICE_INSET_TUN_SCTP_SRC_PORT;
813                                         if (sctp_mask->hdr.dst_port)
814                                                 input_set |=
815                                                 ICE_INSET_TUN_SCTP_DST_PORT;
816                                 } else {
817                                         if (sctp_mask->hdr.src_port)
818                                                 input_set |=
819                                                 ICE_INSET_SCTP_SRC_PORT;
820                                         if (sctp_mask->hdr.dst_port)
821                                                 input_set |=
822                                                 ICE_INSET_SCTP_DST_PORT;
823                                 }
824                                 list[t].type = ICE_SCTP_IL;
825                                 if (sctp_mask->hdr.src_port) {
826                                         list[t].h_u.sctp_hdr.src_port =
827                                                 sctp_spec->hdr.src_port;
828                                         list[t].m_u.sctp_hdr.src_port =
829                                                 sctp_mask->hdr.src_port;
830                                         input_set_byte += 2;
831                                 }
832                                 if (sctp_mask->hdr.dst_port) {
833                                         list[t].h_u.sctp_hdr.dst_port =
834                                                 sctp_spec->hdr.dst_port;
835                                         list[t].m_u.sctp_hdr.dst_port =
836                                                 sctp_mask->hdr.dst_port;
837                                         input_set_byte += 2;
838                                 }
839                                 t++;
840                         }
841                         break;
842
843                 case RTE_FLOW_ITEM_TYPE_VXLAN:
                        /* VXLAN item: marks the boundary between outer and
                         * inner headers (tunnel_valid = 1). spec/mask must be
                         * both NULL (protocol-only match) or both non-NULL.
                         */
844                         vxlan_spec = item->spec;
845                         vxlan_mask = item->mask;
846                         /* Check if VXLAN item is used to describe protocol.
847                          * If yes, both spec and mask should be NULL.
848                          * If no, both spec and mask shouldn't be NULL.
849                          */
850                         if ((!vxlan_spec && vxlan_mask) ||
851                             (vxlan_spec && !vxlan_mask)) {
852                                 rte_flow_error_set(error, EINVAL,
853                                            RTE_FLOW_ERROR_TYPE_ITEM,
854                                            item,
855                                            "Invalid VXLAN item");
856                                 return 0;
857                         }
858                         vxlan_valid = 1;
859                         tunnel_valid = 1;
860                         if (vxlan_spec && vxlan_mask) {
861                                 list[t].type = ICE_VXLAN;
862                                 if (vxlan_mask->vni[0] ||
863                                         vxlan_mask->vni[1] ||
864                                         vxlan_mask->vni[2]) {
                                        /* NOTE(review): 24-bit VNI assembled
                                         * byte-reversed (vni[2] is the most
                                         * significant byte here) — verify the
                                         * byte order expected by tnl_hdr.vni.
                                         */
865                                         list[t].h_u.tnl_hdr.vni =
866                                                 (vxlan_spec->vni[2] << 16) |
867                                                 (vxlan_spec->vni[1] << 8) |
868                                                 vxlan_spec->vni[0];
869                                         list[t].m_u.tnl_hdr.vni =
870                                                 (vxlan_mask->vni[2] << 16) |
871                                                 (vxlan_mask->vni[1] << 8) |
872                                                 vxlan_mask->vni[0];
873                                         input_set |=
874                                                 ICE_INSET_TUN_VXLAN_VNI;
875                                         input_set_byte += 2;
876                                 }
877                                 t++;
878                         }
879                         break;
880
881                 case RTE_FLOW_ITEM_TYPE_NVGRE:
                        /* NVGRE item: same structure as the VXLAN case —
                         * sets tunnel_valid, requires spec/mask to be both
                         * NULL or both non-NULL, and matches on the 24-bit
                         * TNI when masked.
                         */
882                         nvgre_spec = item->spec;
883                         nvgre_mask = item->mask;
884                         /* Check if NVGRE item is used to describe protocol.
885                          * If yes, both spec and mask should be NULL.
886                          * If no, both spec and mask shouldn't be NULL.
887                          */
888                         if ((!nvgre_spec && nvgre_mask) ||
889                             (nvgre_spec && !nvgre_mask)) {
890                                 rte_flow_error_set(error, EINVAL,
891                                            RTE_FLOW_ERROR_TYPE_ITEM,
892                                            item,
893                                            "Invalid NVGRE item");
894                                 return 0;
895                         }
896                         nvgre_valid = 1;
897                         tunnel_valid = 1;
898                         if (nvgre_spec && nvgre_mask) {
899                                 list[t].type = ICE_NVGRE;
900                                 if (nvgre_mask->tni[0] ||
901                                         nvgre_mask->tni[1] ||
902                                         nvgre_mask->tni[2]) {
                                        /* NOTE(review): TNI assembled with the
                                         * same byte-reversed layout as the
                                         * VXLAN VNI above — verify against
                                         * nvgre_hdr.tni_flow's expected order.
                                         */
903                                         list[t].h_u.nvgre_hdr.tni_flow =
904                                                 (nvgre_spec->tni[2] << 16) |
905                                                 (nvgre_spec->tni[1] << 8) |
906                                                 nvgre_spec->tni[0];
907                                         list[t].m_u.nvgre_hdr.tni_flow =
908                                                 (nvgre_mask->tni[2] << 16) |
909                                                 (nvgre_mask->tni[1] << 8) |
910                                                 nvgre_mask->tni[0];
911                                         input_set |=
912                                                 ICE_INSET_TUN_NVGRE_TNI;
913                                         input_set_byte += 2;
914                                 }
915                                 t++;
916                         }
917                         break;
918
919                 case RTE_FLOW_ITEM_TYPE_VLAN:
                        /* VLAN item: for QinQ tunnel types the first VLAN
                         * item seen is the outer tag and the second the
                         * inner; otherwise a single VLAN item is treated as
                         * the inner tag. Only the TCI is matchable —
                         * inner_type (TPID) masks are rejected.
                         */
920                         vlan_spec = item->spec;
921                         vlan_mask = item->mask;
922                         /* Check if VLAN item is used to describe protocol.
923                          * If yes, both spec and mask should be NULL.
924                          * If no, both spec and mask shouldn't be NULL.
925                          */
926                         if ((!vlan_spec && vlan_mask) ||
927                             (vlan_spec && !vlan_mask)) {
928                                 rte_flow_error_set(error, EINVAL,
929                                            RTE_FLOW_ERROR_TYPE_ITEM,
930                                            item,
931                                            "Invalid VLAN item");
932                                 return 0;
933                         }
934
935                         if (!outer_vlan_valid &&
936                             (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
937                              *tun_type == ICE_NON_TUN_QINQ))
938                                 outer_vlan_valid = 1;
939                         else if (!inner_vlan_valid &&
940                                  (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
941                                   *tun_type == ICE_NON_TUN_QINQ))
942                                 inner_vlan_valid = 1;
943                         else if (!inner_vlan_valid)
944                                 inner_vlan_valid = 1;
945
946                         if (vlan_spec && vlan_mask) {
                                /* Outer tag uses the ICE_VLAN_EX lookup type;
                                 * the (single or second) inner tag uses
                                 * ICE_VLAN_OFOS.
                                 */
947                                 if (outer_vlan_valid && !inner_vlan_valid) {
948                                         list[t].type = ICE_VLAN_EX;
949                                         input_set |= ICE_INSET_VLAN_OUTER;
950                                 } else if (inner_vlan_valid) {
951                                         list[t].type = ICE_VLAN_OFOS;
952                                         input_set |= ICE_INSET_VLAN_INNER;
953                                 }
954
955                                 if (vlan_mask->tci) {
956                                         list[t].h_u.vlan_hdr.vlan =
957                                                 vlan_spec->tci;
958                                         list[t].m_u.vlan_hdr.vlan =
959                                                 vlan_mask->tci;
960                                         input_set_byte += 2;
961                                 }
962                                 if (vlan_mask->inner_type) {
963                                         rte_flow_error_set(error, EINVAL,
964                                                 RTE_FLOW_ERROR_TYPE_ITEM,
965                                                 item,
966                                                 "Invalid VLAN input set.");
967                                         return 0;
968                                 }
969                                 t++;
970                         }
971                         break;
972
973                 case RTE_FLOW_ITEM_TYPE_PPPOED:
974                 case RTE_FLOW_ITEM_TYPE_PPPOES:
                        /* PPPoE discovery/session item: only session_id is
                         * matchable; length, code and version_type masks are
                         * rejected. pppoe_elem_valid records that a PPPOE
                         * lookup element was emitted so a following
                         * PPPOE_PROTO_ID item can merge into the same slot.
                         */
975                         pppoe_spec = item->spec;
976                         pppoe_mask = item->mask;
977                         /* Check if PPPoE item is used to describe protocol.
978                          * If yes, both spec and mask should be NULL.
979                          * If no, both spec and mask shouldn't be NULL.
980                          */
981                         if ((!pppoe_spec && pppoe_mask) ||
982                                 (pppoe_spec && !pppoe_mask)) {
983                                 rte_flow_error_set(error, EINVAL,
984                                         RTE_FLOW_ERROR_TYPE_ITEM,
985                                         item,
986                                         "Invalid pppoe item");
987                                 return 0;
988                         }
                        /* Pattern contains PPPoE even when the item is
                         * protocol-only (spec/mask both NULL).
                         */
989                         pppoe_patt_valid = 1;
990                         if (pppoe_spec && pppoe_mask) {
991                                 /* Check pppoe mask and update input set */
992                                 if (pppoe_mask->length ||
993                                         pppoe_mask->code ||
994                                         pppoe_mask->version_type) {
995                                         rte_flow_error_set(error, EINVAL,
996                                                 RTE_FLOW_ERROR_TYPE_ITEM,
997                                                 item,
998                                                 "Invalid pppoe mask");
999                                         return 0;
1000                                 }
1001                                 list[t].type = ICE_PPPOE;
1002                                 if (pppoe_mask->session_id) {
1003                                         list[t].h_u.pppoe_hdr.session_id =
1004                                                 pppoe_spec->session_id;
1005                                         list[t].m_u.pppoe_hdr.session_id =
1006                                                 pppoe_mask->session_id;
1007                                         input_set |= ICE_INSET_PPPOE_SESSION;
1008                                         input_set_byte += 2;
1009                                 }
1010                                 t++;
1011                                 pppoe_elem_valid = 1;
1012                         }
1013                         break;
1014
1015                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1016                         pppoe_proto_spec = item->spec;
1017                         pppoe_proto_mask = item->mask;
1018                         /* Check if PPPoE optional proto_id item
1019                          * is used to describe protocol.
1020                          * If yes, both spec and mask should be NULL.
1021                          * If no, both spec and mask shouldn't be NULL.
1022                          */
1023                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1024                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1025                                 rte_flow_error_set(error, EINVAL,
1026                                         RTE_FLOW_ERROR_TYPE_ITEM,
1027                                         item,
1028                                         "Invalid pppoe proto item");
1029                                 return 0;
1030                         }
1031                         if (pppoe_proto_spec && pppoe_proto_mask) {
1032                                 if (pppoe_elem_valid)
1033                                         t--;
1034                                 list[t].type = ICE_PPPOE;
1035                                 if (pppoe_proto_mask->proto_id) {
1036                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1037                                                 pppoe_proto_spec->proto_id;
1038                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1039                                                 pppoe_proto_mask->proto_id;
1040                                         input_set |= ICE_INSET_PPPOE_PROTO;
1041                                         input_set_byte += 2;
1042                                         pppoe_prot_valid = 1;
1043                                 }
1044                                 if ((pppoe_proto_mask->proto_id &
1045                                         pppoe_proto_spec->proto_id) !=
1046                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1047                                         (pppoe_proto_mask->proto_id &
1048                                         pppoe_proto_spec->proto_id) !=
1049                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1050                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1051                                 else
1052                                         *tun_type = ICE_SW_TUN_PPPOE;
1053                                 t++;
1054                         }
1055
1056                         break;
1057
1058                 case RTE_FLOW_ITEM_TYPE_ESP:
1059                         esp_spec = item->spec;
1060                         esp_mask = item->mask;
1061                         if ((esp_spec && !esp_mask) ||
1062                                 (!esp_spec && esp_mask)) {
1063                                 rte_flow_error_set(error, EINVAL,
1064                                            RTE_FLOW_ERROR_TYPE_ITEM,
1065                                            item,
1066                                            "Invalid esp item");
1067                                 return 0;
1068                         }
1069                         /* Check esp mask and update input set */
1070                         if (esp_mask && esp_mask->hdr.seq) {
1071                                 rte_flow_error_set(error, EINVAL,
1072                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1073                                                 item,
1074                                                 "Invalid esp mask");
1075                                 return 0;
1076                         }
1077
1078                         if (!esp_spec && !esp_mask && !input_set) {
1079                                 profile_rule = 1;
1080                                 if (ipv6_valid && udp_valid)
1081                                         *tun_type =
1082                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1083                                 else if (ipv6_valid)
1084                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1085                                 else if (ipv4_valid)
1086                                         return 0;
1087                         } else if (esp_spec && esp_mask &&
1088                                                 esp_mask->hdr.spi){
1089                                 if (udp_valid)
1090                                         list[t].type = ICE_NAT_T;
1091                                 else
1092                                         list[t].type = ICE_ESP;
1093                                 list[t].h_u.esp_hdr.spi =
1094                                         esp_spec->hdr.spi;
1095                                 list[t].m_u.esp_hdr.spi =
1096                                         esp_mask->hdr.spi;
1097                                 input_set |= ICE_INSET_ESP_SPI;
1098                                 input_set_byte += 4;
1099                                 t++;
1100                         }
1101
1102                         if (!profile_rule) {
1103                                 if (ipv6_valid && udp_valid)
1104                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1105                                 else if (ipv4_valid && udp_valid)
1106                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1107                                 else if (ipv6_valid)
1108                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1109                                 else if (ipv4_valid)
1110                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1111                         }
1112                         break;
1113
1114                 case RTE_FLOW_ITEM_TYPE_AH:
1115                         ah_spec = item->spec;
1116                         ah_mask = item->mask;
1117                         if ((ah_spec && !ah_mask) ||
1118                                 (!ah_spec && ah_mask)) {
1119                                 rte_flow_error_set(error, EINVAL,
1120                                            RTE_FLOW_ERROR_TYPE_ITEM,
1121                                            item,
1122                                            "Invalid ah item");
1123                                 return 0;
1124                         }
1125                         /* Check ah mask and update input set */
1126                         if (ah_mask &&
1127                                 (ah_mask->next_hdr ||
1128                                 ah_mask->payload_len ||
1129                                 ah_mask->seq_num ||
1130                                 ah_mask->reserved)) {
1131                                 rte_flow_error_set(error, EINVAL,
1132                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1133                                                 item,
1134                                                 "Invalid ah mask");
1135                                 return 0;
1136                         }
1137
1138                         if (!ah_spec && !ah_mask && !input_set) {
1139                                 profile_rule = 1;
1140                                 if (ipv6_valid && udp_valid)
1141                                         *tun_type =
1142                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1143                                 else if (ipv6_valid)
1144                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1145                                 else if (ipv4_valid)
1146                                         return 0;
1147                         } else if (ah_spec && ah_mask &&
1148                                                 ah_mask->spi){
1149                                 list[t].type = ICE_AH;
1150                                 list[t].h_u.ah_hdr.spi =
1151                                         ah_spec->spi;
1152                                 list[t].m_u.ah_hdr.spi =
1153                                         ah_mask->spi;
1154                                 input_set |= ICE_INSET_AH_SPI;
1155                                 input_set_byte += 4;
1156                                 t++;
1157                         }
1158
1159                         if (!profile_rule) {
1160                                 if (udp_valid)
1161                                         return 0;
1162                                 else if (ipv6_valid)
1163                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1164                                 else if (ipv4_valid)
1165                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1166                         }
1167                         break;
1168
1169                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1170                         l2tp_spec = item->spec;
1171                         l2tp_mask = item->mask;
1172                         if ((l2tp_spec && !l2tp_mask) ||
1173                                 (!l2tp_spec && l2tp_mask)) {
1174                                 rte_flow_error_set(error, EINVAL,
1175                                            RTE_FLOW_ERROR_TYPE_ITEM,
1176                                            item,
1177                                            "Invalid l2tp item");
1178                                 return 0;
1179                         }
1180
1181                         if (!l2tp_spec && !l2tp_mask && !input_set) {
1182                                 if (ipv6_valid)
1183                                         *tun_type =
1184                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1185                                 else if (ipv4_valid)
1186                                         return 0;
1187                         } else if (l2tp_spec && l2tp_mask &&
1188                                                 l2tp_mask->session_id){
1189                                 list[t].type = ICE_L2TPV3;
1190                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1191                                         l2tp_spec->session_id;
1192                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1193                                         l2tp_mask->session_id;
1194                                 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1195                                 input_set_byte += 4;
1196                                 t++;
1197                         }
1198
1199                         if (!profile_rule) {
1200                                 if (ipv6_valid)
1201                                         *tun_type =
1202                                         ICE_SW_TUN_IPV6_L2TPV3;
1203                                 else if (ipv4_valid)
1204                                         *tun_type =
1205                                         ICE_SW_TUN_IPV4_L2TPV3;
1206                         }
1207                         break;
1208
1209                 case RTE_FLOW_ITEM_TYPE_PFCP:
1210                         pfcp_spec = item->spec;
1211                         pfcp_mask = item->mask;
1212                         /* Check if PFCP item is used to describe protocol.
1213                          * If yes, both spec and mask should be NULL.
1214                          * If no, both spec and mask shouldn't be NULL.
1215                          */
1216                         if ((!pfcp_spec && pfcp_mask) ||
1217                             (pfcp_spec && !pfcp_mask)) {
1218                                 rte_flow_error_set(error, EINVAL,
1219                                            RTE_FLOW_ERROR_TYPE_ITEM,
1220                                            item,
1221                                            "Invalid PFCP item");
1222                                 return -ENOTSUP;
1223                         }
1224                         if (pfcp_spec && pfcp_mask) {
1225                                 /* Check pfcp mask and update input set */
1226                                 if (pfcp_mask->msg_type ||
1227                                         pfcp_mask->msg_len ||
1228                                         pfcp_mask->seid) {
1229                                         rte_flow_error_set(error, EINVAL,
1230                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1231                                                 item,
1232                                                 "Invalid pfcp mask");
1233                                         return -ENOTSUP;
1234                                 }
1235                                 if (pfcp_mask->s_field &&
1236                                         pfcp_spec->s_field == 0x01 &&
1237                                         ipv6_valid)
1238                                         *tun_type =
1239                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1240                                 else if (pfcp_mask->s_field &&
1241                                         pfcp_spec->s_field == 0x01)
1242                                         *tun_type =
1243                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1244                                 else if (pfcp_mask->s_field &&
1245                                         !pfcp_spec->s_field &&
1246                                         ipv6_valid)
1247                                         *tun_type =
1248                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1249                                 else if (pfcp_mask->s_field &&
1250                                         !pfcp_spec->s_field)
1251                                         *tun_type =
1252                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1253                                 else
1254                                         return -ENOTSUP;
1255                         }
1256                         break;
1257
1258                 case RTE_FLOW_ITEM_TYPE_VOID:
1259                         break;
1260
1261                 default:
1262                         rte_flow_error_set(error, EINVAL,
1263                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1264                                    "Invalid pattern item.");
1265                         goto out;
1266                 }
1267         }
1268
1269         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1270             inner_vlan_valid && outer_vlan_valid)
1271                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1272         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1273                  inner_vlan_valid && outer_vlan_valid)
1274                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1275         else if (*tun_type == ICE_NON_TUN &&
1276                  inner_vlan_valid && outer_vlan_valid)
1277                 *tun_type = ICE_NON_TUN_QINQ;
1278         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1279                  inner_vlan_valid && outer_vlan_valid)
1280                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1281
1282         if (pppoe_patt_valid && !pppoe_prot_valid) {
1283                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1284                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1285                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1286                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1287                 else if (inner_vlan_valid && outer_vlan_valid)
1288                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1289                 else if (ipv6_valid && udp_valid)
1290                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1291                 else if (ipv6_valid && tcp_valid)
1292                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1293                 else if (ipv4_valid && udp_valid)
1294                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1295                 else if (ipv4_valid && tcp_valid)
1296                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1297                 else if (ipv6_valid)
1298                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1299                 else if (ipv4_valid)
1300                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1301                 else
1302                         *tun_type = ICE_SW_TUN_PPPOE;
1303         }
1304
1305         if (*tun_type == ICE_NON_TUN) {
1306                 if (vxlan_valid)
1307                         *tun_type = ICE_SW_TUN_VXLAN;
1308                 else if (nvgre_valid)
1309                         *tun_type = ICE_SW_TUN_NVGRE;
1310                 else if (ipv4_valid && tcp_valid)
1311                         *tun_type = ICE_SW_IPV4_TCP;
1312                 else if (ipv4_valid && udp_valid)
1313                         *tun_type = ICE_SW_IPV4_UDP;
1314                 else if (ipv6_valid && tcp_valid)
1315                         *tun_type = ICE_SW_IPV6_TCP;
1316                 else if (ipv6_valid && udp_valid)
1317                         *tun_type = ICE_SW_IPV6_UDP;
1318         }
1319
1320         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1321                 rte_flow_error_set(error, EINVAL,
1322                         RTE_FLOW_ERROR_TYPE_ITEM,
1323                         item,
1324                         "too much input set");
1325                 return -ENOTSUP;
1326         }
1327
1328         *lkups_num = t;
1329
1330         return input_set;
1331 out:
1332         return 0;
1333 }
1334
1335 static int
1336 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1337                             const struct rte_flow_action *actions,
1338                             struct rte_flow_error *error,
1339                             struct ice_adv_rule_info *rule_info)
1340 {
1341         const struct rte_flow_action_vf *act_vf;
1342         const struct rte_flow_action *action;
1343         enum rte_flow_action_type action_type;
1344
1345         for (action = actions; action->type !=
1346                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1347                 action_type = action->type;
1348                 switch (action_type) {
1349                 case RTE_FLOW_ACTION_TYPE_VF:
1350                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1351                         act_vf = action->conf;
1352
1353                         if (act_vf->id >= ad->real_hw.num_vfs &&
1354                                 !act_vf->original) {
1355                                 rte_flow_error_set(error,
1356                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1357                                         actions,
1358                                         "Invalid vf id");
1359                                 return -rte_errno;
1360                         }
1361
1362                         if (act_vf->original)
1363                                 rule_info->sw_act.vsi_handle =
1364                                         ad->real_hw.avf.bus.func;
1365                         else
1366                                 rule_info->sw_act.vsi_handle = act_vf->id;
1367                         break;
1368
1369                 case RTE_FLOW_ACTION_TYPE_DROP:
1370                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1371                         break;
1372
1373                 default:
1374                         rte_flow_error_set(error,
1375                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1376                                            actions,
1377                                            "Invalid action type");
1378                         return -rte_errno;
1379                 }
1380         }
1381
1382         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1383         rule_info->sw_act.flag = ICE_FLTR_RX;
1384         rule_info->rx = 1;
1385         rule_info->priority = 5;
1386
1387         return 0;
1388 }
1389
1390 static int
1391 ice_switch_parse_action(struct ice_pf *pf,
1392                 const struct rte_flow_action *actions,
1393                 struct rte_flow_error *error,
1394                 struct ice_adv_rule_info *rule_info)
1395 {
1396         struct ice_vsi *vsi = pf->main_vsi;
1397         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1398         const struct rte_flow_action_queue *act_q;
1399         const struct rte_flow_action_rss *act_qgrop;
1400         uint16_t base_queue, i;
1401         const struct rte_flow_action *action;
1402         enum rte_flow_action_type action_type;
1403         uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1404                  2, 4, 8, 16, 32, 64, 128};
1405
1406         base_queue = pf->base_queue + vsi->base_queue;
1407         for (action = actions; action->type !=
1408                         RTE_FLOW_ACTION_TYPE_END; action++) {
1409                 action_type = action->type;
1410                 switch (action_type) {
1411                 case RTE_FLOW_ACTION_TYPE_RSS:
1412                         act_qgrop = action->conf;
1413                         if (act_qgrop->queue_num <= 1)
1414                                 goto error;
1415                         rule_info->sw_act.fltr_act =
1416                                 ICE_FWD_TO_QGRP;
1417                         rule_info->sw_act.fwd_id.q_id =
1418                                 base_queue + act_qgrop->queue[0];
1419                         for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1420                                 if (act_qgrop->queue_num ==
1421                                         valid_qgrop_number[i])
1422                                         break;
1423                         }
1424                         if (i == MAX_QGRP_NUM_TYPE)
1425                                 goto error;
1426                         if ((act_qgrop->queue[0] +
1427                                 act_qgrop->queue_num) >
1428                                 dev->data->nb_rx_queues)
1429                                 goto error1;
1430                         for (i = 0; i < act_qgrop->queue_num - 1; i++)
1431                                 if (act_qgrop->queue[i + 1] !=
1432                                         act_qgrop->queue[i] + 1)
1433                                         goto error2;
1434                         rule_info->sw_act.qgrp_size =
1435                                 act_qgrop->queue_num;
1436                         break;
1437                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1438                         act_q = action->conf;
1439                         if (act_q->index >= dev->data->nb_rx_queues)
1440                                 goto error;
1441                         rule_info->sw_act.fltr_act =
1442                                 ICE_FWD_TO_Q;
1443                         rule_info->sw_act.fwd_id.q_id =
1444                                 base_queue + act_q->index;
1445                         break;
1446
1447                 case RTE_FLOW_ACTION_TYPE_DROP:
1448                         rule_info->sw_act.fltr_act =
1449                                 ICE_DROP_PACKET;
1450                         break;
1451
1452                 case RTE_FLOW_ACTION_TYPE_VOID:
1453                         break;
1454
1455                 default:
1456                         goto error;
1457                 }
1458         }
1459
1460         rule_info->sw_act.vsi_handle = vsi->idx;
1461         rule_info->rx = 1;
1462         rule_info->sw_act.src = vsi->idx;
1463         rule_info->priority = 5;
1464
1465         return 0;
1466
1467 error:
1468         rte_flow_error_set(error,
1469                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1470                 actions,
1471                 "Invalid action type or queue number");
1472         return -rte_errno;
1473
1474 error1:
1475         rte_flow_error_set(error,
1476                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1477                 actions,
1478                 "Invalid queue region indexes");
1479         return -rte_errno;
1480
1481 error2:
1482         rte_flow_error_set(error,
1483                 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1484                 actions,
1485                 "Discontinuous queue region");
1486         return -rte_errno;
1487 }
1488
1489 static int
1490 ice_switch_check_action(const struct rte_flow_action *actions,
1491                             struct rte_flow_error *error)
1492 {
1493         const struct rte_flow_action *action;
1494         enum rte_flow_action_type action_type;
1495         uint16_t actions_num = 0;
1496
1497         for (action = actions; action->type !=
1498                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1499                 action_type = action->type;
1500                 switch (action_type) {
1501                 case RTE_FLOW_ACTION_TYPE_VF:
1502                 case RTE_FLOW_ACTION_TYPE_RSS:
1503                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1504                 case RTE_FLOW_ACTION_TYPE_DROP:
1505                         actions_num++;
1506                         break;
1507                 case RTE_FLOW_ACTION_TYPE_VOID:
1508                         continue;
1509                 default:
1510                         rte_flow_error_set(error,
1511                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1512                                            actions,
1513                                            "Invalid action type");
1514                         return -rte_errno;
1515                 }
1516         }
1517
1518         if (actions_num != 1) {
1519                 rte_flow_error_set(error,
1520                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1521                                    actions,
1522                                    "Invalid action number");
1523                 return -rte_errno;
1524         }
1525
1526         return 0;
1527 }
1528
/**
 * Top-level parser for a switch-filter flow: validate the pattern against
 * the supported pattern table, build the advanced lookup list, parse the
 * actions, and (optionally) hand the results back through @meta.
 *
 * On success with @meta non-NULL, ownership of the allocated lookup list
 * and sw_meta structure transfers to the caller via *meta; otherwise they
 * are freed here. On any failure everything allocated here is freed.
 *
 * @param ad        adapter (PF or DCF; ad->hw.dcf_enabled selects the
 *                  action parser)
 * @param array     supported pattern table
 * @param array_len number of entries in @array
 * @param pattern   RTE_FLOW_ITEM_TYPE_END-terminated item array
 * @param actions   RTE_FLOW_ACTION_TYPE_END-terminated action array
 * @param meta      out: parsed rule meta (list, lkups_num, rule_info),
 *                  may be NULL for validate-only
 * @param error     flow error structure filled on failure
 * @return 0 on success, -rte_errno on failure
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	uint64_t inputset = 0;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Pre-scan pass: count items to size the lookup list, count VLANs
	 * for QinQ detection, and detect a fully-masked ether type (which
	 * means the rule should match both tunneled and plain packets).
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN items mean a QinQ rule; refine the tunnel type. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Build the lookup list; a zero input set is only legal for
	 * profile rules, and no bit may fall outside the pattern's mask.
	 */
	inputset = ice_switch_inset_get
		(pattern, error, list, &lkups_num, &tun_type);
	if ((!inputset && !ice_is_prof_rule(tun_type)) ||
		(inputset & ~pattern_match_item->input_set_mask_o)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF and PF use different action parsers. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
						  &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, error, &rule_info);

	if (ret)
		goto error;

	/* Hand the parsed rule to the caller, or free it when the caller
	 * only wanted validation.
	 */
	if (meta) {
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1654
1655 static int
1656 ice_switch_query(struct ice_adapter *ad __rte_unused,
1657                 struct rte_flow *flow __rte_unused,
1658                 struct rte_flow_query_count *count __rte_unused,
1659                 struct rte_flow_error *error)
1660 {
1661         rte_flow_error_set(error, EINVAL,
1662                 RTE_FLOW_ERROR_TYPE_HANDLE,
1663                 NULL,
1664                 "count action not supported by switch filter");
1665
1666         return -rte_errno;
1667 }
1668
/* Re-target an existing switch rule after its destination VSI has been
 * re-mapped to a new hardware VSI number (e.g. after a DCF reset).
 * The rule cannot be updated in place: it is removed and then replayed
 * against the refreshed VSI context.
 *
 * Returns 0 on success or if the rule is unaffected, negative on error.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Rule forwards to a different VSI: nothing to redirect. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is handled here. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Locate this flow's filter entry within the recipe's rule list,
	 * matching on rule ID plus a forward action that targets the
	 * redirected VSI (directly or via a VSI list).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Duplicate the lookup elements before the rule is
			 * removed below, which frees the list entry they
			 * belong to.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			/* NOTE(review): a VSI-list forward is replayed as a
			 * plain single-VSI forward to the redirected VSI --
			 * presumably the list held only this VSI; confirm.
			 */
			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching entry found (loop finished without the break). */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1750
1751 static int
1752 ice_switch_init(struct ice_adapter *ad)
1753 {
1754         int ret = 0;
1755         struct ice_flow_parser *dist_parser;
1756         struct ice_flow_parser *perm_parser;
1757
1758         if (ad->devargs.pipe_mode_support) {
1759                 perm_parser = &ice_switch_perm_parser;
1760                 ret = ice_register_parser(perm_parser, ad);
1761         } else {
1762                 dist_parser = &ice_switch_dist_parser;
1763                 ret = ice_register_parser(dist_parser, ad);
1764         }
1765         return ret;
1766 }
1767
1768 static void
1769 ice_switch_uninit(struct ice_adapter *ad)
1770 {
1771         struct ice_flow_parser *dist_parser;
1772         struct ice_flow_parser *perm_parser;
1773
1774         if (ad->devargs.pipe_mode_support) {
1775                 perm_parser = &ice_switch_perm_parser;
1776                 ice_unregister_parser(perm_parser, ad);
1777         } else {
1778                 dist_parser = &ice_switch_dist_parser;
1779                 ice_unregister_parser(dist_parser, ad);
1780         }
1781 }
1782
1783 static struct
1784 ice_flow_engine ice_switch_engine = {
1785         .init = ice_switch_init,
1786         .uninit = ice_switch_uninit,
1787         .create = ice_switch_create,
1788         .destroy = ice_switch_destroy,
1789         .query_count = ice_switch_query,
1790         .redirect = ice_switch_redirect,
1791         .free = ice_switch_filter_rule_free,
1792         .type = ICE_FLOW_ENGINE_SWITCH,
1793 };
1794
1795 static struct
1796 ice_flow_parser ice_switch_dist_parser = {
1797         .engine = &ice_switch_engine,
1798         .array = ice_switch_pattern_dist_list,
1799         .array_len = RTE_DIM(ice_switch_pattern_dist_list),
1800         .parse_pattern_action = ice_switch_parse_pattern_action,
1801         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1802 };
1803
1804 static struct
1805 ice_flow_parser ice_switch_perm_parser = {
1806         .engine = &ice_switch_engine,
1807         .array = ice_switch_pattern_perm_list,
1808         .array_len = RTE_DIM(ice_switch_pattern_perm_list),
1809         .parse_pattern_action = ice_switch_parse_pattern_action,
1810         .stage = ICE_FLOW_STAGE_PERMISSION,
1811 };
1812
1813 RTE_INIT(ice_sw_engine_init)
1814 {
1815         struct ice_flow_engine *engine = &ice_switch_engine;
1816         ice_register_flow_engine(engine);
1817 }