net/iavf: fix overflow in maximum packet length config
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_flow.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
23 #include "ice_logs.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
27
28
/* Parser limits and protocol identifiers.
 * 0x0021/0x0057 are the standard PPP payload protocol IDs for IPv4/IPv6;
 * 0x002F is IP protocol number 47 (GRE, used here for NVGRE over IPv4).
 */
29 #define MAX_QGRP_NUM_TYPE       7
30 #define MAX_INPUT_SET_BYTE      32
31 #define ICE_PPP_IPV4_PROTO      0x0021
32 #define ICE_PPP_IPV6_PROTO      0x0057
33 #define ICE_IPV4_PROTO_NVGRE    0x002F
34
/* Input-set bitmasks: for each supported flow pattern, the set of header
 * fields a switch filter rule is allowed to match on. These are referenced
 * by the pattern match tables below (outer/inner input-set columns).
 */
35 #define ICE_SW_INSET_ETHER ( \
36         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38         ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
39 #define ICE_SW_INSET_MAC_QINQ  ( \
40         ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
41         ICE_INSET_VLAN_OUTER)
42 #define ICE_SW_INSET_MAC_IPV4 ( \
43         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
44         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
45 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
46         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
47 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
48         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
49         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
50         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
51 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
52         ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
53         ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
54         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
55 #define ICE_SW_INSET_MAC_IPV6 ( \
56         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
57         ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
58         ICE_INSET_IPV6_NEXT_HDR)
59 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
60         ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
61 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
62         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
63         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
64         ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
65 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
66         ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
67         ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
68         ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Tunnel (NVGRE/VXLAN) input sets used in "distributor" mode. */
69 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
70         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
71         ICE_INSET_NVGRE_TNI)
72 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
73         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
74         ICE_INSET_VXLAN_VNI)
75 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
76         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
77         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
78         ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
79 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
80         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
81         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
82         ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
83 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
84         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
85         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
86         ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
87 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
88         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
89         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
90         ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
/* Tunnel input sets used in "permission" mode (inner headers only). */
91 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
92         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
93         ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
94 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
95         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
96         ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
97         ICE_INSET_IPV4_TOS)
98 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
99         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
100         ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
101         ICE_INSET_IPV4_TOS)
/* PPPoE input sets, with and without the PPP protocol field. */
102 #define ICE_SW_INSET_MAC_PPPOE  ( \
103         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
104         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
105 #define ICE_SW_INSET_MAC_PPPOE_PROTO  ( \
106         ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
107         ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
108         ICE_INSET_PPPOE_PROTO)
109 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
110         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
111 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
112         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
113 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
114         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
115 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
116         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
117 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
118         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
119 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
120         ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* Security / tunneling protocol input sets (ESP, AH, L2TPv3, PFCP). */
121 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
122         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
123 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
124         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
125 #define ICE_SW_INSET_MAC_IPV4_AH ( \
126         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
127 #define ICE_SW_INSET_MAC_IPV6_AH ( \
128         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
129 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
130         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
131 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
132         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
133 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
134         ICE_SW_INSET_MAC_IPV4 | \
135         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
136 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
137         ICE_SW_INSET_MAC_IPV6 | \
138         ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/* GTP-U input sets: outer (TEID, optional QFI for the EH variant) and
 * inner IPv4/IPv6 (+TCP/UDP ports).
 */
139 #define ICE_SW_INSET_MAC_IPV4_GTPU ( \
140         ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
141 #define ICE_SW_INSET_MAC_IPV6_GTPU ( \
142         ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
143 #define ICE_SW_INSET_MAC_GTPU_OUTER ( \
144         ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
145 #define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
146         ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
147 #define ICE_SW_INSET_GTPU_IPV4 ( \
148         ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
149 #define ICE_SW_INSET_GTPU_IPV6 ( \
150         ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
151 #define ICE_SW_INSET_GTPU_IPV4_UDP ( \
152         ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
153         ICE_INSET_UDP_DST_PORT)
154 #define ICE_SW_INSET_GTPU_IPV4_TCP ( \
155         ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
156         ICE_INSET_TCP_DST_PORT)
157 #define ICE_SW_INSET_GTPU_IPV6_UDP ( \
158         ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
159         ICE_INSET_UDP_DST_PORT)
160 #define ICE_SW_INSET_GTPU_IPV6_TCP ( \
161         ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
162         ICE_INSET_TCP_DST_PORT)
163
/* Parser output handed from flow parsing to rule creation: the advanced
 * lookup list, its element count, and the rule attributes consumed by
 * ice_add_adv_rule() in ice_switch_create().
 */
164 struct sw_meta {
165         struct ice_adv_lkup_elem *list;         /* heap-allocated lookup elements; freed after rule add */
166         uint16_t lkups_num;                     /* number of valid entries in @list */
167         struct ice_adv_rule_info rule_info;     /* rule action/priority info for the AQ call */
168 };
169
/* Forward declarations: the two parser instances (distributor mode and
 * permission mode) registered with the generic flow framework.
 */
170 static struct ice_flow_parser ice_switch_dist_parser;
171 static struct ice_flow_parser ice_switch_perm_parser;
172
/* Supported patterns in distributor mode. Columns: pattern, outer input
 * set, inner input set, and a third (unused here) input-set mask.
 */
173 static struct
174 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
175         {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE,                         ICE_INSET_NONE},
176         {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
177         {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
178         {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
179         {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
180         {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
181         {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
182         {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
183         {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
184         {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
185         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_VXLAN_IPV4,           ICE_INSET_NONE},
186         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_VXLAN_IPV4_UDP,       ICE_INSET_NONE},
187         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_VXLAN_IPV4_TCP,       ICE_INSET_NONE},
188         {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_NVGRE_IPV4,           ICE_INSET_NONE},
189         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_NVGRE_IPV4_UDP,       ICE_INSET_NONE},
190         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_INSET_IPV4_DST,                     ICE_SW_INSET_DIST_NVGRE_IPV4_TCP,       ICE_INSET_NONE},
191         {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
192         {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
193         {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
194         {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
195         {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
196         {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
197         {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
198         {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
199         {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
200         {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
201         {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
202         {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
203         {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
204         {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
205         {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
206         {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
207         {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
208         {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
209         {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
210         {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
211         {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
212         {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
213         {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
214         {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
215         {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
216         {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
217         {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
218         {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE,                         ICE_INSET_NONE},
219         {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE,                         ICE_INSET_NONE},
220         {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
221         {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
222         {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
223         {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
224         {pattern_eth_ipv4_gtpu,                         ICE_SW_INSET_MAC_IPV4_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
225         {pattern_eth_ipv6_gtpu,                         ICE_SW_INSET_MAC_IPV6_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
226         {pattern_eth_ipv4_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
227         {pattern_eth_ipv4_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
228         {pattern_eth_ipv4_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
229         {pattern_eth_ipv4_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
230         {pattern_eth_ipv4_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
231         {pattern_eth_ipv4_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
232         {pattern_eth_ipv4_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
233         {pattern_eth_ipv4_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
234         {pattern_eth_ipv4_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
235         {pattern_eth_ipv4_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
236         {pattern_eth_ipv4_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
237         {pattern_eth_ipv4_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
238         {pattern_eth_ipv6_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
239         {pattern_eth_ipv6_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
240         {pattern_eth_ipv6_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
241         {pattern_eth_ipv6_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
242         {pattern_eth_ipv6_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
243         {pattern_eth_ipv6_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
244         {pattern_eth_ipv6_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
245         {pattern_eth_ipv6_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
246         {pattern_eth_ipv6_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
247         {pattern_eth_ipv6_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
248         {pattern_eth_ipv6_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
249         {pattern_eth_ipv6_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
250 };
251
/* Supported patterns in permission mode. Same layout as the distributor
 * list above; tunnel entries use the PERM inner input sets (no outer
 * IPv4 DST match).
 */
252 static struct
253 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
254         {pattern_ethertype,                             ICE_SW_INSET_ETHER,                     ICE_INSET_NONE,                         ICE_INSET_NONE},
255         {pattern_ethertype_vlan,                        ICE_SW_INSET_MAC_VLAN,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
256         {pattern_ethertype_qinq,                        ICE_SW_INSET_MAC_QINQ,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
257         {pattern_eth_arp,                               ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
258         {pattern_eth_ipv4,                              ICE_SW_INSET_MAC_IPV4,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
259         {pattern_eth_ipv4_udp,                          ICE_SW_INSET_MAC_IPV4_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
260         {pattern_eth_ipv4_tcp,                          ICE_SW_INSET_MAC_IPV4_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
261         {pattern_eth_ipv6,                              ICE_SW_INSET_MAC_IPV6,                  ICE_INSET_NONE,                         ICE_INSET_NONE},
262         {pattern_eth_ipv6_udp,                          ICE_SW_INSET_MAC_IPV6_UDP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
263         {pattern_eth_ipv6_tcp,                          ICE_SW_INSET_MAC_IPV6_TCP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
264         {pattern_eth_ipv4_udp_vxlan_eth_ipv4,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE},
265         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,       ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE},
266         {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,       ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE},
267         {pattern_eth_ipv4_nvgre_eth_ipv4,               ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4,          ICE_INSET_NONE},
268         {pattern_eth_ipv4_nvgre_eth_ipv4_udp,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP,      ICE_INSET_NONE},
269         {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,           ICE_INSET_NONE,                         ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP,      ICE_INSET_NONE},
270         {pattern_eth_pppoes,                            ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
271         {pattern_eth_vlan_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
272         {pattern_eth_pppoes_proto,                      ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
273         {pattern_eth_vlan_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
274         {pattern_eth_pppoes_ipv4,                       ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
275         {pattern_eth_pppoes_ipv4_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
276         {pattern_eth_pppoes_ipv4_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
277         {pattern_eth_pppoes_ipv6,                       ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
278         {pattern_eth_pppoes_ipv6_tcp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
279         {pattern_eth_pppoes_ipv6_udp,                   ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
280         {pattern_eth_vlan_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
281         {pattern_eth_vlan_pppoes_ipv4_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV4_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
282         {pattern_eth_vlan_pppoes_ipv4_udp,              ICE_SW_INSET_MAC_PPPOE_IPV4_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
283         {pattern_eth_vlan_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
284         {pattern_eth_vlan_pppoes_ipv6_tcp,              ICE_SW_INSET_MAC_PPPOE_IPV6_TCP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
285         {pattern_eth_vlan_pppoes_ipv6_udp,              ICE_SW_INSET_MAC_PPPOE_IPV6_UDP,        ICE_INSET_NONE,                         ICE_INSET_NONE},
286         {pattern_eth_ipv4_esp,                          ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
287         {pattern_eth_ipv4_udp_esp,                      ICE_SW_INSET_MAC_IPV4_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
288         {pattern_eth_ipv6_esp,                          ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
289         {pattern_eth_ipv6_udp_esp,                      ICE_SW_INSET_MAC_IPV6_ESP,              ICE_INSET_NONE,                         ICE_INSET_NONE},
290         {pattern_eth_ipv4_ah,                           ICE_SW_INSET_MAC_IPV4_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
291         {pattern_eth_ipv6_ah,                           ICE_SW_INSET_MAC_IPV6_AH,               ICE_INSET_NONE,                         ICE_INSET_NONE},
292         {pattern_eth_ipv6_udp_ah,                       ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
293         {pattern_eth_ipv4_l2tp,                         ICE_SW_INSET_MAC_IPV4_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
294         {pattern_eth_ipv6_l2tp,                         ICE_SW_INSET_MAC_IPV6_L2TP,             ICE_INSET_NONE,                         ICE_INSET_NONE},
295         {pattern_eth_ipv4_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
296         {pattern_eth_ipv6_pfcp,                         ICE_INSET_NONE,                         ICE_INSET_NONE,                         ICE_INSET_NONE},
297         {pattern_eth_qinq_ipv4,                         ICE_SW_INSET_MAC_QINQ_IPV4,             ICE_INSET_NONE,                         ICE_INSET_NONE},
298         {pattern_eth_qinq_ipv6,                         ICE_SW_INSET_MAC_QINQ_IPV6,             ICE_INSET_NONE,                         ICE_INSET_NONE},
299         {pattern_eth_qinq_pppoes,                       ICE_SW_INSET_MAC_PPPOE,                 ICE_INSET_NONE,                         ICE_INSET_NONE},
300         {pattern_eth_qinq_pppoes_proto,                 ICE_SW_INSET_MAC_PPPOE_PROTO,           ICE_INSET_NONE,                         ICE_INSET_NONE},
301         {pattern_eth_qinq_pppoes_ipv4,                  ICE_SW_INSET_MAC_PPPOE_IPV4,            ICE_INSET_NONE,                         ICE_INSET_NONE},
302         {pattern_eth_qinq_pppoes_ipv6,                  ICE_SW_INSET_MAC_PPPOE_IPV6,            ICE_INSET_NONE,                         ICE_INSET_NONE},
303         {pattern_eth_ipv4_gtpu,                         ICE_SW_INSET_MAC_IPV4_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
304         {pattern_eth_ipv6_gtpu,                         ICE_SW_INSET_MAC_IPV6_GTPU,             ICE_INSET_NONE,                         ICE_INSET_NONE},
305         {pattern_eth_ipv4_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
306         {pattern_eth_ipv4_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
307         {pattern_eth_ipv4_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
308         {pattern_eth_ipv4_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
309         {pattern_eth_ipv4_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
310         {pattern_eth_ipv4_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
311         {pattern_eth_ipv4_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
312         {pattern_eth_ipv4_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
313         {pattern_eth_ipv4_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
314         {pattern_eth_ipv4_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
315         {pattern_eth_ipv4_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
316         {pattern_eth_ipv4_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
317         {pattern_eth_ipv6_gtpu_ipv4,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
318         {pattern_eth_ipv6_gtpu_eh_ipv4,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4,                 ICE_INSET_NONE},
319         {pattern_eth_ipv6_gtpu_ipv4_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
320         {pattern_eth_ipv6_gtpu_eh_ipv4_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_UDP,             ICE_INSET_NONE},
321         {pattern_eth_ipv6_gtpu_ipv4_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
322         {pattern_eth_ipv6_gtpu_eh_ipv4_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV4_TCP,             ICE_INSET_NONE},
323         {pattern_eth_ipv6_gtpu_ipv6,                    ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
324         {pattern_eth_ipv6_gtpu_eh_ipv6,                 ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6,                 ICE_INSET_NONE},
325         {pattern_eth_ipv6_gtpu_ipv6_udp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
326         {pattern_eth_ipv6_gtpu_eh_ipv6_udp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_UDP,             ICE_INSET_NONE},
327         {pattern_eth_ipv6_gtpu_ipv6_tcp,                ICE_SW_INSET_MAC_GTPU_OUTER,            ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
328         {pattern_eth_ipv6_gtpu_eh_ipv6_tcp,             ICE_SW_INSET_MAC_GTPU_EH_OUTER,         ICE_SW_INSET_GTPU_IPV6_TCP,             ICE_INSET_NONE},
329 };
330
331 static int
332 ice_switch_create(struct ice_adapter *ad,
333                 struct rte_flow *flow,
334                 void *meta,
335                 struct rte_flow_error *error)
336 {
337         int ret = 0;
338         struct ice_pf *pf = &ad->pf;
339         struct ice_hw *hw = ICE_PF_TO_HW(pf);
340         struct ice_rule_query_data rule_added = {0};
341         struct ice_rule_query_data *filter_ptr;
342         struct ice_adv_lkup_elem *list =
343                 ((struct sw_meta *)meta)->list;
344         uint16_t lkups_cnt =
345                 ((struct sw_meta *)meta)->lkups_num;
346         struct ice_adv_rule_info *rule_info =
347                 &((struct sw_meta *)meta)->rule_info;
348
349         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
350                 rte_flow_error_set(error, EINVAL,
351                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
352                         "item number too large for rule");
353                 goto error;
354         }
355         if (!list) {
356                 rte_flow_error_set(error, EINVAL,
357                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
358                         "lookup list should not be NULL");
359                 goto error;
360         }
361         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
362         if (!ret) {
363                 filter_ptr = rte_zmalloc("ice_switch_filter",
364                         sizeof(struct ice_rule_query_data), 0);
365                 if (!filter_ptr) {
366                         rte_flow_error_set(error, EINVAL,
367                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
368                                    "No memory for ice_switch_filter");
369                         goto error;
370                 }
371                 flow->rule = filter_ptr;
372                 rte_memcpy(filter_ptr,
373                         &rule_added,
374                         sizeof(struct ice_rule_query_data));
375         } else {
376                 rte_flow_error_set(error, EINVAL,
377                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
378                         "switch filter create flow fail");
379                 goto error;
380         }
381
382         rte_free(list);
383         rte_free(meta);
384         return 0;
385
386 error:
387         rte_free(list);
388         rte_free(meta);
389
390         return -rte_errno;
391 }
392
393 static int
394 ice_switch_destroy(struct ice_adapter *ad,
395                 struct rte_flow *flow,
396                 struct rte_flow_error *error)
397 {
398         struct ice_hw *hw = &ad->hw;
399         int ret;
400         struct ice_rule_query_data *filter_ptr;
401
402         filter_ptr = (struct ice_rule_query_data *)
403                 flow->rule;
404
405         if (!filter_ptr) {
406                 rte_flow_error_set(error, EINVAL,
407                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
408                         "no such flow"
409                         " create by switch filter");
410                 return -rte_errno;
411         }
412
413         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
414         if (ret) {
415                 rte_flow_error_set(error, EINVAL,
416                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
417                         "fail to destroy switch filter rule");
418                 return -rte_errno;
419         }
420
421         rte_free(filter_ptr);
422         return ret;
423 }
424
/* Free the ice_rule_query_data that ice_switch_create() attached to
 * flow->rule. rte_free() on NULL is a no-op, so an un-created flow is
 * handled safely.
 */
static void
ice_switch_filter_rule_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
}
430
431 static bool
432 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
433                 struct rte_flow_error *error,
434                 struct ice_adv_lkup_elem *list,
435                 uint16_t *lkups_num,
436                 enum ice_sw_tunnel_type *tun_type,
437                 const struct ice_pattern_match_item *pattern_match_item)
438 {
439         const struct rte_flow_item *item = pattern;
440         enum rte_flow_item_type item_type;
441         const struct rte_flow_item_eth *eth_spec, *eth_mask;
442         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
443         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
444         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
445         const struct rte_flow_item_udp *udp_spec, *udp_mask;
446         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
447         const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
448         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
449         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
450         const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
451         const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
452                                 *pppoe_proto_mask;
453         const struct rte_flow_item_esp *esp_spec, *esp_mask;
454         const struct rte_flow_item_ah *ah_spec, *ah_mask;
455         const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
456         const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
457         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
458         const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
459         uint64_t outer_input_set = ICE_INSET_NONE;
460         uint64_t inner_input_set = ICE_INSET_NONE;
461         uint64_t *input = NULL;
462         uint16_t input_set_byte = 0;
463         bool pppoe_elem_valid = 0;
464         bool pppoe_patt_valid = 0;
465         bool pppoe_prot_valid = 0;
466         bool inner_vlan_valid = 0;
467         bool outer_vlan_valid = 0;
468         bool tunnel_valid = 0;
469         bool profile_rule = 0;
470         bool nvgre_valid = 0;
471         bool vxlan_valid = 0;
472         bool qinq_valid = 0;
473         bool ipv6_valid = 0;
474         bool ipv4_valid = 0;
475         bool udp_valid = 0;
476         bool tcp_valid = 0;
477         bool gtpu_valid = 0;
478         bool gtpu_psc_valid = 0;
479         bool inner_ipv4_valid = 0;
480         bool inner_ipv6_valid = 0;
481         bool inner_tcp_valid = 0;
482         bool inner_udp_valid = 0;
483         uint16_t j, k, t = 0;
484
485         if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
486             *tun_type == ICE_NON_TUN_QINQ)
487                 qinq_valid = 1;
488
489         for (item = pattern; item->type !=
490                         RTE_FLOW_ITEM_TYPE_END; item++) {
491                 if (item->last) {
492                         rte_flow_error_set(error, EINVAL,
493                                         RTE_FLOW_ERROR_TYPE_ITEM,
494                                         item,
495                                         "Not support range");
496                         return false;
497                 }
498                 item_type = item->type;
499
500                 switch (item_type) {
501                 case RTE_FLOW_ITEM_TYPE_ETH:
502                         eth_spec = item->spec;
503                         eth_mask = item->mask;
504                         if (eth_spec && eth_mask) {
505                                 const uint8_t *a = eth_mask->src.addr_bytes;
506                                 const uint8_t *b = eth_mask->dst.addr_bytes;
507                                 if (tunnel_valid)
508                                         input = &inner_input_set;
509                                 else
510                                         input = &outer_input_set;
511                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
512                                         if (a[j]) {
513                                                 *input |= ICE_INSET_SMAC;
514                                                 break;
515                                         }
516                                 }
517                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
518                                         if (b[j]) {
519                                                 *input |= ICE_INSET_DMAC;
520                                                 break;
521                                         }
522                                 }
523                                 if (eth_mask->type)
524                                         *input |= ICE_INSET_ETHERTYPE;
525                                 list[t].type = (tunnel_valid  == 0) ?
526                                         ICE_MAC_OFOS : ICE_MAC_IL;
527                                 struct ice_ether_hdr *h;
528                                 struct ice_ether_hdr *m;
529                                 uint16_t i = 0;
530                                 h = &list[t].h_u.eth_hdr;
531                                 m = &list[t].m_u.eth_hdr;
532                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
533                                         if (eth_mask->src.addr_bytes[j]) {
534                                                 h->src_addr[j] =
535                                                 eth_spec->src.addr_bytes[j];
536                                                 m->src_addr[j] =
537                                                 eth_mask->src.addr_bytes[j];
538                                                 i = 1;
539                                                 input_set_byte++;
540                                         }
541                                         if (eth_mask->dst.addr_bytes[j]) {
542                                                 h->dst_addr[j] =
543                                                 eth_spec->dst.addr_bytes[j];
544                                                 m->dst_addr[j] =
545                                                 eth_mask->dst.addr_bytes[j];
546                                                 i = 1;
547                                                 input_set_byte++;
548                                         }
549                                 }
550                                 if (i)
551                                         t++;
552                                 if (eth_mask->type) {
553                                         list[t].type = ICE_ETYPE_OL;
554                                         list[t].h_u.ethertype.ethtype_id =
555                                                 eth_spec->type;
556                                         list[t].m_u.ethertype.ethtype_id =
557                                                 eth_mask->type;
558                                         input_set_byte += 2;
559                                         t++;
560                                 }
561                         }
562                         break;
563
564                 case RTE_FLOW_ITEM_TYPE_IPV4:
565                         ipv4_spec = item->spec;
566                         ipv4_mask = item->mask;
567                         if (tunnel_valid) {
568                                 inner_ipv4_valid = 1;
569                                 input = &inner_input_set;
570                         } else {
571                                 ipv4_valid = 1;
572                                 input = &outer_input_set;
573                         }
574
575                         if (ipv4_spec && ipv4_mask) {
576                                 /* Check IPv4 mask and update input set */
577                                 if (ipv4_mask->hdr.version_ihl ||
578                                         ipv4_mask->hdr.total_length ||
579                                         ipv4_mask->hdr.packet_id ||
580                                         ipv4_mask->hdr.hdr_checksum) {
581                                         rte_flow_error_set(error, EINVAL,
582                                                    RTE_FLOW_ERROR_TYPE_ITEM,
583                                                    item,
584                                                    "Invalid IPv4 mask.");
585                                         return false;
586                                 }
587
588                                 if (ipv4_mask->hdr.src_addr)
589                                         *input |= ICE_INSET_IPV4_SRC;
590                                 if (ipv4_mask->hdr.dst_addr)
591                                         *input |= ICE_INSET_IPV4_DST;
592                                 if (ipv4_mask->hdr.time_to_live)
593                                         *input |= ICE_INSET_IPV4_TTL;
594                                 if (ipv4_mask->hdr.next_proto_id)
595                                         *input |= ICE_INSET_IPV4_PROTO;
596                                 if (ipv4_mask->hdr.type_of_service)
597                                         *input |= ICE_INSET_IPV4_TOS;
598
599                                 list[t].type = (tunnel_valid  == 0) ?
600                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
601                                 if (ipv4_mask->hdr.src_addr) {
602                                         list[t].h_u.ipv4_hdr.src_addr =
603                                                 ipv4_spec->hdr.src_addr;
604                                         list[t].m_u.ipv4_hdr.src_addr =
605                                                 ipv4_mask->hdr.src_addr;
606                                         input_set_byte += 2;
607                                 }
608                                 if (ipv4_mask->hdr.dst_addr) {
609                                         list[t].h_u.ipv4_hdr.dst_addr =
610                                                 ipv4_spec->hdr.dst_addr;
611                                         list[t].m_u.ipv4_hdr.dst_addr =
612                                                 ipv4_mask->hdr.dst_addr;
613                                         input_set_byte += 2;
614                                 }
615                                 if (ipv4_mask->hdr.time_to_live) {
616                                         list[t].h_u.ipv4_hdr.time_to_live =
617                                                 ipv4_spec->hdr.time_to_live;
618                                         list[t].m_u.ipv4_hdr.time_to_live =
619                                                 ipv4_mask->hdr.time_to_live;
620                                         input_set_byte++;
621                                 }
622                                 if (ipv4_mask->hdr.next_proto_id) {
623                                         list[t].h_u.ipv4_hdr.protocol =
624                                                 ipv4_spec->hdr.next_proto_id;
625                                         list[t].m_u.ipv4_hdr.protocol =
626                                                 ipv4_mask->hdr.next_proto_id;
627                                         input_set_byte++;
628                                 }
629                                 if ((ipv4_spec->hdr.next_proto_id &
630                                         ipv4_mask->hdr.next_proto_id) ==
631                                         ICE_IPV4_PROTO_NVGRE)
632                                         *tun_type = ICE_SW_TUN_AND_NON_TUN;
633                                 if (ipv4_mask->hdr.type_of_service) {
634                                         list[t].h_u.ipv4_hdr.tos =
635                                                 ipv4_spec->hdr.type_of_service;
636                                         list[t].m_u.ipv4_hdr.tos =
637                                                 ipv4_mask->hdr.type_of_service;
638                                         input_set_byte++;
639                                 }
640                                 t++;
641                         }
642                         break;
643
644                 case RTE_FLOW_ITEM_TYPE_IPV6:
645                         ipv6_spec = item->spec;
646                         ipv6_mask = item->mask;
647                         if (tunnel_valid) {
648                                 inner_ipv6_valid = 1;
649                                 input = &inner_input_set;
650                         } else {
651                                 ipv6_valid = 1;
652                                 input = &outer_input_set;
653                         }
654
655                         if (ipv6_spec && ipv6_mask) {
656                                 if (ipv6_mask->hdr.payload_len) {
657                                         rte_flow_error_set(error, EINVAL,
658                                            RTE_FLOW_ERROR_TYPE_ITEM,
659                                            item,
660                                            "Invalid IPv6 mask");
661                                         return false;
662                                 }
663
664                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
665                                         if (ipv6_mask->hdr.src_addr[j]) {
666                                                 *input |= ICE_INSET_IPV6_SRC;
667                                                 break;
668                                         }
669                                 }
670                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
671                                         if (ipv6_mask->hdr.dst_addr[j]) {
672                                                 *input |= ICE_INSET_IPV6_DST;
673                                                 break;
674                                         }
675                                 }
676                                 if (ipv6_mask->hdr.proto)
677                                         *input |= ICE_INSET_IPV6_NEXT_HDR;
678                                 if (ipv6_mask->hdr.hop_limits)
679                                         *input |= ICE_INSET_IPV6_HOP_LIMIT;
680                                 if (ipv6_mask->hdr.vtc_flow &
681                                     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
682                                         *input |= ICE_INSET_IPV6_TC;
683
684                                 list[t].type = (tunnel_valid  == 0) ?
685                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
686                                 struct ice_ipv6_hdr *f;
687                                 struct ice_ipv6_hdr *s;
688                                 f = &list[t].h_u.ipv6_hdr;
689                                 s = &list[t].m_u.ipv6_hdr;
690                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
691                                         if (ipv6_mask->hdr.src_addr[j]) {
692                                                 f->src_addr[j] =
693                                                 ipv6_spec->hdr.src_addr[j];
694                                                 s->src_addr[j] =
695                                                 ipv6_mask->hdr.src_addr[j];
696                                                 input_set_byte++;
697                                         }
698                                         if (ipv6_mask->hdr.dst_addr[j]) {
699                                                 f->dst_addr[j] =
700                                                 ipv6_spec->hdr.dst_addr[j];
701                                                 s->dst_addr[j] =
702                                                 ipv6_mask->hdr.dst_addr[j];
703                                                 input_set_byte++;
704                                         }
705                                 }
706                                 if (ipv6_mask->hdr.proto) {
707                                         f->next_hdr =
708                                                 ipv6_spec->hdr.proto;
709                                         s->next_hdr =
710                                                 ipv6_mask->hdr.proto;
711                                         input_set_byte++;
712                                 }
713                                 if (ipv6_mask->hdr.hop_limits) {
714                                         f->hop_limit =
715                                                 ipv6_spec->hdr.hop_limits;
716                                         s->hop_limit =
717                                                 ipv6_mask->hdr.hop_limits;
718                                         input_set_byte++;
719                                 }
720                                 if (ipv6_mask->hdr.vtc_flow &
721                                                 rte_cpu_to_be_32
722                                                 (RTE_IPV6_HDR_TC_MASK)) {
723                                         struct ice_le_ver_tc_flow vtf;
724                                         vtf.u.fld.version = 0;
725                                         vtf.u.fld.flow_label = 0;
726                                         vtf.u.fld.tc = (rte_be_to_cpu_32
727                                                 (ipv6_spec->hdr.vtc_flow) &
728                                                         RTE_IPV6_HDR_TC_MASK) >>
729                                                         RTE_IPV6_HDR_TC_SHIFT;
730                                         f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
731                                         vtf.u.fld.tc = (rte_be_to_cpu_32
732                                                 (ipv6_mask->hdr.vtc_flow) &
733                                                         RTE_IPV6_HDR_TC_MASK) >>
734                                                         RTE_IPV6_HDR_TC_SHIFT;
735                                         s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
736                                         input_set_byte += 4;
737                                 }
738                                 t++;
739                         }
740                         break;
741
742                 case RTE_FLOW_ITEM_TYPE_UDP:
743                         udp_spec = item->spec;
744                         udp_mask = item->mask;
745                         if (tunnel_valid) {
746                                 inner_udp_valid = 1;
747                                 input = &inner_input_set;
748                         } else {
749                                 udp_valid = 1;
750                                 input = &outer_input_set;
751                         }
752
753                         if (udp_spec && udp_mask) {
754                                 /* Check UDP mask and update input set*/
755                                 if (udp_mask->hdr.dgram_len ||
756                                     udp_mask->hdr.dgram_cksum) {
757                                         rte_flow_error_set(error, EINVAL,
758                                                    RTE_FLOW_ERROR_TYPE_ITEM,
759                                                    item,
760                                                    "Invalid UDP mask");
761                                         return false;
762                                 }
763
764                                 if (udp_mask->hdr.src_port)
765                                         *input |= ICE_INSET_UDP_SRC_PORT;
766                                 if (udp_mask->hdr.dst_port)
767                                         *input |= ICE_INSET_UDP_DST_PORT;
768
769                                 if (*tun_type == ICE_SW_TUN_VXLAN &&
770                                                 tunnel_valid == 0)
771                                         list[t].type = ICE_UDP_OF;
772                                 else
773                                         list[t].type = ICE_UDP_ILOS;
774                                 if (udp_mask->hdr.src_port) {
775                                         list[t].h_u.l4_hdr.src_port =
776                                                 udp_spec->hdr.src_port;
777                                         list[t].m_u.l4_hdr.src_port =
778                                                 udp_mask->hdr.src_port;
779                                         input_set_byte += 2;
780                                 }
781                                 if (udp_mask->hdr.dst_port) {
782                                         list[t].h_u.l4_hdr.dst_port =
783                                                 udp_spec->hdr.dst_port;
784                                         list[t].m_u.l4_hdr.dst_port =
785                                                 udp_mask->hdr.dst_port;
786                                         input_set_byte += 2;
787                                 }
788                                 t++;
789                         }
790                         break;
791
792                 case RTE_FLOW_ITEM_TYPE_TCP:
793                         tcp_spec = item->spec;
794                         tcp_mask = item->mask;
795                         if (tunnel_valid) {
796                                 inner_tcp_valid = 1;
797                                 input = &inner_input_set;
798                         } else {
799                                 tcp_valid = 1;
800                                 input = &outer_input_set;
801                         }
802
803                         if (tcp_spec && tcp_mask) {
804                                 /* Check TCP mask and update input set */
805                                 if (tcp_mask->hdr.sent_seq ||
806                                         tcp_mask->hdr.recv_ack ||
807                                         tcp_mask->hdr.data_off ||
808                                         tcp_mask->hdr.tcp_flags ||
809                                         tcp_mask->hdr.rx_win ||
810                                         tcp_mask->hdr.cksum ||
811                                         tcp_mask->hdr.tcp_urp) {
812                                         rte_flow_error_set(error, EINVAL,
813                                            RTE_FLOW_ERROR_TYPE_ITEM,
814                                            item,
815                                            "Invalid TCP mask");
816                                         return false;
817                                 }
818
819                                 if (tcp_mask->hdr.src_port)
820                                         *input |= ICE_INSET_TCP_SRC_PORT;
821                                 if (tcp_mask->hdr.dst_port)
822                                         *input |= ICE_INSET_TCP_DST_PORT;
823                                 list[t].type = ICE_TCP_IL;
824                                 if (tcp_mask->hdr.src_port) {
825                                         list[t].h_u.l4_hdr.src_port =
826                                                 tcp_spec->hdr.src_port;
827                                         list[t].m_u.l4_hdr.src_port =
828                                                 tcp_mask->hdr.src_port;
829                                         input_set_byte += 2;
830                                 }
831                                 if (tcp_mask->hdr.dst_port) {
832                                         list[t].h_u.l4_hdr.dst_port =
833                                                 tcp_spec->hdr.dst_port;
834                                         list[t].m_u.l4_hdr.dst_port =
835                                                 tcp_mask->hdr.dst_port;
836                                         input_set_byte += 2;
837                                 }
838                                 t++;
839                         }
840                         break;
841
842                 case RTE_FLOW_ITEM_TYPE_SCTP:
843                         sctp_spec = item->spec;
844                         sctp_mask = item->mask;
845                         if (sctp_spec && sctp_mask) {
846                                 /* Check SCTP mask and update input set */
847                                 if (sctp_mask->hdr.cksum) {
848                                         rte_flow_error_set(error, EINVAL,
849                                            RTE_FLOW_ERROR_TYPE_ITEM,
850                                            item,
851                                            "Invalid SCTP mask");
852                                         return false;
853                                 }
854                                 if (tunnel_valid)
855                                         input = &inner_input_set;
856                                 else
857                                         input = &outer_input_set;
858
859                                 if (sctp_mask->hdr.src_port)
860                                         *input |= ICE_INSET_SCTP_SRC_PORT;
861                                 if (sctp_mask->hdr.dst_port)
862                                         *input |= ICE_INSET_SCTP_DST_PORT;
863
864                                 list[t].type = ICE_SCTP_IL;
865                                 if (sctp_mask->hdr.src_port) {
866                                         list[t].h_u.sctp_hdr.src_port =
867                                                 sctp_spec->hdr.src_port;
868                                         list[t].m_u.sctp_hdr.src_port =
869                                                 sctp_mask->hdr.src_port;
870                                         input_set_byte += 2;
871                                 }
872                                 if (sctp_mask->hdr.dst_port) {
873                                         list[t].h_u.sctp_hdr.dst_port =
874                                                 sctp_spec->hdr.dst_port;
875                                         list[t].m_u.sctp_hdr.dst_port =
876                                                 sctp_mask->hdr.dst_port;
877                                         input_set_byte += 2;
878                                 }
879                                 t++;
880                         }
881                         break;
882
883                 case RTE_FLOW_ITEM_TYPE_VXLAN:
884                         vxlan_spec = item->spec;
885                         vxlan_mask = item->mask;
886                         /* Check if VXLAN item is used to describe protocol.
887                          * If yes, both spec and mask should be NULL.
888                          * If no, both spec and mask shouldn't be NULL.
889                          */
890                         if ((!vxlan_spec && vxlan_mask) ||
891                             (vxlan_spec && !vxlan_mask)) {
892                                 rte_flow_error_set(error, EINVAL,
893                                            RTE_FLOW_ERROR_TYPE_ITEM,
894                                            item,
895                                            "Invalid VXLAN item");
896                                 return false;
897                         }
898                         vxlan_valid = 1;
899                         tunnel_valid = 1;
900                         input = &inner_input_set;
901                         if (vxlan_spec && vxlan_mask) {
902                                 list[t].type = ICE_VXLAN;
903                                 if (vxlan_mask->vni[0] ||
904                                         vxlan_mask->vni[1] ||
905                                         vxlan_mask->vni[2]) {
906                                         list[t].h_u.tnl_hdr.vni =
907                                                 (vxlan_spec->vni[2] << 16) |
908                                                 (vxlan_spec->vni[1] << 8) |
909                                                 vxlan_spec->vni[0];
910                                         list[t].m_u.tnl_hdr.vni =
911                                                 (vxlan_mask->vni[2] << 16) |
912                                                 (vxlan_mask->vni[1] << 8) |
913                                                 vxlan_mask->vni[0];
914                                         *input |= ICE_INSET_VXLAN_VNI;
915                                         input_set_byte += 2;
916                                 }
917                                 t++;
918                         }
919                         break;
920
921                 case RTE_FLOW_ITEM_TYPE_NVGRE:
922                         nvgre_spec = item->spec;
923                         nvgre_mask = item->mask;
924                         /* Check if NVGRE item is used to describe protocol.
925                          * If yes, both spec and mask should be NULL.
926                          * If no, both spec and mask shouldn't be NULL.
927                          */
928                         if ((!nvgre_spec && nvgre_mask) ||
929                             (nvgre_spec && !nvgre_mask)) {
930                                 rte_flow_error_set(error, EINVAL,
931                                            RTE_FLOW_ERROR_TYPE_ITEM,
932                                            item,
933                                            "Invalid NVGRE item");
934                                 return false;
935                         }
936                         nvgre_valid = 1;
937                         tunnel_valid = 1;
938                         input = &inner_input_set;
939                         if (nvgre_spec && nvgre_mask) {
940                                 list[t].type = ICE_NVGRE;
941                                 if (nvgre_mask->tni[0] ||
942                                         nvgre_mask->tni[1] ||
943                                         nvgre_mask->tni[2]) {
944                                         list[t].h_u.nvgre_hdr.tni_flow =
945                                                 (nvgre_spec->tni[2] << 16) |
946                                                 (nvgre_spec->tni[1] << 8) |
947                                                 nvgre_spec->tni[0];
948                                         list[t].m_u.nvgre_hdr.tni_flow =
949                                                 (nvgre_mask->tni[2] << 16) |
950                                                 (nvgre_mask->tni[1] << 8) |
951                                                 nvgre_mask->tni[0];
952                                         *input |= ICE_INSET_NVGRE_TNI;
953                                         input_set_byte += 2;
954                                 }
955                                 t++;
956                         }
957                         break;
958
959                 case RTE_FLOW_ITEM_TYPE_VLAN:
960                         vlan_spec = item->spec;
961                         vlan_mask = item->mask;
962                         /* Check if VLAN item is used to describe protocol.
963                          * If yes, both spec and mask should be NULL.
964                          * If no, both spec and mask shouldn't be NULL.
965                          */
966                         if ((!vlan_spec && vlan_mask) ||
967                             (vlan_spec && !vlan_mask)) {
968                                 rte_flow_error_set(error, EINVAL,
969                                            RTE_FLOW_ERROR_TYPE_ITEM,
970                                            item,
971                                            "Invalid VLAN item");
972                                 return false;
973                         }
974
975                         if (qinq_valid) {
976                                 if (!outer_vlan_valid)
977                                         outer_vlan_valid = 1;
978                                 else
979                                         inner_vlan_valid = 1;
980                         }
981
982                         input = &outer_input_set;
983
984                         if (vlan_spec && vlan_mask) {
985                                 if (qinq_valid) {
986                                         if (!inner_vlan_valid) {
987                                                 list[t].type = ICE_VLAN_EX;
988                                                 *input |=
989                                                         ICE_INSET_VLAN_OUTER;
990                                         } else {
991                                                 list[t].type = ICE_VLAN_IN;
992                                                 *input |=
993                                                         ICE_INSET_VLAN_INNER;
994                                         }
995                                 } else {
996                                         list[t].type = ICE_VLAN_OFOS;
997                                         *input |= ICE_INSET_VLAN_INNER;
998                                 }
999
1000                                 if (vlan_mask->tci) {
1001                                         list[t].h_u.vlan_hdr.vlan =
1002                                                 vlan_spec->tci;
1003                                         list[t].m_u.vlan_hdr.vlan =
1004                                                 vlan_mask->tci;
1005                                         input_set_byte += 2;
1006                                 }
1007                                 if (vlan_mask->inner_type) {
1008                                         rte_flow_error_set(error, EINVAL,
1009                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1010                                                 item,
1011                                                 "Invalid VLAN input set.");
1012                                         return false;
1013                                 }
1014                                 t++;
1015                         }
1016                         break;
1017
1018                 case RTE_FLOW_ITEM_TYPE_PPPOED:
1019                 case RTE_FLOW_ITEM_TYPE_PPPOES:
1020                         pppoe_spec = item->spec;
1021                         pppoe_mask = item->mask;
1022                         /* Check if PPPoE item is used to describe protocol.
1023                          * If yes, both spec and mask should be NULL.
1024                          * If no, both spec and mask shouldn't be NULL.
1025                          */
1026                         if ((!pppoe_spec && pppoe_mask) ||
1027                                 (pppoe_spec && !pppoe_mask)) {
1028                                 rte_flow_error_set(error, EINVAL,
1029                                         RTE_FLOW_ERROR_TYPE_ITEM,
1030                                         item,
1031                                         "Invalid pppoe item");
1032                                 return false;
1033                         }
1034                         pppoe_patt_valid = 1;
1035                         input = &outer_input_set;
1036                         if (pppoe_spec && pppoe_mask) {
1037                                 /* Check pppoe mask and update input set */
1038                                 if (pppoe_mask->length ||
1039                                         pppoe_mask->code ||
1040                                         pppoe_mask->version_type) {
1041                                         rte_flow_error_set(error, EINVAL,
1042                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1043                                                 item,
1044                                                 "Invalid pppoe mask");
1045                                         return false;
1046                                 }
1047                                 list[t].type = ICE_PPPOE;
1048                                 if (pppoe_mask->session_id) {
1049                                         list[t].h_u.pppoe_hdr.session_id =
1050                                                 pppoe_spec->session_id;
1051                                         list[t].m_u.pppoe_hdr.session_id =
1052                                                 pppoe_mask->session_id;
1053                                         *input |= ICE_INSET_PPPOE_SESSION;
1054                                         input_set_byte += 2;
1055                                 }
1056                                 t++;
1057                                 pppoe_elem_valid = 1;
1058                         }
1059                         break;
1060
1061                 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1062                         pppoe_proto_spec = item->spec;
1063                         pppoe_proto_mask = item->mask;
1064                         /* Check if PPPoE optional proto_id item
1065                          * is used to describe protocol.
1066                          * If yes, both spec and mask should be NULL.
1067                          * If no, both spec and mask shouldn't be NULL.
1068                          */
1069                         if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1070                                 (pppoe_proto_spec && !pppoe_proto_mask)) {
1071                                 rte_flow_error_set(error, EINVAL,
1072                                         RTE_FLOW_ERROR_TYPE_ITEM,
1073                                         item,
1074                                         "Invalid pppoe proto item");
1075                                 return false;
1076                         }
1077                         input = &outer_input_set;
1078                         if (pppoe_proto_spec && pppoe_proto_mask) {
1079                                 if (pppoe_elem_valid)
1080                                         t--;
1081                                 list[t].type = ICE_PPPOE;
1082                                 if (pppoe_proto_mask->proto_id) {
1083                                         list[t].h_u.pppoe_hdr.ppp_prot_id =
1084                                                 pppoe_proto_spec->proto_id;
1085                                         list[t].m_u.pppoe_hdr.ppp_prot_id =
1086                                                 pppoe_proto_mask->proto_id;
1087                                         *input |= ICE_INSET_PPPOE_PROTO;
1088                                         input_set_byte += 2;
1089                                         pppoe_prot_valid = 1;
1090                                 }
1091                                 if ((pppoe_proto_mask->proto_id &
1092                                         pppoe_proto_spec->proto_id) !=
1093                                             CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1094                                         (pppoe_proto_mask->proto_id &
1095                                         pppoe_proto_spec->proto_id) !=
1096                                             CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1097                                         *tun_type = ICE_SW_TUN_PPPOE_PAY;
1098                                 else
1099                                         *tun_type = ICE_SW_TUN_PPPOE;
1100                                 t++;
1101                         }
1102
1103                         break;
1104
1105                 case RTE_FLOW_ITEM_TYPE_ESP:
1106                         esp_spec = item->spec;
1107                         esp_mask = item->mask;
1108                         if ((esp_spec && !esp_mask) ||
1109                                 (!esp_spec && esp_mask)) {
1110                                 rte_flow_error_set(error, EINVAL,
1111                                            RTE_FLOW_ERROR_TYPE_ITEM,
1112                                            item,
1113                                            "Invalid esp item");
1114                                 return false;
1115                         }
1116                         /* Check esp mask and update input set */
1117                         if (esp_mask && esp_mask->hdr.seq) {
1118                                 rte_flow_error_set(error, EINVAL,
1119                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1120                                                 item,
1121                                                 "Invalid esp mask");
1122                                 return false;
1123                         }
1124                         input = &outer_input_set;
1125                         if (!esp_spec && !esp_mask && !(*input)) {
1126                                 profile_rule = 1;
1127                                 if (ipv6_valid && udp_valid)
1128                                         *tun_type =
1129                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1130                                 else if (ipv6_valid)
1131                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1132                                 else if (ipv4_valid)
1133                                         goto inset_check;
1134                         } else if (esp_spec && esp_mask &&
1135                                                 esp_mask->hdr.spi){
1136                                 if (udp_valid)
1137                                         list[t].type = ICE_NAT_T;
1138                                 else
1139                                         list[t].type = ICE_ESP;
1140                                 list[t].h_u.esp_hdr.spi =
1141                                         esp_spec->hdr.spi;
1142                                 list[t].m_u.esp_hdr.spi =
1143                                         esp_mask->hdr.spi;
1144                                 *input |= ICE_INSET_ESP_SPI;
1145                                 input_set_byte += 4;
1146                                 t++;
1147                         }
1148
1149                         if (!profile_rule) {
1150                                 if (ipv6_valid && udp_valid)
1151                                         *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1152                                 else if (ipv4_valid && udp_valid)
1153                                         *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1154                                 else if (ipv6_valid)
1155                                         *tun_type = ICE_SW_TUN_IPV6_ESP;
1156                                 else if (ipv4_valid)
1157                                         *tun_type = ICE_SW_TUN_IPV4_ESP;
1158                         }
1159                         break;
1160
1161                 case RTE_FLOW_ITEM_TYPE_AH:
1162                         ah_spec = item->spec;
1163                         ah_mask = item->mask;
1164                         if ((ah_spec && !ah_mask) ||
1165                                 (!ah_spec && ah_mask)) {
1166                                 rte_flow_error_set(error, EINVAL,
1167                                            RTE_FLOW_ERROR_TYPE_ITEM,
1168                                            item,
1169                                            "Invalid ah item");
1170                                 return false;
1171                         }
1172                         /* Check ah mask and update input set */
1173                         if (ah_mask &&
1174                                 (ah_mask->next_hdr ||
1175                                 ah_mask->payload_len ||
1176                                 ah_mask->seq_num ||
1177                                 ah_mask->reserved)) {
1178                                 rte_flow_error_set(error, EINVAL,
1179                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1180                                                 item,
1181                                                 "Invalid ah mask");
1182                                 return false;
1183                         }
1184
1185                         input = &outer_input_set;
1186                         if (!ah_spec && !ah_mask && !(*input)) {
1187                                 profile_rule = 1;
1188                                 if (ipv6_valid && udp_valid)
1189                                         *tun_type =
1190                                         ICE_SW_TUN_PROFID_IPV6_NAT_T;
1191                                 else if (ipv6_valid)
1192                                         *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1193                                 else if (ipv4_valid)
1194                                         goto inset_check;
1195                         } else if (ah_spec && ah_mask &&
1196                                                 ah_mask->spi){
1197                                 list[t].type = ICE_AH;
1198                                 list[t].h_u.ah_hdr.spi =
1199                                         ah_spec->spi;
1200                                 list[t].m_u.ah_hdr.spi =
1201                                         ah_mask->spi;
1202                                 *input |= ICE_INSET_AH_SPI;
1203                                 input_set_byte += 4;
1204                                 t++;
1205                         }
1206
1207                         if (!profile_rule) {
1208                                 if (udp_valid)
1209                                         goto inset_check;
1210                                 else if (ipv6_valid)
1211                                         *tun_type = ICE_SW_TUN_IPV6_AH;
1212                                 else if (ipv4_valid)
1213                                         *tun_type = ICE_SW_TUN_IPV4_AH;
1214                         }
1215                         break;
1216
1217                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1218                         l2tp_spec = item->spec;
1219                         l2tp_mask = item->mask;
1220                         if ((l2tp_spec && !l2tp_mask) ||
1221                                 (!l2tp_spec && l2tp_mask)) {
1222                                 rte_flow_error_set(error, EINVAL,
1223                                            RTE_FLOW_ERROR_TYPE_ITEM,
1224                                            item,
1225                                            "Invalid l2tp item");
1226                                 return false;
1227                         }
1228
1229                         input = &outer_input_set;
1230                         if (!l2tp_spec && !l2tp_mask && !(*input)) {
1231                                 if (ipv6_valid)
1232                                         *tun_type =
1233                                         ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1234                                 else if (ipv4_valid)
1235                                         goto inset_check;
1236                         } else if (l2tp_spec && l2tp_mask &&
1237                                                 l2tp_mask->session_id){
1238                                 list[t].type = ICE_L2TPV3;
1239                                 list[t].h_u.l2tpv3_sess_hdr.session_id =
1240                                         l2tp_spec->session_id;
1241                                 list[t].m_u.l2tpv3_sess_hdr.session_id =
1242                                         l2tp_mask->session_id;
1243                                 *input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1244                                 input_set_byte += 4;
1245                                 t++;
1246                         }
1247
1248                         if (!profile_rule) {
1249                                 if (ipv6_valid)
1250                                         *tun_type =
1251                                         ICE_SW_TUN_IPV6_L2TPV3;
1252                                 else if (ipv4_valid)
1253                                         *tun_type =
1254                                         ICE_SW_TUN_IPV4_L2TPV3;
1255                         }
1256                         break;
1257
1258                 case RTE_FLOW_ITEM_TYPE_PFCP:
1259                         pfcp_spec = item->spec;
1260                         pfcp_mask = item->mask;
1261                         /* Check if PFCP item is used to describe protocol.
1262                          * If yes, both spec and mask should be NULL.
1263                          * If no, both spec and mask shouldn't be NULL.
1264                          */
1265                         if ((!pfcp_spec && pfcp_mask) ||
1266                             (pfcp_spec && !pfcp_mask)) {
1267                                 rte_flow_error_set(error, EINVAL,
1268                                            RTE_FLOW_ERROR_TYPE_ITEM,
1269                                            item,
1270                                            "Invalid PFCP item");
1271                                 return false;
1272                         }
1273                         if (pfcp_spec && pfcp_mask) {
1274                                 /* Check pfcp mask and update input set */
1275                                 if (pfcp_mask->msg_type ||
1276                                         pfcp_mask->msg_len ||
1277                                         pfcp_mask->seid) {
1278                                         rte_flow_error_set(error, EINVAL,
1279                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1280                                                 item,
1281                                                 "Invalid pfcp mask");
1282                                         return false;
1283                                 }
1284                                 if (pfcp_mask->s_field &&
1285                                         pfcp_spec->s_field == 0x01 &&
1286                                         ipv6_valid)
1287                                         *tun_type =
1288                                         ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1289                                 else if (pfcp_mask->s_field &&
1290                                         pfcp_spec->s_field == 0x01)
1291                                         *tun_type =
1292                                         ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1293                                 else if (pfcp_mask->s_field &&
1294                                         !pfcp_spec->s_field &&
1295                                         ipv6_valid)
1296                                         *tun_type =
1297                                         ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1298                                 else if (pfcp_mask->s_field &&
1299                                         !pfcp_spec->s_field)
1300                                         *tun_type =
1301                                         ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1302                                 else
1303                                         return false;
1304                         }
1305                         break;
1306
1307                 case RTE_FLOW_ITEM_TYPE_GTPU:
1308                         gtp_spec = item->spec;
1309                         gtp_mask = item->mask;
1310                         if (gtp_spec && !gtp_mask) {
1311                                 rte_flow_error_set(error, EINVAL,
1312                                         RTE_FLOW_ERROR_TYPE_ITEM,
1313                                         item,
1314                                         "Invalid GTP item");
1315                                 return false;
1316                         }
1317                         if (gtp_spec && gtp_mask) {
1318                                 if (gtp_mask->v_pt_rsv_flags ||
1319                                     gtp_mask->msg_type ||
1320                                     gtp_mask->msg_len) {
1321                                         rte_flow_error_set(error, EINVAL,
1322                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1323                                                 item,
1324                                                 "Invalid GTP mask");
1325                                         return false;
1326                                 }
1327                                 input = &outer_input_set;
1328                                 if (gtp_mask->teid)
1329                                         *input |= ICE_INSET_GTPU_TEID;
1330                                 list[t].type = ICE_GTP;
1331                                 list[t].h_u.gtp_hdr.teid =
1332                                         gtp_spec->teid;
1333                                 list[t].m_u.gtp_hdr.teid =
1334                                         gtp_mask->teid;
1335                                 input_set_byte += 4;
1336                                 t++;
1337                         }
1338                         tunnel_valid = 1;
1339                         gtpu_valid = 1;
1340                         break;
1341
1342                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1343                         gtp_psc_spec = item->spec;
1344                         gtp_psc_mask = item->mask;
1345                         if (gtp_psc_spec && !gtp_psc_mask) {
1346                                 rte_flow_error_set(error, EINVAL,
1347                                         RTE_FLOW_ERROR_TYPE_ITEM,
1348                                         item,
1349                                         "Invalid GTPU_EH item");
1350                                 return false;
1351                         }
1352                         if (gtp_psc_spec && gtp_psc_mask) {
1353                                 if (gtp_psc_mask->pdu_type) {
1354                                         rte_flow_error_set(error, EINVAL,
1355                                                 RTE_FLOW_ERROR_TYPE_ITEM,
1356                                                 item,
1357                                                 "Invalid GTPU_EH mask");
1358                                         return false;
1359                                 }
1360                                 input = &outer_input_set;
1361                                 if (gtp_psc_mask->qfi)
1362                                         *input |= ICE_INSET_GTPU_QFI;
1363                                 list[t].type = ICE_GTP;
1364                                 list[t].h_u.gtp_hdr.qfi =
1365                                         gtp_psc_spec->qfi;
1366                                 list[t].m_u.gtp_hdr.qfi =
1367                                         gtp_psc_mask->qfi;
1368                                 input_set_byte += 1;
1369                                 t++;
1370                         }
1371                         gtpu_psc_valid = 1;
1372                         break;
1373
1374                 case RTE_FLOW_ITEM_TYPE_VOID:
1375                         break;
1376
1377                 default:
1378                         rte_flow_error_set(error, EINVAL,
1379                                    RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1380                                    "Invalid pattern item.");
1381                         return false;
1382                 }
1383         }
1384
1385         if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1386             inner_vlan_valid && outer_vlan_valid)
1387                 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1388         else if (*tun_type == ICE_SW_TUN_PPPOE &&
1389                  inner_vlan_valid && outer_vlan_valid)
1390                 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1391         else if (*tun_type == ICE_NON_TUN &&
1392                  inner_vlan_valid && outer_vlan_valid)
1393                 *tun_type = ICE_NON_TUN_QINQ;
1394         else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1395                  inner_vlan_valid && outer_vlan_valid)
1396                 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1397
1398         if (pppoe_patt_valid && !pppoe_prot_valid) {
1399                 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1400                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1401                 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1402                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1403                 else if (inner_vlan_valid && outer_vlan_valid)
1404                         *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1405                 else if (ipv6_valid && udp_valid)
1406                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1407                 else if (ipv6_valid && tcp_valid)
1408                         *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1409                 else if (ipv4_valid && udp_valid)
1410                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1411                 else if (ipv4_valid && tcp_valid)
1412                         *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1413                 else if (ipv6_valid)
1414                         *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1415                 else if (ipv4_valid)
1416                         *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1417                 else
1418                         *tun_type = ICE_SW_TUN_PPPOE;
1419         }
1420
1421         if (gtpu_valid && gtpu_psc_valid) {
1422                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1423                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1424                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1425                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1426                 else if (ipv4_valid && inner_ipv4_valid)
1427                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1428                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1429                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1430                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1431                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1432                 else if (ipv4_valid && inner_ipv6_valid)
1433                         *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1434                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1435                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1436                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1437                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1438                 else if (ipv6_valid && inner_ipv4_valid)
1439                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1440                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1441                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1442                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1443                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1444                 else if (ipv6_valid && inner_ipv6_valid)
1445                         *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1446                 else if (ipv4_valid)
1447                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1448                 else if (ipv6_valid)
1449                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1450         } else if (gtpu_valid) {
1451                 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1452                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1453                 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1454                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1455                 else if (ipv4_valid && inner_ipv4_valid)
1456                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1457                 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1458                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1459                 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1460                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1461                 else if (ipv4_valid && inner_ipv6_valid)
1462                         *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1463                 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1464                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1465                 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1466                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1467                 else if (ipv6_valid && inner_ipv4_valid)
1468                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1469                 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1470                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1471                 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1472                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1473                 else if (ipv6_valid && inner_ipv6_valid)
1474                         *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1475                 else if (ipv4_valid)
1476                         *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1477                 else if (ipv6_valid)
1478                         *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1479         }
1480
1481         if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1482             *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1483                 for (k = 0; k < t; k++) {
1484                         if (list[k].type == ICE_GTP)
1485                                 list[k].type = ICE_GTP_NO_PAY;
1486                 }
1487         }
1488
1489         if (*tun_type == ICE_NON_TUN) {
1490                 if (vxlan_valid)
1491                         *tun_type = ICE_SW_TUN_VXLAN;
1492                 else if (nvgre_valid)
1493                         *tun_type = ICE_SW_TUN_NVGRE;
1494                 else if (ipv4_valid && tcp_valid)
1495                         *tun_type = ICE_SW_IPV4_TCP;
1496                 else if (ipv4_valid && udp_valid)
1497                         *tun_type = ICE_SW_IPV4_UDP;
1498                 else if (ipv6_valid && tcp_valid)
1499                         *tun_type = ICE_SW_IPV6_TCP;
1500                 else if (ipv6_valid && udp_valid)
1501                         *tun_type = ICE_SW_IPV6_UDP;
1502         }
1503
1504         if (input_set_byte > MAX_INPUT_SET_BYTE) {
1505                 rte_flow_error_set(error, EINVAL,
1506                         RTE_FLOW_ERROR_TYPE_ITEM,
1507                         item,
1508                         "too much input set");
1509                 return false;
1510         }
1511
1512         *lkups_num = t;
1513
1514 inset_check:
1515         if ((!outer_input_set && !inner_input_set &&
1516             !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1517             ~pattern_match_item->input_set_mask_o) ||
1518             (inner_input_set & ~pattern_match_item->input_set_mask_i))
1519                 return false;
1520
1521         return true;
1522 }
1523
1524 static int
1525 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1526                             const struct rte_flow_action *actions,
1527                             uint32_t priority,
1528                             struct rte_flow_error *error,
1529                             struct ice_adv_rule_info *rule_info)
1530 {
1531         const struct rte_flow_action_vf *act_vf;
1532         const struct rte_flow_action *action;
1533         enum rte_flow_action_type action_type;
1534
1535         for (action = actions; action->type !=
1536                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1537                 action_type = action->type;
1538                 switch (action_type) {
1539                 case RTE_FLOW_ACTION_TYPE_VF:
1540                         rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1541                         act_vf = action->conf;
1542
1543                         if (act_vf->id >= ad->real_hw.num_vfs &&
1544                                 !act_vf->original) {
1545                                 rte_flow_error_set(error,
1546                                         EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1547                                         actions,
1548                                         "Invalid vf id");
1549                                 return -rte_errno;
1550                         }
1551
1552                         if (act_vf->original)
1553                                 rule_info->sw_act.vsi_handle =
1554                                         ad->real_hw.avf.bus.func;
1555                         else
1556                                 rule_info->sw_act.vsi_handle = act_vf->id;
1557                         break;
1558
1559                 case RTE_FLOW_ACTION_TYPE_DROP:
1560                         rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1561                         break;
1562
1563                 default:
1564                         rte_flow_error_set(error,
1565                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1566                                            actions,
1567                                            "Invalid action type");
1568                         return -rte_errno;
1569                 }
1570         }
1571
1572         rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1573         rule_info->sw_act.flag = ICE_FLTR_RX;
1574         rule_info->rx = 1;
1575         rule_info->priority = 6 - priority;
1576
1577         return 0;
1578 }
1579
/**
 * Translate rte_flow actions into switch rule action info for the PF.
 *
 * Supported actions: RSS (interpreted as a queue group), QUEUE, DROP
 * and VOID.  Exactly one terminal action is expected; that invariant is
 * enforced separately by ice_switch_check_action().
 *
 * @return 0 on success, -rte_errno (with @error filled in) on failure.
 */
static int
ice_switch_parse_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		uint32_t priority,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_rss *act_qgrop;
	uint16_t base_queue, i;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;
	/* The only queue-group sizes the switch HW accepts. */
	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
		 2, 4, 8, 16, 32, 64, 128};

	/* Rule queue ids are absolute HW queue ids, so offset by the
	 * PF's and VSI's first queue.
	 */
	base_queue = pf->base_queue + vsi->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_RSS:
			/* RSS here denotes a queue group: the group must
			 * have a valid size, stay within the device's Rx
			 * queue count and be contiguous.
			 */
			act_qgrop = action->conf;
			if (act_qgrop->queue_num <= 1)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_QGRP;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_qgrop->queue[0];
			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
				if (act_qgrop->queue_num ==
					valid_qgrop_number[i])
					break;
			}
			if (i == MAX_QGRP_NUM_TYPE)
				goto error;
			if ((act_qgrop->queue[0] +
				act_qgrop->queue_num) >
				dev_data->nb_rx_queues)
				goto error1;
			for (i = 0; i < act_qgrop->queue_num - 1; i++)
				if (act_qgrop->queue[i + 1] !=
					act_qgrop->queue[i] + 1)
					goto error2;
			rule_info->sw_act.qgrp_size =
				act_qgrop->queue_num;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			if (act_q->index >= dev_data->nb_rx_queues)
				goto error;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
				ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			goto error;
		}
	}

	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	/* Map rte_flow priority onto the switch rule priority scale.
	 * NOTE(review): the PF path uses "priority + 5" while the DCF
	 * path uses "6 - priority" — presumably intentional per-path
	 * scaling; confirm against the switch rule priority convention.
	 */
	rule_info->priority = priority + 5;

	return 0;

error:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid action type or queue number");
	return -rte_errno;

error1:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Invalid queue region indexes");
	return -rte_errno;

error2:
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
		actions,
		"Discontinuous queue region");
	return -rte_errno;
}
1679
1680 static int
1681 ice_switch_check_action(const struct rte_flow_action *actions,
1682                             struct rte_flow_error *error)
1683 {
1684         const struct rte_flow_action *action;
1685         enum rte_flow_action_type action_type;
1686         uint16_t actions_num = 0;
1687
1688         for (action = actions; action->type !=
1689                                 RTE_FLOW_ACTION_TYPE_END; action++) {
1690                 action_type = action->type;
1691                 switch (action_type) {
1692                 case RTE_FLOW_ACTION_TYPE_VF:
1693                 case RTE_FLOW_ACTION_TYPE_RSS:
1694                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1695                 case RTE_FLOW_ACTION_TYPE_DROP:
1696                         actions_num++;
1697                         break;
1698                 case RTE_FLOW_ACTION_TYPE_VOID:
1699                         continue;
1700                 default:
1701                         rte_flow_error_set(error,
1702                                            EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1703                                            actions,
1704                                            "Invalid action type");
1705                         return -rte_errno;
1706                 }
1707         }
1708
1709         if (actions_num != 1) {
1710                 rte_flow_error_set(error,
1711                                    EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1712                                    actions,
1713                                    "Invalid action number");
1714                 return -rte_errno;
1715         }
1716
1717         return 0;
1718 }
1719
/**
 * Parse a flow pattern + action list into the switch engine's private
 * representation.
 *
 * On success with a non-NULL @meta, ownership of the allocated lookup
 * list and sw_meta is transferred to *meta (freed later by the engine's
 * destroy/free callbacks); with a NULL @meta this is a validate-only
 * call and everything is freed before returning.
 *
 * @return 0 on success, -rte_errno (with @error filled in) on failure.
 */
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		uint32_t priority,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	int ret = 0;
	struct sw_meta *sw_meta_ptr = NULL;
	struct ice_adv_rule_info rule_info;
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	uint16_t vlan_num = 0;
	enum ice_sw_tunnel_type tun_type =
			ICE_NON_TUN;
	struct ice_pattern_match_item *pattern_match_item = NULL;

	/* Pre-scan the pattern: count lookup slots to allocate and
	 * detect cases that force a specific tunnel type.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
			const struct rte_flow_item_eth *eth_mask;
			if (item->mask)
				eth_mask = item->mask;
			else
				continue;
			/* A fully-masked ethertype matches both tunneled
			 * and non-tunneled packets.
			 */
			if (eth_mask->type == UINT16_MAX)
				tun_type = ICE_SW_TUN_AND_NON_TUN;
		}

		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
			vlan_num++;

		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}

	/* Two VLAN headers means a QinQ pattern. */
	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
	else if (vlan_num == 2)
		tun_type = ICE_NON_TUN_QINQ;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for PMD internal items");
		return -rte_errno;
	}

	sw_meta_ptr =
		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
	if (!sw_meta_ptr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "No memory for sw_pattern_meta_ptr");
		goto error;
	}

	pattern_match_item =
		ice_search_pattern_match_item(ad, pattern, array, array_len,
					      error);
	if (!pattern_match_item) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid input pattern");
		goto error;
	}

	/* Fills @list/@lkups_num and may refine @tun_type further. */
	if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
				   &tun_type, pattern_match_item)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		goto error;
	}

	memset(&rule_info, 0, sizeof(rule_info));
	rule_info.tun_type = tun_type;

	ret = ice_switch_check_action(actions, error);
	if (ret)
		goto error;

	/* DCF and PF paths translate actions differently. */
	if (ad->hw.dcf_enabled)
		ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
						  error, &rule_info);
	else
		ret = ice_switch_parse_action(pf, actions, priority, error,
					      &rule_info);

	if (ret)
		goto error;

	if (meta) {
		/* Hand the parsed result (and its allocations) to the caller. */
		*meta = sw_meta_ptr;
		((struct sw_meta *)*meta)->list = list;
		((struct sw_meta *)*meta)->lkups_num = lkups_num;
		((struct sw_meta *)*meta)->rule_info = rule_info;
	} else {
		/* Validate-only call: nothing to keep. */
		rte_free(list);
		rte_free(sw_meta_ptr);
	}

	rte_free(pattern_match_item);

	return 0;

error:
	rte_free(list);
	rte_free(sw_meta_ptr);
	rte_free(pattern_match_item);

	return -rte_errno;
}
1844
1845 static int
1846 ice_switch_query(struct ice_adapter *ad __rte_unused,
1847                 struct rte_flow *flow __rte_unused,
1848                 struct rte_flow_query_count *count __rte_unused,
1849                 struct rte_flow_error *error)
1850 {
1851         rte_flow_error_set(error, EINVAL,
1852                 RTE_FLOW_ERROR_TYPE_HANDLE,
1853                 NULL,
1854                 "count action not supported by switch filter");
1855
1856         return -rte_errno;
1857 }
1858
/**
 * Redirect an existing switch rule to a new VSI number — presumably
 * used when the target VSI's HW number changes (e.g. after a VF
 * reset; NOTE(review): confirm against the caller of the redirect op).
 *
 * The matching rule is duplicated, removed from HW, and re-added after
 * the VSI context has been updated.
 *
 * @return 0 on success or when the rule is not affected, negative
 *         errno on failure.
 */
static int
ice_switch_redirect(struct ice_adapter *ad,
		    struct rte_flow *flow,
		    struct ice_flow_redirect *rd)
{
	struct ice_rule_query_data *rdata = flow->rule;
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_adv_lkup_elem *lkups_dp = NULL;
	struct LIST_HEAD_TYPE *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_hw *hw = &ad->hw;
	struct ice_switch_info *sw;
	uint16_t lkups_cnt;
	int ret;

	/* Rule targets a different VSI: nothing to do. */
	if (rdata->vsi_handle != rd->vsi_handle)
		return 0;

	sw = hw->switch_info;
	if (!sw->recp_list[rdata->rid].recp_created)
		return -EINVAL;

	/* Only VSI redirection is supported here. */
	if (rd->type != ICE_FLOW_REDIRECT_VSI)
		return -ENOTSUP;

	/* Find this flow's rule in the recipe's filter list; a match is
	 * either a direct FWD_TO_VSI to the redirected VSI or a
	 * FWD_TO_VSI_LIST (which is collapsed to FWD_TO_VSI below).
	 */
	list_head = &sw->recp_list[rdata->rid].filt_rules;
	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
			    list_entry) {
		rinfo = list_itr->rule_info;
		if ((rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
		    (rinfo.fltr_rule_id == rdata->rule_id &&
		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
			lkups_cnt = list_itr->lkups_cnt;
			/* Copy the lookups: the originals are freed when
			 * the old rule is removed below.
			 */
			lkups_dp = (struct ice_adv_lkup_elem *)
				ice_memdup(hw, list_itr->lkups,
					   sizeof(*list_itr->lkups) *
					   lkups_cnt, ICE_NONDMA_TO_NONDMA);

			if (!lkups_dp) {
				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
				return -EINVAL;
			}

			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				rinfo.sw_act.vsi_handle = rd->vsi_handle;
				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
			}
			break;
		}
	}

	/* No matching rule found. */
	if (!lkups_dp)
		return -EINVAL;

	/* Remove the old rule */
	ret = ice_rem_adv_rule(hw, list_itr->lkups,
			       lkups_cnt, &rinfo);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
			    rdata->rule_id);
		ret = -EINVAL;
		goto out;
	}

	/* Update VSI context */
	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;

	/* Replay the rule */
	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
			       &rinfo, rdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to replay the rule");
		ret = -EINVAL;
	}

out:
	ice_free(hw, lkups_dp);
	return ret;
}
1940
1941 static int
1942 ice_switch_init(struct ice_adapter *ad)
1943 {
1944         int ret = 0;
1945         struct ice_flow_parser *dist_parser;
1946         struct ice_flow_parser *perm_parser;
1947
1948         if (ad->devargs.pipe_mode_support) {
1949                 perm_parser = &ice_switch_perm_parser;
1950                 ret = ice_register_parser(perm_parser, ad);
1951         } else {
1952                 dist_parser = &ice_switch_dist_parser;
1953                 ret = ice_register_parser(dist_parser, ad);
1954         }
1955         return ret;
1956 }
1957
1958 static void
1959 ice_switch_uninit(struct ice_adapter *ad)
1960 {
1961         struct ice_flow_parser *dist_parser;
1962         struct ice_flow_parser *perm_parser;
1963
1964         if (ad->devargs.pipe_mode_support) {
1965                 perm_parser = &ice_switch_perm_parser;
1966                 ice_unregister_parser(perm_parser, ad);
1967         } else {
1968                 dist_parser = &ice_switch_dist_parser;
1969                 ice_unregister_parser(dist_parser, ad);
1970         }
1971 }
1972
/* Switch filter engine ops table, registered with the generic flow
 * framework at startup (see RTE_INIT below).
 */
static struct
ice_flow_engine ice_switch_engine = {
	.init = ice_switch_init,
	.uninit = ice_switch_uninit,
	.create = ice_switch_create,
	.destroy = ice_switch_destroy,
	.query_count = ice_switch_query,
	.redirect = ice_switch_redirect,
	.free = ice_switch_filter_rule_free,
	.type = ICE_FLOW_ENGINE_SWITCH,
};
1984
/* Parser used in the default (non-pipeline) mode: patterns are handled
 * at the distributor stage.
 */
static struct
ice_flow_parser ice_switch_dist_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_dist_list,
	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
1993
/* Parser used in pipeline mode (devarg pipe_mode_support): patterns are
 * handled at the permission stage.
 */
static struct
ice_flow_parser ice_switch_perm_parser = {
	.engine = &ice_switch_engine,
	.array = ice_switch_pattern_perm_list,
	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
	.parse_pattern_action = ice_switch_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_PERMISSION,
};
2002
2003 RTE_INIT(ice_sw_engine_init)
2004 {
2005         struct ice_flow_engine *engine = &ice_switch_engine;
2006         ice_register_flow_engine(engine);
2007 }