1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Maximum number of queue-group types supported per switch rule. */
#define MAX_QGRP_NUM_TYPE 7
/* Maximum number of bytes a rule's input set may occupy. */
#define MAX_INPUT_SET_BYTE 32
/* PPP protocol identifiers carried in a PPPoE session payload. */
#define ICE_PPP_IPV4_PROTO	0x0021
#define ICE_PPP_IPV6_PROTO	0x0057
/* IPv4 next-protocol value for GRE (used to detect NVGRE tunnels). */
#define ICE_IPV4_PROTO_NVGRE	0x002F
/* Base priority assigned to switch rules. */
#define ICE_SW_PRI_BASE 6
/*
 * Input-set bitmaps describing which packet fields each supported
 * switch-filter pattern may match on (L2 / VLAN / QinQ / IPv4 groups).
 */
#define ICE_SW_INSET_ETHER ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
	ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER)
/* NOTE(review): the original QINQ macro was truncated mid-continuation;
 * restored per upstream to include the outer VLAN tag. */
#define ICE_SW_INSET_MAC_QINQ ( \
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
	ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_QINQ_IPV4_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV4_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV4 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV4_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* Input-set bitmaps for the IPv6 pattern family. */
#define ICE_SW_INSET_MAC_IPV6 ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
	ICE_INSET_IPV6_NEXT_HDR)
#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_QINQ_IPV6_TCP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_QINQ_IPV6_UDP ( \
	ICE_SW_INSET_MAC_QINQ_IPV6 | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
#define ICE_SW_INSET_MAC_IPV6_UDP ( \
	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
	ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Input-set bitmaps for tunneled (NVGRE / VXLAN) patterns.
 * DIST_* variants are used by the distributor-stage parser (match on
 * outer fields plus tunnel id); PERM_* variants by the permission-stage
 * parser (match on inner fields only).
 *
 * NOTE(review): the DIST_NVGRE_IPV4, DIST_VXLAN_IPV4 and both
 * PERM_TUNNEL_IPV4_{TCP,UDP} macros were truncated mid-continuation in
 * the original; their trailing terms are restored per upstream.
 */
#define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \
	ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \
	ICE_INSET_IPV4_TOS)
#define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \
	ICE_INSET_IPV4_TOS)
/* Input-set bitmaps for PPPoE, IPsec (ESP/AH), L2TPv3, PFCP and plain
 * GTP-U patterns. Composite macros OR together their building blocks.
 */
#define ICE_SW_INSET_MAC_PPPOE ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
	ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
	ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
	ICE_INSET_PPPOE_PROTO)
#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
	ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
#define ICE_SW_INSET_MAC_IPV4_ESP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV6_ESP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
#define ICE_SW_INSET_MAC_IPV4_AH ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV6_AH ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
#define ICE_SW_INSET_MAC_IPV4_L2TP ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV6_L2TP ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
#define ICE_SW_INSET_MAC_IPV4_PFCP ( \
	ICE_SW_INSET_MAC_IPV4 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV6_PFCP ( \
	ICE_SW_INSET_MAC_IPV6 | \
	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
#define ICE_SW_INSET_MAC_IPV4_GTPU ( \
	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_IPV6_GTPU ( \
	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
/* Input-set bitmaps for GTP-U with inner IP payloads: the *_OUTER macros
 * cover the outer headers (TEID, plus QFI when the extension header is
 * present), the GTPU_IPV{4,6}* macros cover the inner packet fields.
 */
#define ICE_SW_INSET_MAC_GTPU_OUTER ( \
	ICE_INSET_DMAC | ICE_INSET_GTPU_TEID)
#define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \
	ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI)
#define ICE_SW_INSET_GTPU_IPV4 ( \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_SW_INSET_GTPU_IPV6 ( \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST)
#define ICE_SW_INSET_GTPU_IPV4_UDP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV4_TCP ( \
	ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_UDP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \
	ICE_INSET_UDP_DST_PORT)
#define ICE_SW_INSET_GTPU_IPV6_TCP ( \
	ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \
	ICE_INSET_TCP_DST_PORT)
178 struct ice_adv_lkup_elem *list;
180 struct ice_adv_rule_info rule_info;
183 enum ice_sw_fltr_status {
185 ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT,
186 ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT,
189 struct ice_switch_filter_conf {
190 enum ice_sw_fltr_status fltr_status;
192 struct ice_rule_query_data sw_query_data;
195 * The lookup elements and rule info are saved here when filter creation
200 struct ice_adv_lkup_elem *lkups;
201 struct ice_adv_rule_info rule_info;
204 static struct ice_flow_parser ice_switch_dist_parser;
205 static struct ice_flow_parser ice_switch_perm_parser;
208 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
209 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
210 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
211 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
212 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
213 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
214 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
215 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
216 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
217 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
218 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
219 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
220 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
221 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
222 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
223 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
224 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
225 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
228 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
230 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
231 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
232 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
234 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
236 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
238 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
239 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
240 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
241 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
242 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
243 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
244 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
245 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
246 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
247 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
248 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
249 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
250 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
251 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
252 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
253 {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
254 {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
255 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
256 {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
257 {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
258 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
259 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
260 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
261 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
262 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
263 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
264 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
265 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
266 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
267 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
268 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
269 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
270 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
271 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
272 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
273 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
274 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
275 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
276 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
277 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
278 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
279 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
280 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
281 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
282 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
283 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
284 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
285 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
286 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
287 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
291 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
292 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
293 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
294 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
295 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
296 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
297 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
298 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
299 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
300 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
301 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
302 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
303 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
304 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
305 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
306 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
307 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
308 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
309 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
310 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
311 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
312 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
313 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
314 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
315 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
316 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
317 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
318 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
319 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
320 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
321 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
322 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
323 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
324 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
325 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
326 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
327 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
328 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
329 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
330 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
331 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
332 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
333 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
334 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
335 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
336 {pattern_eth_qinq_ipv4_tcp, ICE_SW_INSET_MAC_QINQ_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
337 {pattern_eth_qinq_ipv4_udp, ICE_SW_INSET_MAC_QINQ_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
338 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
339 {pattern_eth_qinq_ipv6_tcp, ICE_SW_INSET_MAC_QINQ_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
340 {pattern_eth_qinq_ipv6_udp, ICE_SW_INSET_MAC_QINQ_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
341 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
342 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
343 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
344 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
345 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
346 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
347 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
348 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
349 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
350 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
351 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
352 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
353 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
354 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
355 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
356 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
357 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
358 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
359 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
360 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE},
361 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
362 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE},
363 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
364 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE},
365 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
366 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE},
367 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
368 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE},
369 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
370 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE},
374 ice_switch_create(struct ice_adapter *ad,
375 struct rte_flow *flow,
377 struct rte_flow_error *error)
380 struct ice_pf *pf = &ad->pf;
381 struct ice_hw *hw = ICE_PF_TO_HW(pf);
382 struct ice_rule_query_data rule_added = {0};
383 struct ice_switch_filter_conf *filter_conf_ptr;
384 struct ice_adv_lkup_elem *list =
385 ((struct sw_meta *)meta)->list;
387 ((struct sw_meta *)meta)->lkups_num;
388 struct ice_adv_rule_info *rule_info =
389 &((struct sw_meta *)meta)->rule_info;
391 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
392 rte_flow_error_set(error, EINVAL,
393 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
394 "item number too large for rule");
398 rte_flow_error_set(error, EINVAL,
399 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
400 "lookup list should not be NULL");
404 if (ice_dcf_adminq_need_retry(ad)) {
405 rte_flow_error_set(error, EAGAIN,
406 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
411 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
413 filter_conf_ptr = rte_zmalloc("ice_switch_filter",
414 sizeof(struct ice_switch_filter_conf), 0);
415 if (!filter_conf_ptr) {
416 rte_flow_error_set(error, EINVAL,
417 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
418 "No memory for ice_switch_filter");
422 filter_conf_ptr->sw_query_data = rule_added;
424 filter_conf_ptr->vsi_num =
425 ice_get_hw_vsi_num(hw, rule_info->sw_act.vsi_handle);
426 filter_conf_ptr->lkups = list;
427 filter_conf_ptr->lkups_num = lkups_cnt;
428 filter_conf_ptr->rule_info = *rule_info;
430 filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
432 flow->rule = filter_conf_ptr;
434 if (ice_dcf_adminq_need_retry(ad))
439 rte_flow_error_set(error, -ret,
440 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
441 "switch filter create flow fail");
456 ice_switch_filter_rule_free(struct rte_flow *flow)
458 struct ice_switch_filter_conf *filter_conf_ptr =
459 (struct ice_switch_filter_conf *)flow->rule;
462 rte_free(filter_conf_ptr->lkups);
464 rte_free(filter_conf_ptr);
468 ice_switch_destroy(struct ice_adapter *ad,
469 struct rte_flow *flow,
470 struct rte_flow_error *error)
472 struct ice_hw *hw = &ad->hw;
474 struct ice_switch_filter_conf *filter_conf_ptr;
476 filter_conf_ptr = (struct ice_switch_filter_conf *)
479 if (!filter_conf_ptr ||
480 filter_conf_ptr->fltr_status == ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT) {
481 rte_flow_error_set(error, EINVAL,
482 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
484 " create by switch filter");
486 ice_switch_filter_rule_free(flow);
491 if (ice_dcf_adminq_need_retry(ad)) {
492 rte_flow_error_set(error, EAGAIN,
493 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
498 ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
500 if (ice_dcf_adminq_need_retry(ad))
505 rte_flow_error_set(error, -ret,
506 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
507 "fail to destroy switch filter rule");
511 ice_switch_filter_rule_free(flow);
516 ice_switch_parse_pattern(const struct rte_flow_item pattern[],
517 struct rte_flow_error *error,
518 struct ice_adv_lkup_elem *list,
520 enum ice_sw_tunnel_type *tun_type,
521 const struct ice_pattern_match_item *pattern_match_item)
523 const struct rte_flow_item *item = pattern;
524 enum rte_flow_item_type item_type;
525 const struct rte_flow_item_eth *eth_spec, *eth_mask;
526 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
527 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
528 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
529 const struct rte_flow_item_udp *udp_spec, *udp_mask;
530 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
531 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
532 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
533 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
534 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
535 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
537 const struct rte_flow_item_esp *esp_spec, *esp_mask;
538 const struct rte_flow_item_ah *ah_spec, *ah_mask;
539 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
540 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
541 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
542 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
543 uint64_t outer_input_set = ICE_INSET_NONE;
544 uint64_t inner_input_set = ICE_INSET_NONE;
545 uint64_t *input = NULL;
546 uint16_t input_set_byte = 0;
547 bool pppoe_elem_valid = 0;
548 bool pppoe_patt_valid = 0;
549 bool pppoe_prot_valid = 0;
550 bool inner_vlan_valid = 0;
551 bool outer_vlan_valid = 0;
552 bool tunnel_valid = 0;
553 bool profile_rule = 0;
554 bool nvgre_valid = 0;
555 bool vxlan_valid = 0;
562 bool gtpu_psc_valid = 0;
563 bool inner_ipv4_valid = 0;
564 bool inner_ipv6_valid = 0;
565 bool inner_tcp_valid = 0;
566 bool inner_udp_valid = 0;
567 uint16_t j, k, t = 0;
569 if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
570 *tun_type == ICE_NON_TUN_QINQ)
573 for (item = pattern; item->type !=
574 RTE_FLOW_ITEM_TYPE_END; item++) {
576 rte_flow_error_set(error, EINVAL,
577 RTE_FLOW_ERROR_TYPE_ITEM,
579 "Not support range");
582 item_type = item->type;
585 case RTE_FLOW_ITEM_TYPE_ETH:
586 eth_spec = item->spec;
587 eth_mask = item->mask;
588 if (eth_spec && eth_mask) {
589 const uint8_t *a = eth_mask->src.addr_bytes;
590 const uint8_t *b = eth_mask->dst.addr_bytes;
592 input = &inner_input_set;
594 input = &outer_input_set;
595 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
597 *input |= ICE_INSET_SMAC;
601 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
603 *input |= ICE_INSET_DMAC;
608 *input |= ICE_INSET_ETHERTYPE;
609 list[t].type = (tunnel_valid == 0) ?
610 ICE_MAC_OFOS : ICE_MAC_IL;
611 struct ice_ether_hdr *h;
612 struct ice_ether_hdr *m;
614 h = &list[t].h_u.eth_hdr;
615 m = &list[t].m_u.eth_hdr;
616 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
617 if (eth_mask->src.addr_bytes[j]) {
619 eth_spec->src.addr_bytes[j];
621 eth_mask->src.addr_bytes[j];
625 if (eth_mask->dst.addr_bytes[j]) {
627 eth_spec->dst.addr_bytes[j];
629 eth_mask->dst.addr_bytes[j];
636 if (eth_mask->type) {
637 list[t].type = ICE_ETYPE_OL;
638 list[t].h_u.ethertype.ethtype_id =
640 list[t].m_u.ethertype.ethtype_id =
648 case RTE_FLOW_ITEM_TYPE_IPV4:
649 ipv4_spec = item->spec;
650 ipv4_mask = item->mask;
652 inner_ipv4_valid = 1;
653 input = &inner_input_set;
656 input = &outer_input_set;
659 if (ipv4_spec && ipv4_mask) {
660 /* Check IPv4 mask and update input set */
661 if (ipv4_mask->hdr.version_ihl ||
662 ipv4_mask->hdr.total_length ||
663 ipv4_mask->hdr.packet_id ||
664 ipv4_mask->hdr.hdr_checksum) {
665 rte_flow_error_set(error, EINVAL,
666 RTE_FLOW_ERROR_TYPE_ITEM,
668 "Invalid IPv4 mask.");
672 if (ipv4_mask->hdr.src_addr)
673 *input |= ICE_INSET_IPV4_SRC;
674 if (ipv4_mask->hdr.dst_addr)
675 *input |= ICE_INSET_IPV4_DST;
676 if (ipv4_mask->hdr.time_to_live)
677 *input |= ICE_INSET_IPV4_TTL;
678 if (ipv4_mask->hdr.next_proto_id)
679 *input |= ICE_INSET_IPV4_PROTO;
680 if (ipv4_mask->hdr.type_of_service)
681 *input |= ICE_INSET_IPV4_TOS;
683 list[t].type = (tunnel_valid == 0) ?
684 ICE_IPV4_OFOS : ICE_IPV4_IL;
685 if (ipv4_mask->hdr.src_addr) {
686 list[t].h_u.ipv4_hdr.src_addr =
687 ipv4_spec->hdr.src_addr;
688 list[t].m_u.ipv4_hdr.src_addr =
689 ipv4_mask->hdr.src_addr;
692 if (ipv4_mask->hdr.dst_addr) {
693 list[t].h_u.ipv4_hdr.dst_addr =
694 ipv4_spec->hdr.dst_addr;
695 list[t].m_u.ipv4_hdr.dst_addr =
696 ipv4_mask->hdr.dst_addr;
699 if (ipv4_mask->hdr.time_to_live) {
700 list[t].h_u.ipv4_hdr.time_to_live =
701 ipv4_spec->hdr.time_to_live;
702 list[t].m_u.ipv4_hdr.time_to_live =
703 ipv4_mask->hdr.time_to_live;
706 if (ipv4_mask->hdr.next_proto_id) {
707 list[t].h_u.ipv4_hdr.protocol =
708 ipv4_spec->hdr.next_proto_id;
709 list[t].m_u.ipv4_hdr.protocol =
710 ipv4_mask->hdr.next_proto_id;
713 if ((ipv4_spec->hdr.next_proto_id &
714 ipv4_mask->hdr.next_proto_id) ==
715 ICE_IPV4_PROTO_NVGRE)
716 *tun_type = ICE_SW_TUN_AND_NON_TUN;
717 if (ipv4_mask->hdr.type_of_service) {
718 list[t].h_u.ipv4_hdr.tos =
719 ipv4_spec->hdr.type_of_service;
720 list[t].m_u.ipv4_hdr.tos =
721 ipv4_mask->hdr.type_of_service;
728 case RTE_FLOW_ITEM_TYPE_IPV6:
729 ipv6_spec = item->spec;
730 ipv6_mask = item->mask;
732 inner_ipv6_valid = 1;
733 input = &inner_input_set;
736 input = &outer_input_set;
739 if (ipv6_spec && ipv6_mask) {
740 if (ipv6_mask->hdr.payload_len) {
741 rte_flow_error_set(error, EINVAL,
742 RTE_FLOW_ERROR_TYPE_ITEM,
744 "Invalid IPv6 mask");
748 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
749 if (ipv6_mask->hdr.src_addr[j]) {
750 *input |= ICE_INSET_IPV6_SRC;
754 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
755 if (ipv6_mask->hdr.dst_addr[j]) {
756 *input |= ICE_INSET_IPV6_DST;
760 if (ipv6_mask->hdr.proto)
761 *input |= ICE_INSET_IPV6_NEXT_HDR;
762 if (ipv6_mask->hdr.hop_limits)
763 *input |= ICE_INSET_IPV6_HOP_LIMIT;
764 if (ipv6_mask->hdr.vtc_flow &
765 rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
766 *input |= ICE_INSET_IPV6_TC;
768 list[t].type = (tunnel_valid == 0) ?
769 ICE_IPV6_OFOS : ICE_IPV6_IL;
770 struct ice_ipv6_hdr *f;
771 struct ice_ipv6_hdr *s;
772 f = &list[t].h_u.ipv6_hdr;
773 s = &list[t].m_u.ipv6_hdr;
774 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
775 if (ipv6_mask->hdr.src_addr[j]) {
777 ipv6_spec->hdr.src_addr[j];
779 ipv6_mask->hdr.src_addr[j];
782 if (ipv6_mask->hdr.dst_addr[j]) {
784 ipv6_spec->hdr.dst_addr[j];
786 ipv6_mask->hdr.dst_addr[j];
790 if (ipv6_mask->hdr.proto) {
792 ipv6_spec->hdr.proto;
794 ipv6_mask->hdr.proto;
797 if (ipv6_mask->hdr.hop_limits) {
799 ipv6_spec->hdr.hop_limits;
801 ipv6_mask->hdr.hop_limits;
804 if (ipv6_mask->hdr.vtc_flow &
806 (RTE_IPV6_HDR_TC_MASK)) {
807 struct ice_le_ver_tc_flow vtf;
808 vtf.u.fld.version = 0;
809 vtf.u.fld.flow_label = 0;
810 vtf.u.fld.tc = (rte_be_to_cpu_32
811 (ipv6_spec->hdr.vtc_flow) &
812 RTE_IPV6_HDR_TC_MASK) >>
813 RTE_IPV6_HDR_TC_SHIFT;
814 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
815 vtf.u.fld.tc = (rte_be_to_cpu_32
816 (ipv6_mask->hdr.vtc_flow) &
817 RTE_IPV6_HDR_TC_MASK) >>
818 RTE_IPV6_HDR_TC_SHIFT;
819 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
826 case RTE_FLOW_ITEM_TYPE_UDP:
827 udp_spec = item->spec;
828 udp_mask = item->mask;
831 input = &inner_input_set;
834 input = &outer_input_set;
837 if (udp_spec && udp_mask) {
838 /* Check UDP mask and update input set*/
839 if (udp_mask->hdr.dgram_len ||
840 udp_mask->hdr.dgram_cksum) {
841 rte_flow_error_set(error, EINVAL,
842 RTE_FLOW_ERROR_TYPE_ITEM,
848 if (udp_mask->hdr.src_port)
849 *input |= ICE_INSET_UDP_SRC_PORT;
850 if (udp_mask->hdr.dst_port)
851 *input |= ICE_INSET_UDP_DST_PORT;
853 if (*tun_type == ICE_SW_TUN_VXLAN &&
855 list[t].type = ICE_UDP_OF;
857 list[t].type = ICE_UDP_ILOS;
858 if (udp_mask->hdr.src_port) {
859 list[t].h_u.l4_hdr.src_port =
860 udp_spec->hdr.src_port;
861 list[t].m_u.l4_hdr.src_port =
862 udp_mask->hdr.src_port;
865 if (udp_mask->hdr.dst_port) {
866 list[t].h_u.l4_hdr.dst_port =
867 udp_spec->hdr.dst_port;
868 list[t].m_u.l4_hdr.dst_port =
869 udp_mask->hdr.dst_port;
876 case RTE_FLOW_ITEM_TYPE_TCP:
877 tcp_spec = item->spec;
878 tcp_mask = item->mask;
881 input = &inner_input_set;
884 input = &outer_input_set;
887 if (tcp_spec && tcp_mask) {
888 /* Check TCP mask and update input set */
889 if (tcp_mask->hdr.sent_seq ||
890 tcp_mask->hdr.recv_ack ||
891 tcp_mask->hdr.data_off ||
892 tcp_mask->hdr.tcp_flags ||
893 tcp_mask->hdr.rx_win ||
894 tcp_mask->hdr.cksum ||
895 tcp_mask->hdr.tcp_urp) {
896 rte_flow_error_set(error, EINVAL,
897 RTE_FLOW_ERROR_TYPE_ITEM,
903 if (tcp_mask->hdr.src_port)
904 *input |= ICE_INSET_TCP_SRC_PORT;
905 if (tcp_mask->hdr.dst_port)
906 *input |= ICE_INSET_TCP_DST_PORT;
907 list[t].type = ICE_TCP_IL;
908 if (tcp_mask->hdr.src_port) {
909 list[t].h_u.l4_hdr.src_port =
910 tcp_spec->hdr.src_port;
911 list[t].m_u.l4_hdr.src_port =
912 tcp_mask->hdr.src_port;
915 if (tcp_mask->hdr.dst_port) {
916 list[t].h_u.l4_hdr.dst_port =
917 tcp_spec->hdr.dst_port;
918 list[t].m_u.l4_hdr.dst_port =
919 tcp_mask->hdr.dst_port;
926 case RTE_FLOW_ITEM_TYPE_SCTP:
927 sctp_spec = item->spec;
928 sctp_mask = item->mask;
929 if (sctp_spec && sctp_mask) {
930 /* Check SCTP mask and update input set */
931 if (sctp_mask->hdr.cksum) {
932 rte_flow_error_set(error, EINVAL,
933 RTE_FLOW_ERROR_TYPE_ITEM,
935 "Invalid SCTP mask");
939 input = &inner_input_set;
941 input = &outer_input_set;
943 if (sctp_mask->hdr.src_port)
944 *input |= ICE_INSET_SCTP_SRC_PORT;
945 if (sctp_mask->hdr.dst_port)
946 *input |= ICE_INSET_SCTP_DST_PORT;
948 list[t].type = ICE_SCTP_IL;
949 if (sctp_mask->hdr.src_port) {
950 list[t].h_u.sctp_hdr.src_port =
951 sctp_spec->hdr.src_port;
952 list[t].m_u.sctp_hdr.src_port =
953 sctp_mask->hdr.src_port;
956 if (sctp_mask->hdr.dst_port) {
957 list[t].h_u.sctp_hdr.dst_port =
958 sctp_spec->hdr.dst_port;
959 list[t].m_u.sctp_hdr.dst_port =
960 sctp_mask->hdr.dst_port;
967 case RTE_FLOW_ITEM_TYPE_VXLAN:
968 vxlan_spec = item->spec;
969 vxlan_mask = item->mask;
970 /* Check if VXLAN item is used to describe protocol.
971 * If yes, both spec and mask should be NULL.
972 * If no, both spec and mask shouldn't be NULL.
974 if ((!vxlan_spec && vxlan_mask) ||
975 (vxlan_spec && !vxlan_mask)) {
976 rte_flow_error_set(error, EINVAL,
977 RTE_FLOW_ERROR_TYPE_ITEM,
979 "Invalid VXLAN item");
984 input = &inner_input_set;
985 if (vxlan_spec && vxlan_mask) {
986 list[t].type = ICE_VXLAN;
987 if (vxlan_mask->vni[0] ||
988 vxlan_mask->vni[1] ||
989 vxlan_mask->vni[2]) {
990 list[t].h_u.tnl_hdr.vni =
991 (vxlan_spec->vni[2] << 16) |
992 (vxlan_spec->vni[1] << 8) |
994 list[t].m_u.tnl_hdr.vni =
995 (vxlan_mask->vni[2] << 16) |
996 (vxlan_mask->vni[1] << 8) |
998 *input |= ICE_INSET_VXLAN_VNI;
1005 case RTE_FLOW_ITEM_TYPE_NVGRE:
1006 nvgre_spec = item->spec;
1007 nvgre_mask = item->mask;
1008 /* Check if NVGRE item is used to describe protocol.
1009 * If yes, both spec and mask should be NULL.
1010 * If no, both spec and mask shouldn't be NULL.
1012 if ((!nvgre_spec && nvgre_mask) ||
1013 (nvgre_spec && !nvgre_mask)) {
1014 rte_flow_error_set(error, EINVAL,
1015 RTE_FLOW_ERROR_TYPE_ITEM,
1017 "Invalid NVGRE item");
1022 input = &inner_input_set;
1023 if (nvgre_spec && nvgre_mask) {
1024 list[t].type = ICE_NVGRE;
1025 if (nvgre_mask->tni[0] ||
1026 nvgre_mask->tni[1] ||
1027 nvgre_mask->tni[2]) {
1028 list[t].h_u.nvgre_hdr.tni_flow =
1029 (nvgre_spec->tni[2] << 16) |
1030 (nvgre_spec->tni[1] << 8) |
1032 list[t].m_u.nvgre_hdr.tni_flow =
1033 (nvgre_mask->tni[2] << 16) |
1034 (nvgre_mask->tni[1] << 8) |
1036 *input |= ICE_INSET_NVGRE_TNI;
1037 input_set_byte += 2;
1043 case RTE_FLOW_ITEM_TYPE_VLAN:
1044 vlan_spec = item->spec;
1045 vlan_mask = item->mask;
1046 /* Check if VLAN item is used to describe protocol.
1047 * If yes, both spec and mask should be NULL.
1048 * If no, both spec and mask shouldn't be NULL.
1050 if ((!vlan_spec && vlan_mask) ||
1051 (vlan_spec && !vlan_mask)) {
1052 rte_flow_error_set(error, EINVAL,
1053 RTE_FLOW_ERROR_TYPE_ITEM,
1055 "Invalid VLAN item");
1060 if (!outer_vlan_valid)
1061 outer_vlan_valid = 1;
1063 inner_vlan_valid = 1;
1066 input = &outer_input_set;
1068 if (vlan_spec && vlan_mask) {
1070 if (!inner_vlan_valid) {
1071 list[t].type = ICE_VLAN_EX;
1073 ICE_INSET_VLAN_OUTER;
1075 list[t].type = ICE_VLAN_IN;
1077 ICE_INSET_VLAN_INNER;
1080 list[t].type = ICE_VLAN_OFOS;
1081 *input |= ICE_INSET_VLAN_INNER;
1084 if (vlan_mask->tci) {
1085 list[t].h_u.vlan_hdr.vlan =
1087 list[t].m_u.vlan_hdr.vlan =
1089 input_set_byte += 2;
1091 if (vlan_mask->inner_type) {
1092 rte_flow_error_set(error, EINVAL,
1093 RTE_FLOW_ERROR_TYPE_ITEM,
1095 "Invalid VLAN input set.");
1102 case RTE_FLOW_ITEM_TYPE_PPPOED:
1103 case RTE_FLOW_ITEM_TYPE_PPPOES:
1104 pppoe_spec = item->spec;
1105 pppoe_mask = item->mask;
1106 /* Check if PPPoE item is used to describe protocol.
1107 * If yes, both spec and mask should be NULL.
1108 * If no, both spec and mask shouldn't be NULL.
1110 if ((!pppoe_spec && pppoe_mask) ||
1111 (pppoe_spec && !pppoe_mask)) {
1112 rte_flow_error_set(error, EINVAL,
1113 RTE_FLOW_ERROR_TYPE_ITEM,
1115 "Invalid pppoe item");
1118 pppoe_patt_valid = 1;
1119 input = &outer_input_set;
1120 if (pppoe_spec && pppoe_mask) {
1121 /* Check pppoe mask and update input set */
1122 if (pppoe_mask->length ||
1124 pppoe_mask->version_type) {
1125 rte_flow_error_set(error, EINVAL,
1126 RTE_FLOW_ERROR_TYPE_ITEM,
1128 "Invalid pppoe mask");
1131 list[t].type = ICE_PPPOE;
1132 if (pppoe_mask->session_id) {
1133 list[t].h_u.pppoe_hdr.session_id =
1134 pppoe_spec->session_id;
1135 list[t].m_u.pppoe_hdr.session_id =
1136 pppoe_mask->session_id;
1137 *input |= ICE_INSET_PPPOE_SESSION;
1138 input_set_byte += 2;
1141 pppoe_elem_valid = 1;
1145 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1146 pppoe_proto_spec = item->spec;
1147 pppoe_proto_mask = item->mask;
1148 /* Check if PPPoE optional proto_id item
1149 * is used to describe protocol.
1150 * If yes, both spec and mask should be NULL.
1151 * If no, both spec and mask shouldn't be NULL.
1153 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1154 (pppoe_proto_spec && !pppoe_proto_mask)) {
1155 rte_flow_error_set(error, EINVAL,
1156 RTE_FLOW_ERROR_TYPE_ITEM,
1158 "Invalid pppoe proto item");
1161 input = &outer_input_set;
1162 if (pppoe_proto_spec && pppoe_proto_mask) {
1163 if (pppoe_elem_valid)
1165 list[t].type = ICE_PPPOE;
1166 if (pppoe_proto_mask->proto_id) {
1167 list[t].h_u.pppoe_hdr.ppp_prot_id =
1168 pppoe_proto_spec->proto_id;
1169 list[t].m_u.pppoe_hdr.ppp_prot_id =
1170 pppoe_proto_mask->proto_id;
1171 *input |= ICE_INSET_PPPOE_PROTO;
1172 input_set_byte += 2;
1173 pppoe_prot_valid = 1;
1175 if ((pppoe_proto_mask->proto_id &
1176 pppoe_proto_spec->proto_id) !=
1177 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1178 (pppoe_proto_mask->proto_id &
1179 pppoe_proto_spec->proto_id) !=
1180 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1181 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1183 *tun_type = ICE_SW_TUN_PPPOE;
1189 case RTE_FLOW_ITEM_TYPE_ESP:
1190 esp_spec = item->spec;
1191 esp_mask = item->mask;
1192 if ((esp_spec && !esp_mask) ||
1193 (!esp_spec && esp_mask)) {
1194 rte_flow_error_set(error, EINVAL,
1195 RTE_FLOW_ERROR_TYPE_ITEM,
1197 "Invalid esp item");
1200 /* Check esp mask and update input set */
1201 if (esp_mask && esp_mask->hdr.seq) {
1202 rte_flow_error_set(error, EINVAL,
1203 RTE_FLOW_ERROR_TYPE_ITEM,
1205 "Invalid esp mask");
1208 input = &outer_input_set;
1209 if (!esp_spec && !esp_mask && !(*input)) {
1211 if (ipv6_valid && udp_valid)
1213 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1214 else if (ipv6_valid)
1215 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1216 else if (ipv4_valid)
1218 } else if (esp_spec && esp_mask &&
1221 list[t].type = ICE_NAT_T;
1223 list[t].type = ICE_ESP;
1224 list[t].h_u.esp_hdr.spi =
1226 list[t].m_u.esp_hdr.spi =
1228 *input |= ICE_INSET_ESP_SPI;
1229 input_set_byte += 4;
1233 if (!profile_rule) {
1234 if (ipv6_valid && udp_valid)
1235 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1236 else if (ipv4_valid && udp_valid)
1237 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1238 else if (ipv6_valid)
1239 *tun_type = ICE_SW_TUN_IPV6_ESP;
1240 else if (ipv4_valid)
1241 *tun_type = ICE_SW_TUN_IPV4_ESP;
1245 case RTE_FLOW_ITEM_TYPE_AH:
1246 ah_spec = item->spec;
1247 ah_mask = item->mask;
1248 if ((ah_spec && !ah_mask) ||
1249 (!ah_spec && ah_mask)) {
1250 rte_flow_error_set(error, EINVAL,
1251 RTE_FLOW_ERROR_TYPE_ITEM,
1256 /* Check ah mask and update input set */
1258 (ah_mask->next_hdr ||
1259 ah_mask->payload_len ||
1261 ah_mask->reserved)) {
1262 rte_flow_error_set(error, EINVAL,
1263 RTE_FLOW_ERROR_TYPE_ITEM,
1269 input = &outer_input_set;
1270 if (!ah_spec && !ah_mask && !(*input)) {
1272 if (ipv6_valid && udp_valid)
1274 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1275 else if (ipv6_valid)
1276 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1277 else if (ipv4_valid)
1279 } else if (ah_spec && ah_mask &&
1281 list[t].type = ICE_AH;
1282 list[t].h_u.ah_hdr.spi =
1284 list[t].m_u.ah_hdr.spi =
1286 *input |= ICE_INSET_AH_SPI;
1287 input_set_byte += 4;
1291 if (!profile_rule) {
1294 else if (ipv6_valid)
1295 *tun_type = ICE_SW_TUN_IPV6_AH;
1296 else if (ipv4_valid)
1297 *tun_type = ICE_SW_TUN_IPV4_AH;
1301 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1302 l2tp_spec = item->spec;
1303 l2tp_mask = item->mask;
1304 if ((l2tp_spec && !l2tp_mask) ||
1305 (!l2tp_spec && l2tp_mask)) {
1306 rte_flow_error_set(error, EINVAL,
1307 RTE_FLOW_ERROR_TYPE_ITEM,
1309 "Invalid l2tp item");
1313 input = &outer_input_set;
1314 if (!l2tp_spec && !l2tp_mask && !(*input)) {
1317 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1318 else if (ipv4_valid)
1320 } else if (l2tp_spec && l2tp_mask &&
1321 l2tp_mask->session_id){
1322 list[t].type = ICE_L2TPV3;
1323 list[t].h_u.l2tpv3_sess_hdr.session_id =
1324 l2tp_spec->session_id;
1325 list[t].m_u.l2tpv3_sess_hdr.session_id =
1326 l2tp_mask->session_id;
1327 *input |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1328 input_set_byte += 4;
1332 if (!profile_rule) {
1335 ICE_SW_TUN_IPV6_L2TPV3;
1336 else if (ipv4_valid)
1338 ICE_SW_TUN_IPV4_L2TPV3;
1342 case RTE_FLOW_ITEM_TYPE_PFCP:
1343 pfcp_spec = item->spec;
1344 pfcp_mask = item->mask;
1345 /* Check if PFCP item is used to describe protocol.
1346 * If yes, both spec and mask should be NULL.
1347 * If no, both spec and mask shouldn't be NULL.
1349 if ((!pfcp_spec && pfcp_mask) ||
1350 (pfcp_spec && !pfcp_mask)) {
1351 rte_flow_error_set(error, EINVAL,
1352 RTE_FLOW_ERROR_TYPE_ITEM,
1354 "Invalid PFCP item");
1357 if (pfcp_spec && pfcp_mask) {
1358 /* Check pfcp mask and update input set */
1359 if (pfcp_mask->msg_type ||
1360 pfcp_mask->msg_len ||
1362 rte_flow_error_set(error, EINVAL,
1363 RTE_FLOW_ERROR_TYPE_ITEM,
1365 "Invalid pfcp mask");
1368 if (pfcp_mask->s_field &&
1369 pfcp_spec->s_field == 0x01 &&
1372 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1373 else if (pfcp_mask->s_field &&
1374 pfcp_spec->s_field == 0x01)
1376 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1377 else if (pfcp_mask->s_field &&
1378 !pfcp_spec->s_field &&
1381 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1382 else if (pfcp_mask->s_field &&
1383 !pfcp_spec->s_field)
1385 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1391 case RTE_FLOW_ITEM_TYPE_GTPU:
1392 gtp_spec = item->spec;
1393 gtp_mask = item->mask;
1394 if (gtp_spec && !gtp_mask) {
1395 rte_flow_error_set(error, EINVAL,
1396 RTE_FLOW_ERROR_TYPE_ITEM,
1398 "Invalid GTP item");
1401 if (gtp_spec && gtp_mask) {
1402 if (gtp_mask->v_pt_rsv_flags ||
1403 gtp_mask->msg_type ||
1404 gtp_mask->msg_len) {
1405 rte_flow_error_set(error, EINVAL,
1406 RTE_FLOW_ERROR_TYPE_ITEM,
1408 "Invalid GTP mask");
1411 input = &outer_input_set;
1413 *input |= ICE_INSET_GTPU_TEID;
1414 list[t].type = ICE_GTP;
1415 list[t].h_u.gtp_hdr.teid =
1417 list[t].m_u.gtp_hdr.teid =
1419 input_set_byte += 4;
1426 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1427 gtp_psc_spec = item->spec;
1428 gtp_psc_mask = item->mask;
1429 if (gtp_psc_spec && !gtp_psc_mask) {
1430 rte_flow_error_set(error, EINVAL,
1431 RTE_FLOW_ERROR_TYPE_ITEM,
1433 "Invalid GTPU_EH item");
1436 if (gtp_psc_spec && gtp_psc_mask) {
1437 if (gtp_psc_mask->hdr.type) {
1438 rte_flow_error_set(error, EINVAL,
1439 RTE_FLOW_ERROR_TYPE_ITEM,
1441 "Invalid GTPU_EH mask");
1444 input = &outer_input_set;
1445 if (gtp_psc_mask->hdr.qfi)
1446 *input |= ICE_INSET_GTPU_QFI;
1447 list[t].type = ICE_GTP;
1448 list[t].h_u.gtp_hdr.qfi =
1449 gtp_psc_spec->hdr.qfi;
1450 list[t].m_u.gtp_hdr.qfi =
1451 gtp_psc_mask->hdr.qfi;
1452 input_set_byte += 1;
1458 case RTE_FLOW_ITEM_TYPE_VOID:
1462 rte_flow_error_set(error, EINVAL,
1463 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1464 "Invalid pattern item.");
1469 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1470 inner_vlan_valid && outer_vlan_valid)
1471 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1472 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1473 inner_vlan_valid && outer_vlan_valid)
1474 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1475 else if (*tun_type == ICE_NON_TUN &&
1476 inner_vlan_valid && outer_vlan_valid)
1477 *tun_type = ICE_NON_TUN_QINQ;
1478 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1479 inner_vlan_valid && outer_vlan_valid)
1480 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1482 if (pppoe_patt_valid && !pppoe_prot_valid) {
1483 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1484 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1485 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1486 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1487 else if (inner_vlan_valid && outer_vlan_valid)
1488 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1489 else if (ipv6_valid && udp_valid)
1490 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1491 else if (ipv6_valid && tcp_valid)
1492 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1493 else if (ipv4_valid && udp_valid)
1494 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1495 else if (ipv4_valid && tcp_valid)
1496 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1497 else if (ipv6_valid)
1498 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1499 else if (ipv4_valid)
1500 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1502 *tun_type = ICE_SW_TUN_PPPOE;
1505 if (gtpu_valid && gtpu_psc_valid) {
1506 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1507 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1508 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1509 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1510 else if (ipv4_valid && inner_ipv4_valid)
1511 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1512 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1513 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1514 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1515 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1516 else if (ipv4_valid && inner_ipv6_valid)
1517 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1518 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1519 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1520 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1521 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1522 else if (ipv6_valid && inner_ipv4_valid)
1523 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1524 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1525 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1526 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1527 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1528 else if (ipv6_valid && inner_ipv6_valid)
1529 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1530 else if (ipv4_valid)
1531 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1532 else if (ipv6_valid)
1533 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1534 } else if (gtpu_valid) {
1535 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1536 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1537 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1538 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1539 else if (ipv4_valid && inner_ipv4_valid)
1540 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1541 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1542 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1543 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1544 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1545 else if (ipv4_valid && inner_ipv6_valid)
1546 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1547 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1548 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1549 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1550 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1551 else if (ipv6_valid && inner_ipv4_valid)
1552 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1553 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1554 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1555 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1556 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1557 else if (ipv6_valid && inner_ipv6_valid)
1558 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1559 else if (ipv4_valid)
1560 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1561 else if (ipv6_valid)
1562 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1565 if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1566 *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1567 for (k = 0; k < t; k++) {
1568 if (list[k].type == ICE_GTP)
1569 list[k].type = ICE_GTP_NO_PAY;
1573 if (*tun_type == ICE_NON_TUN) {
1575 *tun_type = ICE_SW_TUN_VXLAN;
1576 else if (nvgre_valid)
1577 *tun_type = ICE_SW_TUN_NVGRE;
1578 else if (ipv4_valid && tcp_valid)
1579 *tun_type = ICE_SW_IPV4_TCP;
1580 else if (ipv4_valid && udp_valid)
1581 *tun_type = ICE_SW_IPV4_UDP;
1582 else if (ipv6_valid && tcp_valid)
1583 *tun_type = ICE_SW_IPV6_TCP;
1584 else if (ipv6_valid && udp_valid)
1585 *tun_type = ICE_SW_IPV6_UDP;
1588 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1589 rte_flow_error_set(error, EINVAL,
1590 RTE_FLOW_ERROR_TYPE_ITEM,
1592 "too much input set");
1599 if ((!outer_input_set && !inner_input_set &&
1600 !ice_is_prof_rule(*tun_type)) || (outer_input_set &
1601 ~pattern_match_item->input_set_mask_o) ||
1602 (inner_input_set & ~pattern_match_item->input_set_mask_i))
/* Parse rte_flow actions when the port is driven by a DCF (Device Config
 * Function). Only VF (forward to a VF's VSI) and DROP actions are accepted;
 * the result is written into @rule_info->sw_act.
 * NOTE(review): this extract is missing physical lines (opening brace,
 * break/return statements); visible tokens are kept byte-identical.
 */
1609 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1610 const struct rte_flow_action *actions,
1612 struct rte_flow_error *error,
1613 struct ice_adv_rule_info *rule_info)
1615 const struct rte_flow_action_vf *act_vf;
1616 const struct rte_flow_action *action;
1617 enum rte_flow_action_type action_type;
1619 for (action = actions; action->type !=
1620 RTE_FLOW_ACTION_TYPE_END; action++) {
1621 action_type = action->type;
1622 switch (action_type) {
1623 case RTE_FLOW_ACTION_TYPE_VF:
1624 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1625 act_vf = action->conf;
/* A VF id beyond the number of VFs is only valid when
 * "original" is set (i.e. forward to the DCF itself).
 */
1627 if (act_vf->id >= ad->real_hw.num_vfs &&
1628 !act_vf->original) {
1629 rte_flow_error_set(error,
1630 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
/* original==1: target the DCF's own function; otherwise use
 * the VF id as the VSI handle.
 */
1636 if (act_vf->original)
1637 rule_info->sw_act.vsi_handle =
1638 ad->real_hw.avf.bus.func;
1640 rule_info->sw_act.vsi_handle = act_vf->id;
1642 case RTE_FLOW_ACTION_TYPE_DROP:
1644 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1648 rte_flow_error_set(error,
1649 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1651 "Invalid action type");
/* Rule matches Rx traffic sourced from the chosen VSI. */
1656 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1657 rule_info->sw_act.flag = ICE_FLTR_RX;
1659 /* 0 denotes lowest priority of recipe and highest priority
1660 * of rte_flow. Change rte_flow priority into recipe priority.
1662 rule_info->priority = ICE_SW_PRI_BASE - priority;
/* Parse rte_flow actions for the PF path (non-DCF). Supports RSS used as a
 * queue-group action, QUEUE, DROP and VOID; fills @rule_info->sw_act.
 * Queue groups must have a size from {2,4,8,16,32,64,128}, be contiguous,
 * and fit inside the device's configured Rx queues.
 * NOTE(review): extract is missing lines (braces, breaks, error gotos);
 * visible tokens kept byte-identical.
 */
1668 ice_switch_parse_action(struct ice_pf *pf,
1669 const struct rte_flow_action *actions,
1671 struct rte_flow_error *error,
1672 struct ice_adv_rule_info *rule_info)
1674 struct ice_vsi *vsi = pf->main_vsi;
1675 struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
1676 const struct rte_flow_action_queue *act_q;
1677 const struct rte_flow_action_rss *act_qgrop;
1678 uint16_t base_queue, i;
1679 const struct rte_flow_action *action;
1680 enum rte_flow_action_type action_type;
/* Legal queue-group sizes (powers of two, 2..128). */
1681 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1682 2, 4, 8, 16, 32, 64, 128};
/* HW queue ids are offset by the PF/VSI base queue. */
1684 base_queue = pf->base_queue + vsi->base_queue;
1685 for (action = actions; action->type !=
1686 RTE_FLOW_ACTION_TYPE_END; action++) {
1687 action_type = action->type;
1688 switch (action_type) {
1689 case RTE_FLOW_ACTION_TYPE_RSS:
1690 act_qgrop = action->conf;
1691 if (act_qgrop->queue_num <= 1)
1693 rule_info->sw_act.fltr_act =
1695 rule_info->sw_act.fwd_id.q_id =
1696 base_queue + act_qgrop->queue[0];
/* Group size must match one of the legal sizes above. */
1697 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1698 if (act_qgrop->queue_num ==
1699 valid_qgrop_number[i])
1702 if (i == MAX_QGRP_NUM_TYPE)
/* Group must not run past the configured Rx queues. */
1704 if ((act_qgrop->queue[0] +
1705 act_qgrop->queue_num) >
1706 dev_data->nb_rx_queues)
/* Queues inside the group must be consecutive. */
1708 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1709 if (act_qgrop->queue[i + 1] !=
1710 act_qgrop->queue[i] + 1)
1712 rule_info->sw_act.qgrp_size =
1713 act_qgrop->queue_num;
1715 case RTE_FLOW_ACTION_TYPE_QUEUE:
1716 act_q = action->conf;
1717 if (act_q->index >= dev_data->nb_rx_queues)
1719 rule_info->sw_act.fltr_act =
1721 rule_info->sw_act.fwd_id.q_id =
1722 base_queue + act_q->index;
1725 case RTE_FLOW_ACTION_TYPE_DROP:
1726 rule_info->sw_act.fltr_act =
1730 case RTE_FLOW_ACTION_TYPE_VOID:
1738 rule_info->sw_act.vsi_handle = vsi->idx;
1740 rule_info->sw_act.src = vsi->idx;
1741 /* 0 denotes lowest priority of recipe and highest priority
1742 * of rte_flow. Change rte_flow priority into recipe priority.
1744 rule_info->priority = ICE_SW_PRI_BASE - priority;
/* Error labels below report which validation failed. */
1749 rte_flow_error_set(error,
1750 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1752 "Invalid action type or queue number");
1756 rte_flow_error_set(error,
1757 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1759 "Invalid queue region indexes");
1763 rte_flow_error_set(error,
1764 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1766 "Discontinuous queue region");
/* Validate the action list: only VF/RSS/QUEUE/DROP (counted) and VOID
 * (ignored) are allowed, and exactly one counted action must be present.
 * NOTE(review): extract is missing lines (brace, counter increment,
 * returns); visible tokens kept byte-identical.
 */
1771 ice_switch_check_action(const struct rte_flow_action *actions,
1772 struct rte_flow_error *error)
1774 const struct rte_flow_action *action;
1775 enum rte_flow_action_type action_type;
1776 uint16_t actions_num = 0;
1778 for (action = actions; action->type !=
1779 RTE_FLOW_ACTION_TYPE_END; action++) {
1780 action_type = action->type;
1781 switch (action_type) {
1782 case RTE_FLOW_ACTION_TYPE_VF:
1783 case RTE_FLOW_ACTION_TYPE_RSS:
1784 case RTE_FLOW_ACTION_TYPE_QUEUE:
1785 case RTE_FLOW_ACTION_TYPE_DROP:
1788 case RTE_FLOW_ACTION_TYPE_VOID:
1791 rte_flow_error_set(error,
1792 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1794 "Invalid action type");
/* A switch rule carries exactly one fate action. */
1799 if (actions_num != 1) {
1800 rte_flow_error_set(error,
1801 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1803 "Invalid action number");
/* Top-level parse entry for the switch engine: walk the pattern once to
 * size the lookup list and pre-detect QinQ / tun-and-non-tun, allocate the
 * lookup array and sw_meta, match the pattern against the supported table,
 * parse items into lookups, validate and parse actions, then hand the
 * filled sw_meta back through @meta.
 * NOTE(review): extract is missing lines (braces, goto error paths,
 * several statements); visible tokens kept byte-identical.
 */
1811 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1812 struct ice_pattern_match_item *array,
1814 const struct rte_flow_item pattern[],
1815 const struct rte_flow_action actions[],
1818 struct rte_flow_error *error)
1820 struct ice_pf *pf = &ad->pf;
1822 struct sw_meta *sw_meta_ptr = NULL;
1823 struct ice_adv_rule_info rule_info;
1824 struct ice_adv_lkup_elem *list = NULL;
1825 uint16_t lkups_num = 0;
1826 const struct rte_flow_item *item = pattern;
1827 uint16_t item_num = 0;
1828 uint16_t vlan_num = 0;
1829 enum ice_sw_tunnel_type tun_type =
1831 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass: count items, spot fully-masked ether type (match both
 * tunneled and plain traffic) and count VLAN layers for QinQ.
 */
1833 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1835 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1836 const struct rte_flow_item_eth *eth_mask;
1838 eth_mask = item->mask;
1841 if (eth_mask->type == UINT16_MAX)
1842 tun_type = ICE_SW_TUN_AND_NON_TUN;
1845 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1848 /* reserve one more memory slot for ETH which may
1849 * consume 2 lookup items.
1851 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Two VLAN layers => QinQ variants of the tunnel type. */
1855 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1856 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1857 else if (vlan_num == 2)
1858 tun_type = ICE_NON_TUN_QINQ;
1860 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1862 rte_flow_error_set(error, EINVAL,
1863 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1864 "No memory for PMD internal items");
1869 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1871 rte_flow_error_set(error, EINVAL,
1872 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1873 "No memory for sw_pattern_meta_ptr");
1877 pattern_match_item =
1878 ice_search_pattern_match_item(ad, pattern, array, array_len,
1880 if (!pattern_match_item) {
1881 rte_flow_error_set(error, EINVAL,
1882 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1883 "Invalid input pattern");
1887 if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
1888 &tun_type, pattern_match_item)) {
1889 rte_flow_error_set(error, EINVAL,
1890 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1892 "Invalid input set");
1896 memset(&rule_info, 0, sizeof(rule_info));
1897 rule_info.tun_type = tun_type;
1899 ret = ice_switch_check_action(actions, error);
/* DCF ports use the VF-action parser; PF ports the queue/drop one. */
1903 if (ad->hw.dcf_enabled)
1904 ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
1907 ret = ice_switch_parse_action(pf, actions, priority, error,
/* Success: hand ownership of list/sw_meta_ptr to the caller. */
1914 *meta = sw_meta_ptr;
1915 ((struct sw_meta *)*meta)->list = list;
1916 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1917 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error path: release everything allocated above. */
1920 rte_free(sw_meta_ptr);
1923 rte_free(pattern_match_item);
1929 rte_free(sw_meta_ptr);
1930 rte_free(pattern_match_item);
/* Query callback for the switch engine: the switch filter has no counter
 * support, so any count query is rejected with EINVAL.
 */
1936 ice_switch_query(struct ice_adapter *ad __rte_unused,
1937 struct rte_flow *flow __rte_unused,
1938 struct rte_flow_query_count *count __rte_unused,
1939 struct rte_flow_error *error)
1941 rte_flow_error_set(error, EINVAL,
1942 RTE_FLOW_ERROR_TYPE_HANDLE,
1944 "count action not supported by switch filter");
/* Re-program an existing switch rule after a VSI redirect (e.g. the target
 * VF was reset and got a new VSI number): remove the old HW rule, update
 * the VSI context, then replay the rule. filter_conf_ptr->fltr_status acts
 * as a small state machine so a failure at either step can be retried.
 * NOTE(review): extract is missing lines (braces, returns, some
 * assignments); visible tokens kept byte-identical.
 */
1950 ice_switch_redirect(struct ice_adapter *ad,
1951 struct rte_flow *flow,
1952 struct ice_flow_redirect *rd)
1954 struct ice_rule_query_data *rdata;
1955 struct ice_switch_filter_conf *filter_conf_ptr =
1956 (struct ice_switch_filter_conf *)flow->rule;
1957 struct ice_rule_query_data added_rdata = { 0 };
1958 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1959 struct ice_adv_lkup_elem *lkups_ref = NULL;
1960 struct ice_adv_lkup_elem *lkups_dp = NULL;
1961 struct LIST_HEAD_TYPE *list_head;
1962 struct ice_adv_rule_info rinfo;
1963 struct ice_hw *hw = &ad->hw;
1964 struct ice_switch_info *sw;
1968 rdata = &filter_conf_ptr->sw_query_data;
/* Only rules targeting the redirected VSI are affected. */
1970 if (rdata->vsi_handle != rd->vsi_handle)
1973 sw = hw->switch_info;
1974 if (!sw->recp_list[rdata->rid].recp_created)
1977 if (rd->type != ICE_FLOW_REDIRECT_VSI)
1980 switch (filter_conf_ptr->fltr_status) {
1981 case ICE_SW_FLTR_ADDED:
/* Normal case: locate the live rule in the recipe's filter list
 * and duplicate its lookups before removing it.
 */
1982 list_head = &sw->recp_list[rdata->rid].filt_rules;
1983 LIST_FOR_EACH_ENTRY(list_itr, list_head,
1984 ice_adv_fltr_mgmt_list_entry,
1986 rinfo = list_itr->rule_info;
1987 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1988 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1989 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1990 (rinfo.fltr_rule_id == rdata->rule_id &&
1991 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
1992 lkups_cnt = list_itr->lkups_cnt;
/* Copy lookups: the list entry is freed by rule removal. */
1994 lkups_dp = (struct ice_adv_lkup_elem *)
1995 ice_memdup(hw, list_itr->lkups,
1996 sizeof(*list_itr->lkups) *
1998 ICE_NONDMA_TO_NONDMA);
2001 "Failed to allocate memory.");
2004 lkups_ref = lkups_dp;
/* Collapse a VSI-list action to a single-VSI forward. */
2006 if (rinfo.sw_act.fltr_act ==
2007 ICE_FWD_TO_VSI_LIST) {
2008 rinfo.sw_act.vsi_handle =
2010 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
2020 case ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT:
2021 /* Recover VSI context */
2022 hw->vsi_ctx[rd->vsi_handle]->vsi_num = filter_conf_ptr->vsi_num;
2023 rinfo = filter_conf_ptr->rule_info;
2024 lkups_cnt = filter_conf_ptr->lkups_num;
2025 lkups_ref = filter_conf_ptr->lkups;
2027 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
2028 rinfo.sw_act.vsi_handle = rd->vsi_handle;
2029 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
2033 case ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT:
/* Previous replay failed; retry add with the stored rule. */
2034 rinfo = filter_conf_ptr->rule_info;
2035 lkups_cnt = filter_conf_ptr->lkups_num;
2036 lkups_ref = filter_conf_ptr->lkups;
/* DCF adminq unavailable: bail out before touching HW. */
2044 if (ice_dcf_adminq_need_retry(ad)) {
2045 PMD_DRV_LOG(WARNING, "DCF is not on");
2050 /* Remove the old rule */
2051 ret = ice_rem_adv_rule(hw, lkups_ref, lkups_cnt, &rinfo);
2053 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
2055 filter_conf_ptr->fltr_status =
2056 ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT;
2062 if (ice_dcf_adminq_need_retry(ad)) {
2063 PMD_DRV_LOG(WARNING, "DCF is not on");
2068 /* Update VSI context */
2069 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
2071 /* Replay the rule */
2072 ret = ice_add_adv_rule(hw, lkups_ref, lkups_cnt,
2073 &rinfo, &added_rdata);
2075 PMD_DRV_LOG(ERR, "Failed to replay the rule");
2076 filter_conf_ptr->fltr_status =
2077 ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT;
2080 filter_conf_ptr->sw_query_data = added_rdata;
2081 /* Save VSI number for failure recover */
2082 filter_conf_ptr->vsi_num = rd->new_vsi_num;
2083 filter_conf_ptr->fltr_status = ICE_SW_FLTR_ADDED;
2088 if (ice_dcf_adminq_need_retry(ad))
/* Free the duplicated lookups on every exit path. */
2091 ice_free(hw, lkups_dp);
/* Register the switch flow parser: the permission-stage parser when
 * pipe-mode is enabled via devargs, otherwise the distributor-stage one.
 */
2096 ice_switch_init(struct ice_adapter *ad)
2099 struct ice_flow_parser *dist_parser;
2100 struct ice_flow_parser *perm_parser;
2102 if (ad->devargs.pipe_mode_support) {
2103 perm_parser = &ice_switch_perm_parser;
2104 ret = ice_register_parser(perm_parser, ad);
2106 dist_parser = &ice_switch_dist_parser;
2107 ret = ice_register_parser(dist_parser, ad);
/* Unregister whichever parser ice_switch_init() registered, mirroring its
 * pipe_mode_support selection.
 */
2113 ice_switch_uninit(struct ice_adapter *ad)
2115 struct ice_flow_parser *dist_parser;
2116 struct ice_flow_parser *perm_parser;
2118 if (ad->devargs.pipe_mode_support) {
2119 perm_parser = &ice_switch_perm_parser;
2120 ice_unregister_parser(perm_parser, ad);
2122 dist_parser = &ice_switch_dist_parser;
2123 ice_unregister_parser(dist_parser, ad);
/* Flow-engine ops vtable for the switch filter engine. */
2128 ice_flow_engine ice_switch_engine = {
2129 .init = ice_switch_init,
2130 .uninit = ice_switch_uninit,
2131 .create = ice_switch_create,
2132 .destroy = ice_switch_destroy,
2133 .query_count = ice_switch_query,
2134 .redirect = ice_switch_redirect,
2135 .free = ice_switch_filter_rule_free,
2136 .type = ICE_FLOW_ENGINE_SWITCH,
/* Distributor-stage parser (default, pipe-mode disabled). */
2140 ice_flow_parser ice_switch_dist_parser = {
2141 .engine = &ice_switch_engine,
2142 .array = ice_switch_pattern_dist_list,
2143 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
2144 .parse_pattern_action = ice_switch_parse_pattern_action,
2145 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Permission-stage parser (used when pipe_mode_support is set). */
2149 ice_flow_parser ice_switch_perm_parser = {
2150 .engine = &ice_switch_engine,
2151 .array = ice_switch_pattern_perm_list,
2152 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
2153 .parse_pattern_action = ice_switch_parse_pattern_action,
2154 .stage = ICE_FLOW_STAGE_PERMISSION,
2157 RTE_INIT(ice_sw_engine_init)
2159 struct ice_flow_engine *engine = &ice_switch_engine;
2160 ice_register_flow_engine(engine);