1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "base/ice_type.h"
22 #include "base/ice_switch.h"
24 #include "ice_ethdev.h"
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/* Per-rule limits for the switch filter.
 * NOTE(review): MAX_INPUT_SET_BYTE appears to cap the total number of
 * matched bytes across a rule's input set -- confirm against the
 * input_set_byte accounting in ice_switch_inset_get. */
29 #define MAX_QGRP_NUM_TYPE 7
30 #define MAX_INPUT_SET_BYTE 32
/* PPP protocol field values carried in PPPoE payloads: 0x0021 = IPv4,
 * 0x0057 = IPv6 (names ground the mapping; used to classify PPPoE rules). */
31 #define ICE_PPP_IPV4_PROTO 0x0021
32 #define ICE_PPP_IPV6_PROTO 0x0057
/* IPv4 protocol number 0x2F (47 = GRE), used to detect NVGRE tunnels
 * from the IPv4 next_proto_id field. */
33 #define ICE_IPV4_PROTO_NVGRE 0x002F
/*
 * Input-set bitmasks for non-tunnel switch rules: each ICE_SW_INSET_*
 * macro ORs together the header fields a given pattern is allowed to
 * match on.
 * NOTE(review): the MAC_VLAN and MAC_QINQ macros below end on a line
 * continuation ('\') with no visible final line -- this listing appears
 * to have dropped one line from each; verify against upstream source.
 */
35 #define ICE_SW_INSET_ETHER ( \
36 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
37 #define ICE_SW_INSET_MAC_VLAN ( \
38 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
40 #define ICE_SW_INSET_MAC_QINQ ( \
41 ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \
43 #define ICE_SW_INSET_MAC_IPV4 ( \
44 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
45 ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
/* Composite insets are built by ORing the simpler ones. */
46 #define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
47 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
/* L4 variants drop the IP proto bit (implied by TCP/UDP) and add ports. */
48 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
49 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
50 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
51 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
52 #define ICE_SW_INSET_MAC_IPV4_UDP ( \
53 ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
54 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
55 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/* IPv6 equivalents: TC replaces TOS, hop limit replaces TTL. */
56 #define ICE_SW_INSET_MAC_IPV6 ( \
57 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
58 ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
59 ICE_INSET_IPV6_NEXT_HDR)
60 #define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
61 ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
62 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
63 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
64 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
65 ICE_INSET_TCP_DST_PORT | ICE_INSET_TCP_SRC_PORT)
66 #define ICE_SW_INSET_MAC_IPV6_UDP ( \
67 ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
68 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
69 ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT)
/*
 * Input sets for VXLAN/NVGRE tunnel rules. The DIST_* variants
 * (presumably for the "distributor" parser stage -- see
 * ice_switch_pattern_dist_list) match inner 5-tuple fields plus the
 * tunnel id (TNI/VNI), inner DMAC and the outer IPv4 destination.
 */
70 #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \
71 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
72 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
73 #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \
74 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
75 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
76 #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \
77 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
78 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
79 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
80 #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \
81 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
82 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
83 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST)
84 #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \
85 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
86 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
87 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
88 #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \
89 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
90 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
91 ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST)
/* PERM_* variants (used by ice_switch_pattern_perm_list) match only
 * inner fields -- no tunnel id, DMAC or outer address. */
92 #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \
93 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
94 ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS)
95 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \
96 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
97 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \
98 ICE_INSET_TUN_IPV4_TOS)
99 #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \
100 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
101 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \
102 ICE_INSET_TUN_IPV4_TOS)
/*
 * Input sets for PPPoE session rules and their IPv4/IPv6/L4 payload
 * composites, followed by security/encapsulation protocols.
 */
103 #define ICE_SW_INSET_MAC_PPPOE ( \
104 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
105 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
106 #define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
107 ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
108 ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
109 ICE_INSET_PPPOE_PROTO)
110 #define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \
111 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4)
112 #define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \
113 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP)
114 #define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \
115 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP)
116 #define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \
117 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6)
118 #define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \
119 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP)
120 #define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \
121 ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP)
/* ESP/AH add the SPI; L2TPv3-over-IP adds the session id; PFCP adds
 * the S field and SEID -- each on top of the plain IP inset. */
122 #define ICE_SW_INSET_MAC_IPV4_ESP ( \
123 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI)
124 #define ICE_SW_INSET_MAC_IPV6_ESP ( \
125 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI)
126 #define ICE_SW_INSET_MAC_IPV4_AH ( \
127 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI)
128 #define ICE_SW_INSET_MAC_IPV6_AH ( \
129 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI)
130 #define ICE_SW_INSET_MAC_IPV4_L2TP ( \
131 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID)
132 #define ICE_SW_INSET_MAC_IPV6_L2TP ( \
133 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID)
134 #define ICE_SW_INSET_MAC_IPV4_PFCP ( \
135 ICE_SW_INSET_MAC_IPV4 | \
136 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
137 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
138 ICE_SW_INSET_MAC_IPV6 | \
139 ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
/*
 * Input sets for GTP-U tunnel rules. Outer-only rules match the TEID;
 * inner rules add the tunneled IPv4/IPv6 addresses and, for the _EH
 * ("extension header") variants, the GTP-U QFI as well. The L4
 * composites append the tunneled TCP/UDP ports.
 */
140 #define ICE_SW_INSET_MAC_IPV4_GTPU ( \
141 ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
142 #define ICE_SW_INSET_MAC_IPV6_GTPU ( \
143 ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
144 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 ( \
145 ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
146 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
147 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4 ( \
148 ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 | ICE_INSET_GTPU_QFI)
149 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 ( \
150 ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
151 ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST)
152 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6 ( \
153 ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 | ICE_INSET_GTPU_QFI)
154 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 ( \
155 ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
156 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
157 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4 ( \
158 ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 | ICE_INSET_GTPU_QFI)
159 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 ( \
160 ICE_INSET_DMAC | ICE_INSET_GTPU_TEID | \
161 ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST)
162 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6 ( \
163 ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 | ICE_INSET_GTPU_QFI)
/* Inner L4 (tunneled TCP/UDP port) composites. */
164 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_UDP ( \
165 ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 | \
166 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
167 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_UDP ( \
168 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4 | \
169 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
170 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_TCP ( \
171 ICE_SW_INSET_MAC_IPV4_GTPU_IPV4 | \
172 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
173 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_TCP ( \
174 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4 | \
175 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
176 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_UDP ( \
177 ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 | \
178 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
179 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_UDP ( \
180 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6 | \
181 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
182 #define ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_TCP ( \
183 ICE_SW_INSET_MAC_IPV4_GTPU_IPV6 | \
184 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
185 #define ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_TCP ( \
186 ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6 | \
187 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
188 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_UDP ( \
189 ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 | \
190 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
191 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_UDP ( \
192 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4 | \
193 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
194 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_TCP ( \
195 ICE_SW_INSET_MAC_IPV6_GTPU_IPV4 | \
196 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
197 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_TCP ( \
198 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4 | \
199 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
200 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP ( \
201 ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 | \
202 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
203 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_UDP ( \
204 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6 | \
205 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
206 #define ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_TCP ( \
207 ICE_SW_INSET_MAC_IPV6_GTPU_IPV6 | \
208 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
209 #define ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_TCP ( \
210 ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6 | \
211 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
/*
 * NOTE(review): the two fields below are members of struct sw_meta
 * whose opening/closing lines are elided in this listing. The meta
 * blob carries the lookup list and rule info from the parse stage to
 * ice_switch_create (see the casts of `meta` there).
 */
214 struct ice_adv_lkup_elem *list;
216 struct ice_adv_rule_info rule_info;
/* Forward declarations of the two parsers; presumably registered for
 * the distributor and permission stages -- confirm at file bottom. */
219 static struct ice_flow_parser ice_switch_dist_parser;
220 static struct ice_flow_parser ice_switch_perm_parser;
/*
 * Pattern/input-set table for the distributor stage: each row maps an
 * rte_flow pattern to the field set (inset) the switch filter may
 * match for that pattern.
 * Fix: the eth_ipv6_gtpu_ipv6_tcp row previously referenced the UDP
 * inset (ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP), so TCP-port matching on
 * that pattern was silently validated against UDP ports; it now uses
 * the TCP inset like every other *_tcp row.
 */
223 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
224 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
225 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
226 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
227 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
228 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
229 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
230 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
231 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
232 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
233 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
234 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
235 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
236 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
237 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
238 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
239 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
240 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
241 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
242 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
243 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
244 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
245 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
246 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
247 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
248 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
249 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
250 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
251 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
252 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
253 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
254 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
255 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
256 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
257 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
258 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
259 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
260 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
261 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
262 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
263 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
264 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
265 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
266 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
267 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
268 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
269 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
270 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
271 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
272 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
273 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
274 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
275 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_IPV4_GTPU_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
276 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
277 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
278 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
279 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
280 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
281 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_IPV4_GTPU_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
282 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
283 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
284 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
285 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
286 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
287 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_IPV6_GTPU_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
288 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
289 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
290 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
291 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
292 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
293 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_IPV6_GTPU_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
294 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
295 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
296 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
/* Fixed: was ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP (UDP inset on a TCP pattern). */
297 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
298 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Pattern/input-set table for the permission stage; tunnel rows use
 * the inner-field-only PERM_TUNNEL insets instead of the DIST ones.
 * Fix: the eth_ipv6_gtpu_ipv6_tcp row previously referenced the UDP
 * inset (ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP) -- same copy/paste
 * defect as in the distributor table; it now uses the TCP inset.
 */
302 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
303 {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE},
304 {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE},
305 {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE},
306 {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
307 {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
308 {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
309 {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
310 {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
311 {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
312 {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
313 {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
314 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
315 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
316 {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
317 {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
318 {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
319 {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
320 {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
321 {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
322 {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
323 {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
324 {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
325 {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
326 {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
327 {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
328 {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
329 {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
330 {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
331 {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
332 {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
333 {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
334 {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
335 {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
336 {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
337 {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
338 {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
339 {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE},
340 {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE},
341 {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
342 {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
343 {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE},
344 {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
345 {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
346 {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
347 {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
348 {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE},
349 {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
350 {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
351 {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
352 {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
353 {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
354 {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_IPV4_GTPU_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
355 {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
356 {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
357 {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
358 {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
359 {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
360 {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_IPV4_GTPU_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
361 {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
362 {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
363 {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
364 {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
365 {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_IPV4_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
366 {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_IPV6_GTPU_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
367 {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
368 {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
369 {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
370 {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
371 {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
372 {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_IPV6_GTPU_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
373 {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
374 {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
375 {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
/* Fixed: was ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_UDP (UDP inset on a TCP pattern). */
376 {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
377 {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_GTPU_EH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
/*
 * Program a switch filter rule into hardware from the pre-parsed meta
 * (lookup list + rule info, see struct sw_meta) and attach the
 * returned rule handle to the rte_flow.
 * NOTE(review): several lines of this function (the `meta` parameter,
 * goto labels/returns, cleanup path) are elided in this listing; the
 * visible lines are annotated as-is -- verify against upstream source.
 */
381 ice_switch_create(struct ice_adapter *ad,
382 struct rte_flow *flow,
384 struct rte_flow_error *error)
387 struct ice_pf *pf = &ad->pf;
388 struct ice_hw *hw = ICE_PF_TO_HW(pf);
389 struct ice_rule_query_data rule_added = {0};
390 struct ice_rule_query_data *filter_ptr;
/* Lookup list, count and rule info were stashed in sw_meta by the parse stage. */
391 struct ice_adv_lkup_elem *list =
392 ((struct sw_meta *)meta)->list;
394 ((struct sw_meta *)meta)->lkups_num;
395 struct ice_adv_rule_info *rule_info =
396 &((struct sw_meta *)meta)->rule_info;
/* Reject rules needing more lookup words than the HW recipe chain allows. */
398 if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
399 rte_flow_error_set(error, EINVAL,
400 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
401 "item number too large for rule");
/* A NULL lookup list cannot be programmed. */
405 rte_flow_error_set(error, EINVAL,
406 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
407 "lookup list should not be NULL");
/* Add the advanced rule via the shared switch code; rule_added receives
 * the rule id used later by destroy/query. */
410 ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
/* Persist the rule id on the flow so it can be removed later. */
412 filter_ptr = rte_zmalloc("ice_switch_filter",
413 sizeof(struct ice_rule_query_data), 0);
415 rte_flow_error_set(error, EINVAL,
416 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
417 "No memory for ice_switch_filter");
420 flow->rule = filter_ptr;
421 rte_memcpy(filter_ptr,
423 sizeof(struct ice_rule_query_data));
/* Rule creation failed in the shared switch code. */
425 rte_flow_error_set(error, EINVAL,
426 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
427 "switch filter create flow fail");
/*
 * Remove a previously created switch rule (by the id saved in
 * flow->rule) and free the per-flow handle.
 * NOTE(review): lines are elided in this listing (the flow->rule cast
 * target, NULL check, returns); annotated as-is.
 */
443 ice_switch_destroy(struct ice_adapter *ad,
444 struct rte_flow *flow,
445 struct rte_flow_error *error)
447 struct ice_hw *hw = &ad->hw;
449 struct ice_rule_query_data *filter_ptr;
/* The handle was allocated and attached by ice_switch_create. */
451 filter_ptr = (struct ice_rule_query_data *)
455 rte_flow_error_set(error, EINVAL,
456 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
458 " create by switch filter");
/* Remove the rule from hardware by its recipe/rule id. */
462 ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
464 rte_flow_error_set(error, EINVAL,
465 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
466 "fail to destroy switch filter rule");
/* Release the per-flow handle allocated in ice_switch_create. */
470 rte_free(filter_ptr);
/*
 * rte_flow ops hook: release the per-flow rule handle allocated by
 * ice_switch_create. (Surrounding lines are elided in this listing.)
 */
475 ice_switch_filter_rule_free(struct rte_flow *flow)
477 rte_free(flow->rule);
481 ice_switch_inset_get(const struct rte_flow_item pattern[],
482 struct rte_flow_error *error,
483 struct ice_adv_lkup_elem *list,
485 enum ice_sw_tunnel_type *tun_type)
487 const struct rte_flow_item *item = pattern;
488 enum rte_flow_item_type item_type;
489 const struct rte_flow_item_eth *eth_spec, *eth_mask;
490 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
491 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
492 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
493 const struct rte_flow_item_udp *udp_spec, *udp_mask;
494 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
495 const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
496 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
497 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
498 const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
499 const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
501 const struct rte_flow_item_esp *esp_spec, *esp_mask;
502 const struct rte_flow_item_ah *ah_spec, *ah_mask;
503 const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
504 const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
505 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
506 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
507 uint64_t input_set = ICE_INSET_NONE;
508 uint16_t input_set_byte = 0;
509 bool pppoe_elem_valid = 0;
510 bool pppoe_patt_valid = 0;
511 bool pppoe_prot_valid = 0;
512 bool inner_vlan_valid = 0;
513 bool outer_vlan_valid = 0;
514 bool tunnel_valid = 0;
515 bool profile_rule = 0;
516 bool nvgre_valid = 0;
517 bool vxlan_valid = 0;
524 bool gtpu_psc_valid = 0;
525 bool inner_ipv4_valid = 0;
526 bool inner_ipv6_valid = 0;
527 bool inner_tcp_valid = 0;
528 bool inner_udp_valid = 0;
529 uint16_t j, k, t = 0;
531 if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
532 *tun_type == ICE_NON_TUN_QINQ)
535 for (item = pattern; item->type !=
536 RTE_FLOW_ITEM_TYPE_END; item++) {
538 rte_flow_error_set(error, EINVAL,
539 RTE_FLOW_ERROR_TYPE_ITEM,
541 "Not support range");
544 item_type = item->type;
547 case RTE_FLOW_ITEM_TYPE_ETH:
548 eth_spec = item->spec;
549 eth_mask = item->mask;
550 if (eth_spec && eth_mask) {
551 const uint8_t *a = eth_mask->src.addr_bytes;
552 const uint8_t *b = eth_mask->dst.addr_bytes;
553 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
554 if (a[j] && tunnel_valid) {
564 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
565 if (b[j] && tunnel_valid) {
576 input_set |= ICE_INSET_ETHERTYPE;
577 list[t].type = (tunnel_valid == 0) ?
578 ICE_MAC_OFOS : ICE_MAC_IL;
579 struct ice_ether_hdr *h;
580 struct ice_ether_hdr *m;
582 h = &list[t].h_u.eth_hdr;
583 m = &list[t].m_u.eth_hdr;
584 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
585 if (eth_mask->src.addr_bytes[j]) {
587 eth_spec->src.addr_bytes[j];
589 eth_mask->src.addr_bytes[j];
593 if (eth_mask->dst.addr_bytes[j]) {
595 eth_spec->dst.addr_bytes[j];
597 eth_mask->dst.addr_bytes[j];
604 if (eth_mask->type) {
605 list[t].type = ICE_ETYPE_OL;
606 list[t].h_u.ethertype.ethtype_id =
608 list[t].m_u.ethertype.ethtype_id =
616 case RTE_FLOW_ITEM_TYPE_IPV4:
617 ipv4_spec = item->spec;
618 ipv4_mask = item->mask;
620 inner_ipv4_valid = 1;
624 if (ipv4_spec && ipv4_mask) {
625 /* Check IPv4 mask and update input set */
626 if (ipv4_mask->hdr.version_ihl ||
627 ipv4_mask->hdr.total_length ||
628 ipv4_mask->hdr.packet_id ||
629 ipv4_mask->hdr.hdr_checksum) {
630 rte_flow_error_set(error, EINVAL,
631 RTE_FLOW_ERROR_TYPE_ITEM,
633 "Invalid IPv4 mask.");
638 if (ipv4_mask->hdr.type_of_service)
640 ICE_INSET_TUN_IPV4_TOS;
641 if (ipv4_mask->hdr.src_addr)
643 ICE_INSET_TUN_IPV4_SRC;
644 if (ipv4_mask->hdr.dst_addr)
646 ICE_INSET_TUN_IPV4_DST;
647 if (ipv4_mask->hdr.time_to_live)
649 ICE_INSET_TUN_IPV4_TTL;
650 if (ipv4_mask->hdr.next_proto_id)
652 ICE_INSET_TUN_IPV4_PROTO;
654 if (ipv4_mask->hdr.src_addr)
655 input_set |= ICE_INSET_IPV4_SRC;
656 if (ipv4_mask->hdr.dst_addr)
657 input_set |= ICE_INSET_IPV4_DST;
658 if (ipv4_mask->hdr.time_to_live)
659 input_set |= ICE_INSET_IPV4_TTL;
660 if (ipv4_mask->hdr.next_proto_id)
662 ICE_INSET_IPV4_PROTO;
663 if (ipv4_mask->hdr.type_of_service)
667 list[t].type = (tunnel_valid == 0) ?
668 ICE_IPV4_OFOS : ICE_IPV4_IL;
669 if (ipv4_mask->hdr.src_addr) {
670 list[t].h_u.ipv4_hdr.src_addr =
671 ipv4_spec->hdr.src_addr;
672 list[t].m_u.ipv4_hdr.src_addr =
673 ipv4_mask->hdr.src_addr;
676 if (ipv4_mask->hdr.dst_addr) {
677 list[t].h_u.ipv4_hdr.dst_addr =
678 ipv4_spec->hdr.dst_addr;
679 list[t].m_u.ipv4_hdr.dst_addr =
680 ipv4_mask->hdr.dst_addr;
683 if (ipv4_mask->hdr.time_to_live) {
684 list[t].h_u.ipv4_hdr.time_to_live =
685 ipv4_spec->hdr.time_to_live;
686 list[t].m_u.ipv4_hdr.time_to_live =
687 ipv4_mask->hdr.time_to_live;
690 if (ipv4_mask->hdr.next_proto_id) {
691 list[t].h_u.ipv4_hdr.protocol =
692 ipv4_spec->hdr.next_proto_id;
693 list[t].m_u.ipv4_hdr.protocol =
694 ipv4_mask->hdr.next_proto_id;
697 if ((ipv4_spec->hdr.next_proto_id &
698 ipv4_mask->hdr.next_proto_id) ==
699 ICE_IPV4_PROTO_NVGRE)
700 *tun_type = ICE_SW_TUN_AND_NON_TUN;
701 if (ipv4_mask->hdr.type_of_service) {
702 list[t].h_u.ipv4_hdr.tos =
703 ipv4_spec->hdr.type_of_service;
704 list[t].m_u.ipv4_hdr.tos =
705 ipv4_mask->hdr.type_of_service;
712 case RTE_FLOW_ITEM_TYPE_IPV6:
713 ipv6_spec = item->spec;
714 ipv6_mask = item->mask;
716 inner_ipv6_valid = 1;
719 if (ipv6_spec && ipv6_mask) {
720 if (ipv6_mask->hdr.payload_len) {
721 rte_flow_error_set(error, EINVAL,
722 RTE_FLOW_ERROR_TYPE_ITEM,
724 "Invalid IPv6 mask");
728 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
729 if (ipv6_mask->hdr.src_addr[j] &&
732 ICE_INSET_TUN_IPV6_SRC;
734 } else if (ipv6_mask->hdr.src_addr[j]) {
735 input_set |= ICE_INSET_IPV6_SRC;
739 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
740 if (ipv6_mask->hdr.dst_addr[j] &&
743 ICE_INSET_TUN_IPV6_DST;
745 } else if (ipv6_mask->hdr.dst_addr[j]) {
746 input_set |= ICE_INSET_IPV6_DST;
750 if (ipv6_mask->hdr.proto &&
753 ICE_INSET_TUN_IPV6_NEXT_HDR;
754 else if (ipv6_mask->hdr.proto)
756 ICE_INSET_IPV6_NEXT_HDR;
757 if (ipv6_mask->hdr.hop_limits &&
760 ICE_INSET_TUN_IPV6_HOP_LIMIT;
761 else if (ipv6_mask->hdr.hop_limits)
763 ICE_INSET_IPV6_HOP_LIMIT;
764 if ((ipv6_mask->hdr.vtc_flow &
766 (RTE_IPV6_HDR_TC_MASK)) &&
769 ICE_INSET_TUN_IPV6_TC;
770 else if (ipv6_mask->hdr.vtc_flow &
772 (RTE_IPV6_HDR_TC_MASK))
773 input_set |= ICE_INSET_IPV6_TC;
775 list[t].type = (tunnel_valid == 0) ?
776 ICE_IPV6_OFOS : ICE_IPV6_IL;
777 struct ice_ipv6_hdr *f;
778 struct ice_ipv6_hdr *s;
779 f = &list[t].h_u.ipv6_hdr;
780 s = &list[t].m_u.ipv6_hdr;
781 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
782 if (ipv6_mask->hdr.src_addr[j]) {
784 ipv6_spec->hdr.src_addr[j];
786 ipv6_mask->hdr.src_addr[j];
789 if (ipv6_mask->hdr.dst_addr[j]) {
791 ipv6_spec->hdr.dst_addr[j];
793 ipv6_mask->hdr.dst_addr[j];
797 if (ipv6_mask->hdr.proto) {
799 ipv6_spec->hdr.proto;
801 ipv6_mask->hdr.proto;
804 if (ipv6_mask->hdr.hop_limits) {
806 ipv6_spec->hdr.hop_limits;
808 ipv6_mask->hdr.hop_limits;
811 if (ipv6_mask->hdr.vtc_flow &
813 (RTE_IPV6_HDR_TC_MASK)) {
814 struct ice_le_ver_tc_flow vtf;
815 vtf.u.fld.version = 0;
816 vtf.u.fld.flow_label = 0;
817 vtf.u.fld.tc = (rte_be_to_cpu_32
818 (ipv6_spec->hdr.vtc_flow) &
819 RTE_IPV6_HDR_TC_MASK) >>
820 RTE_IPV6_HDR_TC_SHIFT;
821 f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
822 vtf.u.fld.tc = (rte_be_to_cpu_32
823 (ipv6_mask->hdr.vtc_flow) &
824 RTE_IPV6_HDR_TC_MASK) >>
825 RTE_IPV6_HDR_TC_SHIFT;
826 s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
833 case RTE_FLOW_ITEM_TYPE_UDP:
834 udp_spec = item->spec;
835 udp_mask = item->mask;
840 if (udp_spec && udp_mask) {
841 /* Check UDP mask and update input set*/
842 if (udp_mask->hdr.dgram_len ||
843 udp_mask->hdr.dgram_cksum) {
844 rte_flow_error_set(error, EINVAL,
845 RTE_FLOW_ERROR_TYPE_ITEM,
852 if (udp_mask->hdr.src_port)
854 ICE_INSET_TUN_UDP_SRC_PORT;
855 if (udp_mask->hdr.dst_port)
857 ICE_INSET_TUN_UDP_DST_PORT;
859 if (udp_mask->hdr.src_port)
861 ICE_INSET_UDP_SRC_PORT;
862 if (udp_mask->hdr.dst_port)
864 ICE_INSET_UDP_DST_PORT;
866 if (*tun_type == ICE_SW_TUN_VXLAN &&
868 list[t].type = ICE_UDP_OF;
870 list[t].type = ICE_UDP_ILOS;
871 if (udp_mask->hdr.src_port) {
872 list[t].h_u.l4_hdr.src_port =
873 udp_spec->hdr.src_port;
874 list[t].m_u.l4_hdr.src_port =
875 udp_mask->hdr.src_port;
878 if (udp_mask->hdr.dst_port) {
879 list[t].h_u.l4_hdr.dst_port =
880 udp_spec->hdr.dst_port;
881 list[t].m_u.l4_hdr.dst_port =
882 udp_mask->hdr.dst_port;
889 case RTE_FLOW_ITEM_TYPE_TCP:
890 tcp_spec = item->spec;
891 tcp_mask = item->mask;
896 if (tcp_spec && tcp_mask) {
897 /* Check TCP mask and update input set */
898 if (tcp_mask->hdr.sent_seq ||
899 tcp_mask->hdr.recv_ack ||
900 tcp_mask->hdr.data_off ||
901 tcp_mask->hdr.tcp_flags ||
902 tcp_mask->hdr.rx_win ||
903 tcp_mask->hdr.cksum ||
904 tcp_mask->hdr.tcp_urp) {
905 rte_flow_error_set(error, EINVAL,
906 RTE_FLOW_ERROR_TYPE_ITEM,
913 if (tcp_mask->hdr.src_port)
915 ICE_INSET_TUN_TCP_SRC_PORT;
916 if (tcp_mask->hdr.dst_port)
918 ICE_INSET_TUN_TCP_DST_PORT;
920 if (tcp_mask->hdr.src_port)
922 ICE_INSET_TCP_SRC_PORT;
923 if (tcp_mask->hdr.dst_port)
925 ICE_INSET_TCP_DST_PORT;
927 list[t].type = ICE_TCP_IL;
928 if (tcp_mask->hdr.src_port) {
929 list[t].h_u.l4_hdr.src_port =
930 tcp_spec->hdr.src_port;
931 list[t].m_u.l4_hdr.src_port =
932 tcp_mask->hdr.src_port;
935 if (tcp_mask->hdr.dst_port) {
936 list[t].h_u.l4_hdr.dst_port =
937 tcp_spec->hdr.dst_port;
938 list[t].m_u.l4_hdr.dst_port =
939 tcp_mask->hdr.dst_port;
946 case RTE_FLOW_ITEM_TYPE_SCTP:
947 sctp_spec = item->spec;
948 sctp_mask = item->mask;
949 if (sctp_spec && sctp_mask) {
950 /* Check SCTP mask and update input set */
951 if (sctp_mask->hdr.cksum) {
952 rte_flow_error_set(error, EINVAL,
953 RTE_FLOW_ERROR_TYPE_ITEM,
955 "Invalid SCTP mask");
960 if (sctp_mask->hdr.src_port)
962 ICE_INSET_TUN_SCTP_SRC_PORT;
963 if (sctp_mask->hdr.dst_port)
965 ICE_INSET_TUN_SCTP_DST_PORT;
967 if (sctp_mask->hdr.src_port)
969 ICE_INSET_SCTP_SRC_PORT;
970 if (sctp_mask->hdr.dst_port)
972 ICE_INSET_SCTP_DST_PORT;
974 list[t].type = ICE_SCTP_IL;
975 if (sctp_mask->hdr.src_port) {
976 list[t].h_u.sctp_hdr.src_port =
977 sctp_spec->hdr.src_port;
978 list[t].m_u.sctp_hdr.src_port =
979 sctp_mask->hdr.src_port;
982 if (sctp_mask->hdr.dst_port) {
983 list[t].h_u.sctp_hdr.dst_port =
984 sctp_spec->hdr.dst_port;
985 list[t].m_u.sctp_hdr.dst_port =
986 sctp_mask->hdr.dst_port;
993 case RTE_FLOW_ITEM_TYPE_VXLAN:
994 vxlan_spec = item->spec;
995 vxlan_mask = item->mask;
996 /* Check if VXLAN item is used to describe protocol.
997 * If yes, both spec and mask should be NULL.
998 * If no, both spec and mask shouldn't be NULL.
1000 if ((!vxlan_spec && vxlan_mask) ||
1001 (vxlan_spec && !vxlan_mask)) {
1002 rte_flow_error_set(error, EINVAL,
1003 RTE_FLOW_ERROR_TYPE_ITEM,
1005 "Invalid VXLAN item");
1010 if (vxlan_spec && vxlan_mask) {
1011 list[t].type = ICE_VXLAN;
1012 if (vxlan_mask->vni[0] ||
1013 vxlan_mask->vni[1] ||
1014 vxlan_mask->vni[2]) {
1015 list[t].h_u.tnl_hdr.vni =
1016 (vxlan_spec->vni[2] << 16) |
1017 (vxlan_spec->vni[1] << 8) |
1019 list[t].m_u.tnl_hdr.vni =
1020 (vxlan_mask->vni[2] << 16) |
1021 (vxlan_mask->vni[1] << 8) |
1024 ICE_INSET_TUN_VXLAN_VNI;
1025 input_set_byte += 2;
1031 case RTE_FLOW_ITEM_TYPE_NVGRE:
1032 nvgre_spec = item->spec;
1033 nvgre_mask = item->mask;
1034 /* Check if NVGRE item is used to describe protocol.
1035 * If yes, both spec and mask should be NULL.
1036 * If no, both spec and mask shouldn't be NULL.
1038 if ((!nvgre_spec && nvgre_mask) ||
1039 (nvgre_spec && !nvgre_mask)) {
1040 rte_flow_error_set(error, EINVAL,
1041 RTE_FLOW_ERROR_TYPE_ITEM,
1043 "Invalid NVGRE item");
1048 if (nvgre_spec && nvgre_mask) {
1049 list[t].type = ICE_NVGRE;
1050 if (nvgre_mask->tni[0] ||
1051 nvgre_mask->tni[1] ||
1052 nvgre_mask->tni[2]) {
1053 list[t].h_u.nvgre_hdr.tni_flow =
1054 (nvgre_spec->tni[2] << 16) |
1055 (nvgre_spec->tni[1] << 8) |
1057 list[t].m_u.nvgre_hdr.tni_flow =
1058 (nvgre_mask->tni[2] << 16) |
1059 (nvgre_mask->tni[1] << 8) |
1062 ICE_INSET_TUN_NVGRE_TNI;
1063 input_set_byte += 2;
1069 case RTE_FLOW_ITEM_TYPE_VLAN:
1070 vlan_spec = item->spec;
1071 vlan_mask = item->mask;
1072 /* Check if VLAN item is used to describe protocol.
1073 * If yes, both spec and mask should be NULL.
1074 * If no, both spec and mask shouldn't be NULL.
1076 if ((!vlan_spec && vlan_mask) ||
1077 (vlan_spec && !vlan_mask)) {
1078 rte_flow_error_set(error, EINVAL,
1079 RTE_FLOW_ERROR_TYPE_ITEM,
1081 "Invalid VLAN item");
1086 if (!outer_vlan_valid)
1087 outer_vlan_valid = 1;
1089 inner_vlan_valid = 1;
1092 if (vlan_spec && vlan_mask) {
1094 if (!inner_vlan_valid) {
1095 list[t].type = ICE_VLAN_EX;
1097 ICE_INSET_VLAN_OUTER;
1099 list[t].type = ICE_VLAN_IN;
1101 ICE_INSET_VLAN_INNER;
1104 list[t].type = ICE_VLAN_OFOS;
1105 input_set |= ICE_INSET_VLAN_INNER;
1108 if (vlan_mask->tci) {
1109 list[t].h_u.vlan_hdr.vlan =
1111 list[t].m_u.vlan_hdr.vlan =
1113 input_set_byte += 2;
1115 if (vlan_mask->inner_type) {
1116 rte_flow_error_set(error, EINVAL,
1117 RTE_FLOW_ERROR_TYPE_ITEM,
1119 "Invalid VLAN input set.");
1126 case RTE_FLOW_ITEM_TYPE_PPPOED:
1127 case RTE_FLOW_ITEM_TYPE_PPPOES:
1128 pppoe_spec = item->spec;
1129 pppoe_mask = item->mask;
1130 /* Check if PPPoE item is used to describe protocol.
1131 * If yes, both spec and mask should be NULL.
1132 * If no, both spec and mask shouldn't be NULL.
1134 if ((!pppoe_spec && pppoe_mask) ||
1135 (pppoe_spec && !pppoe_mask)) {
1136 rte_flow_error_set(error, EINVAL,
1137 RTE_FLOW_ERROR_TYPE_ITEM,
1139 "Invalid pppoe item");
1142 pppoe_patt_valid = 1;
1143 if (pppoe_spec && pppoe_mask) {
1144 /* Check pppoe mask and update input set */
1145 if (pppoe_mask->length ||
1147 pppoe_mask->version_type) {
1148 rte_flow_error_set(error, EINVAL,
1149 RTE_FLOW_ERROR_TYPE_ITEM,
1151 "Invalid pppoe mask");
1154 list[t].type = ICE_PPPOE;
1155 if (pppoe_mask->session_id) {
1156 list[t].h_u.pppoe_hdr.session_id =
1157 pppoe_spec->session_id;
1158 list[t].m_u.pppoe_hdr.session_id =
1159 pppoe_mask->session_id;
1160 input_set |= ICE_INSET_PPPOE_SESSION;
1161 input_set_byte += 2;
1164 pppoe_elem_valid = 1;
1168 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
1169 pppoe_proto_spec = item->spec;
1170 pppoe_proto_mask = item->mask;
1171 /* Check if PPPoE optional proto_id item
1172 * is used to describe protocol.
1173 * If yes, both spec and mask should be NULL.
1174 * If no, both spec and mask shouldn't be NULL.
1176 if ((!pppoe_proto_spec && pppoe_proto_mask) ||
1177 (pppoe_proto_spec && !pppoe_proto_mask)) {
1178 rte_flow_error_set(error, EINVAL,
1179 RTE_FLOW_ERROR_TYPE_ITEM,
1181 "Invalid pppoe proto item");
1184 if (pppoe_proto_spec && pppoe_proto_mask) {
1185 if (pppoe_elem_valid)
1187 list[t].type = ICE_PPPOE;
1188 if (pppoe_proto_mask->proto_id) {
1189 list[t].h_u.pppoe_hdr.ppp_prot_id =
1190 pppoe_proto_spec->proto_id;
1191 list[t].m_u.pppoe_hdr.ppp_prot_id =
1192 pppoe_proto_mask->proto_id;
1193 input_set |= ICE_INSET_PPPOE_PROTO;
1194 input_set_byte += 2;
1195 pppoe_prot_valid = 1;
1197 if ((pppoe_proto_mask->proto_id &
1198 pppoe_proto_spec->proto_id) !=
1199 CPU_TO_BE16(ICE_PPP_IPV4_PROTO) &&
1200 (pppoe_proto_mask->proto_id &
1201 pppoe_proto_spec->proto_id) !=
1202 CPU_TO_BE16(ICE_PPP_IPV6_PROTO))
1203 *tun_type = ICE_SW_TUN_PPPOE_PAY;
1205 *tun_type = ICE_SW_TUN_PPPOE;
1211 case RTE_FLOW_ITEM_TYPE_ESP:
1212 esp_spec = item->spec;
1213 esp_mask = item->mask;
1214 if ((esp_spec && !esp_mask) ||
1215 (!esp_spec && esp_mask)) {
1216 rte_flow_error_set(error, EINVAL,
1217 RTE_FLOW_ERROR_TYPE_ITEM,
1219 "Invalid esp item");
1222 /* Check esp mask and update input set */
1223 if (esp_mask && esp_mask->hdr.seq) {
1224 rte_flow_error_set(error, EINVAL,
1225 RTE_FLOW_ERROR_TYPE_ITEM,
1227 "Invalid esp mask");
1231 if (!esp_spec && !esp_mask && !input_set) {
1233 if (ipv6_valid && udp_valid)
1235 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1236 else if (ipv6_valid)
1237 *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
1238 else if (ipv4_valid)
1240 } else if (esp_spec && esp_mask &&
1243 list[t].type = ICE_NAT_T;
1245 list[t].type = ICE_ESP;
1246 list[t].h_u.esp_hdr.spi =
1248 list[t].m_u.esp_hdr.spi =
1250 input_set |= ICE_INSET_ESP_SPI;
1251 input_set_byte += 4;
1255 if (!profile_rule) {
1256 if (ipv6_valid && udp_valid)
1257 *tun_type = ICE_SW_TUN_IPV6_NAT_T;
1258 else if (ipv4_valid && udp_valid)
1259 *tun_type = ICE_SW_TUN_IPV4_NAT_T;
1260 else if (ipv6_valid)
1261 *tun_type = ICE_SW_TUN_IPV6_ESP;
1262 else if (ipv4_valid)
1263 *tun_type = ICE_SW_TUN_IPV4_ESP;
1267 case RTE_FLOW_ITEM_TYPE_AH:
1268 ah_spec = item->spec;
1269 ah_mask = item->mask;
1270 if ((ah_spec && !ah_mask) ||
1271 (!ah_spec && ah_mask)) {
1272 rte_flow_error_set(error, EINVAL,
1273 RTE_FLOW_ERROR_TYPE_ITEM,
1278 /* Check ah mask and update input set */
1280 (ah_mask->next_hdr ||
1281 ah_mask->payload_len ||
1283 ah_mask->reserved)) {
1284 rte_flow_error_set(error, EINVAL,
1285 RTE_FLOW_ERROR_TYPE_ITEM,
1291 if (!ah_spec && !ah_mask && !input_set) {
1293 if (ipv6_valid && udp_valid)
1295 ICE_SW_TUN_PROFID_IPV6_NAT_T;
1296 else if (ipv6_valid)
1297 *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
1298 else if (ipv4_valid)
1300 } else if (ah_spec && ah_mask &&
1302 list[t].type = ICE_AH;
1303 list[t].h_u.ah_hdr.spi =
1305 list[t].m_u.ah_hdr.spi =
1307 input_set |= ICE_INSET_AH_SPI;
1308 input_set_byte += 4;
1312 if (!profile_rule) {
1315 else if (ipv6_valid)
1316 *tun_type = ICE_SW_TUN_IPV6_AH;
1317 else if (ipv4_valid)
1318 *tun_type = ICE_SW_TUN_IPV4_AH;
1322 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
1323 l2tp_spec = item->spec;
1324 l2tp_mask = item->mask;
1325 if ((l2tp_spec && !l2tp_mask) ||
1326 (!l2tp_spec && l2tp_mask)) {
1327 rte_flow_error_set(error, EINVAL,
1328 RTE_FLOW_ERROR_TYPE_ITEM,
1330 "Invalid l2tp item");
1334 if (!l2tp_spec && !l2tp_mask && !input_set) {
1337 ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
1338 else if (ipv4_valid)
1340 } else if (l2tp_spec && l2tp_mask &&
1341 l2tp_mask->session_id){
1342 list[t].type = ICE_L2TPV3;
1343 list[t].h_u.l2tpv3_sess_hdr.session_id =
1344 l2tp_spec->session_id;
1345 list[t].m_u.l2tpv3_sess_hdr.session_id =
1346 l2tp_mask->session_id;
1347 input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
1348 input_set_byte += 4;
1352 if (!profile_rule) {
1355 ICE_SW_TUN_IPV6_L2TPV3;
1356 else if (ipv4_valid)
1358 ICE_SW_TUN_IPV4_L2TPV3;
1362 case RTE_FLOW_ITEM_TYPE_PFCP:
1363 pfcp_spec = item->spec;
1364 pfcp_mask = item->mask;
1365 /* Check if PFCP item is used to describe protocol.
1366 * If yes, both spec and mask should be NULL.
1367 * If no, both spec and mask shouldn't be NULL.
1369 if ((!pfcp_spec && pfcp_mask) ||
1370 (pfcp_spec && !pfcp_mask)) {
1371 rte_flow_error_set(error, EINVAL,
1372 RTE_FLOW_ERROR_TYPE_ITEM,
1374 "Invalid PFCP item");
1377 if (pfcp_spec && pfcp_mask) {
1378 /* Check pfcp mask and update input set */
1379 if (pfcp_mask->msg_type ||
1380 pfcp_mask->msg_len ||
1382 rte_flow_error_set(error, EINVAL,
1383 RTE_FLOW_ERROR_TYPE_ITEM,
1385 "Invalid pfcp mask");
1388 if (pfcp_mask->s_field &&
1389 pfcp_spec->s_field == 0x01 &&
1392 ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
1393 else if (pfcp_mask->s_field &&
1394 pfcp_spec->s_field == 0x01)
1396 ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
1397 else if (pfcp_mask->s_field &&
1398 !pfcp_spec->s_field &&
1401 ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
1402 else if (pfcp_mask->s_field &&
1403 !pfcp_spec->s_field)
1405 ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
1411 case RTE_FLOW_ITEM_TYPE_GTPU:
1412 gtp_spec = item->spec;
1413 gtp_mask = item->mask;
1414 if (gtp_spec && !gtp_mask) {
1415 rte_flow_error_set(error, EINVAL,
1416 RTE_FLOW_ERROR_TYPE_ITEM,
1418 "Invalid GTP item");
1421 if (gtp_spec && gtp_mask) {
1422 if (gtp_mask->v_pt_rsv_flags ||
1423 gtp_mask->msg_type ||
1424 gtp_mask->msg_len) {
1425 rte_flow_error_set(error, EINVAL,
1426 RTE_FLOW_ERROR_TYPE_ITEM,
1428 "Invalid GTP mask");
1432 input_set |= ICE_INSET_GTPU_TEID;
1433 list[t].type = ICE_GTP;
1434 list[t].h_u.gtp_hdr.teid =
1436 list[t].m_u.gtp_hdr.teid =
1438 input_set_byte += 4;
1445 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1446 gtp_psc_spec = item->spec;
1447 gtp_psc_mask = item->mask;
1448 if (gtp_psc_spec && !gtp_psc_mask) {
1449 rte_flow_error_set(error, EINVAL,
1450 RTE_FLOW_ERROR_TYPE_ITEM,
1452 "Invalid GTPU_EH item");
1455 if (gtp_psc_spec && gtp_psc_mask) {
1456 if (gtp_psc_mask->pdu_type) {
1457 rte_flow_error_set(error, EINVAL,
1458 RTE_FLOW_ERROR_TYPE_ITEM,
1460 "Invalid GTPU_EH mask");
1463 if (gtp_psc_mask->qfi)
1464 input_set |= ICE_INSET_GTPU_QFI;
1465 list[t].type = ICE_GTP;
1466 list[t].h_u.gtp_hdr.qfi =
1468 list[t].m_u.gtp_hdr.qfi =
1470 input_set_byte += 1;
1476 case RTE_FLOW_ITEM_TYPE_VOID:
1480 rte_flow_error_set(error, EINVAL,
1481 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1482 "Invalid pattern item.");
1487 if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
1488 inner_vlan_valid && outer_vlan_valid)
1489 *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
1490 else if (*tun_type == ICE_SW_TUN_PPPOE &&
1491 inner_vlan_valid && outer_vlan_valid)
1492 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1493 else if (*tun_type == ICE_NON_TUN &&
1494 inner_vlan_valid && outer_vlan_valid)
1495 *tun_type = ICE_NON_TUN_QINQ;
1496 else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
1497 inner_vlan_valid && outer_vlan_valid)
1498 *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1500 if (pppoe_patt_valid && !pppoe_prot_valid) {
1501 if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
1502 *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
1503 else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
1504 *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
1505 else if (inner_vlan_valid && outer_vlan_valid)
1506 *tun_type = ICE_SW_TUN_PPPOE_QINQ;
1507 else if (ipv6_valid && udp_valid)
1508 *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
1509 else if (ipv6_valid && tcp_valid)
1510 *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
1511 else if (ipv4_valid && udp_valid)
1512 *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
1513 else if (ipv4_valid && tcp_valid)
1514 *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
1515 else if (ipv6_valid)
1516 *tun_type = ICE_SW_TUN_PPPOE_IPV6;
1517 else if (ipv4_valid)
1518 *tun_type = ICE_SW_TUN_PPPOE_IPV4;
1520 *tun_type = ICE_SW_TUN_PPPOE;
1523 if (gtpu_valid && gtpu_psc_valid) {
1524 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1525 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
1526 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1527 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
1528 else if (ipv4_valid && inner_ipv4_valid)
1529 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
1530 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1531 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
1532 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1533 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
1534 else if (ipv4_valid && inner_ipv6_valid)
1535 *tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
1536 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1537 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
1538 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1539 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
1540 else if (ipv6_valid && inner_ipv4_valid)
1541 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
1542 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1543 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
1544 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1545 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
1546 else if (ipv6_valid && inner_ipv6_valid)
1547 *tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
1548 else if (ipv4_valid)
1549 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1550 else if (ipv6_valid)
1551 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1552 } else if (gtpu_valid) {
1553 if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
1554 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
1555 else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
1556 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
1557 else if (ipv4_valid && inner_ipv4_valid)
1558 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
1559 else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
1560 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
1561 else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
1562 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
1563 else if (ipv4_valid && inner_ipv6_valid)
1564 *tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
1565 else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
1566 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
1567 else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
1568 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
1569 else if (ipv6_valid && inner_ipv4_valid)
1570 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
1571 else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
1572 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
1573 else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
1574 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
1575 else if (ipv6_valid && inner_ipv6_valid)
1576 *tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
1577 else if (ipv4_valid)
1578 *tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
1579 else if (ipv6_valid)
1580 *tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
1583 if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
1584 *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
1585 for (k = 0; k < t; k++) {
1586 if (list[k].type == ICE_GTP)
1587 list[k].type = ICE_GTP_NO_PAY;
1591 if (*tun_type == ICE_NON_TUN) {
1593 *tun_type = ICE_SW_TUN_VXLAN;
1594 else if (nvgre_valid)
1595 *tun_type = ICE_SW_TUN_NVGRE;
1596 else if (ipv4_valid && tcp_valid)
1597 *tun_type = ICE_SW_IPV4_TCP;
1598 else if (ipv4_valid && udp_valid)
1599 *tun_type = ICE_SW_IPV4_UDP;
1600 else if (ipv6_valid && tcp_valid)
1601 *tun_type = ICE_SW_IPV6_TCP;
1602 else if (ipv6_valid && udp_valid)
1603 *tun_type = ICE_SW_IPV6_UDP;
1606 if (input_set_byte > MAX_INPUT_SET_BYTE) {
1607 rte_flow_error_set(error, EINVAL,
1608 RTE_FLOW_ERROR_TYPE_ITEM,
1610 "too much input set");
/*
 * Parse flow actions when the port runs as a DCF (Device Control
 * Function) and fill the switch rule's action fields in @rule_info.
 *
 * Supported actions:
 *   - RTE_FLOW_ACTION_TYPE_VF:   forward to the VSI of the given VF id,
 *     or to the DCF's own function when act_vf->original is set;
 *   - RTE_FLOW_ACTION_TYPE_DROP: drop the packet.
 * Any other action type raises an EINVAL flow error.
 *
 * NOTE(review): several source lines are elided in this view (opening
 * brace, error returns, break statements) -- confirm against the full
 * file before relying on the exact control flow.
 */
1622 ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
1623 const struct rte_flow_action *actions,
1624 struct rte_flow_error *error,
1625 struct ice_adv_rule_info *rule_info)
1627 const struct rte_flow_action_vf *act_vf;
1628 const struct rte_flow_action *action;
1629 enum rte_flow_action_type action_type;
1631 for (action = actions; action->type !=
1632 RTE_FLOW_ACTION_TYPE_END; action++) {
1633 action_type = action->type;
1634 switch (action_type) {
1635 case RTE_FLOW_ACTION_TYPE_VF:
1636 rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
1637 act_vf = action->conf;
/* A VF id beyond the number of real VFs is only valid when
 * 'original' requests forwarding to the DCF's own function.
 */
1639 if (act_vf->id >= ad->real_hw.num_vfs &&
1640 !act_vf->original) {
1641 rte_flow_error_set(error,
1642 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1648 if (act_vf->original)
1649 rule_info->sw_act.vsi_handle =
1650 ad->real_hw.avf.bus.func;
1652 rule_info->sw_act.vsi_handle = act_vf->id;
1655 case RTE_FLOW_ACTION_TYPE_DROP:
1656 rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
1660 rte_flow_error_set(error,
1661 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1663 "Invalid action type");
/* Rule matches on the Rx side; source is the forwarding VSI. */
1668 rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
1669 rule_info->sw_act.flag = ICE_FLTR_RX;
/* Fixed rule priority used by this driver for switch rules. */
1671 rule_info->priority = 5;
/*
 * Parse flow actions for the PF path and fill @rule_info's switch
 * action fields.
 *
 * Supported actions:
 *   - RTE_FLOW_ACTION_TYPE_RSS:   treated as a queue-group action; the
 *     group size must be one of {2,4,8,16,32,64,128}, the queues must
 *     be consecutive, and the range must fit within nb_rx_queues;
 *   - RTE_FLOW_ACTION_TYPE_QUEUE: forward to a single Rx queue
 *     (index validated against nb_rx_queues);
 *   - RTE_FLOW_ACTION_TYPE_DROP / VOID.
 *
 * NOTE(review): the goto labels for the three error paths at the end
 * and several break/return lines are elided in this view -- the exact
 * branch targets should be confirmed against the full file.
 */
1677 ice_switch_parse_action(struct ice_pf *pf,
1678 const struct rte_flow_action *actions,
1679 struct rte_flow_error *error,
1680 struct ice_adv_rule_info *rule_info)
1682 struct ice_vsi *vsi = pf->main_vsi;
1683 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1684 const struct rte_flow_action_queue *act_q;
1685 const struct rte_flow_action_rss *act_qgrop;
1686 uint16_t base_queue, i;
1687 const struct rte_flow_action *action;
1688 enum rte_flow_action_type action_type;
/* Legal queue-group sizes accepted for the RSS (queue region) action. */
1689 uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
1690 2, 4, 8, 16, 32, 64, 128};
/* Queue ids in the rule are absolute: PF base + VSI base + index. */
1692 base_queue = pf->base_queue + vsi->base_queue;
1693 for (action = actions; action->type !=
1694 RTE_FLOW_ACTION_TYPE_END; action++) {
1695 action_type = action->type;
1696 switch (action_type) {
1697 case RTE_FLOW_ACTION_TYPE_RSS:
1698 act_qgrop = action->conf;
1699 if (act_qgrop->queue_num <= 1)
1701 rule_info->sw_act.fltr_act =
1703 rule_info->sw_act.fwd_id.q_id =
1704 base_queue + act_qgrop->queue[0];
/* The group size must match one of the supported sizes. */
1705 for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
1706 if (act_qgrop->queue_num ==
1707 valid_qgrop_number[i])
1710 if (i == MAX_QGRP_NUM_TYPE)
1712 if ((act_qgrop->queue[0] +
1713 act_qgrop->queue_num) >
1714 dev->data->nb_rx_queues)
/* Queue region must be a run of consecutive queue indexes. */
1716 for (i = 0; i < act_qgrop->queue_num - 1; i++)
1717 if (act_qgrop->queue[i + 1] !=
1718 act_qgrop->queue[i] + 1)
1720 rule_info->sw_act.qgrp_size =
1721 act_qgrop->queue_num;
1723 case RTE_FLOW_ACTION_TYPE_QUEUE:
1724 act_q = action->conf;
1725 if (act_q->index >= dev->data->nb_rx_queues)
1727 rule_info->sw_act.fltr_act =
1729 rule_info->sw_act.fwd_id.q_id =
1730 base_queue + act_q->index;
1733 case RTE_FLOW_ACTION_TYPE_DROP:
1734 rule_info->sw_act.fltr_act =
1738 case RTE_FLOW_ACTION_TYPE_VOID:
1746 rule_info->sw_act.vsi_handle = vsi->idx;
1748 rule_info->sw_act.src = vsi->idx;
1749 rule_info->priority = 5;
/* Error exits (labels elided in this view): */
1754 rte_flow_error_set(error,
1755 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1757 "Invalid action type or queue number");
1761 rte_flow_error_set(error,
1762 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1764 "Invalid queue region indexes");
1768 rte_flow_error_set(error,
1769 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1771 "Discontinuous queue region");
/*
 * Validate the action list for a switch rule: exactly one non-VOID
 * action is allowed, and it must be VF, RSS, QUEUE or DROP.
 * Sets an EINVAL flow error otherwise.
 *
 * NOTE(review): counter increments and return statements are elided in
 * this view; presumably actions_num is bumped in the first case group.
 */
1776 ice_switch_check_action(const struct rte_flow_action *actions,
1777 struct rte_flow_error *error)
1779 const struct rte_flow_action *action;
1780 enum rte_flow_action_type action_type;
1781 uint16_t actions_num = 0;
1783 for (action = actions; action->type !=
1784 RTE_FLOW_ACTION_TYPE_END; action++) {
1785 action_type = action->type;
1786 switch (action_type) {
1787 case RTE_FLOW_ACTION_TYPE_VF:
1788 case RTE_FLOW_ACTION_TYPE_RSS:
1789 case RTE_FLOW_ACTION_TYPE_QUEUE:
1790 case RTE_FLOW_ACTION_TYPE_DROP:
1793 case RTE_FLOW_ACTION_TYPE_VOID:
1796 rte_flow_error_set(error,
1797 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1799 "Invalid action type");
/* A switch rule carries exactly one forwarding/drop action. */
1804 if (actions_num != 1) {
1805 rte_flow_error_set(error,
1806 EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1808 "Invalid action number");
/*
 * Top-level parse entry for the switch engine: validate the pattern
 * against @array, derive the input set and tunnel type, parse the
 * actions (DCF or PF path), and hand back an allocated struct sw_meta
 * via *meta for the create step.
 *
 * Memory: allocates 'list' (lookup elements) and 'sw_meta_ptr';
 * ownership transfers to the caller through *meta on success.  The
 * error paths (labels elided in this view) free them.
 */
1816 ice_switch_parse_pattern_action(struct ice_adapter *ad,
1817 struct ice_pattern_match_item *array,
1819 const struct rte_flow_item pattern[],
1820 const struct rte_flow_action actions[],
1822 struct rte_flow_error *error)
1824 struct ice_pf *pf = &ad->pf;
1825 uint64_t inputset = 0;
1827 struct sw_meta *sw_meta_ptr = NULL;
1828 struct ice_adv_rule_info rule_info;
1829 struct ice_adv_lkup_elem *list = NULL;
1830 uint16_t lkups_num = 0;
1831 const struct rte_flow_item *item = pattern;
1832 uint16_t item_num = 0;
1833 uint16_t vlan_num = 0;
1834 enum ice_sw_tunnel_type tun_type =
1836 struct ice_pattern_match_item *pattern_match_item = NULL;
/* First pass over the pattern: count items, detect fully-masked
 * ethertype (forces tunnel-and-non-tunnel matching) and count VLANs.
 */
1838 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1840 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1841 const struct rte_flow_item_eth *eth_mask;
1843 eth_mask = item->mask;
1846 if (eth_mask->type == UINT16_MAX)
1847 tun_type = ICE_SW_TUN_AND_NON_TUN;
1850 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
1853 /* reserve one more memory slot for ETH which may
1854 * consume 2 lookup items.
1856 if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
/* Two VLAN items mean a QinQ pattern. */
1860 if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
1861 tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
1862 else if (vlan_num == 2)
1863 tun_type = ICE_NON_TUN_QINQ;
1865 list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
1867 rte_flow_error_set(error, EINVAL,
1868 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1869 "No memory for PMD internal items");
1874 rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
1876 rte_flow_error_set(error, EINVAL,
1877 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1878 "No memory for sw_pattern_meta_ptr");
1882 pattern_match_item =
1883 ice_search_pattern_match_item(ad, pattern, array, array_len,
1885 if (!pattern_match_item) {
1886 rte_flow_error_set(error, EINVAL,
1887 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1888 "Invalid input pattern");
/* Derive the input set; it must be non-empty (unless this is a
 * profile rule) and a subset of what the matched pattern allows.
 */
1892 inputset = ice_switch_inset_get
1893 (pattern, error, list, &lkups_num, &tun_type);
1894 if ((!inputset && !ice_is_prof_rule(tun_type)) ||
1895 (inputset & ~pattern_match_item->input_set_mask_o)) {
1896 rte_flow_error_set(error, EINVAL,
1897 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1899 "Invalid input set");
1903 memset(&rule_info, 0, sizeof(rule_info));
1904 rule_info.tun_type = tun_type;
1906 ret = ice_switch_check_action(actions, error);
/* DCF ports parse actions differently from regular PF ports. */
1910 if (ad->hw.dcf_enabled)
1911 ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
1914 ret = ice_switch_parse_action(pf, actions, error, &rule_info);
/* Success: hand the parsed metadata to the caller. */
1920 *meta = sw_meta_ptr;
1921 ((struct sw_meta *)*meta)->list = list;
1922 ((struct sw_meta *)*meta)->lkups_num = lkups_num;
1923 ((struct sw_meta *)*meta)->rule_info = rule_info;
/* Error cleanup (labels elided in this view). */
1926 rte_free(sw_meta_ptr);
1929 rte_free(pattern_match_item);
1935 rte_free(sw_meta_ptr);
1936 rte_free(pattern_match_item);
/*
 * Flow query callback for the switch engine.  Counting is not
 * supported by switch filters, so this always sets an EINVAL flow
 * error (all parameters except @error are unused).
 */
1942 ice_switch_query(struct ice_adapter *ad __rte_unused,
1943 struct rte_flow *flow __rte_unused,
1944 struct rte_flow_query_count *count __rte_unused,
1945 struct rte_flow_error *error)
1947 rte_flow_error_set(error, EINVAL,
1948 RTE_FLOW_ERROR_TYPE_HANDLE,
1950 "count action not supported by switch filter");
/*
 * Redirect an existing switch rule to a new VSI (used e.g. after a VF
 * reset changes VSI numbers).  Finds the filter entry matching the
 * flow's rule id, duplicates its lookup list, removes the old rule,
 * updates the VSI context with the new VSI number, and re-adds the
 * rule.
 *
 * NOTE(review): early-return statements and the loop terminator are
 * elided in this view; the cleanup at the end frees the duplicated
 * lookup list in all visible paths.
 */
1956 ice_switch_redirect(struct ice_adapter *ad,
1957 struct rte_flow *flow,
1958 struct ice_flow_redirect *rd)
1960 struct ice_rule_query_data *rdata = flow->rule;
1961 struct ice_adv_fltr_mgmt_list_entry *list_itr;
1962 struct ice_adv_lkup_elem *lkups_dp = NULL;
1963 struct LIST_HEAD_TYPE *list_head;
1964 struct ice_adv_rule_info rinfo;
1965 struct ice_hw *hw = &ad->hw;
1966 struct ice_switch_info *sw;
/* Only rules on the VSI being redirected are affected. */
1970 if (rdata->vsi_handle != rd->vsi_handle)
1973 sw = hw->switch_info;
1974 if (!sw->recp_list[rdata->rid].recp_created)
1977 if (rd->type != ICE_FLOW_REDIRECT_VSI)
/* Locate the filter entry for this rule id in the recipe's list. */
1980 list_head = &sw->recp_list[rdata->rid].filt_rules;
1981 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
1983 rinfo = list_itr->rule_info;
1984 if ((rinfo.fltr_rule_id == rdata->rule_id &&
1985 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
1986 rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
1987 (rinfo.fltr_rule_id == rdata->rule_id &&
1988 rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
1989 lkups_cnt = list_itr->lkups_cnt;
/* Copy the lookups: they are freed when the old rule is removed. */
1990 lkups_dp = (struct ice_adv_lkup_elem *)
1991 ice_memdup(hw, list_itr->lkups,
1992 sizeof(*list_itr->lkups) *
1993 lkups_cnt, ICE_NONDMA_TO_NONDMA);
1996 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
/* A VSI-list rule is rewritten as a plain forward-to-VSI rule
 * targeting the redirected VSI.
 */
2000 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
2001 rinfo.sw_act.vsi_handle = rd->vsi_handle;
2002 rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
2011 /* Remove the old rule */
2012 ret = ice_rem_adv_rule(hw, list_itr->lkups,
2015 PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
2021 /* Update VSI context */
2022 hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
2024 /* Replay the rule */
2025 ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
2028 PMD_DRV_LOG(ERR, "Failed to replay the rule");
2033 ice_free(hw, lkups_dp);
/*
 * Engine init hook: register the switch-flow parser appropriate for
 * the configured mode -- the "permission" parser when the pipe_mode
 * devarg is set, otherwise the "distributor" parser.
 */
2038 ice_switch_init(struct ice_adapter *ad)
2041 struct ice_flow_parser *dist_parser;
2042 struct ice_flow_parser *perm_parser;
2044 if (ad->devargs.pipe_mode_support) {
2045 perm_parser = &ice_switch_perm_parser;
2046 ret = ice_register_parser(perm_parser, ad);
2048 dist_parser = &ice_switch_dist_parser;
2049 ret = ice_register_parser(dist_parser, ad);
/*
 * Engine uninit hook: unregister whichever parser ice_switch_init()
 * registered, mirroring its pipe_mode_support selection.
 */
2055 ice_switch_uninit(struct ice_adapter *ad)
2057 struct ice_flow_parser *dist_parser;
2058 struct ice_flow_parser *perm_parser;
2060 if (ad->devargs.pipe_mode_support) {
2061 perm_parser = &ice_switch_perm_parser;
2062 ice_unregister_parser(perm_parser, ad);
2064 dist_parser = &ice_switch_dist_parser;
2065 ice_unregister_parser(dist_parser, ad);
/* Switch flow engine descriptor wiring the ops defined in this file. */
2070 ice_flow_engine ice_switch_engine = {
2071 .init = ice_switch_init,
2072 .uninit = ice_switch_uninit,
2073 .create = ice_switch_create,
2074 .destroy = ice_switch_destroy,
2075 .query_count = ice_switch_query,
2076 .redirect = ice_switch_redirect,
2077 .free = ice_switch_filter_rule_free,
2078 .type = ICE_FLOW_ENGINE_SWITCH,
/* Parser used in default (distributor) mode. */
2082 ice_flow_parser ice_switch_dist_parser = {
2083 .engine = &ice_switch_engine,
2084 .array = ice_switch_pattern_dist_list,
2085 .array_len = RTE_DIM(ice_switch_pattern_dist_list),
2086 .parse_pattern_action = ice_switch_parse_pattern_action,
2087 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Parser used when pipeline (permission) mode is enabled via devargs. */
2091 ice_flow_parser ice_switch_perm_parser = {
2092 .engine = &ice_switch_engine,
2093 .array = ice_switch_pattern_perm_list,
2094 .array_len = RTE_DIM(ice_switch_pattern_perm_list),
2095 .parse_pattern_action = ice_switch_parse_pattern_action,
2096 .stage = ICE_FLOW_STAGE_PERMISSION,
/* Constructor: register the switch engine with the generic flow
 * framework at shared-object load time.
 */
2099 RTE_INIT(ice_sw_engine_init)
2101 struct ice_flow_engine *engine = &ice_switch_engine;
2102 ice_register_flow_engine(engine);