/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"
#include "virtchnl.h"
#include "iavf_rxtx.h"
#define IAVF_FDIR_MAX_QREGION_SIZE 128

#define IAVF_FDIR_IPV6_TC_OFFSET 20
#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)

#define IAVF_GTPU_EH_DWLINK 0
#define IAVF_GTPU_EH_UPLINK 1
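/* The two values above are the PDU Type carried in the GTP-U extension
 * header's PDU session container: 0 marks downlink and 1 marks uplink
 * PDU session information (per 3GPP TS 38.415).
 */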
#define IAVF_FDIR_INSET_ETH (\
	IAVF_INSET_ETHERTYPE)
#define IAVF_FDIR_INSET_ETH_IPV4 (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)

#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6 (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
	IAVF_INSET_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
	IAVF_FDIR_INSET_ETH_IPV6 | IAVF_INSET_IPV6_ID)

#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)

#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
#define IAVF_FDIR_INSET_IPV4_GTPU (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID)
#define IAVF_FDIR_INSET_GTPU_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
	IAVF_INSET_TUN_IPV4_PROTO | IAVF_INSET_TUN_IPV4_TOS | \
	IAVF_INSET_TUN_IPV4_TTL)

#define IAVF_FDIR_INSET_GTPU_IPV4_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV4_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV4 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_IPV6_GTPU (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID)

#define IAVF_FDIR_INSET_GTPU_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
	IAVF_INSET_TUN_IPV6_NEXT_HDR | IAVF_INSET_TUN_IPV6_TC | \
	IAVF_INSET_TUN_IPV6_HOP_LIMIT)

#define IAVF_FDIR_INSET_GTPU_IPV6_UDP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_UDP_SRC_PORT | IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GTPU_IPV6_TCP (\
	IAVF_FDIR_INSET_GTPU_IPV6 | \
	IAVF_INSET_TUN_TCP_SRC_PORT | IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)

#define IAVF_FDIR_INSET_L2TPV3OIP (\
	IAVF_L2TPV3OIP_SESSION_ID)
#define IAVF_FDIR_INSET_IPV4_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_AH (\
	IAVF_INSET_AH_SPI)

#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_FDIR_INSET_PFCP (\
	IAVF_INSET_PFCP_S_FIELD)

#define IAVF_FDIR_INSET_ECPRI (\
	IAVF_ECPRI_PC_RTC_ID)
#define IAVF_FDIR_INSET_GRE_IPV4 (\
	IAVF_INSET_TUN_IPV4_SRC | IAVF_INSET_TUN_IPV4_DST | \
	IAVF_INSET_TUN_IPV4_TOS | IAVF_INSET_TUN_IPV4_PROTO)

#define IAVF_FDIR_INSET_GRE_IPV4_TCP (\
	IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV4_UDP (\
	IAVF_FDIR_INSET_GRE_IPV4 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6 (\
	IAVF_INSET_TUN_IPV6_SRC | IAVF_INSET_TUN_IPV6_DST | \
	IAVF_INSET_TUN_IPV6_TC | IAVF_INSET_TUN_IPV6_NEXT_HDR)

#define IAVF_FDIR_INSET_GRE_IPV6_TCP (\
	IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_TCP_SRC_PORT | \
	IAVF_INSET_TUN_TCP_DST_PORT)

#define IAVF_FDIR_INSET_GRE_IPV6_UDP (\
	IAVF_FDIR_INSET_GRE_IPV6 | IAVF_INSET_TUN_UDP_SRC_PORT | \
	IAVF_INSET_TUN_UDP_DST_PORT)
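/* Flow Director supported patterns: each entry below pairs an rte_flow
 * pattern with the input-set mask built from the IAVF_FDIR_INSET_* macros
 * above. A rule may match any subset of the masked fields; fields outside
 * the mask are rejected when the input set is refined further down.
 */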
static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
	{iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_frag_ext, IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_IPV4_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_IPV4_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu, IAVF_FDIR_INSET_IPV4_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu, IAVF_FDIR_INSET_IPV4_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu, IAVF_FDIR_INSET_IPV6_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu, IAVF_FDIR_INSET_IPV6_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh, IAVF_FDIR_INSET_IPV4_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh, IAVF_FDIR_INSET_IPV4_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh, IAVF_FDIR_INSET_IPV6_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh, IAVF_FDIR_INSET_IPV6_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4, IAVF_FDIR_INSET_GTPU_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_udp, IAVF_FDIR_INSET_GTPU_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv4_tcp, IAVF_FDIR_INSET_GTPU_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6, IAVF_FDIR_INSET_GTPU_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_udp, IAVF_FDIR_INSET_GTPU_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_gtpu_eh_ipv6_tcp, IAVF_FDIR_INSET_GTPU_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu, IAVF_FDIR_INSET_IPV6_GTPU, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gtpu_eh, IAVF_FDIR_INSET_IPV6_GTPU_EH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_IPV4_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_IPV6_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ecpri, IAVF_FDIR_INSET_ECPRI, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_ecpri, IAVF_FDIR_INSET_ECPRI, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4, IAVF_FDIR_INSET_GRE_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_tcp, IAVF_FDIR_INSET_GRE_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv4_udp, IAVF_FDIR_INSET_GRE_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6, IAVF_FDIR_INSET_GRE_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_tcp, IAVF_FDIR_INSET_GRE_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv4_gre_ipv6_udp, IAVF_FDIR_INSET_GRE_IPV6_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4, IAVF_FDIR_INSET_GRE_IPV4, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_tcp, IAVF_FDIR_INSET_GRE_IPV4_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv4_udp, IAVF_FDIR_INSET_GRE_IPV4_UDP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6, IAVF_FDIR_INSET_GRE_IPV6, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_tcp, IAVF_FDIR_INSET_GRE_IPV6_TCP, IAVF_INSET_NONE},
	{iavf_pattern_eth_ipv6_gre_ipv6_udp, IAVF_FDIR_INSET_GRE_IPV6_UDP, IAVF_INSET_NONE},
};
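/* Illustrative usage (testpmd flow syntax; port and queue numbers are
 * arbitrary). The rule below matches iavf_pattern_eth_ipv4 with an input
 * set covered by IAVF_FDIR_INSET_ETH_IPV4:
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20
 *        dst is 192.168.0.21 tos is 4 ttl is 64 / end
 *        actions queue index 3 / end
 */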
static struct iavf_flow_parser iavf_fdir_parser;
static int
iavf_fdir_init(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_parser *parser;

	if (!vf->vf_res)
		return -EINVAL;

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		parser = &iavf_fdir_parser;
	else
		return -ENOTSUP;

	return iavf_register_parser(parser, ad);
}
static void
iavf_fdir_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_fdir_parser, ad);
}
static int
iavf_fdir_create(struct iavf_adapter *ad,
		 struct rte_flow *flow,
		 void *meta,
		 struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	struct iavf_fdir_conf *rule;
	int ret;

	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory for fdir rule");
		return -rte_errno;
	}

	ret = iavf_fdir_add(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to add filter rule.");
		goto free_entry;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 1);

	rte_memcpy(rule, filter, sizeof(*rule));
	flow->rule = rule;
	return 0;

free_entry:
	rte_free(rule);
	return -rte_errno;
}
static int
iavf_fdir_destroy(struct iavf_adapter *ad,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter;
	int ret;

	filter = (struct iavf_fdir_conf *)flow->rule;

	ret = iavf_fdir_del(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to delete filter rule.");
		return -rte_errno;
	}

	if (filter->mark_flag == 1)
		iavf_fdir_rx_proc_enable(ad, 0);

	flow->rule = NULL;
	rte_free(filter);
	return 0;
}
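/* Mark handling in the Rx path is toggled around rule lifetime:
 * iavf_fdir_rx_proc_enable(ad, 1) on creation of a mark-flagged rule and
 * iavf_fdir_rx_proc_enable(ad, 0) on its destruction, so FDIR ID
 * extraction stays enabled while at least one such rule exists.
 */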
static int
iavf_fdir_validation(struct iavf_adapter *ad,
		     __rte_unused struct rte_flow *flow,
		     void *meta,
		     struct rte_flow_error *error)
{
	struct iavf_fdir_conf *filter = meta;
	int ret;

	ret = iavf_fdir_check(ad, filter);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to validate filter rule.");
		return -rte_errno;
	}

	return 0;
}
static struct iavf_flow_engine iavf_fdir_engine = {
	.init = iavf_fdir_init,
	.uninit = iavf_fdir_uninit,
	.create = iavf_fdir_create,
	.destroy = iavf_fdir_destroy,
	.validation = iavf_fdir_validation,
	.type = IAVF_FLOW_ENGINE_FDIR,
};
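/* The generic flow layer drives this engine: validation/create/destroy
 * map to iavf_fdir_check()/iavf_fdir_add()/iavf_fdir_del() above, each of
 * which sends the parsed virtchnl rule to the PF over the virtual channel.
 */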
static int
iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
			       struct rte_flow_error *error,
			       const struct rte_flow_action *act,
			       struct virtchnl_filter_action *filter_action)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if the queue indexes for the queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "2, 4, 8, 16, 32, 64, 128, as long as the total number "
				   "of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	if (rss->queue_num > vf->max_rss_qregion) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size cannot be larger than the supported max RSS queue region");
		return -rte_errno;
	}

	filter_action->act_conf.queue.index = rss->queue[0];
	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;

	return 0;
}
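/* Example (illustrative values): an RSS action listing queues 4..11 forms
 * a valid 8-queue region; it is encoded as queue.index = 4 and
 * queue.region = rte_fls_u32(8) - 1 = 3, i.e. 2^3 contiguous queues.
 */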
static int
iavf_fdir_parse_action(struct iavf_adapter *ad,
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error,
		       struct iavf_fdir_conf *filter)
{
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	int ret;

	int number = 0;
	struct virtchnl_filter_action *filter_action;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_DROP;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_QUEUE;
			filter_action->act_conf.queue.index = act_q->index;

			if (filter_action->act_conf.queue.index >=
			    ad->eth_dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions, "Invalid queue for FDIR.");
				return -rte_errno;
			}

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_Q_REGION;

			ret = iavf_fdir_parse_action_qregion(ad,
							     error, actions, filter_action);
			if (ret)
				return ret;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
			filter_action->type = VIRTCHNL_ACTION_MARK;
			filter_action->act_conf.mark_id = mark_spec->id;

			filter->add_fltr.rule_cfg.action_set.count = ++number;
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Action numbers exceed the maximum value");
		return -rte_errno;
	}

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (dest_num + mark_num == 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Empty action");
		return -rte_errno;
	}

	/* Mark only is equal to mark + passthru. */
	if (dest_num == 0) {
		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
		filter->add_fltr.rule_cfg.action_set.count = ++number;
	}

	return 0;
}
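/* Example (illustrative values): actions "mark id 1234 / queue index 5"
 * produce VIRTCHNL_ACTION_MARK (mark_id 1234) plus VIRTCHNL_ACTION_QUEUE
 * (queue.index 5). A mark-only rule gets a trailing PASSTHRU action, per
 * the dest_num == 0 branch above.
 */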
static bool
iavf_fdir_refine_input_set(const uint64_t input_set,
			   const uint64_t input_set_mask,
			   struct iavf_fdir_conf *filter)
{
	struct virtchnl_proto_hdr *hdr, *hdr_last;
	struct rte_flow_item_ipv4 ipv4_spec;
	struct rte_flow_item_ipv6 ipv6_spec;
	int last_layer;
	uint8_t proto_id;

	if (input_set & ~input_set_mask)
		return false;

	/* this situation is non-tunnel fdir */
	last_layer = filter->add_fltr.rule_cfg.proto_hdrs.count - 1;
	/* The last layer of a TCP/UDP pattern is never less than 2. */
	if (last_layer < 2)
		return true;

	hdr_last = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer];
	if (hdr_last->type == VIRTCHNL_PROTO_HDR_TCP)
		proto_id = 6;	/* IPPROTO_TCP */
	else if (hdr_last->type == VIRTCHNL_PROTO_HDR_UDP)
		proto_id = 17;	/* IPPROTO_UDP */
	else
		return true;

	hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[last_layer - 1];
	switch (hdr->type) {
	case VIRTCHNL_PROTO_HDR_IPV4:
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
		memset(&ipv4_spec, 0, sizeof(ipv4_spec));
		ipv4_spec.hdr.next_proto_id = proto_id;
		rte_memcpy(hdr->buffer, &ipv4_spec.hdr,
			   sizeof(ipv4_spec.hdr));
		return true;
	case VIRTCHNL_PROTO_HDR_IPV6:
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
		memset(&ipv6_spec, 0, sizeof(ipv6_spec));
		ipv6_spec.hdr.proto = proto_id;
		rte_memcpy(hdr->buffer, &ipv6_spec.hdr,
			   sizeof(ipv6_spec.hdr));
		return true;
	default:
		return false;
	}
}
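/* When only L4 ports are matched on a non-tunnel pattern, the refinement
 * above also pins the L3 next-protocol field (6 for TCP, 17 for UDP) so
 * the rule cannot match the other transport protocol carrying the same
 * port values.
 */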
static void
iavf_fdir_add_fragment_hdr(struct virtchnl_proto_hdrs *hdrs, int layer)
{
	struct virtchnl_proto_hdr *hdr1;
	struct virtchnl_proto_hdr *hdr2;
	int i;

	if (layer < 0 || layer > hdrs->count)
		return;

	/* shift headers up by one layer */
	for (i = hdrs->count; i >= layer; i--) {
		hdr1 = &hdrs->proto_hdr[i];
		hdr2 = &hdrs->proto_hdr[i - 1];
		*hdr1 = *hdr2;
	}

	/* add a dummy fragment header */
	hdr1 = &hdrs->proto_hdr[layer];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, IPV4_FRAG);
	hdr1->field_selector = 0;
	hdrs->count = ++layer;
}
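/* Example: for an ETH / IPV4 fragment pattern, proto_hdrs { ETH, IPV4 }
 * becomes { ETH, IPV4, IPV4_FRAG } after the helper above inserts the
 * dummy fragment header at the requested layer.
 */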
static int
iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
			const struct rte_flow_item pattern[],
			const uint64_t input_set_mask,
			struct rte_flow_error *error,
			struct iavf_fdir_conf *filter)
{
	struct virtchnl_proto_hdrs *hdrs =
			&filter->add_fltr.rule_cfg.proto_hdrs;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	const struct rte_flow_item_ah *ah_spec, *ah_mask;
	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
	const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
	const struct rte_flow_item_gre *gre_spec, *gre_mask;
	const struct rte_flow_item *item = pattern;
	struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
	struct rte_ecpri_common_hdr ecpri_common;
	uint64_t input_set = IAVF_INSET_NONE;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type next_type;
	uint8_t tun_inner = 0;
	uint16_t ether_type;
	int layer = 0;

	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;
		if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    item_type ==
				    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range not supported");
			return -rte_errno;
		}

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			next_type = (item + 1)->type;

			hdr1 = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);

			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
			    (!eth_spec || !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "NULL eth spec/mask.");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid MAC_addr mask.");
					return -rte_errno;
				}
			}

			if (eth_spec && eth_mask && eth_mask->type) {
				if (eth_mask->type != RTE_BE16(0xffff)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item, "Invalid type mask.");
					return -rte_errno;
				}

				ether_type = rte_be_to_cpu_16(eth_spec->type);
				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
				    ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Unsupported ether_type.");
					return -rte_errno;
				}

				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				rte_memcpy(hdr1->buffer, eth_spec,
					   sizeof(struct rte_ether_hdr));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_last = item->last;
			ipv4_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

			if (!(ipv4_spec && ipv4_mask)) {
				hdrs->count = ++layer;
				break;
			}

			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			if (ipv4_last &&
			    (ipv4_last->hdr.version_ihl ||
			     ipv4_last->hdr.type_of_service ||
			     ipv4_last->hdr.time_to_live ||
			     ipv4_last->hdr.total_length ||
			     ipv4_last->hdr.next_proto_id ||
			     ipv4_last->hdr.hdr_checksum ||
			     ipv4_last->hdr.src_addr ||
			     ipv4_last->hdr.dst_addr)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 last.");
				return -rte_errno;
			}

			if (ipv4_mask->hdr.type_of_service ==
			    UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_TOS;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 DSCP);
			}

			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_PROTO;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 PROT);
			}

			if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV4_TTL;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 TTL);
			}

			if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
				input_set |= IAVF_INSET_IPV4_SRC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 SRC);
			}

			if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
				input_set |= IAVF_INSET_IPV4_DST;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
								 DST);
			}

			if (tun_inner) {
				input_set &= ~IAVF_PROT_IPV4_OUTER;
				input_set |= IAVF_PROT_IPV4_INNER;
			}

			rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
				   sizeof(ipv4_spec->hdr));

			hdrs->count = ++layer;

			/* fragment IPv4:
			 *	spec is 0x2000, mask is 0x2000
			 */
			if (ipv4_spec->hdr.fragment_offset ==
			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
			    ipv4_mask->hdr.fragment_offset ==
			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
				/* all IPv4 fragment packets share the same
				 * ethertype; if the spec and mask are valid,
				 * set ethertype into the input set.
				 */
				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				/* add dummy header for IPv4 Fragment */
				iavf_fdir_add_fragment_hdr(hdrs, layer);
			} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

			if (!(ipv6_spec && ipv6_mask)) {
				hdrs->count = ++layer;
				break;
			}

			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask");
				return -rte_errno;
			}

			if ((ipv6_mask->hdr.vtc_flow &
			     rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
			    == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
				input_set |= IAVF_INSET_IPV6_TC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 TC);
			}

			if (ipv6_mask->hdr.proto == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV6_NEXT_HDR;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 PROT);
			}

			if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
				input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 HOP_LIMIT);
			}

			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.src_addr))) {
				input_set |= IAVF_INSET_IPV6_SRC;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 SRC);
			}
			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
				input_set |= IAVF_INSET_IPV6_DST;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
								 DST);
			}

			if (tun_inner) {
				input_set &= ~IAVF_PROT_IPV6_OUTER;
				input_set |= IAVF_PROT_IPV6_INNER;
			}

			rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
				   sizeof(ipv6_spec->hdr));

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			ipv6_frag_spec = item->spec;
			ipv6_frag_mask = item->mask;
			next_type = (item + 1)->type;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);

			if (!(ipv6_frag_spec && ipv6_frag_mask)) {
				hdrs->count = ++layer;
				break;
			}

			/* fragment IPv6:
			 *	spec is 0x1, mask is 0x1
			 */
			if (ipv6_frag_spec->hdr.frag_data ==
			    rte_cpu_to_be_16(1) &&
			    ipv6_frag_mask->hdr.frag_data ==
			    rte_cpu_to_be_16(1)) {
				/* all IPv6 fragment packets share the same
				 * ethertype; if the spec and mask are valid,
				 * set ethertype into the input set.
				 */
				input_set |= IAVF_INSET_ETHERTYPE;
				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
								 ETHERTYPE);

				rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
					   sizeof(ipv6_frag_spec->hdr));
			} else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask.");
				return -rte_errno;
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

			if (udp_spec && udp_mask) {
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
				}
				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_UDP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
				}

				if (tun_inner) {
					input_set &= ~IAVF_PROT_UDP_OUTER;
					input_set |= IAVF_PROT_UDP_INNER;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						   &udp_spec->hdr,
						   sizeof(udp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						   &udp_spec->hdr,
						   sizeof(udp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

			if (tcp_spec && tcp_mask) {
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
				}
				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_TCP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
				}

				if (tun_inner) {
					input_set &= ~IAVF_PROT_TCP_OUTER;
					input_set |= IAVF_PROT_TCP_INNER;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						   &tcp_spec->hdr,
						   sizeof(tcp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						   &tcp_spec->hdr,
						   sizeof(tcp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM, item,
							   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_SRC_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
				}
				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					input_set |= IAVF_INSET_SCTP_DST_PORT;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					rte_memcpy(hdr->buffer,
						   &sctp_spec->hdr,
						   sizeof(sctp_spec->hdr));
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					rte_memcpy(hdr->buffer,
						   &sctp_spec->hdr,
						   sizeof(sctp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item, "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX) {
					input_set |= IAVF_INSET_GTPU_TEID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
				}

				rte_memcpy(hdr->buffer,
					   gtp_spec, sizeof(*gtp_spec));
			}

			tun_inner = 1;

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			if (!gtp_psc_spec)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
			else if ((gtp_psc_mask->hdr.qfi) &&
				 !(gtp_psc_mask->hdr.type))
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
			else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_UPLINK)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP);
			else if (gtp_psc_spec->hdr.type == IAVF_GTPU_EH_DWLINK)
				VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN);

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->hdr.qfi == 0x3F) {
					input_set |= IAVF_INSET_GTPU_QFI;
					if (!gtp_psc_mask->hdr.type)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
										 GTPU_EH, QFI);
					else if (gtp_psc_spec->hdr.type ==
						 IAVF_GTPU_EH_UPLINK)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
										 GTPU_UP, QFI);
					else if (gtp_psc_spec->hdr.type ==
						 IAVF_GTPU_EH_DWLINK)
						VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
										 GTPU_DWN, QFI);
				}

				rte_memcpy(hdr->buffer, gtp_psc_spec,
					   sizeof(*gtp_psc_spec));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			l2tpv3oip_spec = item->spec;
			l2tpv3oip_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);

			if (l2tpv3oip_spec && l2tpv3oip_mask) {
				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
				}

				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
					   sizeof(*l2tpv3oip_spec));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_ESP:
			esp_spec = item->spec;
			esp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

			if (esp_spec && esp_mask) {
				if (esp_mask->hdr.spi == UINT32_MAX) {
					input_set |= IAVF_INSET_ESP_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
				}

				rte_memcpy(hdr->buffer, &esp_spec->hdr,
					   sizeof(esp_spec->hdr));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_AH:
			ah_spec = item->spec;
			ah_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

			if (ah_spec && ah_mask) {
				if (ah_mask->spi == UINT32_MAX) {
					input_set |= IAVF_INSET_AH_SPI;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
				}

				rte_memcpy(hdr->buffer, ah_spec,
					   sizeof(*ah_spec));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_PFCP:
			pfcp_spec = item->spec;
			pfcp_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

			if (pfcp_spec && pfcp_mask) {
				if (pfcp_mask->s_field == UINT8_MAX) {
					input_set |= IAVF_INSET_PFCP_S_FIELD;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
				}

				rte_memcpy(hdr->buffer, pfcp_spec,
					   sizeof(*pfcp_spec));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_ECPRI:
			ecpri_spec = item->spec;
			ecpri_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];

			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ECPRI);

			if (ecpri_spec && ecpri_mask) {
				/* dereference the spec only after it has
				 * been checked against NULL above.
				 */
				ecpri_common.u32 =
					rte_be_to_cpu_32(ecpri_spec->hdr.common.u32);

				if (ecpri_common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA &&
				    ecpri_mask->hdr.type0.pc_id == UINT16_MAX) {
					input_set |= IAVF_ECPRI_PC_RTC_ID;
					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ECPRI,
									 PC_RTC_ID);
				}

				rte_memcpy(hdr->buffer, ecpri_spec,
					   sizeof(*ecpri_spec));
			}

			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = item->spec;
			gre_mask = item->mask;

			hdr = &hdrs->proto_hdr[layer];
			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GRE);

			if (gre_spec && gre_mask)
				rte_memcpy(hdr->buffer, gre_spec,
					   sizeof(*gre_spec));

			tun_inner = 1;
			hdrs->count = ++layer;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Protocol header layers exceed the maximum value");
		return -rte_errno;
	}

	if (!iavf_fdir_refine_input_set(input_set,
					input_set_mask | IAVF_INSET_ETHERTYPE,
					filter)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
				   "Invalid input set");
		return -rte_errno;
	}
	filter->input_set = input_set;

	return 0;
}
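/* Top-level parse hook invoked by the generic flow framework: match the
 * pattern against iavf_fdir_pattern[], fill the virtchnl rule kept in
 * vf->fdir.conf, then parse the actions; *meta receives the rule on
 * success.
 */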
static int
iavf_fdir_parse(struct iavf_adapter *ad,
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_fdir_conf *filter = &vf->fdir.conf;
	struct iavf_pattern_match_item *item = NULL;
	int ret;

	memset(filter, 0, sizeof(*filter));

	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = iavf_fdir_parse_pattern(ad, pattern, item->input_set_mask,
				      error, filter);
	if (ret)
		goto error;

	ret = iavf_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}
static struct iavf_flow_parser iavf_fdir_parser = {
	.engine = &iavf_fdir_engine,
	.array = iavf_fdir_pattern,
	.array_len = RTE_DIM(iavf_fdir_pattern),
	.parse_pattern_action = iavf_fdir_parse,
	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
};
RTE_INIT(iavf_fdir_engine_register)
{
	iavf_register_flow_engine(&iavf_fdir_engine);
}