1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
12 #include <rte_malloc.h>
13 #include <rte_tailq.h>
14 #include "base/i40e_prototype.h"
15 #include "i40e_logs.h"
16 #include "i40e_ethdev.h"
17 #include "i40e_hash.h"
20 #define BIT(n) (1UL << (n))
24 #define BIT_ULL(n) (1ULL << (n))
27 /* Pattern item headers */
28 #define I40E_HASH_HDR_ETH 0x01ULL
29 #define I40E_HASH_HDR_IPV4 0x10ULL
30 #define I40E_HASH_HDR_IPV6 0x20ULL
31 #define I40E_HASH_HDR_TCP 0x100ULL
32 #define I40E_HASH_HDR_UDP 0x200ULL
33 #define I40E_HASH_HDR_SCTP 0x400ULL
34 #define I40E_HASH_HDR_ESP 0x10000ULL
35 #define I40E_HASH_HDR_L2TPV3 0x20000ULL
36 #define I40E_HASH_HDR_AH 0x40000ULL
37 #define I40E_HASH_HDR_GTPC 0x100000ULL
38 #define I40E_HASH_HDR_GTPU 0x200000ULL
40 #define I40E_HASH_HDR_INNER_SHIFT 32
41 #define I40E_HASH_HDR_IPV4_INNER (I40E_HASH_HDR_IPV4 << \
42 I40E_HASH_HDR_INNER_SHIFT)
43 #define I40E_HASH_HDR_IPV6_INNER (I40E_HASH_HDR_IPV6 << \
44 I40E_HASH_HDR_INNER_SHIFT)
47 #define I40E_PHINT_ETH I40E_HASH_HDR_ETH
50 #define I40E_PHINT_IPV4 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV4)
51 #define I40E_PHINT_IPV4_TCP (I40E_PHINT_IPV4 | I40E_HASH_HDR_TCP)
52 #define I40E_PHINT_IPV4_UDP (I40E_PHINT_IPV4 | I40E_HASH_HDR_UDP)
53 #define I40E_PHINT_IPV4_SCTP (I40E_PHINT_IPV4 | I40E_HASH_HDR_SCTP)
56 #define I40E_PHINT_IPV6 (I40E_HASH_HDR_ETH | I40E_HASH_HDR_IPV6)
57 #define I40E_PHINT_IPV6_TCP (I40E_PHINT_IPV6 | I40E_HASH_HDR_TCP)
58 #define I40E_PHINT_IPV6_UDP (I40E_PHINT_IPV6 | I40E_HASH_HDR_UDP)
59 #define I40E_PHINT_IPV6_SCTP (I40E_PHINT_IPV6 | I40E_HASH_HDR_SCTP)
62 #define I40E_PHINT_IPV4_ESP (I40E_PHINT_IPV4 | I40E_HASH_HDR_ESP)
63 #define I40E_PHINT_IPV6_ESP (I40E_PHINT_IPV6 | I40E_HASH_HDR_ESP)
64 #define I40E_PHINT_IPV4_UDP_ESP (I40E_PHINT_IPV4_UDP | \
66 #define I40E_PHINT_IPV6_UDP_ESP (I40E_PHINT_IPV6_UDP | \
70 #define I40E_PHINT_IPV4_GTPC (I40E_PHINT_IPV4_UDP | \
72 #define I40E_PHINT_IPV6_GTPC (I40E_PHINT_IPV6_UDP | \
76 #define I40E_PHINT_IPV4_GTPU (I40E_PHINT_IPV4_UDP | \
78 #define I40E_PHINT_IPV4_GTPU_IPV4 (I40E_PHINT_IPV4_GTPU | \
79 I40E_HASH_HDR_IPV4_INNER)
80 #define I40E_PHINT_IPV4_GTPU_IPV6 (I40E_PHINT_IPV4_GTPU | \
81 I40E_HASH_HDR_IPV6_INNER)
82 #define I40E_PHINT_IPV6_GTPU (I40E_PHINT_IPV6_UDP | \
84 #define I40E_PHINT_IPV6_GTPU_IPV4 (I40E_PHINT_IPV6_GTPU | \
85 I40E_HASH_HDR_IPV4_INNER)
86 #define I40E_PHINT_IPV6_GTPU_IPV6 (I40E_PHINT_IPV6_GTPU | \
87 I40E_HASH_HDR_IPV6_INNER)
90 #define I40E_PHINT_IPV4_L2TPV3 (I40E_PHINT_IPV4 | I40E_HASH_HDR_L2TPV3)
91 #define I40E_PHINT_IPV6_L2TPV3 (I40E_PHINT_IPV6 | I40E_HASH_HDR_L2TPV3)
94 #define I40E_PHINT_IPV4_AH (I40E_PHINT_IPV4 | I40E_HASH_HDR_AH)
95 #define I40E_PHINT_IPV6_AH (I40E_PHINT_IPV6 | I40E_HASH_HDR_AH)
/* Structure of mapping RSS type to input set */
struct i40e_hash_map_rss_inset {

/* Lookup table from each supported ETH_RSS_* type to the hardware
 * input-set bits (I40E_INSET_*) hashed for that type.
 */
const struct i40e_hash_map_rss_inset i40e_hash_rss_inset[] = {
	/* IPv4 frag/other: hash on source and destination address only */
	{ ETH_RSS_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
	{ ETH_RSS_FRAG_IPV4, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
	{ ETH_RSS_NONFRAG_IPV4_OTHER,
	  I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST },
	/* IPv4 TCP/UDP add the L4 ports; SCTP also adds the verification tag */
	{ ETH_RSS_NONFRAG_IPV4_TCP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
	{ ETH_RSS_NONFRAG_IPV4_UDP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
	{ ETH_RSS_NONFRAG_IPV4_SCTP, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
	/* IPv6 mirrors the IPv4 rules above */
	{ ETH_RSS_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
	{ ETH_RSS_FRAG_IPV6, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
	{ ETH_RSS_NONFRAG_IPV6_OTHER,
	  I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST },
	{ ETH_RSS_NONFRAG_IPV6_TCP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
	{ ETH_RSS_NONFRAG_IPV6_UDP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
	{ ETH_RSS_NONFRAG_IPV6_SCTP, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
	  I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT },
	/* L2/L4-only selectors */
	{ ETH_RSS_PORT, I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT },
	{ ETH_RSS_L2_PAYLOAD, I40E_INSET_LAST_ETHER_TYPE },
	{ ETH_RSS_ETH, I40E_INSET_DMAC | I40E_INSET_SMAC },
	/* VLAN: S-VLAN is the outer tag, C-VLAN the inner tag */
	{ ETH_RSS_S_VLAN, I40E_INSET_VLAN_OUTER },
	{ ETH_RSS_C_VLAN, I40E_INSET_VLAN_INNER },
/* Bitmasks of the rte_flow item types permitted to directly follow a
 * given item during pattern validation (see pattern_next_allow_items).
 */
#define I40E_HASH_VOID_NEXT_ALLOW	BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH)

#define I40E_HASH_ETH_NEXT_ALLOW	(BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_VLAN))

/* Allowed L4/tunnel protocols after an IPv4/IPv6 header */
#define I40E_HASH_IP_NEXT_ALLOW		(BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_ESP) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_L2TPV3OIP) |\
					BIT_ULL(RTE_FLOW_ITEM_TYPE_AH))

/* GTP runs over UDP; GTPU may in turn carry an inner IP header */
#define I40E_HASH_UDP_NEXT_ALLOW	(BIT_ULL(RTE_FLOW_ITEM_TYPE_GTPU) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_GTPC))

#define I40E_HASH_GTPU_NEXT_ALLOW	(BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \
					BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6))
/* Per-item successor mask, indexed by enum rte_flow_item_type.  An item
 * type with no entry (zero) admits no successor.
 */
static const uint64_t pattern_next_allow_items[] = {
	[RTE_FLOW_ITEM_TYPE_VOID] = I40E_HASH_VOID_NEXT_ALLOW,
	[RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_ETH_NEXT_ALLOW,
	[RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_IP_NEXT_ALLOW,
	[RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_IP_NEXT_ALLOW,
	[RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_UDP_NEXT_ALLOW,
	[RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_GTPU_NEXT_ALLOW,
/* Map each rte_flow item type to its I40E_HASH_HDR_* header flag; the
 * flags are OR-ed together to form a pattern type (I40E_PHINT_*).
 */
static const uint64_t pattern_item_header[] = {
	[RTE_FLOW_ITEM_TYPE_ETH] = I40E_HASH_HDR_ETH,
	[RTE_FLOW_ITEM_TYPE_IPV4] = I40E_HASH_HDR_IPV4,
	[RTE_FLOW_ITEM_TYPE_IPV6] = I40E_HASH_HDR_IPV6,
	[RTE_FLOW_ITEM_TYPE_TCP] = I40E_HASH_HDR_TCP,
	[RTE_FLOW_ITEM_TYPE_UDP] = I40E_HASH_HDR_UDP,
	[RTE_FLOW_ITEM_TYPE_SCTP] = I40E_HASH_HDR_SCTP,
	[RTE_FLOW_ITEM_TYPE_ESP] = I40E_HASH_HDR_ESP,
	[RTE_FLOW_ITEM_TYPE_GTPC] = I40E_HASH_HDR_GTPC,
	[RTE_FLOW_ITEM_TYPE_GTPU] = I40E_HASH_HDR_GTPU,
	[RTE_FLOW_ITEM_TYPE_L2TPV3OIP] = I40E_HASH_HDR_L2TPV3,
	[RTE_FLOW_ITEM_TYPE_AH] = I40E_HASH_HDR_AH,
/* Structure of matched pattern */
struct i40e_hash_match_pattern {
	uint64_t pattern_type;
	uint64_t rss_mask;	/* Supported RSS type for this pattern */
	bool custom_pctype_flag;/* true for custom packet type */

/* Initializer helpers: the last field is the packet-classifier type,
 * either a hardware pctype or an I40E_CUSTOMIZED_* software pctype.
 */
#define I40E_HASH_MAP_PATTERN(pattern, rss_mask, pctype) { \
	pattern, rss_mask, false, pctype }

#define I40E_HASH_MAP_CUS_PATTERN(pattern, rss_mask, cus_pctype) { \
	pattern, rss_mask, true, cus_pctype }
204 #define I40E_HASH_L2_RSS_MASK (ETH_RSS_ETH | ETH_RSS_L2_SRC_ONLY | \
207 #define I40E_HASH_L23_RSS_MASK (I40E_HASH_L2_RSS_MASK | \
209 ETH_RSS_L3_SRC_ONLY | \
212 #define I40E_HASH_IPV4_L23_RSS_MASK (ETH_RSS_IPV4 | I40E_HASH_L23_RSS_MASK)
213 #define I40E_HASH_IPV6_L23_RSS_MASK (ETH_RSS_IPV6 | I40E_HASH_L23_RSS_MASK)
215 #define I40E_HASH_L234_RSS_MASK (I40E_HASH_L23_RSS_MASK | \
216 ETH_RSS_PORT | ETH_RSS_L4_SRC_ONLY | \
219 #define I40E_HASH_IPV4_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV4)
220 #define I40E_HASH_IPV6_L234_RSS_MASK (I40E_HASH_L234_RSS_MASK | ETH_RSS_IPV6)
/* All RSS types that include an L4 header (used to decide whether
 * SRC_ONLY/DST_ONLY modifiers apply to ports as well as addresses).
 */
#define I40E_HASH_L4_TYPES		(ETH_RSS_NONFRAG_IPV4_TCP | \
					ETH_RSS_NONFRAG_IPV4_UDP | \
					ETH_RSS_NONFRAG_IPV4_SCTP | \
					ETH_RSS_NONFRAG_IPV6_TCP | \
					ETH_RSS_NONFRAG_IPV6_UDP | \
					ETH_RSS_NONFRAG_IPV6_SCTP)
/* Current supported patterns and RSS types.
 * All items that have the same pattern types are together.
 */
static const struct i40e_hash_match_pattern match_patterns[] = {
	/* Ethernet-only pattern */
	I40E_HASH_MAP_PATTERN(I40E_PHINT_ETH,
			      ETH_RSS_L2_PAYLOAD | I40E_HASH_L2_RSS_MASK,
			      I40E_FILTER_PCTYPE_L2_PAYLOAD),

	/* IPv4: one entry per pctype (frag vs non-frag-other share pattern) */
	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
			      ETH_RSS_FRAG_IPV4 | I40E_HASH_IPV4_L23_RSS_MASK,
			      I40E_FILTER_PCTYPE_FRAG_IPV4),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4,
			      ETH_RSS_NONFRAG_IPV4_OTHER |
			      I40E_HASH_IPV4_L23_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV4_OTHER),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_TCP,
			      ETH_RSS_NONFRAG_IPV4_TCP |
			      I40E_HASH_IPV4_L234_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV4_TCP),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_UDP,
			      ETH_RSS_NONFRAG_IPV4_UDP |
			      I40E_HASH_IPV4_L234_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV4_UDP),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV4_SCTP,
			      ETH_RSS_NONFRAG_IPV4_SCTP |
			      I40E_HASH_IPV4_L234_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV4_SCTP),

	/* IPv6 entries mirror the IPv4 ones above */
	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
			      ETH_RSS_FRAG_IPV6 | I40E_HASH_IPV6_L23_RSS_MASK,
			      I40E_FILTER_PCTYPE_FRAG_IPV6),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6,
			      ETH_RSS_NONFRAG_IPV6_OTHER |
			      I40E_HASH_IPV6_L23_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV6_OTHER),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_TCP,
			      ETH_RSS_NONFRAG_IPV6_TCP |
			      I40E_HASH_IPV6_L234_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV6_TCP),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_UDP,
			      ETH_RSS_NONFRAG_IPV6_UDP |
			      I40E_HASH_IPV6_L234_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV6_UDP),

	I40E_HASH_MAP_PATTERN(I40E_PHINT_IPV6_SCTP,
			      ETH_RSS_NONFRAG_IPV6_SCTP |
			      I40E_HASH_IPV6_L234_RSS_MASK,
			      I40E_FILTER_PCTYPE_NONF_IPV6_SCTP),

	/* ESP over IPv4/IPv6, optionally UDP-encapsulated (custom pctypes) */
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_ESP,
				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_ESP,
				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_UDP_ESP,
				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV4_UDP),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_UDP_ESP,
				  ETH_RSS_ESP, I40E_CUSTOMIZED_ESP_IPV6_UDP),

	/* GTP-C control traffic */
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPC,
				  I40E_HASH_IPV4_L234_RSS_MASK,
				  I40E_CUSTOMIZED_GTPC),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPC,
				  I40E_HASH_IPV6_L234_RSS_MASK,
				  I40E_CUSTOMIZED_GTPC),

	/* GTP-U user traffic, with optional inner IPv4/IPv6 header */
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU,
				  I40E_HASH_IPV4_L234_RSS_MASK,
				  I40E_CUSTOMIZED_GTPU),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV4,
				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_GTPU_IPV6,
				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU,
				  I40E_HASH_IPV6_L234_RSS_MASK,
				  I40E_CUSTOMIZED_GTPU),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV4,
				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV4),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_GTPU_IPV6,
				  ETH_RSS_GTPU, I40E_CUSTOMIZED_GTPU_IPV6),

	/* L2TPv3 over IP */
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_L2TPV3,
				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV4_L2TPV3),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_L2TPV3,
				  ETH_RSS_L2TPV3, I40E_CUSTOMIZED_IPV6_L2TPV3),

	/* IPsec AH */
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV4_AH, ETH_RSS_AH,
				  I40E_CUSTOMIZED_AH_IPV4),
	I40E_HASH_MAP_CUS_PATTERN(I40E_PHINT_IPV6_AH, ETH_RSS_AH,
				  I40E_CUSTOMIZED_AH_IPV6),
/* Walk the rte_flow pattern items and fold them into a pattern-type
 * bitmap of I40E_HASH_HDR_* flags (outer headers in the low 32 bits;
 * headers behind a GTPU item are shifted by I40E_HASH_HDR_INNER_SHIFT).
 * Writes the result to *pattern_types; on an unsupported pattern it
 * reports through rte_flow_error_set().
 */
i40e_hash_get_pattern_type(const struct rte_flow_item pattern[],
			   uint64_t *pattern_types,
			   struct rte_flow_error *error)
	const char *message = "Pattern not supported";
	enum rte_flow_item_type prev_item_type = RTE_FLOW_ITEM_TYPE_VOID;
	enum rte_flow_item_type last_item_type = prev_item_type;
	uint64_t item_hdr, pattern_hdrs = 0;
	/* true once a GTPU item has been seen: later items are inner */
	bool inner_flag = false;

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)

		/* Hash rules match on header presence only, not fields */
		if (pattern->mask || pattern->spec || pattern->last) {
			message = "Header info should not be specified";

		/* Check the previous item allows this sub-item. */
		if (prev_item_type >= (enum rte_flow_item_type)
				RTE_DIM(pattern_next_allow_items) ||
		    !(pattern_next_allow_items[prev_item_type] &
				BIT_ULL(pattern->type)))

		/* For VLAN item, it does no matter about to pattern type
		 * recognition. So just count the number of VLAN and do not
		 * change the value of variable `prev_item_type`.
		 */
		last_item_type = pattern->type;
		if (last_item_type == RTE_FLOW_ITEM_TYPE_VLAN) {

		prev_item_type = last_item_type;
		assert(last_item_type < (enum rte_flow_item_type)
				RTE_DIM(pattern_item_header));
		item_hdr = pattern_item_header[last_item_type];

		/* After GTPU, headers belong to the tunnelled inner layer */
			item_hdr <<= I40E_HASH_HDR_INNER_SHIFT;

		/* Inner layer should not have GTPU item */
		if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU)

		if (last_item_type == RTE_FLOW_ITEM_TYPE_GTPU) {

		/* Reject a duplicated header within one pattern */
		if (item_hdr & pattern_hdrs)

		pattern_hdrs |= item_hdr;

	/* A pattern must not end on a VLAN item */
	if (pattern_hdrs && last_item_type != RTE_FLOW_ITEM_TYPE_VLAN) {
		*pattern_types = pattern_hdrs;

	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
/* Return the extra X722-only packet-classifier types (as a BIT_ULL
 * bitmap) that must be configured alongside the given base pctype; the
 * X722 MAC splits TCP SYN-no-ACK and unicast/multicast UDP into separate
 * pctypes.
 */
i40e_hash_get_x722_ext_pctypes(uint8_t match_pctype)
	uint64_t pctypes = 0;

	switch (match_pctype) {
	case I40E_FILTER_PCTYPE_NONF_IPV4_TCP:
		pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);

	case I40E_FILTER_PCTYPE_NONF_IPV4_UDP:
		pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);

	case I40E_FILTER_PCTYPE_NONF_IPV6_TCP:
		pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);

	case I40E_FILTER_PCTYPE_NONF_IPV6_UDP:
		pctypes = BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
			  BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
/* For GTP-C/GTP-U pctypes, only the outer destination IP may be hashed:
 * reject source-IP/port selections and translate the destination-IP
 * input-set bits into their tunnel equivalents.
 */
i40e_hash_translate_gtp_inset(struct i40e_rte_flow_rss_conf *rss_conf,
			      struct rte_flow_error *error)
	if (rss_conf->inset &
	    (I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC |
	     I40E_INSET_DST_PORT | I40E_INSET_SRC_PORT))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "Only support external destination IP");

	/* Replace outer-DST bits with the tunnel-DST equivalents */
	if (rss_conf->inset & I40E_INSET_IPV4_DST)
		rss_conf->inset = (rss_conf->inset & ~I40E_INSET_IPV4_DST) |
				  I40E_INSET_TUNNEL_IPV4_DST;

	if (rss_conf->inset & I40E_INSET_IPV6_DST)
		rss_conf->inset = (rss_conf->inset & ~I40E_INSET_IPV6_DST) |
				  I40E_INSET_TUNNEL_IPV6_DST;
/* Resolve the matched pattern entry to hardware pctype bits and
 * accumulate them into rss_conf->config_pctypes.  Custom (software)
 * pctypes are looked up per-PF; GTP types additionally need their input
 * set translated.  On X722, companion pctypes are added as well.
 */
i40e_hash_get_pctypes(const struct rte_eth_dev *dev,
		      const struct i40e_hash_match_pattern *match,
		      struct i40e_rte_flow_rss_conf *rss_conf,
		      struct rte_flow_error *error)
	if (match->custom_pctype_flag) {
		struct i40e_customized_pctype *custom_type;

		pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
		custom_type = i40e_find_customized_pctype(pf, match->pctype);
		/* The custom pctype must have been negotiated with firmware */
		if (!custom_type || !custom_type->valid)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "PCTYPE not supported");

		rss_conf->config_pctypes |= BIT_ULL(custom_type->pctype);

		/* GTP tunnels only hash on the outer destination IP */
		if (match->pctype == I40E_CUSTOMIZED_GTPU ||
		    match->pctype == I40E_CUSTOMIZED_GTPC)
			return i40e_hash_translate_gtp_inset(rss_conf, error);

			I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

		rss_conf->config_pctypes |= BIT_ULL(match->pctype);
		/* X722 splits some pctypes; enable the companions too */
		if (hw->mac.type == I40E_MAC_X722) {
			types = i40e_hash_get_x722_ext_pctypes(match->pctype);
			rss_conf->config_pctypes |= types;
/* Match the parsed pattern against match_patterns[] and collect the
 * pctypes for every entry whose RSS-type mask covers the requested
 * types.  Fails with ENOTSUP when the pattern is unknown or when no
 * matching entry supports the requested RSS types.
 */
i40e_hash_get_pattern_pctypes(const struct rte_eth_dev *dev,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action_rss *rss_act,
			      struct i40e_rte_flow_rss_conf *rss_conf,
			      struct rte_flow_error *error)
	uint64_t pattern_types = 0;
	/* set once any table entry matches the pattern type */
	bool match_flag = false;

	ret = i40e_hash_get_pattern_type(pattern, &pattern_types, error);

	for (i = 0; i < (int)RTE_DIM(match_patterns); i++) {
		const struct i40e_hash_match_pattern *match =

		/* Check pattern types match. All items that have the same
		 * pattern types are together, so if the pattern types match
		 * previous item but they doesn't match current item, it means
		 * the pattern types do not match all remain items.
		 */
		if (pattern_types != match->pattern_type) {

		/* Check RSS types match: every requested type supported */
		if (!(rss_act->types & ~match->rss_mask)) {
			ret = i40e_hash_get_pctypes(dev, match,

	if (rss_conf->config_pctypes)

	/* Pattern matched but requested RSS types are unsupported */
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "RSS types not supported");

	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				  NULL, "Pattern not supported");
/* Translate ETH_RSS_* type bits to the hardware input-set bitmap via
 * i40e_hash_rss_inset[], then prune fields according to the
 * SRC_ONLY/DST_ONLY modifiers on each layer.
 */
i40e_hash_get_inset(uint64_t rss_types)
	uint64_t mask, inset = 0;

	for (i = 0; i < (int)RTE_DIM(i40e_hash_rss_inset); i++) {
		if (rss_types & i40e_hash_rss_inset[i].rss_type)
			inset |= i40e_hash_rss_inset[i].inset;

	/* If SRC_ONLY and DST_ONLY of the same level are used simultaneously,
	 * it is the same case as none of them are added.
	 */
	mask = rss_types & (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY);
	if (mask == ETH_RSS_L2_SRC_ONLY)
		inset &= ~I40E_INSET_DMAC;
	else if (mask == ETH_RSS_L2_DST_ONLY)
		inset &= ~I40E_INSET_SMAC;

	mask = rss_types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
	if (mask == ETH_RSS_L3_SRC_ONLY)
		inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST);
	else if (mask == ETH_RSS_L3_DST_ONLY)
		inset &= ~(I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);

	mask = rss_types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
	if (mask == ETH_RSS_L4_SRC_ONLY)
		inset &= ~I40E_INSET_DST_PORT;
	else if (mask == ETH_RSS_L4_DST_ONLY)
		inset &= ~I40E_INSET_SRC_PORT;

	/* For L4 types, a modifier on only one layer drops the other
	 * layer's fields entirely.
	 */
	if (rss_types & I40E_HASH_L4_TYPES) {
		uint64_t l3_mask = rss_types &
				   (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
		uint64_t l4_mask = rss_types &
				   (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);

		if (l3_mask && !l4_mask)
			inset &= ~(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT);
		else if (!l3_mask && l4_mask)
			inset &= ~(I40E_INSET_IPV4_DST | I40E_INSET_IPV6_DST |
				   I40E_INSET_IPV4_SRC | I40E_INSET_IPV6_SRC);
/* Program the global hash function (Toeplitz vs simple XOR) via the
 * GLQF_CTL register, and the per-port symmetric-hash enable.  The
 * register is global to the NIC, so the change is refused when multiple
 * drivers share the device.
 */
i40e_hash_config_func(struct i40e_hw *hw, enum rte_eth_hash_function func)
	uint8_t symmetric = 0;

	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);

	if (func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
		/* HTOEP clear selects the simple XOR hash */
		if (!(reg & I40E_GLQF_CTL_HTOEP_MASK))

		reg &= ~I40E_GLQF_CTL_HTOEP_MASK;

		if (func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)

		/* Register already selects Toeplitz: nothing to write */
		if (reg & I40E_GLQF_CTL_HTOEP_MASK)

		reg |= I40E_GLQF_CTL_HTOEP_MASK;

	/* GLQF_CTL is device-global; refuse under multi-driver mode */
	pf = &((struct i40e_adapter *)hw->back)->pf;
	if (pf->support_multi_driver) {
			"Modify hash function is not permitted when multi-driver enabled");

	PMD_DRV_LOG(INFO, "NIC hash function is setting to %d", func);
	i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
	I40E_WRITE_FLUSH(hw);

	i40e_set_symmetric_hash_enable_per_port(hw, symmetric);
/* Enable or disable symmetric hashing for one pctype through its
 * GLQF_HSYM register.  The register is device-global, so the write is
 * refused when multiple drivers share the NIC.
 */
i40e_hash_config_pctype_symmetric(struct i40e_hw *hw,
	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;

	/* For X722, get translated pctype in fd pctype register */
	if (hw->mac.type == I40E_MAC_X722)
		pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));

	reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));

	/* Skip the write when the bit already has the desired value */
	if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)

	reg |= I40E_GLQF_HSYM_SYMH_ENA_MASK;

	if (!(reg & I40E_GLQF_HSYM_SYMH_ENA_MASK))

	reg &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;

	/* GLQF_HSYM is device-global; refuse under multi-driver mode */
	if (pf->support_multi_driver) {
			"Enable/Disable symmetric hash is not permitted when multi-driver enabled");

	i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
	I40E_WRITE_FLUSH(hw);
/* Turn RSS hashing on or off for one pctype by setting/clearing its bit
 * in the per-function PFQF_HENA register pair (bits 0-31 in HENA(0),
 * 32-63 in HENA(1)).
 */
i40e_hash_enable_pctype(struct i40e_hw *hw,
			uint32_t pctype, bool enable)
	uint32_t reg, reg_val, mask;

	/* For X722, get translated pctype in fd pctype register */
	if (hw->mac.type == I40E_MAC_X722)
		pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));

		reg = I40E_PFQF_HENA(0);

	/* pctypes >= 32 live in the second HENA register */
	mask = BIT(pctype - 32);
	reg = I40E_PFQF_HENA(1);

	reg_val = i40e_read_rx_ctl(hw, reg);

	/* Skip the write when the bit already has the desired value */
	if (!(reg_val & mask))

	i40e_write_rx_ctl(hw, reg, reg_val);
	I40E_WRITE_FLUSH(hw);
/* Apply one flow's configuration to a single pctype: empty RSS types
 * disable hashing for the pctype; otherwise program the input set (when
 * given) and enable the pctype.
 */
i40e_hash_config_pctype(struct i40e_hw *hw,
			struct i40e_rte_flow_rss_conf *rss_conf,
	uint64_t rss_types = rss_conf->conf.types;

	/* No RSS types requested: this rule disables the pctype */
	if (rss_types == 0) {
		i40e_hash_enable_pctype(hw, pctype, false);

	if (rss_conf->inset) {
		ret = i40e_set_hash_inset(hw, rss_conf->inset, pctype, false);

	i40e_hash_enable_pctype(hw, pctype, true);
735 i40e_hash_config_region(struct i40e_pf *pf,
736 const struct i40e_rte_flow_rss_conf *rss_conf)
738 struct i40e_hw *hw = &pf->adapter->hw;
739 struct rte_eth_dev *dev = pf->adapter->eth_dev;
740 struct i40e_queue_region_info *regions = pf->queue_region.region;
741 uint32_t num = pf->queue_region.queue_region_number;
742 uint32_t i, region_id_mask = 0;
744 /* Use a 32 bit variable to represent all regions */
745 RTE_BUILD_BUG_ON(I40E_REGION_MAX_INDEX > 31);
747 /* Re-configure the region if it existed */
748 for (i = 0; i < num; i++) {
749 if (rss_conf->region_queue_start ==
750 regions[i].queue_start_index &&
751 rss_conf->region_queue_num == regions[i].queue_num) {
754 for (j = 0; j < regions[i].user_priority_num; j++) {
755 if (regions[i].user_priority[j] ==
756 rss_conf->region_priority)
760 if (j >= I40E_MAX_USER_PRIORITY) {
762 "Priority number exceed the maximum %d",
763 I40E_MAX_USER_PRIORITY);
767 regions[i].user_priority[j] = rss_conf->region_priority;
768 regions[i].user_priority_num++;
769 return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
772 region_id_mask |= BIT(regions[i].region_id);
775 if (num > I40E_REGION_MAX_INDEX) {
776 PMD_DRV_LOG(ERR, "Queue region resource used up");
780 /* Add a new region */
782 pf->queue_region.queue_region_number++;
783 memset(®ions[num], 0, sizeof(regions[0]));
785 regions[num].region_id = rte_bsf32(~region_id_mask);
786 regions[num].queue_num = rss_conf->region_queue_num;
787 regions[num].queue_start_index = rss_conf->region_queue_start;
788 regions[num].user_priority[0] = rss_conf->region_priority;
789 regions[num].user_priority_num = 1;
791 return i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
/* Apply one parsed RSS flow to hardware: hash function, queue region,
 * RSS key, lookup table, then per-pctype input set / symmetric hashing /
 * enable.  Each successfully-applied piece is recorded in the
 * misc_reset_flags / reset_*_pctypes fields so it can be undone when the
 * flow is destroyed.
 */
i40e_hash_config(struct i40e_pf *pf,
		 struct i40e_rte_flow_rss_conf *rss_conf)
	struct rte_flow_action_rss *rss_info = &rss_conf->conf;
	struct i40e_hw *hw = &pf->adapter->hw;

	if (rss_info->func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
		ret = i40e_hash_config_func(hw, rss_info->func);

		/* Only a non-Toeplitz function needs resetting on destroy */
		if (rss_info->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
			rss_conf->misc_reset_flags |=
					I40E_HASH_FLOW_RESET_FLAG_FUNC;

	if (rss_conf->region_queue_num > 0) {
		ret = i40e_hash_config_region(pf, rss_conf);

		rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_REGION;

	if (rss_info->key_len > 0) {
		ret = i40e_set_rss_key(pf->main_vsi, rss_conf->key,

		rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_KEY;

	/* Update lookup table */
	if (rss_info->queue_num > 0) {
		uint8_t lut[ETH_RSS_RETA_SIZE_512];

		/* Fill the LUT by cycling through the requested queues */
		for (i = 0; i < hw->func_caps.rss_table_size; i++) {
			lut[i] = (uint8_t)rss_info->queue[j];
			j = (j == rss_info->queue_num - 1) ? 0 : (j + 1);

		ret = i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);

		/* Remember which queues receive hashed traffic (used by
		 * queue-region validation later).
		 */
		pf->hash_enabled_queues = 0;
		for (i = 0; i < rss_info->queue_num; i++)
			pf->hash_enabled_queues |= BIT_ULL(lut[i]);

		pf->adapter->rss_reta_updated = 0;
		rss_conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_QUEUE;

	/* The codes behind configure the input sets and symmetric hash
	 * function of the packet types and enable hash on them.
	 */
	pctypes = rss_conf->config_pctypes;

	/* For first flow that will enable hash on any packet type, we clean
	 * the RSS sets that by legacy configuration commands and parameters.
	 */
	if (!pf->hash_filter_enabled) {
		i40e_pf_disable_rss(pf);
		pf->hash_filter_enabled = true;

		/* Process pctypes one bit at a time, lowest first */
		uint32_t idx = rte_bsf64(pctypes);
		uint64_t bit = BIT_ULL(idx);

		if (rss_conf->symmetric_enable) {
			ret = i40e_hash_config_pctype_symmetric(hw, idx, true);

			rss_conf->reset_symmetric_pctypes |= bit;

		ret = i40e_hash_config_pctype(hw, rss_conf, idx);

		rss_conf->reset_config_pctypes |= bit;
/* Copy the user RSS key into rss_conf, falling back to a fixed default
 * key (with a warning) when the key is absent or of the wrong length.
 * Always points rss_conf->conf.key at the internal copy.
 */
i40e_hash_parse_key(const struct rte_flow_action_rss *rss_act,
		    struct i40e_rte_flow_rss_conf *rss_conf)
	const uint8_t *key = rss_act->key;

	if (!key || rss_act->key_len != sizeof(rss_conf->key)) {
		/* Default RSS key used when the caller supplies none */
		const uint32_t rss_key_default[] = {0x6b793944,
				0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
				0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
				0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

		/* Warn only for a wrong length, not for a missing key */
		if (rss_act->key_len != sizeof(rss_conf->key))
				"RSS key length invalid, must be %u bytes, now set key to default",
				(uint32_t)sizeof(rss_conf->key));

		memcpy(rss_conf->key, rss_key_default, sizeof(rss_conf->key));

		memcpy(rss_conf->key, key, sizeof(rss_conf->key));

	/* Point the action at the internal copy so it outlives the caller */
	rss_conf->conf.key = rss_conf->key;
	rss_conf->conf.key_len = sizeof(rss_conf->key);
/* Validate the queue list of a pattern-less RSS action (which only
 * reprograms the lookup table) and copy it into rss_conf.  Every queue
 * index must be below the configured Rx queue count (capped at the
 * per-TC maximum).
 */
i40e_hash_parse_queues(const struct rte_eth_dev *dev,
		       const struct rte_flow_action_rss *rss_act,
		       struct i40e_rte_flow_rss_conf *rss_conf,
		       struct rte_flow_error *error)
	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (!rss_act->queue_num ||
	    rss_act->queue_num > hw->func_caps.rss_table_size)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "Invalid RSS queue number");

	/* A key makes no sense here: only the LUT is being changed */
	if (rss_act->key_len)
			"RSS key is ignored when queues specified");

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	/* With VMDq, only part of the Rx queues belong to the main VSI */
	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
		max_queue = i40e_pf_calc_configured_queues_num(pf);

		max_queue = pf->dev_data->nb_rx_queues;

	max_queue = RTE_MIN(max_queue, I40E_MAX_Q_PER_TC);

	for (i = 0; i < rss_act->queue_num; i++) {
		if ((int)rss_act->queue[i] >= max_queue)

	/* Loop ended early: some queue index was out of range */
	if (i < rss_act->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "Invalid RSS queues");

	memcpy(rss_conf->queue, rss_act->queue,
	       rss_act->queue_num * sizeof(rss_conf->queue[0]));
	rss_conf->conf.queue = rss_conf->queue;
	rss_conf->conf.queue_num = rss_act->queue_num;
/* Parse a queue-region rule: a single VLAN item whose PCP (top 3 bits of
 * the TCI) selects the user priority, plus an RSS action naming a
 * power-of-two run of consecutive queues that are already present in the
 * RSS lookup table.
 */
i40e_hash_parse_queue_region(const struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action_rss *rss_act,
			     struct i40e_rte_flow_rss_conf *rss_conf,
			     struct rte_flow_error *error)
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	uint64_t hash_queues;

	/* Exactly one (VLAN) item is allowed for queue-region rules */
	if (pattern[1].type != RTE_FLOW_ITEM_TYPE_END)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
					  "Pattern not supported.");

	/* The full 3-bit PCP field must be masked (mask >> 13 == 0b111) */
	vlan_spec = pattern->spec;
	vlan_mask = pattern->mask;
	if (!vlan_spec || !vlan_mask ||
	    (rte_be_to_cpu_16(vlan_mask->tci) >> 13) != 7)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, pattern,

		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "Queues not specified");

	if (rss_act->key_len)
			"RSS key is ignored when configure queue region");

	/* Use a 64 bit variable to represent all queues in a region. */
	RTE_BUILD_BUG_ON(I40E_MAX_Q_PER_TC > 64);

	/* NOTE(review): when queue[0] + queue_num == I40E_MAX_Q_PER_TC == 64
	 * the BIT_ULL(64) below shifts by the full type width (UB in C) —
	 * confirm the region end can never reach exactly 64.
	 */
	if (!rss_act->queue_num ||
	    !rte_is_power_of_2(rss_act->queue_num) ||
	    rss_act->queue_num + rss_act->queue[0] > I40E_MAX_Q_PER_TC)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "Queue number error");

	/* Queues must form one consecutive run */
	for (i = 1; i < rss_act->queue_num; i++) {
		if (rss_act->queue[i - 1] + 1 != rss_act->queue[i])

	if (i < rss_act->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "Queues must be incremented continuously");

	/* Map all queues to bits of uint64_t */
	hash_queues = (BIT_ULL(rss_act->queue[0] + rss_act->queue_num) - 1) &
		      ~(BIT_ULL(rss_act->queue[0]) - 1);

	/* Every region queue must already receive hashed traffic */
	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	if (hash_queues & ~pf->hash_enabled_queues)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "Some queues are not in LUT");

	rss_conf->region_queue_num = (uint8_t)rss_act->queue_num;
	rss_conf->region_queue_start = rss_act->queue[0];
	/* PCP value = top 3 bits of the VLAN TCI */
	rss_conf->region_priority = rte_be_to_cpu_16(vlan_spec->tci) >> 13;
/* Parse an RSS action with no (or only a VLAN) pattern: such rules set
 * global state — hash function, queue region, LUT queues, or RSS key —
 * rather than per-pctype hashing.
 */
i40e_hash_parse_global_conf(const struct rte_eth_dev *dev,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action_rss *rss_act,
			    struct i40e_rte_flow_rss_conf *rss_conf,
			    struct rte_flow_error *error)
	/* Symmetric hashing is per-pctype; it needs a pattern */
	if (rss_act->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "Symmetric function should be set with pattern types");

	rss_conf->conf.func = rss_act->func;

		PMD_DRV_LOG(WARNING,
			    "RSS types are ignored when no pattern specified");

	/* A leading VLAN item means this is a queue-region rule */
	if (pattern[0].type == RTE_FLOW_ITEM_TYPE_VLAN)
		return i40e_hash_parse_queue_region(dev, pattern, rss_act,

		return i40e_hash_parse_queues(dev, rss_act, rss_conf, error);

	if (rss_act->key_len) {
		i40e_hash_parse_key(rss_act, rss_conf);

	/* Nothing requested at all: rule is accepted but has no effect */
	if (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT)
		PMD_DRV_LOG(WARNING, "Nothing change");
/* Check that each SRC_ONLY/DST_ONLY modifier in rss_types is accompanied
 * by at least one RSS type containing the corresponding layer (L2, L3,
 * L4); a modifier without a matching layer is invalid.
 */
i40e_hash_validate_rss_types(uint64_t rss_types)
	uint64_t type, mask;

	/* L2 modifiers require an L2 type (ETH) */
	type = ETH_RSS_ETH & rss_types;
	mask = (ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY) & rss_types;

	/* L3 modifiers require an IP type */
	type = (I40E_HASH_L4_TYPES | ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
		ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_IPV6 |
		ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER) & rss_types;
	mask = (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY) & rss_types;

	/* L4 modifiers require an L4 type */
	type = (I40E_HASH_L4_TYPES | ETH_RSS_PORT) & rss_types;
	mask = (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY) & rss_types;
1100 i40e_hash_parse_pattern_act(const struct rte_eth_dev *dev,
1101 const struct rte_flow_item pattern[],
1102 const struct rte_flow_action_rss *rss_act,
1103 struct i40e_rte_flow_rss_conf *rss_conf,
1104 struct rte_flow_error *error)
1107 return rte_flow_error_set(error, EINVAL,
1108 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1110 "RSS Queues not supported when pattern specified");
1112 if (rss_act->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
1113 rss_conf->symmetric_enable = true;
1114 else if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
1115 return rte_flow_error_set(error, -EINVAL,
1116 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1118 "Only symmetric TOEPLITZ supported when pattern specified");
1120 if (!i40e_hash_validate_rss_types(rss_act->types))
1121 return rte_flow_error_set(error, EINVAL,
1122 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1123 NULL, "RSS types are invalid");
1125 if (rss_act->key_len)
1126 i40e_hash_parse_key(rss_act, rss_conf);
1128 rss_conf->conf.func = rss_act->func;
1129 rss_conf->conf.types = rss_act->types;
1130 rss_conf->inset = i40e_hash_get_inset(rss_act->types);
1132 return i40e_hash_get_pattern_pctypes(dev, pattern, rss_act,
/* Top-level parser for RSS hash flows: validates the action list (one
 * RSS action, level 0), skips leading VOID items, then dispatches to the
 * global-configuration parser (no pattern / VLAN pattern) or the
 * per-pattern parser.
 */
i40e_hash_parse(const struct rte_eth_dev *dev,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct i40e_rte_flow_rss_conf *rss_conf,
		struct rte_flow_error *error)
	const struct rte_flow_action_rss *rss_act;

	/* Exactly one action (the RSS action) is accepted */
	if (actions[1].type != RTE_FLOW_ACTION_TYPE_END)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "Only support one action for RSS.");

	rss_act = (const struct rte_flow_action_rss *)actions[0].conf;
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  "RSS level is not supported");

	/* Skip leading VOID items before classifying the pattern */
	while (pattern->type == RTE_FLOW_ITEM_TYPE_VOID)

	if (pattern[0].type == RTE_FLOW_ITEM_TYPE_END ||
	    pattern[0].type == RTE_FLOW_ITEM_TYPE_VLAN)
		return i40e_hash_parse_global_conf(dev, pattern, rss_act,

	return i40e_hash_parse_pattern_act(dev, pattern, rss_act,
/* Drop from `conf` every reset responsibility that `ref_conf` (a newer
 * filter) already owns, so destroying `conf` later will not undo state
 * the newer filter still relies on.  A queue region is only kept on
 * `conf` when it differs from the one `ref_conf` configured.
 */
i40e_invalid_rss_filter(const struct i40e_rte_flow_rss_conf *ref_conf,
			struct i40e_rte_flow_rss_conf *conf)
	uint32_t reset_flags = conf->misc_reset_flags;

	conf->misc_reset_flags &= ~ref_conf->misc_reset_flags;

	/* A different queue region stays this filter's responsibility */
	if ((reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) &&
	    (ref_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) &&
	    (conf->region_queue_start != ref_conf->region_queue_start ||
	     conf->region_queue_num != ref_conf->region_queue_num))
		conf->misc_reset_flags |= I40E_HASH_FLOW_RESET_FLAG_REGION;

	conf->reset_config_pctypes &= ~ref_conf->reset_config_pctypes;
	conf->reset_symmetric_pctypes &= ~ref_conf->reset_symmetric_pctypes;
/* Re-apply every saved RSS filter after a device reset.  Reset-tracking
 * fields are cleared before reconfiguring so ownership is recomputed;
 * on failure RSS is disabled entirely rather than left half-configured.
 */
i40e_hash_filter_restore(struct i40e_pf *pf)
	struct i40e_rss_filter *filter;

	TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
		struct i40e_rte_flow_rss_conf *rss_conf =
			&filter->rss_filter_info;
		struct i40e_rss_filter *prev;

		/* Start from a clean slate; i40e_hash_config repopulates */
		rss_conf->misc_reset_flags = 0;
		rss_conf->reset_config_pctypes = 0;
		rss_conf->reset_symmetric_pctypes = 0;

		ret = i40e_hash_config(pf, rss_conf);
			/* Fail safe: disable RSS instead of partial state */
			pf->hash_filter_enabled = 0;
			i40e_pf_disable_rss(pf);
				"Re-configure RSS failed, RSS has been disabled");

		/* Invalid previous RSS filter */
		TAILQ_FOREACH(prev, &pf->rss_config_list, next) {

			i40e_invalid_rss_filter(rss_conf,
						&prev->rss_filter_info);
/* Allocate a new RSS filter, apply it to hardware, invalidate the reset
 * responsibilities of older filters it supersedes, and append it to the
 * PF's filter list.  On hardware failure the previous RSS state is
 * restored.
 */
i40e_hash_filter_create(struct i40e_pf *pf,
			struct i40e_rte_flow_rss_conf *rss_conf)
	struct i40e_rss_filter *filter, *prev;
	struct i40e_rte_flow_rss_conf *new_conf;

	filter = rte_zmalloc("i40e_rss_filter", sizeof(*filter), 0);
		PMD_DRV_LOG(ERR, "Failed to allocate memory.");

	new_conf = &filter->rss_filter_info;

	memcpy(new_conf, rss_conf, sizeof(*new_conf));
	/* Re-point internal arrays: the memcpy copied stale pointers */
	if (new_conf->conf.queue_num)
		new_conf->conf.queue = new_conf->queue;
	if (new_conf->conf.key_len)
		new_conf->conf.key = new_conf->key;

	ret = i40e_hash_config(pf, new_conf);

		/* Roll back: restore default RSS, then surviving filters */
		if (i40e_pf_config_rss(pf))

		(void)i40e_hash_filter_restore(pf);

	/* Invalid previous RSS filter */
	TAILQ_FOREACH(prev, &pf->rss_config_list, next)
		i40e_invalid_rss_filter(new_conf, &prev->rss_filter_info);

	TAILQ_INSERT_TAIL(&pf->rss_config_list, filter, next);
/* Undo everything a filter configured, as recorded in its reset-tracking
 * fields: hash function, queue region, RSS key, LUT, then each pctype's
 * enable/input-set and symmetric-hash bits.  Each flag/bit is cleared
 * only after its piece is successfully reset, so a failed reset can be
 * retried.
 */
i40e_hash_reset_conf(struct i40e_pf *pf,
		     struct i40e_rte_flow_rss_conf *rss_conf)
	struct i40e_hw *hw = &pf->adapter->hw;

	if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_FUNC) {
		/* Toeplitz is the hardware default hash function */
		ret = i40e_hash_config_func(hw, RTE_ETH_HASH_FUNCTION_TOEPLITZ);

		rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_FUNC;

	if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_REGION) {
		ret = i40e_flush_queue_region_all_conf(pf->adapter->eth_dev,

		rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_REGION;

	if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_KEY) {
		ret = i40e_pf_reset_rss_key(pf);

		rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_KEY;

	if (rss_conf->misc_reset_flags & I40E_HASH_FLOW_RESET_FLAG_QUEUE) {
		/* Keep a user-updated RETA; only reset the driver's LUT */
		if (!pf->adapter->rss_reta_updated) {
			ret = i40e_pf_reset_rss_reta(pf);

		pf->hash_enabled_queues = 0;
		rss_conf->misc_reset_flags &= ~I40E_HASH_FLOW_RESET_FLAG_QUEUE;

	/* Disable each configured pctype and restore its default inset */
	while (rss_conf->reset_config_pctypes) {
		idx = rte_bsf64(rss_conf->reset_config_pctypes);

		i40e_hash_enable_pctype(hw, idx, false);
		inset = i40e_get_default_input_set(idx);

			ret = i40e_set_hash_inset(hw, inset, idx, false);

		rss_conf->reset_config_pctypes &= ~BIT_ULL(idx);

	/* Turn symmetric hashing back off for each affected pctype */
	while (rss_conf->reset_symmetric_pctypes) {
		idx = rte_bsf64(rss_conf->reset_symmetric_pctypes);

		ret = i40e_hash_config_pctype_symmetric(hw, idx, false);

		rss_conf->reset_symmetric_pctypes &= ~BIT_ULL(idx);
/* Find the given filter in the PF's RSS filter list, reset the hardware
 * state it owns, and remove/free it.
 */
i40e_hash_filter_destroy(struct i40e_pf *pf,
			 const struct i40e_rss_filter *rss_filter)
	struct i40e_rss_filter *filter;

	TAILQ_FOREACH(filter, &pf->rss_config_list, next) {
		/* Match by pointer identity */
		if (rss_filter == filter) {
			ret = i40e_hash_reset_conf(pf,
						   &filter->rss_filter_info);

			TAILQ_REMOVE(&pf->rss_config_list, filter, next);
/* Destroy every RSS hash flow: walk the flow list (safe variant — the
 * current node is removed inside the loop), reset and free each hash
 * filter, then drop the flow entry itself.  Ends with the RSS filter
 * list expected empty.
 */
i40e_hash_filter_flush(struct i40e_pf *pf)
	struct rte_flow *flow, *next;

	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, next) {
		/* Only hash-type flows are handled here */
		if (flow->filter_type != RTE_ETH_FILTER_HASH)

		struct i40e_rss_filter *filter = flow->rule;

		ret = i40e_hash_reset_conf(pf,
					   &filter->rss_filter_info);

		TAILQ_REMOVE(&pf->rss_config_list, filter, next);

		TAILQ_REMOVE(&pf->flow_list, flow, node);

	/* All hash filters must be gone now */
	assert(!pf->rss_config_list.tqh_first);