1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2017-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #include <rte_byteorder.h>
11 #include <rte_tailq.h>
12 #include <rte_common.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_eth_ctrl.h>
15 #include <rte_ether.h>
17 #include <rte_flow_driver.h>
23 #include "sfc_filter.h"
26 #include "sfc_dp_rx.h"
 * At the moment, the flow API is implemented in such a manner that
 * each flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then a number
 * of filter copies will be created to cover all possible values
 * of such a field.
 */
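
/*
 * Worked example (illustrative): if the adapter only supports filters
 * that also match on the unknown unicast/multicast destination class,
 * a rule matching solely on EtherType is expanded into two filter
 * specifications built from one template:
 *
 *   template: ETHER_TYPE
 *   filter 0: ETHER_TYPE | UNKNOWN_UCAST_DST
 *   filter 1: ETHER_TYPE | UNKNOWN_MCAST_DST
 */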
39 enum sfc_flow_item_layers {
40 SFC_FLOW_ITEM_ANY_LAYER,
41 SFC_FLOW_ITEM_START_LAYER,
47 typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
48 efx_filter_spec_t *spec,
49 struct rte_flow_error *error);
51 struct sfc_flow_item {
52 enum rte_flow_item_type type; /* Type of item */
53 enum sfc_flow_item_layers layer; /* Layer of item */
54 enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
55 sfc_flow_item_parse *parse; /* Parsing function */
58 static sfc_flow_item_parse sfc_flow_parse_void;
59 static sfc_flow_item_parse sfc_flow_parse_eth;
60 static sfc_flow_item_parse sfc_flow_parse_vlan;
61 static sfc_flow_item_parse sfc_flow_parse_ipv4;
62 static sfc_flow_item_parse sfc_flow_parse_ipv6;
63 static sfc_flow_item_parse sfc_flow_parse_tcp;
64 static sfc_flow_item_parse sfc_flow_parse_udp;
65 static sfc_flow_item_parse sfc_flow_parse_vxlan;
66 static sfc_flow_item_parse sfc_flow_parse_geneve;
67 static sfc_flow_item_parse sfc_flow_parse_nvgre;
69 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
70 unsigned int filters_count_for_one_val,
71 struct rte_flow_error *error);
73 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
74 efx_filter_spec_t *spec,
75 struct sfc_filter *filter);
77 struct sfc_flow_copy_flag {
78 /* EFX filter specification match flag */
79 efx_filter_match_flags_t flag;
80 /* Number of values of corresponding field */
81 unsigned int vals_count;
82 /* Function to set values in specifications */
83 sfc_flow_spec_set_vals *set_vals;
85 * Function to check that the specification is suitable
86 * for adding this match flag
88 sfc_flow_spec_check *spec_check;
91 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
92 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
93 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
94 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
95 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];
106 return (sum == 0) ? B_TRUE : B_FALSE;
 * Validate the item and prepare the spec and mask structures for parsing
113 sfc_flow_parse_init(const struct rte_flow_item *item,
114 const void **spec_ptr,
115 const void **mask_ptr,
116 const void *supp_mask,
117 const void *def_mask,
119 struct rte_flow_error *error)
128 rte_flow_error_set(error, EINVAL,
129 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
134 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
135 rte_flow_error_set(error, EINVAL,
136 RTE_FLOW_ERROR_TYPE_ITEM, item,
137 "Mask or last is set without spec");
142 * If "mask" is not set, default mask is used,
143 * but if default mask is NULL, "mask" should be set
145 if (item->mask == NULL) {
146 if (def_mask == NULL) {
147 rte_flow_error_set(error, EINVAL,
148 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
149 "Mask should be specified");
165 * If field values in "last" are either 0 or equal to the corresponding
166 * values in "spec" then they are ignored
169 !sfc_flow_is_zero(last, size) &&
170 memcmp(last, spec, size) != 0) {
171 rte_flow_error_set(error, ENOTSUP,
172 RTE_FLOW_ERROR_TYPE_ITEM, item,
173 "Ranging is not supported");
177 if (supp_mask == NULL) {
178 rte_flow_error_set(error, EINVAL,
179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
180 "Supported mask for item should be specified");
184 /* Check that mask does not ask for more match than supp_mask */
185 for (i = 0; i < size; i++) {
186 supp = ((const uint8_t *)supp_mask)[i];
188 if (~supp & mask[i]) {
189 rte_flow_error_set(error, ENOTSUP,
190 RTE_FLOW_ERROR_TYPE_ITEM, item,
191 "Item's field is not supported");
204 * Masking is not supported, so masks in items should be either
205 * full or empty (zeroed) and set only for supported fields which
206 * are specified in the supp_mask.
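
/*
 * Illustrative sketch (editor's example, compiled out): each field's
 * mask must be all-ones or all-zeroes; e.g. the hypothetical partial
 * destination MAC mask below passes sfc_flow_parse_init() but is
 * rejected by sfc_flow_parse_eth() as unsupported masking.
 */
#if 0
static const struct rte_flow_item_eth example_full_mask = {
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
static const struct rte_flow_item_eth example_partial_mask = {
	/* Neither full nor empty: rejected by the ETH parser */
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 },
};
#endif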
210 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
211 __rte_unused efx_filter_spec_t *efx_spec,
212 __rte_unused struct rte_flow_error *error)
218 * Convert Ethernet item to EFX filter specification.
221 * Item specification. Outer frame specification may only comprise
222 * source/destination addresses and Ethertype field.
223 * Inner frame specification may contain destination address only.
 * Individual/group masks are supported, as well as empty and full masks.
225 * If the mask is NULL, default mask will be used. Ranging is not supported.
226 * @param efx_spec[in, out]
227 * EFX filter specification to update.
229 * Perform verbose error reporting if not NULL.
232 sfc_flow_parse_eth(const struct rte_flow_item *item,
233 efx_filter_spec_t *efx_spec,
234 struct rte_flow_error *error)
237 const struct rte_flow_item_eth *spec = NULL;
238 const struct rte_flow_item_eth *mask = NULL;
239 const struct rte_flow_item_eth supp_mask = {
240 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
241 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
244 const struct rte_flow_item_eth ifrm_supp_mask = {
245 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
247 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
248 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
250 const struct rte_flow_item_eth *supp_mask_p;
251 const struct rte_flow_item_eth *def_mask_p;
252 uint8_t *loc_mac = NULL;
253 boolean_t is_ifrm = (efx_spec->efs_encap_type !=
254 EFX_TUNNEL_PROTOCOL_NONE);
257 supp_mask_p = &ifrm_supp_mask;
258 def_mask_p = &ifrm_supp_mask;
259 loc_mac = efx_spec->efs_ifrm_loc_mac;
261 supp_mask_p = &supp_mask;
262 def_mask_p = &rte_flow_item_eth_mask;
263 loc_mac = efx_spec->efs_loc_mac;
266 rc = sfc_flow_parse_init(item,
267 (const void **)&spec,
268 (const void **)&mask,
269 supp_mask_p, def_mask_p,
270 sizeof(struct rte_flow_item_eth),
275 /* If "spec" is not set, could be any Ethernet */
279 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
280 efx_spec->efs_match_flags |= is_ifrm ?
281 EFX_FILTER_MATCH_IFRM_LOC_MAC :
282 EFX_FILTER_MATCH_LOC_MAC;
283 rte_memcpy(loc_mac, spec->dst.addr_bytes,
285 } else if (memcmp(mask->dst.addr_bytes, ig_mask,
286 EFX_MAC_ADDR_LEN) == 0) {
287 if (is_unicast_ether_addr(&spec->dst))
288 efx_spec->efs_match_flags |= is_ifrm ?
289 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
290 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
292 efx_spec->efs_match_flags |= is_ifrm ?
293 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
294 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
295 } else if (!is_zero_ether_addr(&mask->dst)) {
300 * ifrm_supp_mask ensures that the source address and
301 * ethertype masks are equal to zero in inner frame,
302 * so these fields are filled in only for the outer frame
304 if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
305 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
306 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
308 } else if (!is_zero_ether_addr(&mask->src)) {
313 * Ether type is in big-endian byte order in item and
314 * in little-endian in efx_spec, so byte swap is used
316 if (mask->type == supp_mask.type) {
317 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
318 efx_spec->efs_ether_type = rte_bswap16(spec->type);
319 } else if (mask->type != 0) {
326 rte_flow_error_set(error, EINVAL,
327 RTE_FLOW_ERROR_TYPE_ITEM, item,
328 "Bad mask in the ETH pattern item");
333 * Convert VLAN item to EFX filter specification.
336 * Item specification. Only VID field is supported.
 * The mask cannot be NULL. Ranging is not supported.
338 * @param efx_spec[in, out]
339 * EFX filter specification to update.
341 * Perform verbose error reporting if not NULL.
344 sfc_flow_parse_vlan(const struct rte_flow_item *item,
345 efx_filter_spec_t *efx_spec,
346 struct rte_flow_error *error)
350 const struct rte_flow_item_vlan *spec = NULL;
351 const struct rte_flow_item_vlan *mask = NULL;
352 const struct rte_flow_item_vlan supp_mask = {
353 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
354 .inner_type = RTE_BE16(0xffff),
357 rc = sfc_flow_parse_init(item,
358 (const void **)&spec,
359 (const void **)&mask,
362 sizeof(struct rte_flow_item_vlan),
368 * VID is in big-endian byte order in item and
369 * in little-endian in efx_spec, so byte swap is used.
370 * If two VLAN items are included, the first matches
371 * the outer tag and the next matches the inner tag.
373 if (mask->tci == supp_mask.tci) {
374 /* Apply mask to keep VID only */
375 vid = rte_bswap16(spec->tci & mask->tci);
377 if (!(efx_spec->efs_match_flags &
378 EFX_FILTER_MATCH_OUTER_VID)) {
379 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
380 efx_spec->efs_outer_vid = vid;
381 } else if (!(efx_spec->efs_match_flags &
382 EFX_FILTER_MATCH_INNER_VID)) {
383 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
384 efx_spec->efs_inner_vid = vid;
386 rte_flow_error_set(error, EINVAL,
387 RTE_FLOW_ERROR_TYPE_ITEM, item,
388 "More than two VLAN items");
392 rte_flow_error_set(error, EINVAL,
393 RTE_FLOW_ERROR_TYPE_ITEM, item,
394 "VLAN ID in TCI match is required");
398 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
399 rte_flow_error_set(error, EINVAL,
400 RTE_FLOW_ERROR_TYPE_ITEM, item,
401 "VLAN TPID matching is not supported");
404 if (mask->inner_type == supp_mask.inner_type) {
405 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
406 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
407 } else if (mask->inner_type) {
408 rte_flow_error_set(error, EINVAL,
409 RTE_FLOW_ERROR_TYPE_ITEM, item,
410 "Bad mask for VLAN inner_type");
418 * Convert IPv4 item to EFX filter specification.
421 * Item specification. Only source and destination addresses and
422 * protocol fields are supported. If the mask is NULL, default
423 * mask will be used. Ranging is not supported.
424 * @param efx_spec[in, out]
425 * EFX filter specification to update.
427 * Perform verbose error reporting if not NULL.
430 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
431 efx_filter_spec_t *efx_spec,
432 struct rte_flow_error *error)
435 const struct rte_flow_item_ipv4 *spec = NULL;
436 const struct rte_flow_item_ipv4 *mask = NULL;
437 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};
446 rc = sfc_flow_parse_init(item,
447 (const void **)&spec,
448 (const void **)&mask,
450 &rte_flow_item_ipv4_mask,
451 sizeof(struct rte_flow_item_ipv4),
457 * Filtering by IPv4 source and destination addresses requires
458 * the appropriate ETHER_TYPE in hardware filters
460 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
461 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
462 efx_spec->efs_ether_type = ether_type_ipv4;
463 } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
464 rte_flow_error_set(error, EINVAL,
465 RTE_FLOW_ERROR_TYPE_ITEM, item,
466 "Ethertype in pattern with IPV4 item should be appropriate");
474 * IPv4 addresses are in big-endian byte order in item and in
477 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
478 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
479 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
480 } else if (mask->hdr.src_addr != 0) {
484 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
485 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
486 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
487 } else if (mask->hdr.dst_addr != 0) {
491 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
492 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
493 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
494 } else if (mask->hdr.next_proto_id != 0) {
501 rte_flow_error_set(error, EINVAL,
502 RTE_FLOW_ERROR_TYPE_ITEM, item,
503 "Bad mask in the IPV4 pattern item");
508 * Convert IPv6 item to EFX filter specification.
511 * Item specification. Only source and destination addresses and
512 * next header fields are supported. If the mask is NULL, default
513 * mask will be used. Ranging is not supported.
514 * @param efx_spec[in, out]
515 * EFX filter specification to update.
517 * Perform verbose error reporting if not NULL.
520 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
521 efx_filter_spec_t *efx_spec,
522 struct rte_flow_error *error)
525 const struct rte_flow_item_ipv6 *spec = NULL;
526 const struct rte_flow_item_ipv6 *mask = NULL;
527 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};
542 rc = sfc_flow_parse_init(item,
543 (const void **)&spec,
544 (const void **)&mask,
546 &rte_flow_item_ipv6_mask,
547 sizeof(struct rte_flow_item_ipv6),
553 * Filtering by IPv6 source and destination addresses requires
554 * the appropriate ETHER_TYPE in hardware filters
556 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
557 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
558 efx_spec->efs_ether_type = ether_type_ipv6;
559 } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
560 rte_flow_error_set(error, EINVAL,
561 RTE_FLOW_ERROR_TYPE_ITEM, item,
562 "Ethertype in pattern with IPV6 item should be appropriate");
570 * IPv6 addresses are in big-endian byte order in item and in
573 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
574 sizeof(mask->hdr.src_addr)) == 0) {
575 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
577 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
578 sizeof(spec->hdr.src_addr));
579 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
580 sizeof(efx_spec->efs_rem_host));
581 } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
582 sizeof(mask->hdr.src_addr))) {
586 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
587 sizeof(mask->hdr.dst_addr)) == 0) {
588 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
590 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
591 sizeof(spec->hdr.dst_addr));
592 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
593 sizeof(efx_spec->efs_loc_host));
594 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
595 sizeof(mask->hdr.dst_addr))) {
599 if (mask->hdr.proto == supp_mask.hdr.proto) {
600 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
601 efx_spec->efs_ip_proto = spec->hdr.proto;
602 } else if (mask->hdr.proto != 0) {
609 rte_flow_error_set(error, EINVAL,
610 RTE_FLOW_ERROR_TYPE_ITEM, item,
611 "Bad mask in the IPV6 pattern item");
616 * Convert TCP item to EFX filter specification.
619 * Item specification. Only source and destination ports fields
620 * are supported. If the mask is NULL, default mask will be used.
621 * Ranging is not supported.
622 * @param efx_spec[in, out]
623 * EFX filter specification to update.
625 * Perform verbose error reporting if not NULL.
628 sfc_flow_parse_tcp(const struct rte_flow_item *item,
629 efx_filter_spec_t *efx_spec,
630 struct rte_flow_error *error)
633 const struct rte_flow_item_tcp *spec = NULL;
634 const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};
642 rc = sfc_flow_parse_init(item,
643 (const void **)&spec,
644 (const void **)&mask,
646 &rte_flow_item_tcp_mask,
647 sizeof(struct rte_flow_item_tcp),
653 * Filtering by TCP source and destination ports requires
654 * the appropriate IP_PROTO in hardware filters
656 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
657 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
658 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
659 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
660 rte_flow_error_set(error, EINVAL,
661 RTE_FLOW_ERROR_TYPE_ITEM, item,
662 "IP proto in pattern with TCP item should be appropriate");
670 * Source and destination ports are in big-endian byte order in item and
671 * in little-endian in efx_spec, so byte swap is used
673 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
674 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
675 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
676 } else if (mask->hdr.src_port != 0) {
680 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
681 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
682 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
683 } else if (mask->hdr.dst_port != 0) {
690 rte_flow_error_set(error, EINVAL,
691 RTE_FLOW_ERROR_TYPE_ITEM, item,
692 "Bad mask in the TCP pattern item");
697 * Convert UDP item to EFX filter specification.
700 * Item specification. Only source and destination ports fields
701 * are supported. If the mask is NULL, default mask will be used.
702 * Ranging is not supported.
703 * @param efx_spec[in, out]
704 * EFX filter specification to update.
706 * Perform verbose error reporting if not NULL.
709 sfc_flow_parse_udp(const struct rte_flow_item *item,
710 efx_filter_spec_t *efx_spec,
711 struct rte_flow_error *error)
714 const struct rte_flow_item_udp *spec = NULL;
715 const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};
723 rc = sfc_flow_parse_init(item,
724 (const void **)&spec,
725 (const void **)&mask,
727 &rte_flow_item_udp_mask,
728 sizeof(struct rte_flow_item_udp),
734 * Filtering by UDP source and destination ports requires
735 * the appropriate IP_PROTO in hardware filters
737 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
738 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
739 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
740 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
741 rte_flow_error_set(error, EINVAL,
742 RTE_FLOW_ERROR_TYPE_ITEM, item,
743 "IP proto in pattern with UDP item should be appropriate");
751 * Source and destination ports are in big-endian byte order in item and
752 * in little-endian in efx_spec, so byte swap is used
754 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
755 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
756 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
757 } else if (mask->hdr.src_port != 0) {
761 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
762 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
763 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
764 } else if (mask->hdr.dst_port != 0) {
771 rte_flow_error_set(error, EINVAL,
772 RTE_FLOW_ERROR_TYPE_ITEM, item,
773 "Bad mask in the UDP pattern item");
778 * Filters for encapsulated packets match based on the EtherType and IP
779 * protocol in the outer frame.
782 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
783 efx_filter_spec_t *efx_spec,
785 struct rte_flow_error *error)
787 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
788 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
789 efx_spec->efs_ip_proto = ip_proto;
790 } else if (efx_spec->efs_ip_proto != ip_proto) {
792 case EFX_IPPROTO_UDP:
793 rte_flow_error_set(error, EINVAL,
794 RTE_FLOW_ERROR_TYPE_ITEM, item,
795 "Outer IP header protocol must be UDP "
796 "in VxLAN/GENEVE pattern");
799 case EFX_IPPROTO_GRE:
800 rte_flow_error_set(error, EINVAL,
801 RTE_FLOW_ERROR_TYPE_ITEM, item,
802 "Outer IP header protocol must be GRE "
807 rte_flow_error_set(error, EINVAL,
808 RTE_FLOW_ERROR_TYPE_ITEM, item,
809 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
815 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
816 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
817 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
818 rte_flow_error_set(error, EINVAL,
819 RTE_FLOW_ERROR_TYPE_ITEM, item,
820 "Outer frame EtherType in pattern with tunneling "
821 "must be IPv4 or IPv6");
829 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
830 const uint8_t *vni_or_vsid_val,
831 const uint8_t *vni_or_vsid_mask,
832 const struct rte_flow_item *item,
833 struct rte_flow_error *error)
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};
839 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
840 EFX_VNI_OR_VSID_LEN) == 0) {
841 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
842 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
843 EFX_VNI_OR_VSID_LEN);
844 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
845 rte_flow_error_set(error, EINVAL,
846 RTE_FLOW_ERROR_TYPE_ITEM, item,
847 "Unsupported VNI/VSID mask");
855 * Convert VXLAN item to EFX filter specification.
858 * Item specification. Only VXLAN network identifier field is supported.
859 * If the mask is NULL, default mask will be used.
860 * Ranging is not supported.
861 * @param efx_spec[in, out]
862 * EFX filter specification to update.
864 * Perform verbose error reporting if not NULL.
867 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
868 efx_filter_spec_t *efx_spec,
869 struct rte_flow_error *error)
872 const struct rte_flow_item_vxlan *spec = NULL;
873 const struct rte_flow_item_vxlan *mask = NULL;
874 const struct rte_flow_item_vxlan supp_mask = {
875 .vni = { 0xff, 0xff, 0xff }
878 rc = sfc_flow_parse_init(item,
879 (const void **)&spec,
880 (const void **)&mask,
882 &rte_flow_item_vxlan_mask,
883 sizeof(struct rte_flow_item_vxlan),
888 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
889 EFX_IPPROTO_UDP, error);
893 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
894 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
899 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
900 mask->vni, item, error);
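
/*
 * Usage sketch (editor's example, compiled out): a VXLAN item that
 * sfc_flow_parse_vxlan() accepts -- 24-bit VNI fully masked. The VNI
 * value (12345 = 0x003039, big-endian) is made up.
 */
#if 0
static const struct rte_flow_item_vxlan example_vxlan_spec = {
	.vni = { 0x00, 0x30, 0x39 },
};
static const struct rte_flow_item_vxlan example_vxlan_mask = {
	.vni = { 0xff, 0xff, 0xff }, /* the VNI must be fully masked */
};
#endif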
906 * Convert GENEVE item to EFX filter specification.
 * Item specification. Only the Virtual Network Identifier and protocol
 * type fields are supported; the protocol type may only be Ethernet
 * (0x6558).
911 * If the mask is NULL, default mask will be used.
912 * Ranging is not supported.
913 * @param efx_spec[in, out]
914 * EFX filter specification to update.
916 * Perform verbose error reporting if not NULL.
919 sfc_flow_parse_geneve(const struct rte_flow_item *item,
920 efx_filter_spec_t *efx_spec,
921 struct rte_flow_error *error)
924 const struct rte_flow_item_geneve *spec = NULL;
925 const struct rte_flow_item_geneve *mask = NULL;
926 const struct rte_flow_item_geneve supp_mask = {
927 .protocol = RTE_BE16(0xffff),
928 .vni = { 0xff, 0xff, 0xff }
931 rc = sfc_flow_parse_init(item,
932 (const void **)&spec,
933 (const void **)&mask,
935 &rte_flow_item_geneve_mask,
936 sizeof(struct rte_flow_item_geneve),
941 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
942 EFX_IPPROTO_UDP, error);
946 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
947 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
952 if (mask->protocol == supp_mask.protocol) {
953 if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
954 rte_flow_error_set(error, EINVAL,
955 RTE_FLOW_ERROR_TYPE_ITEM, item,
956 "GENEVE encap. protocol must be Ethernet "
957 "(0x6558) in the GENEVE pattern item");
960 } else if (mask->protocol != 0) {
961 rte_flow_error_set(error, EINVAL,
962 RTE_FLOW_ERROR_TYPE_ITEM, item,
963 "Unsupported mask for GENEVE encap. protocol");
967 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
968 mask->vni, item, error);
974 * Convert NVGRE item to EFX filter specification.
977 * Item specification. Only virtual subnet ID field is supported.
978 * If the mask is NULL, default mask will be used.
979 * Ranging is not supported.
980 * @param efx_spec[in, out]
981 * EFX filter specification to update.
983 * Perform verbose error reporting if not NULL.
986 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
987 efx_filter_spec_t *efx_spec,
988 struct rte_flow_error *error)
991 const struct rte_flow_item_nvgre *spec = NULL;
992 const struct rte_flow_item_nvgre *mask = NULL;
993 const struct rte_flow_item_nvgre supp_mask = {
994 .tni = { 0xff, 0xff, 0xff }
997 rc = sfc_flow_parse_init(item,
998 (const void **)&spec,
999 (const void **)&mask,
1001 &rte_flow_item_nvgre_mask,
1002 sizeof(struct rte_flow_item_nvgre),
1007 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1008 EFX_IPPROTO_GRE, error);
1012 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1013 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1018 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1019 mask->tni, item, error);
1024 static const struct sfc_flow_item sfc_flow_items[] = {
1026 .type = RTE_FLOW_ITEM_TYPE_VOID,
1027 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1028 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1029 .parse = sfc_flow_parse_void,
1032 .type = RTE_FLOW_ITEM_TYPE_ETH,
1033 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1034 .layer = SFC_FLOW_ITEM_L2,
1035 .parse = sfc_flow_parse_eth,
1038 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1039 .prev_layer = SFC_FLOW_ITEM_L2,
1040 .layer = SFC_FLOW_ITEM_L2,
1041 .parse = sfc_flow_parse_vlan,
1044 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1045 .prev_layer = SFC_FLOW_ITEM_L2,
1046 .layer = SFC_FLOW_ITEM_L3,
1047 .parse = sfc_flow_parse_ipv4,
1050 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1051 .prev_layer = SFC_FLOW_ITEM_L2,
1052 .layer = SFC_FLOW_ITEM_L3,
1053 .parse = sfc_flow_parse_ipv6,
1056 .type = RTE_FLOW_ITEM_TYPE_TCP,
1057 .prev_layer = SFC_FLOW_ITEM_L3,
1058 .layer = SFC_FLOW_ITEM_L4,
1059 .parse = sfc_flow_parse_tcp,
1062 .type = RTE_FLOW_ITEM_TYPE_UDP,
1063 .prev_layer = SFC_FLOW_ITEM_L3,
1064 .layer = SFC_FLOW_ITEM_L4,
1065 .parse = sfc_flow_parse_udp,
1068 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1069 .prev_layer = SFC_FLOW_ITEM_L4,
1070 .layer = SFC_FLOW_ITEM_START_LAYER,
1071 .parse = sfc_flow_parse_vxlan,
1074 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1075 .prev_layer = SFC_FLOW_ITEM_L4,
1076 .layer = SFC_FLOW_ITEM_START_LAYER,
1077 .parse = sfc_flow_parse_geneve,
1080 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1081 .prev_layer = SFC_FLOW_ITEM_L3,
1082 .layer = SFC_FLOW_ITEM_START_LAYER,
1083 .parse = sfc_flow_parse_nvgre,
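
/*
 * The layer bookkeeping above enforces pattern item ordering. For
 * example (illustrative), the following sequences are accepted:
 *   ETH / IPV4 / UDP / VXLAN / ETH  (a tunnel item resets to the start
 *                                    layer, allowing an inner ETH item)
 *   IPV6 / TCP                      (leading layers may be omitted)
 * while ETH / UDP is rejected, since UDP expects an L3 item before it.
 */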
1088 * Protocol-independent flow API support
1091 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1092 struct rte_flow *flow,
1093 struct rte_flow_error *error)
1096 rte_flow_error_set(error, EINVAL,
1097 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1101 if (attr->group != 0) {
1102 rte_flow_error_set(error, ENOTSUP,
1103 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1104 "Groups are not supported");
1107 if (attr->priority != 0) {
1108 rte_flow_error_set(error, ENOTSUP,
1109 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1110 "Priorities are not supported");
1113 if (attr->egress != 0) {
1114 rte_flow_error_set(error, ENOTSUP,
1115 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1116 "Egress is not supported");
1119 if (attr->transfer != 0) {
1120 rte_flow_error_set(error, ENOTSUP,
1121 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1122 "Transfer is not supported");
1125 if (attr->ingress == 0) {
1126 rte_flow_error_set(error, ENOTSUP,
1127 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1128 "Only ingress is supported");
1132 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
1133 flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1138 /* Get item from array sfc_flow_items */
1139 static const struct sfc_flow_item *
1140 sfc_flow_get_item(enum rte_flow_item_type type)
1144 for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
1145 if (sfc_flow_items[i].type == type)
1146 return &sfc_flow_items[i];
1152 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
1153 struct rte_flow *flow,
1154 struct rte_flow_error *error)
1157 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1158 boolean_t is_ifrm = B_FALSE;
1159 const struct sfc_flow_item *item;
1161 if (pattern == NULL) {
1162 rte_flow_error_set(error, EINVAL,
1163 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1168 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1169 item = sfc_flow_get_item(pattern->type);
1171 rte_flow_error_set(error, ENOTSUP,
1172 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1173 "Unsupported pattern item");
1178 * Omitting one or several protocol layers at the beginning
 * of the pattern is supported
1181 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1182 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1183 item->prev_layer != prev_layer) {
1184 rte_flow_error_set(error, ENOTSUP,
1185 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1186 "Unexpected sequence of pattern items");
1191 * Allow only VOID and ETH pattern items in the inner frame.
1192 * Also check that there is only one tunneling protocol.
1194 switch (item->type) {
1195 case RTE_FLOW_ITEM_TYPE_VOID:
1196 case RTE_FLOW_ITEM_TYPE_ETH:
1199 case RTE_FLOW_ITEM_TYPE_VXLAN:
1200 case RTE_FLOW_ITEM_TYPE_GENEVE:
1201 case RTE_FLOW_ITEM_TYPE_NVGRE:
1203 rte_flow_error_set(error, EINVAL,
1204 RTE_FLOW_ERROR_TYPE_ITEM,
1206 "More than one tunneling protocol");
1214 rte_flow_error_set(error, EINVAL,
1215 RTE_FLOW_ERROR_TYPE_ITEM,
1217 "There is an unsupported pattern item "
1218 "in the inner frame");
1224 rc = item->parse(pattern, &flow->spec.template, error);
1228 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1229 prev_layer = item->layer;
1236 sfc_flow_parse_queue(struct sfc_adapter *sa,
1237 const struct rte_flow_action_queue *queue,
1238 struct rte_flow *flow)
1240 struct sfc_rxq *rxq;
1242 if (queue->index >= sa->rxq_count)
1245 rxq = sa->rxq_info[queue->index].rxq;
1246 flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1252 sfc_flow_parse_rss(struct sfc_adapter *sa,
1253 const struct rte_flow_action_rss *action_rss,
1254 struct rte_flow *flow)
1256 struct sfc_rss *rss = &sa->rss;
1257 unsigned int rxq_sw_index;
1258 struct sfc_rxq *rxq;
1259 unsigned int rxq_hw_index_min;
1260 unsigned int rxq_hw_index_max;
1261 efx_rx_hash_type_t efx_hash_types;
1262 const uint8_t *rss_key;
1263 struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
1266 if (action_rss->queue_num == 0)
1269 rxq_sw_index = sa->rxq_count - 1;
1270 rxq = sa->rxq_info[rxq_sw_index].rxq;
1271 rxq_hw_index_min = rxq->hw_index;
1272 rxq_hw_index_max = 0;
1274 for (i = 0; i < action_rss->queue_num; ++i) {
1275 rxq_sw_index = action_rss->queue[i];
1277 if (rxq_sw_index >= sa->rxq_count)
1280 rxq = sa->rxq_info[rxq_sw_index].rxq;
1282 if (rxq->hw_index < rxq_hw_index_min)
1283 rxq_hw_index_min = rxq->hw_index;
1285 if (rxq->hw_index > rxq_hw_index_max)
1286 rxq_hw_index_max = rxq->hw_index;
1289 switch (action_rss->func) {
1290 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1291 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1297 if (action_rss->level)
1301 * Dummy RSS action with only one queue and no specific settings
1302 * for hash types and key does not require dedicated RSS context
 * and may be simplified to a single queue action.
1305 if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1306 action_rss->key_len == 0) {
1307 flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
1311 if (action_rss->types) {
1314 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1322 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1323 efx_hash_types |= rss->hf_map[i].efx;
1326 if (action_rss->key_len) {
1327 if (action_rss->key_len != sizeof(rss->key))
1330 rss_key = action_rss->key;
1337 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1338 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1339 sfc_rss_conf->rss_hash_types = efx_hash_types;
1340 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1342 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1343 unsigned int nb_queues = action_rss->queue_num;
1344 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1345 struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
1347 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
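
/*
 * Usage sketch (editor's example, compiled out): an RSS action
 * spreading traffic over four queues with default hash settings, as
 * accepted by sfc_flow_parse_rss(). The queue numbers are made up.
 */
#if 0
static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss example_rss = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,
	.types = 0,	/* use the default hash types */
	.key_len = 0,	/* use the default RSS key */
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,
};
#endif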
1354 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1355 unsigned int filters_count)
1360 for (i = 0; i < filters_count; i++) {
1363 rc = efx_filter_remove(sa->nic, &spec->filters[i]);
1364 if (ret == 0 && rc != 0) {
1365 sfc_err(sa, "failed to remove filter specification "
1375 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1380 for (i = 0; i < spec->count; i++) {
1381 rc = efx_filter_insert(sa->nic, &spec->filters[i]);
1383 sfc_flow_spec_flush(sa, spec, i);
1392 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1394 return sfc_flow_spec_flush(sa, spec, spec->count);
1398 sfc_flow_filter_insert(struct sfc_adapter *sa,
1399 struct rte_flow *flow)
1401 struct sfc_rss *rss = &sa->rss;
1402 struct sfc_flow_rss *flow_rss = &flow->rss_conf;
1403 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1408 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1409 flow_rss->rxq_hw_index_min + 1,
1412 rc = efx_rx_scale_context_alloc(sa->nic,
1413 EFX_RX_SCALE_EXCLUSIVE,
1417 goto fail_scale_context_alloc;
1419 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1421 flow_rss->rss_hash_types, B_TRUE);
1423 goto fail_scale_mode_set;
1425 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1429 goto fail_scale_key_set;
1432 * At this point, fully elaborated filter specifications
1433 * have been produced from the template. To make sure that
1434 * RSS behaviour is consistent between them, set the same
1435 * RSS context value everywhere.
1437 for (i = 0; i < flow->spec.count; i++) {
1438 efx_filter_spec_t *spec = &flow->spec.filters[i];
1440 spec->efs_rss_context = efs_rss_context;
1441 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1442 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1446 rc = sfc_flow_spec_insert(sa, &flow->spec);
1448 goto fail_filter_insert;
1452 * Scale table is set after filter insertion because
1453 * the table entries are relative to the base RxQ ID
1454 * and the latter is submitted to the HW by means of
1455 * inserting a filter, so by the time of the request
1456 * the HW knows all the information needed to verify
 * the table entries, and the operation will succeed.
1459 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1461 RTE_DIM(flow_rss->rss_tbl));
1463 goto fail_scale_tbl_set;
1469 sfc_flow_spec_remove(sa, &flow->spec);
1473 fail_scale_mode_set:
1474 if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1475 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1477 fail_scale_context_alloc:
1482 sfc_flow_filter_remove(struct sfc_adapter *sa,
1483 struct rte_flow *flow)
1487 rc = sfc_flow_spec_remove(sa, &flow->spec);
 * All specifications for a given flow rule have the same RSS
 * context, so the RSS context value is taken from the first
 * filter specification.
 */
1497 efx_filter_spec_t *spec = &flow->spec.filters[0];
1499 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1506 sfc_flow_parse_mark(struct sfc_adapter *sa,
1507 const struct rte_flow_action_mark *mark,
1508 struct rte_flow *flow)
1510 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1512 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1515 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1516 flow->spec.template.efs_mark = mark->id;
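
/*
 * Usage sketch (editor's example, compiled out): a QUEUE fate action
 * combined with a MARK action, as accepted by sfc_flow_parse_actions().
 * The queue index and mark ID are made up.
 */
#if 0
static const struct rte_flow_action_queue example_queue = { .index = 1 };
static const struct rte_flow_action_mark example_mark = { .id = 0x2a };
static const struct rte_flow_action example_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif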
1522 sfc_flow_parse_actions(struct sfc_adapter *sa,
1523 const struct rte_flow_action actions[],
1524 struct rte_flow *flow,
1525 struct rte_flow_error *error)
1528 const unsigned int dp_rx_features = sa->dp_rx->features;
1529 uint32_t actions_set = 0;
1530 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1531 (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1532 (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1533 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1534 (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1536 if (actions == NULL) {
1537 rte_flow_error_set(error, EINVAL,
1538 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1543 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1544 RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1546 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1547 switch (actions->type) {
1548 case RTE_FLOW_ACTION_TYPE_VOID:
1549 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1553 case RTE_FLOW_ACTION_TYPE_QUEUE:
1554 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1556 if ((actions_set & fate_actions_mask) != 0)
1557 goto fail_fate_actions;
1559 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1561 rte_flow_error_set(error, EINVAL,
1562 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1563 "Bad QUEUE action");
1568 case RTE_FLOW_ACTION_TYPE_RSS:
1569 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1571 if ((actions_set & fate_actions_mask) != 0)
1572 goto fail_fate_actions;
1574 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1576 rte_flow_error_set(error, -rc,
1577 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1583 case RTE_FLOW_ACTION_TYPE_DROP:
1584 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1586 if ((actions_set & fate_actions_mask) != 0)
1587 goto fail_fate_actions;
1589 flow->spec.template.efs_dmaq_id =
1590 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1593 case RTE_FLOW_ACTION_TYPE_FLAG:
1594 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1596 if ((actions_set & mark_actions_mask) != 0)
1597 goto fail_actions_overlap;
1599 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1600 rte_flow_error_set(error, ENOTSUP,
1601 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1602 "FLAG action is not supported on the current Rx datapath");
1606 flow->spec.template.efs_flags |=
1607 EFX_FILTER_FLAG_ACTION_FLAG;
1610 case RTE_FLOW_ACTION_TYPE_MARK:
1611 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1613 if ((actions_set & mark_actions_mask) != 0)
1614 goto fail_actions_overlap;
1616 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1617 rte_flow_error_set(error, ENOTSUP,
1618 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1619 "MARK action is not supported on the current Rx datapath");
1623 rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1625 rte_flow_error_set(error, rc,
1626 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1633 rte_flow_error_set(error, ENOTSUP,
1634 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1635 "Action is not supported");
1639 actions_set |= (1UL << actions->type);
1641 #undef SFC_BUILD_SET_OVERFLOW
1643 /* When fate is unknown, drop traffic. */
1644 if ((actions_set & fate_actions_mask) == 0) {
1645 flow->spec.template.efs_dmaq_id =
1646 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1652 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1653 "Cannot combine several fate-deciding actions, "
1654 "choose between QUEUE, RSS or DROP");
1657 fail_actions_overlap:
1658 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1659 "Overlapping actions are not supported");
1664 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1665 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1666 * specifications after copying.
1668 * @param spec[in, out]
1669 * SFC flow specification to update.
1670 * @param filters_count_for_one_val[in]
 * How many specifications should share the same match flag value;
 * this equals the number of specifications before copying.
1674 * Perform verbose error reporting if not NULL.
1677 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1678 unsigned int filters_count_for_one_val,
1679 struct rte_flow_error *error)
1682 static const efx_filter_match_flags_t vals[] = {
1683 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1684 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1687 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1688 rte_flow_error_set(error, EINVAL,
1689 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1690 "Number of specifications is incorrect while copying "
1691 "by unknown destination flags");
1695 for (i = 0; i < spec->count; i++) {
1696 /* The check above ensures that divisor can't be zero here */
1697 spec->filters[i].efs_match_flags |=
1698 vals[i / filters_count_for_one_val];
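
/*
 * Worked example (illustrative): with the two flag values above and
 * filters_count_for_one_val = 2 (so spec->count = 4 after copying),
 * vals[i / 2] assigns UNKNOWN_UCAST_DST to filters 0-1 and
 * UNKNOWN_MCAST_DST to filters 2-3.
 */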
1705 * Check that the following conditions are met:
1706 * - the list of supported filters has a filter
1707 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
1712 * The match flags of filter.
1714 * Specification to be supplemented.
1716 * SFC filter with list of supported filters.
1719 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1720 __rte_unused efx_filter_spec_t *spec,
1721 struct sfc_filter *filter)
1724 efx_filter_match_flags_t match_mcast_dst;
1727 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1728 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1729 for (i = 0; i < filter->supported_match_num; i++) {
1730 if (match_mcast_dst == filter->supported_match[i])
1738 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1739 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1740 * specifications after copying.
1742 * @param spec[in, out]
1743 * SFC flow specification to update.
1744 * @param filters_count_for_one_val[in]
 * How many specifications should share the same EtherType value;
 * this equals the number of specifications before copying.
1748 * Perform verbose error reporting if not NULL.
1751 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1752 unsigned int filters_count_for_one_val,
1753 struct rte_flow_error *error)
1756 static const uint16_t vals[] = {
1757 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1760 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1761 rte_flow_error_set(error, EINVAL,
1762 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1763 "Number of specifications is incorrect "
1764 "while copying by Ethertype");
1768 for (i = 0; i < spec->count; i++) {
1769 spec->filters[i].efs_match_flags |=
1770 EFX_FILTER_MATCH_ETHER_TYPE;
1773 * The check above ensures that
1774 * filters_count_for_one_val is not 0
1776 spec->filters[i].efs_ether_type =
1777 vals[i / filters_count_for_one_val];
1784 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1785 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1786 * specifications after copying.
1788 * @param spec[in, out]
1789 * SFC flow specification to update.
1790 * @param filters_count_for_one_val[in]
 * How many specifications should share the same match flag value;
 * this equals the number of specifications before copying.
1794 * Perform verbose error reporting if not NULL.
1797 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1798 unsigned int filters_count_for_one_val,
1799 struct rte_flow_error *error)
1802 static const efx_filter_match_flags_t vals[] = {
1803 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1804 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1807 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1808 rte_flow_error_set(error, EINVAL,
1809 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1810 "Number of specifications is incorrect while copying "
1811 "by inner frame unknown destination flags");
1815 for (i = 0; i < spec->count; i++) {
1816 /* The check above ensures that divisor can't be zero here */
1817 spec->filters[i].efs_match_flags |=
1818 vals[i / filters_count_for_one_val];
1825 * Check that the following conditions are met:
1826 * - the specification corresponds to a filter for encapsulated traffic
1827 * - the list of supported filters has a filter
1828 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
1833 * The match flags of filter.
1835 * Specification to be supplemented.
1837 * SFC filter with list of supported filters.
1840 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1841 efx_filter_spec_t *spec,
1842 struct sfc_filter *filter)
1845 efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1846 efx_filter_match_flags_t match_mcast_dst;
1848 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1852 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1853 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1854 for (i = 0; i < filter->supported_match_num; i++) {
1855 if (match_mcast_dst == filter->supported_match[i])
1863 * Match flags that can be automatically added to filters.
1864 * Selecting the last minimum when searching for the copy flag ensures that the
1865 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
1866 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of
 * supported filters.
 */
1870 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1872 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1874 .set_vals = sfc_flow_set_unknown_dst_flags,
1875 .spec_check = sfc_flow_check_unknown_dst_flags,
1878 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
1880 .set_vals = sfc_flow_set_ethertypes,
1884 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1886 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
1887 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
1891 /* Get item from array sfc_flow_copy_flags */
1892 static const struct sfc_flow_copy_flag *
1893 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1897 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1898 if (sfc_flow_copy_flags[i].flag == flag)
1899 return &sfc_flow_copy_flags[i];
1906 * Make copies of the specifications, set match flag and values
1907 * of the field that corresponds to it.
1909 * @param spec[in, out]
1910 * SFC flow specification to update.
1912 * The match flag to add.
1914 * Perform verbose error reporting if not NULL.
1917 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1918 efx_filter_match_flags_t flag,
1919 struct rte_flow_error *error)
1922 unsigned int new_filters_count;
1923 unsigned int filters_count_for_one_val;
1924 const struct sfc_flow_copy_flag *copy_flag;
1927 copy_flag = sfc_flow_get_copy_flag(flag);
1928 if (copy_flag == NULL) {
1929 rte_flow_error_set(error, ENOTSUP,
1930 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1931 "Unsupported spec field for copying");
1935 new_filters_count = spec->count * copy_flag->vals_count;
1936 if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
1937 rte_flow_error_set(error, EINVAL,
1938 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1939 "Too much EFX specifications in the flow rule");
1943 /* Copy filters specifications */
1944 for (i = spec->count; i < new_filters_count; i++)
1945 spec->filters[i] = spec->filters[i - spec->count];
1947 filters_count_for_one_val = spec->count;
1948 spec->count = new_filters_count;
1950 rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
1958 * Check that the given set of match flags missing in the original filter spec
1959 * could be covered by adding spec copies which specify the corresponding
1960 * flags and packet field values to match.
1962 * @param miss_flags[in]
 * Flags that the original specification lacks relative to the supported filter.
1965 * Specification to be supplemented.
1970 * Number of specifications after copy or 0, if the flags can not be added.
1973 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
1974 efx_filter_spec_t *spec,
1975 struct sfc_filter *filter)
1978 efx_filter_match_flags_t copy_flags = 0;
1979 efx_filter_match_flags_t flag;
1980 efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
1981 sfc_flow_spec_check *check;
1982 unsigned int multiplier = 1;
1984 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1985 flag = sfc_flow_copy_flags[i].flag;
1986 check = sfc_flow_copy_flags[i].spec_check;
1987 if ((flag & miss_flags) == flag) {
1988 if (check != NULL && (!check(match, spec, filter)))
1992 multiplier *= sfc_flow_copy_flags[i].vals_count;
1996 if (copy_flags == miss_flags)
2003 * Attempt to supplement the specification template to the minimally
2004 * supported set of match flags. To do this, it is necessary to copy
2005 * the specifications, filling them with the values of fields that
2006 * correspond to the missing flags.
2007 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of match flags.
2013 * @param spec[in, out]
2014 * SFC flow specification to update.
2016 * Perform verbose error reporting if not NULL.
2019 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2020 struct sfc_flow_spec *spec,
2021 struct rte_flow_error *error)
2023 struct sfc_filter *filter = &sa->filter;
2024 efx_filter_match_flags_t miss_flags;
2025 efx_filter_match_flags_t min_miss_flags = 0;
2026 efx_filter_match_flags_t match;
2027 unsigned int min_multiplier = UINT_MAX;
2028 unsigned int multiplier;
2032 match = spec->template.efs_match_flags;
2033 for (i = 0; i < filter->supported_match_num; i++) {
2034 if ((match & filter->supported_match[i]) == match) {
2035 miss_flags = filter->supported_match[i] & (~match);
2036 multiplier = sfc_flow_check_missing_flags(miss_flags,
2037 &spec->template, filter);
2038 if (multiplier > 0) {
2039 if (multiplier <= min_multiplier) {
2040 min_multiplier = multiplier;
2041 min_miss_flags = miss_flags;
2047 if (min_multiplier == UINT_MAX) {
2048 rte_flow_error_set(error, ENOTSUP,
2049 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2050 "The flow rule pattern is unsupported");
2054 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2055 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2057 if ((flag & min_miss_flags) == flag) {
2058 rc = sfc_flow_spec_add_match_flag(spec, flag, error);
 * Check that the set of match flags is referred to by a filter. The filter
 * is described by match flags with the ability to add OUTER_VID and
 * INNER_VID flags.
2072 * @param match_flags[in]
2073 * Set of match flags.
2074 * @param flags_pattern[in]
2075 * Pattern of filter match flags.
2078 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2079 efx_filter_match_flags_t flags_pattern)
2081 if ((match_flags & flags_pattern) != flags_pattern)
2084 switch (match_flags & ~flags_pattern) {
2086 case EFX_FILTER_MATCH_OUTER_VID:
2087 case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2095 * Check whether the spec maps to a hardware filter which is known to be
2096 * ineffective despite being valid.
2099 * SFC flow specification.
2102 sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
2105 uint16_t ether_type;
2107 efx_filter_match_flags_t match_flags;
2109 for (i = 0; i < spec->count; i++) {
2110 match_flags = spec->filters[i].efs_match_flags;
2112 if (sfc_flow_is_match_with_vids(match_flags,
2113 EFX_FILTER_MATCH_ETHER_TYPE) ||
2114 sfc_flow_is_match_with_vids(match_flags,
2115 EFX_FILTER_MATCH_ETHER_TYPE |
2116 EFX_FILTER_MATCH_LOC_MAC)) {
2117 ether_type = spec->filters[i].efs_ether_type;
2118 if (ether_type == EFX_ETHER_TYPE_IPV4 ||
2119 ether_type == EFX_ETHER_TYPE_IPV6)
2121 } else if (sfc_flow_is_match_with_vids(match_flags,
2122 EFX_FILTER_MATCH_ETHER_TYPE |
2123 EFX_FILTER_MATCH_IP_PROTO) ||
2124 sfc_flow_is_match_with_vids(match_flags,
2125 EFX_FILTER_MATCH_ETHER_TYPE |
2126 EFX_FILTER_MATCH_IP_PROTO |
2127 EFX_FILTER_MATCH_LOC_MAC)) {
2128 ip_proto = spec->filters[i].efs_ip_proto;
2129 if (ip_proto == EFX_IPPROTO_TCP ||
2130 ip_proto == EFX_IPPROTO_UDP)
2139 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2140 struct rte_flow *flow,
2141 struct rte_flow_error *error)
2143 efx_filter_spec_t *spec_tmpl = &flow->spec.template;
2144 efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2147 /* Initialize the first filter spec with template */
2148 flow->spec.filters[0] = *spec_tmpl;
2149 flow->spec.count = 1;
2151 if (!sfc_filter_is_match_supported(sa, match_flags)) {
2152 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2157 if (sfc_flow_is_match_flags_exception(&flow->spec)) {
2158 rte_flow_error_set(error, ENOTSUP,
2159 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2160 "The flow rule pattern is unsupported");
2168 sfc_flow_parse(struct rte_eth_dev *dev,
2169 const struct rte_flow_attr *attr,
2170 const struct rte_flow_item pattern[],
2171 const struct rte_flow_action actions[],
2172 struct rte_flow *flow,
2173 struct rte_flow_error *error)
2175 struct sfc_adapter *sa = dev->data->dev_private;
2178 rc = sfc_flow_parse_attr(attr, flow, error);
2180 goto fail_bad_value;
2182 rc = sfc_flow_parse_pattern(pattern, flow, error);
2184 goto fail_bad_value;
2186 rc = sfc_flow_parse_actions(sa, actions, flow, error);
2188 goto fail_bad_value;
2190 rc = sfc_flow_validate_match_flags(sa, flow, error);
2192 goto fail_bad_value;
2201 sfc_flow_validate(struct rte_eth_dev *dev,
2202 const struct rte_flow_attr *attr,
2203 const struct rte_flow_item pattern[],
2204 const struct rte_flow_action actions[],
2205 struct rte_flow_error *error)
2207 struct rte_flow flow;
2209 memset(&flow, 0, sizeof(flow));
2211 return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
2214 static struct rte_flow *
2215 sfc_flow_create(struct rte_eth_dev *dev,
2216 const struct rte_flow_attr *attr,
2217 const struct rte_flow_item pattern[],
2218 const struct rte_flow_action actions[],
2219 struct rte_flow_error *error)
2221 struct sfc_adapter *sa = dev->data->dev_private;
2222 struct rte_flow *flow = NULL;
2225 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2227 rte_flow_error_set(error, ENOMEM,
2228 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2229 "Failed to allocate memory");
2233 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2235 goto fail_bad_value;
2237 TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
2239 sfc_adapter_lock(sa);
2241 if (sa->state == SFC_ADAPTER_STARTED) {
2242 rc = sfc_flow_filter_insert(sa, flow);
2244 rte_flow_error_set(error, rc,
2245 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2246 "Failed to insert filter");
2247 goto fail_filter_insert;
2251 sfc_adapter_unlock(sa);
2256 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2260 sfc_adapter_unlock(sa);
2267 sfc_flow_remove(struct sfc_adapter *sa,
2268 struct rte_flow *flow,
2269 struct rte_flow_error *error)
2273 SFC_ASSERT(sfc_adapter_is_locked(sa));
2275 if (sa->state == SFC_ADAPTER_STARTED) {
2276 rc = sfc_flow_filter_remove(sa, flow);
2278 rte_flow_error_set(error, rc,
2279 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2280 "Failed to destroy flow rule");
2283 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2290 sfc_flow_destroy(struct rte_eth_dev *dev,
2291 struct rte_flow *flow,
2292 struct rte_flow_error *error)
2294 struct sfc_adapter *sa = dev->data->dev_private;
2295 struct rte_flow *flow_ptr;
2298 sfc_adapter_lock(sa);
2300 TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
2301 if (flow_ptr == flow)
2305 rte_flow_error_set(error, rc,
2306 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2307 "Failed to find flow rule to destroy");
2308 goto fail_bad_value;
2311 rc = sfc_flow_remove(sa, flow, error);
2314 sfc_adapter_unlock(sa);
2320 sfc_flow_flush(struct rte_eth_dev *dev,
2321 struct rte_flow_error *error)
2323 struct sfc_adapter *sa = dev->data->dev_private;
2324 struct rte_flow *flow;
2328 sfc_adapter_lock(sa);
2330 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2331 rc = sfc_flow_remove(sa, flow, error);
2336 sfc_adapter_unlock(sa);
2342 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2343 struct rte_flow_error *error)
2345 struct sfc_adapter *sa = dev->data->dev_private;
2346 struct sfc_port *port = &sa->port;
2349 sfc_adapter_lock(sa);
2350 if (sa->state != SFC_ADAPTER_INITIALIZED) {
2351 rte_flow_error_set(error, EBUSY,
2352 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2353 NULL, "please close the port first");
2356 port->isolated = (enable) ? B_TRUE : B_FALSE;
2358 sfc_adapter_unlock(sa);
2363 const struct rte_flow_ops sfc_flow_ops = {
2364 .validate = sfc_flow_validate,
2365 .create = sfc_flow_create,
2366 .destroy = sfc_flow_destroy,
2367 .flush = sfc_flow_flush,
2369 .isolate = sfc_flow_isolate,
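
/*
 * Usage sketch (editor's example, compiled out): the callbacks above
 * are not called directly; an application reaches them through the
 * generic rte_flow API, e.g.:
 */
#if 0
static struct rte_flow *
example_create_flow(uint16_t port_id,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	/* Dry-run check first; both calls land in sfc_flow_ops */
	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return NULL;

	return rte_flow_create(port_id, attr, pattern, actions, &err);
}
#endif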
2373 sfc_flow_init(struct sfc_adapter *sa)
2375 SFC_ASSERT(sfc_adapter_is_locked(sa));
2377 TAILQ_INIT(&sa->filter.flow_list);
2381 sfc_flow_fini(struct sfc_adapter *sa)
2383 struct rte_flow *flow;
2385 SFC_ASSERT(sfc_adapter_is_locked(sa));
2387 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
2388 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
2394 sfc_flow_stop(struct sfc_adapter *sa)
2396 struct rte_flow *flow;
2398 SFC_ASSERT(sfc_adapter_is_locked(sa));
2400 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
2401 sfc_flow_filter_remove(sa, flow);
2405 sfc_flow_start(struct sfc_adapter *sa)
2407 struct rte_flow *flow;
2410 sfc_log_init(sa, "entry");
2412 SFC_ASSERT(sfc_adapter_is_locked(sa));
2414 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
2415 rc = sfc_flow_filter_insert(sa, flow);
2420 sfc_log_init(sa, "done");