/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
/*
 * Currently, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then a handful
 * of filter copies will be created to cover all possible values
 * of such a field.
 */
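/*
 * Illustrative sketch (not part of the driver): a rule created via
 * testpmd such as
 *
 *   flow create 0 ingress pattern eth / end actions queue index 1 / end
 *
 * carries no destination MAC, so it cannot be expressed as a single
 * LOC_MAC filter. The copy machinery below would instead expand the
 * template into filter specifications matching unknown unicast and
 * unknown multicast destinations, all directed to the same queue.
 */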
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};
typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);
struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);
struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};
static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	unsigned int i;
	uint8_t sum = 0;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}
/*
 * Validate item and prepare structures spec and mask for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
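/*
 * Illustrative example (not from the driver source): for the ETH item,
 * a full destination mask (ff:ff:ff:ff:ff:ff) is parsed into a LOC_MAC
 * match and an all-zero mask matches any address, while a partial mask
 * such as ff:ff:ff:00:00:00 passes the supp_mask check above (every
 * byte is individually supported) but is rejected by
 * sfc_flow_parse_eth() below as a bad mask, since the hardware cannot
 * match on a MAC address prefix.
 */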
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec, so they are used as is
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec, so they are used as is
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}
/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}
/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}
/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}
/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}
/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported. However, the protocol type may only be Ethernet
 *   (0x6558). If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      efx_filter_spec_t *efx_spec,
		      struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}
/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};
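/*
 * Example (illustrative): the layer bookkeeping above accepts patterns
 * such as "eth / ipv4 / udp / vxlan / eth" (tunnel items restart the
 * layer sequence for the inner frame), while "eth / udp" is rejected
 * because UDP requires an L3 item before it. Leading layers may be
 * omitted, so a pattern starting directly with "ipv4" is valid.
 */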
/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->transfer != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
				   "Transfer is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

	return 0;
}
/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}
static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		rc = item->parse(pattern, &flow->spec.template, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_rss *rss = &sa->rss;
	unsigned int rxq_sw_index;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	rxq_sw_index = sa->rxq_count - 1;
	rxq = sa->rxq_info[rxq_sw_index].rxq;
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		rxq_sw_index = action_rss->queue[i];

		if (rxq_sw_index >= sa->rxq_count)
			return -EINVAL;

		rxq = sa->rxq_info[rxq_sw_index].rxq;

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * Dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require dedicated RSS context
	 * and may be simplified to single queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	flow->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
		struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
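/*
 * Illustrative usage (testpmd syntax): "flow create 0 ingress pattern
 * eth / end actions rss queues 0 1 2 3 end / end" spreads matching
 * traffic over four queues through a dedicated RSS context, whereas
 * "actions rss queues 0 end / end" with no key or hash types collapses
 * to a plain single-queue action as described above.
 */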
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	return sfc_flow_spec_flush(sa, spec, spec->count);
}
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_rss *rss = &sa->rss;
	struct sfc_flow_rss *flow_rss = &flow->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	unsigned int i;
	int rc = 0;

	if (flow->rss) {
		unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
					      flow_rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   flow_rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  flow_rss->rss_key,
					  sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < flow->spec.count; i++) {
			efx_filter_spec_t *spec = &flow->spec.filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  flow_rss->rss_tbl,
					  RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}
static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (flow->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &flow->spec.filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}
static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
		return EINVAL;

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	flow->spec.template.efs_mark = mark->id;

	return 0;
}
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	const unsigned int dp_rx_features = sa->dp_rx->features;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
	RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			flow->spec.template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			flow->spec.template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}
#undef SFC_BUILD_SET_OVERFLOW

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		flow->spec.template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}
/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
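/*
 * Worked example (illustrative): with one original specification and
 * filters_count_for_one_val == 1, the caller first duplicates it so
 * that spec->count == 2; this function then ORs UNKNOWN_UCAST_DST into
 * filters[0] and UNKNOWN_MCAST_DST into filters[1], so both unknown
 * unicast and unknown multicast destinations are covered by one rule.
 */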
/**
 * Check that the following conditions are met:
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}
/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same EtherType value, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		spec->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}
/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
};
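/*
 * Worked example (illustrative): if a template lacks both
 * EFX_FILTER_MATCH_ETHER_TYPE and EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * relative to the closest supported filter, the combined multiplier is
 * 2 * 2 == 4, so the single template expands into four filters covering
 * {IPv4, IPv6} x {unknown unicast, unknown multicast}.
 */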
/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}
/**
 * Make copies of the specifications, set match flag and values
 * of the field that corresponds to it.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filters specifications */
	for (i = spec->count; i < new_filters_count; i++)
		spec->filters[i] = spec->filters[i - spec->count];

	filters_count_for_one_val = spec->count;
	spec->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}
/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Flags that are missing from the spec but required by the supported
 *   filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter.
 *
 * @return
 *   Number of specifications after copy or 0, if the flags cannot be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	unsigned int multiplier = 1;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			if (check != NULL && (!check(match, spec, filter)))
				break;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}
/**
 * Attempt to supplement the specification template to the minimally
 * supported set of match flags. To do this, it is necessary to copy
 * the specifications, filling them with the values of fields that
 * correspond to the missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	match = spec->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec->template, filter);
			if (multiplier > 0) {
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
/**
 * Check that set of match flags is referred to by a filter. Filter is
 * described by match flags with the ability to add OUTER_VID and INNER_VID
 * flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
			    efx_filter_match_flags_t flags_pattern)
{
	if ((match_flags & flags_pattern) != flags_pattern)
		return B_FALSE;

	switch (match_flags & ~flags_pattern) {
	case 0:
	case EFX_FILTER_MATCH_OUTER_VID:
	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
		return B_TRUE;
	default:
		return B_FALSE;
	}
}
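/*
 * Example (illustrative): with flags_pattern == EFX_FILTER_MATCH_ETHER_TYPE,
 * the match flag sets {ETHER_TYPE}, {ETHER_TYPE, OUTER_VID} and
 * {ETHER_TYPE, OUTER_VID, INNER_VID} all match the pattern, while
 * {ETHER_TYPE, LOC_MAC} does not.
 */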
/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;

	for (i = 0; i < spec->count; i++) {
		match_flags = spec->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec->filters[i].efs_ether_type;
			if (ether_type == EFX_ETHER_TYPE_IPV4 ||
			    ether_type == EFX_ETHER_TYPE_IPV6)
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec->filters[i].efs_ip_proto;
			if (ip_proto == EFX_IPPROTO_TCP ||
			    ip_proto == EFX_IPPROTO_UDP)
				return B_TRUE;
		}
	}

	return B_FALSE;
}
static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	flow->spec.filters[0] = *spec_tmpl;
	flow->spec.count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}
static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);

fail_bad_value:
	rte_free(flow);
	sfc_adapter_unlock(sa);

fail_no_mem:
	return NULL;
}
static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_remove(sa, flow);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}
static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		port->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}
void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}
void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		sfc_flow_filter_remove(sa, flow);
}
int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}