/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
/*
 * For now, the flow API is implemented in such a manner that each
 * flow rule is converted to one hardware filter.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
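/*
 * Illustrative example (not part of the original source): a rule created
 * via testpmd as
 *
 *	flow create 0 ingress pattern eth / ipv4 / udp dst is 53 / end
 *		actions queue index 2 / end
 *
 * would be parsed below into a specification with
 *	efs_match_flags = EFX_FILTER_MATCH_ETHER_TYPE |
 *			  EFX_FILTER_MATCH_IP_PROTO |
 *			  EFX_FILTER_MATCH_LOC_PORT
 *	efs_ether_type  = 0x0800 (IPv4)
 *	efs_ip_proto    = EFX_IPPROTO_UDP
 *	efs_loc_port    = 53
 * while the QUEUE action selects the DMA queue via efs_dmaq_id.
 */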
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};
typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);
struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	unsigned int i;
	uint8_t sum = 0;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}
/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, the default mask is used,
	 * but if the default mask is NULL, "mask" must be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec", they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
/*
 * Masking is not supported, so masks in items must be either
 * full or empty (zeroed), and may be set only for the supported
 * fields specified in supp_mask.
 */
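/*
 * Illustrative example: for an ETH item, a destination address mask of
 *	ff:ff:ff:ff:ff:ff (full)    - accepted, exact match on the MAC
 *	00:00:00:00:00:00 (empty)   - accepted, the field is ignored
 *	ff:ff:00:00:00:00 (partial) - rejected as unsupported
 * The individual/group bit (01:00:00:00:00:00) is the one special case,
 * handled in sfc_flow_parse_eth() below.
 */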
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. In addition to full and
 *   empty masks of the destination address, the individual/group bit
 *   mask is also supported. If the mask is NULL, the default mask is
 *   used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |=
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |=
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the VID field is supported.
 *   The mask must not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask is used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}
/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}
/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination port fields
 *   are supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}
/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be set");
		return -rte_errno;
	} else if (efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
		   efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}
/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the VXLAN network identifier field is
 *   supported. If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}
/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the Virtual Network Identifier and protocol
 *   type fields are supported, and the protocol type may only be
 *   Ethernet (0x6558). If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      efx_filter_spec_t *efx_spec,
		      struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}
/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the virtual subnet ID field is supported.
 *   If the mask is NULL, the default mask is used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};
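/*
 * Illustrative example: for the pattern ETH / IPV4 / UDP / VXLAN the
 * table above yields the layer sequence
 *	START_LAYER -> L2 -> L3 -> L4 -> START_LAYER (inner frame)
 * where each item's prev_layer matches the layer reached so far, so
 * the sequence check in sfc_flow_parse_pattern() accepts it.
 */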
/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

	return 0;
}
/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}
static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID pattern item in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   pattern,
						   "More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   pattern,
						   "There is an unsupported pattern item "
						   "in the inner frame");
				return -rte_errno;
			}
			break;
		}

		rc = item->parse(pattern, &flow->spec, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}
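/*
 * Illustrative example: since leading layers may be omitted, the
 * pattern "ipv4 / udp" is accepted (prev_layer starts as ANY_LAYER),
 * while "udp / ipv4" is rejected: the IPV4 item requires prev_layer
 * L2, but the preceding UDP item has already advanced it to L4.
 */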
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}
#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *rss,
		   struct rte_flow *flow)
{
	unsigned int rxq_sw_index;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
	uint64_t rss_hf;
	uint8_t *rss_key = NULL;
	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
	unsigned int i;

	if (rss->num == 0)
		return -EINVAL;

	rxq_sw_index = sa->rxq_count - 1;
	rxq = sa->rxq_info[rxq_sw_index].rxq;
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < rss->num; ++i) {
		rxq_sw_index = rss->queue[i];

		if (rxq_sw_index >= sa->rxq_count)
			return -EINVAL;

		rxq = sa->rxq_info[rxq_sw_index].rxq;

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
	if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
		return -EINVAL;

	if (rss_conf != NULL) {
		if (rss_conf->rss_key_len != sizeof(sa->rss_key))
			return -EINVAL;

		rss_key = rss_conf->rss_key;
	} else {
		rss_key = sa->rss_key;
	}

	flow->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int rxq_sw_index = rss->queue[i % rss->num];
		struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */
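/*
 * Illustrative example (hypothetical queue numbering): an RSS action
 * over queues {2, 3, 4, 5} whose hardware indices are contiguous gives
 * rxq_hw_index_min == hw_index(2), and the indirection table entries
 * are filled relative to that base:
 *	rss_tbl[i] = hw_index(queue[i % 4]) - rxq_hw_index_min
 * i.e. the repeating pattern 0, 1, 2, 3, 0, 1, ...
 */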
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	efx_filter_spec_t *spec = &flow->spec;

#if EFSYS_OPT_RX_SCALE
	struct sfc_flow_rss *rss = &flow->rss_conf;
	int rc = 0;

	if (flow->rss) {
		unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
					      rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&spec->efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
					   EFX_RX_HASHALG_TOEPLITZ,
					   rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
					  rss->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto fail_scale_key_set;

		spec->efs_dmaq_id = rss->rxq_hw_index_min;
		spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
	}

	rc = efx_filter_insert(sa->nic, spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
					  rss->rss_tbl, RTE_DIM(rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	efx_filter_remove(sa->nic, spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (flow->rss)
		efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);

fail_scale_context_alloc:
	return rc;
#else /* !EFSYS_OPT_RX_SCALE */
	return efx_filter_insert(sa->nic, spec);
#endif /* EFSYS_OPT_RX_SCALE */
}
static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	efx_filter_spec_t *spec = &flow->spec;
	int rc = 0;

	rc = efx_filter_remove(sa->nic, spec);
	if (rc != 0)
		return rc;

#if EFSYS_OPT_RX_SCALE
	if (flow->rss)
		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
#endif /* EFSYS_OPT_RX_SCALE */

	return rc;
}
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

#if EFSYS_OPT_RX_SCALE
		case RTE_FLOW_ACTION_TYPE_RSS:
			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;
#endif /* EFSYS_OPT_RX_SCALE */

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

	return 0;

fail_bad_value:
	return rc;
}
static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}
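/*
 * Illustrative example (application side, not part of the driver): a
 * rule is typically validated before creation, and both calls end up
 * in sfc_flow_parse():
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */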
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					   "Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);

fail_bad_value:
	rte_free(flow);
	sfc_adapter_unlock(sa);

fail_no_mem:
	return NULL;
}
static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_remove(sa, flow);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}
static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		port->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};
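/*
 * Note (sketch, assuming the ethdev glue of this DPDK release): the
 * generic rte_flow layer obtains the table above through the driver's
 * filter_ctrl callback, roughly:
 *
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &sfc_flow_ops;
 *		break;
 *
 * The actual handler lives in sfc_ethdev.c.
 */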
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}
void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}
void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		sfc_flow_filter_remove(sa, flow);
}
int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}