1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2017-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #include <rte_tailq.h>
11 #include <rte_common.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_eth_ctrl.h>
14 #include <rte_ether.h>
16 #include <rte_flow_driver.h>
22 #include "sfc_filter.h"
/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure,
 * which describes the hardware filter.
 */
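/*
 * Illustrative sketch (not part of the driver): a rule with the pattern
 * "eth / ipv4 / udp dst is 4789" would be parsed into an efx_filter_spec_t
 * roughly equivalent to
 *
 *	efx_filter_spec_t spec;
 *
 *	memset(&spec, 0, sizeof(spec));
 *	spec.efs_match_flags = EFX_FILTER_MATCH_ETHER_TYPE |
 *			       EFX_FILTER_MATCH_IP_PROTO |
 *			       EFX_FILTER_MATCH_LOC_PORT;
 *	spec.efs_ether_type = EFX_ETHER_TYPE_IPV4;
 *	spec.efs_ip_proto = EFX_IPPROTO_UDP;
 *	spec.efs_loc_port = 4789;	(host byte order)
 *
 * The exact set of match flags depends on which item fields carry a
 * full mask.
 */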
34 enum sfc_flow_item_layers {
35 SFC_FLOW_ITEM_ANY_LAYER,
36 SFC_FLOW_ITEM_START_LAYER,
42 typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
43 efx_filter_spec_t *spec,
44 struct rte_flow_error *error);
46 struct sfc_flow_item {
47 enum rte_flow_item_type type; /* Type of item */
48 enum sfc_flow_item_layers layer; /* Layer of item */
49 enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
50 sfc_flow_item_parse *parse; /* Parsing function */
53 static sfc_flow_item_parse sfc_flow_parse_void;
54 static sfc_flow_item_parse sfc_flow_parse_eth;
55 static sfc_flow_item_parse sfc_flow_parse_vlan;
56 static sfc_flow_item_parse sfc_flow_parse_ipv4;
57 static sfc_flow_item_parse sfc_flow_parse_ipv6;
58 static sfc_flow_item_parse sfc_flow_parse_tcp;
59 static sfc_flow_item_parse sfc_flow_parse_udp;
60 static sfc_flow_item_parse sfc_flow_parse_vxlan;
61 static sfc_flow_item_parse sfc_flow_parse_geneve;
62 static sfc_flow_item_parse sfc_flow_parse_nvgre;
65 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
for (i = 0; i < size; i++)
	sum |= buf[i];
73 return (sum == 0) ? B_TRUE : B_FALSE;
/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
80 sfc_flow_parse_init(const struct rte_flow_item *item,
81 const void **spec_ptr,
82 const void **mask_ptr,
83 const void *supp_mask,
86 struct rte_flow_error *error)
96 rte_flow_error_set(error, EINVAL,
97 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
102 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
103 rte_flow_error_set(error, EINVAL,
104 RTE_FLOW_ERROR_TYPE_ITEM, item,
105 "Mask or last is set without spec");
110 * If "mask" is not set, default mask is used,
111 * but if default mask is NULL, "mask" should be set
113 if (item->mask == NULL) {
114 if (def_mask == NULL) {
115 rte_flow_error_set(error, EINVAL,
116 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
117 "Mask should be specified");
/*
 * If the field values in "last" are either 0 or equal to the
 * corresponding values in "spec", they are ignored
 */
137 !sfc_flow_is_zero(last, size) &&
138 memcmp(last, spec, size) != 0) {
139 rte_flow_error_set(error, ENOTSUP,
140 RTE_FLOW_ERROR_TYPE_ITEM, item,
141 "Ranging is not supported");
145 if (supp_mask == NULL) {
146 rte_flow_error_set(error, EINVAL,
147 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
148 "Supported mask for item should be specified");
/* Check that mask and spec do not ask for more match than supp_mask */
153 for (i = 0; i < size; i++) {
154 match = spec[i] | mask[i];
155 supp = ((const uint8_t *)supp_mask)[i];
157 if ((match | supp) != supp) {
158 rte_flow_error_set(error, ENOTSUP,
159 RTE_FLOW_ERROR_TYPE_ITEM, item,
160 "Item's field is not supported");
/*
 * Masking is not supported, so masks in items must be either
 * full or empty (zeroed) and set only for the supported fields
 * specified in supp_mask.
 */
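/*
 * For example (illustrative values): matching the IPv4 protocol field
 * requires mask.hdr.next_proto_id == 0xff; a partial mask such as 0x0f
 * is rejected by the item parser with "Bad mask in the IPV4 pattern
 * item", since a partial match cannot be expressed as a hardware filter.
 */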
179 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
180 __rte_unused efx_filter_spec_t *efx_spec,
181 __rte_unused struct rte_flow_error *error)
187 * Convert Ethernet item to EFX filter specification.
 * Item specification. The outer frame specification may comprise only the
 * source/destination addresses and the EtherType field.
 * The inner frame specification may contain the destination address only.
 * The individual/group mask is supported, as well as empty and full masks.
 * If the mask is NULL, the default mask will be used. Ranging is not supported.
195 * @param efx_spec[in, out]
196 * EFX filter specification to update.
198 * Perform verbose error reporting if not NULL.
201 sfc_flow_parse_eth(const struct rte_flow_item *item,
202 efx_filter_spec_t *efx_spec,
203 struct rte_flow_error *error)
206 const struct rte_flow_item_eth *spec = NULL;
207 const struct rte_flow_item_eth *mask = NULL;
208 const struct rte_flow_item_eth supp_mask = {
209 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
210 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
213 const struct rte_flow_item_eth ifrm_supp_mask = {
214 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
216 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
217 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
219 const struct rte_flow_item_eth *supp_mask_p;
220 const struct rte_flow_item_eth *def_mask_p;
221 uint8_t *loc_mac = NULL;
222 boolean_t is_ifrm = (efx_spec->efs_encap_type !=
223 EFX_TUNNEL_PROTOCOL_NONE);
226 supp_mask_p = &ifrm_supp_mask;
227 def_mask_p = &ifrm_supp_mask;
228 loc_mac = efx_spec->efs_ifrm_loc_mac;
230 supp_mask_p = &supp_mask;
231 def_mask_p = &rte_flow_item_eth_mask;
232 loc_mac = efx_spec->efs_loc_mac;
235 rc = sfc_flow_parse_init(item,
236 (const void **)&spec,
237 (const void **)&mask,
238 supp_mask_p, def_mask_p,
239 sizeof(struct rte_flow_item_eth),
245 * If "spec" is not set, could be any Ethernet, but for the inner frame
246 * type of destination MAC must be set
250 goto fail_bad_ifrm_dst_mac;
255 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
256 efx_spec->efs_match_flags |= is_ifrm ?
257 EFX_FILTER_MATCH_IFRM_LOC_MAC :
258 EFX_FILTER_MATCH_LOC_MAC;
259 rte_memcpy(loc_mac, spec->dst.addr_bytes,
261 } else if (memcmp(mask->dst.addr_bytes, ig_mask,
262 EFX_MAC_ADDR_LEN) == 0) {
263 if (is_unicast_ether_addr(&spec->dst))
264 efx_spec->efs_match_flags |= is_ifrm ?
265 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
266 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
268 efx_spec->efs_match_flags |= is_ifrm ?
269 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
270 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
271 } else if (!is_zero_ether_addr(&mask->dst)) {
273 } else if (is_ifrm) {
274 goto fail_bad_ifrm_dst_mac;
/*
 * ifrm_supp_mask ensures that the source address and
 * EtherType masks are zero in the inner frame,
 * so these fields are filled in only for the outer frame
 */
282 if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
283 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
284 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
286 } else if (!is_zero_ether_addr(&mask->src)) {
/*
 * The EtherType is in big-endian byte order in the item and
 * in little-endian in efx_spec, so a byte swap is used
 */
294 if (mask->type == supp_mask.type) {
295 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
296 efx_spec->efs_ether_type = rte_bswap16(spec->type);
297 } else if (mask->type != 0) {
304 rte_flow_error_set(error, EINVAL,
305 RTE_FLOW_ERROR_TYPE_ITEM, item,
306 "Bad mask in the ETH pattern item");
309 fail_bad_ifrm_dst_mac:
310 rte_flow_error_set(error, EINVAL,
311 RTE_FLOW_ERROR_TYPE_ITEM, item,
312 "Type of destination MAC address in inner frame "
318 * Convert VLAN item to EFX filter specification.
 * Item specification. Only the VID field is supported.
 * The mask cannot be NULL. Ranging is not supported.
323 * @param efx_spec[in, out]
324 * EFX filter specification to update.
326 * Perform verbose error reporting if not NULL.
329 sfc_flow_parse_vlan(const struct rte_flow_item *item,
330 efx_filter_spec_t *efx_spec,
331 struct rte_flow_error *error)
335 const struct rte_flow_item_vlan *spec = NULL;
336 const struct rte_flow_item_vlan *mask = NULL;
337 const struct rte_flow_item_vlan supp_mask = {
338 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
341 rc = sfc_flow_parse_init(item,
342 (const void **)&spec,
343 (const void **)&mask,
346 sizeof(struct rte_flow_item_vlan),
/*
 * The VID is in big-endian byte order in the item and
 * in little-endian in efx_spec, so a byte swap is used.
 * If two VLAN items are included, the first matches
 * the outer tag and the second matches the inner tag.
 */
357 if (mask->tci == supp_mask.tci) {
358 vid = rte_bswap16(spec->tci);
360 if (!(efx_spec->efs_match_flags &
361 EFX_FILTER_MATCH_OUTER_VID)) {
362 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
363 efx_spec->efs_outer_vid = vid;
364 } else if (!(efx_spec->efs_match_flags &
365 EFX_FILTER_MATCH_INNER_VID)) {
366 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
367 efx_spec->efs_inner_vid = vid;
369 rte_flow_error_set(error, EINVAL,
370 RTE_FLOW_ERROR_TYPE_ITEM, item,
371 "More than two VLAN items");
375 rte_flow_error_set(error, EINVAL,
376 RTE_FLOW_ERROR_TYPE_ITEM, item,
377 "VLAN ID in TCI match is required");
385 * Convert IPv4 item to EFX filter specification.
 * Item specification. Only the source and destination addresses and
 * the protocol field are supported. If the mask is NULL, the default
 * mask will be used. Ranging is not supported.
391 * @param efx_spec[in, out]
392 * EFX filter specification to update.
394 * Perform verbose error reporting if not NULL.
397 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
398 efx_filter_spec_t *efx_spec,
399 struct rte_flow_error *error)
402 const struct rte_flow_item_ipv4 *spec = NULL;
403 const struct rte_flow_item_ipv4 *mask = NULL;
404 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
405 const struct rte_flow_item_ipv4 supp_mask = {
407 .src_addr = 0xffffffff,
408 .dst_addr = 0xffffffff,
409 .next_proto_id = 0xff,
413 rc = sfc_flow_parse_init(item,
414 (const void **)&spec,
415 (const void **)&mask,
417 &rte_flow_item_ipv4_mask,
418 sizeof(struct rte_flow_item_ipv4),
424 * Filtering by IPv4 source and destination addresses requires
425 * the appropriate ETHER_TYPE in hardware filters
427 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
428 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
429 efx_spec->efs_ether_type = ether_type_ipv4;
430 } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
431 rte_flow_error_set(error, EINVAL,
432 RTE_FLOW_ERROR_TYPE_ITEM, item,
433 "Ethertype in pattern with IPV4 item should be appropriate");
/*
 * IPv4 addresses are in big-endian byte order in the item and in
 * little-endian in efx_spec
 */
444 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
445 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
446 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
447 } else if (mask->hdr.src_addr != 0) {
451 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
452 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
453 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
454 } else if (mask->hdr.dst_addr != 0) {
458 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
459 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
460 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
461 } else if (mask->hdr.next_proto_id != 0) {
468 rte_flow_error_set(error, EINVAL,
469 RTE_FLOW_ERROR_TYPE_ITEM, item,
470 "Bad mask in the IPV4 pattern item");
475 * Convert IPv6 item to EFX filter specification.
 * Item specification. Only the source and destination addresses and
 * the next header field are supported. If the mask is NULL, the default
 * mask will be used. Ranging is not supported.
481 * @param efx_spec[in, out]
482 * EFX filter specification to update.
484 * Perform verbose error reporting if not NULL.
487 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
488 efx_filter_spec_t *efx_spec,
489 struct rte_flow_error *error)
492 const struct rte_flow_item_ipv6 *spec = NULL;
493 const struct rte_flow_item_ipv6 *mask = NULL;
494 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
495 const struct rte_flow_item_ipv6 supp_mask = {
497 .src_addr = { 0xff, 0xff, 0xff, 0xff,
498 0xff, 0xff, 0xff, 0xff,
499 0xff, 0xff, 0xff, 0xff,
500 0xff, 0xff, 0xff, 0xff },
501 .dst_addr = { 0xff, 0xff, 0xff, 0xff,
502 0xff, 0xff, 0xff, 0xff,
503 0xff, 0xff, 0xff, 0xff,
504 0xff, 0xff, 0xff, 0xff },
509 rc = sfc_flow_parse_init(item,
510 (const void **)&spec,
511 (const void **)&mask,
513 &rte_flow_item_ipv6_mask,
514 sizeof(struct rte_flow_item_ipv6),
520 * Filtering by IPv6 source and destination addresses requires
521 * the appropriate ETHER_TYPE in hardware filters
523 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
524 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
525 efx_spec->efs_ether_type = ether_type_ipv6;
526 } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
527 rte_flow_error_set(error, EINVAL,
528 RTE_FLOW_ERROR_TYPE_ITEM, item,
529 "Ethertype in pattern with IPV6 item should be appropriate");
/*
 * IPv6 addresses are in big-endian byte order in the item and in
 * little-endian in efx_spec
 */
540 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
541 sizeof(mask->hdr.src_addr)) == 0) {
542 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
544 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
545 sizeof(spec->hdr.src_addr));
546 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
547 sizeof(efx_spec->efs_rem_host));
548 } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
549 sizeof(mask->hdr.src_addr))) {
553 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
554 sizeof(mask->hdr.dst_addr)) == 0) {
555 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
557 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
558 sizeof(spec->hdr.dst_addr));
559 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
560 sizeof(efx_spec->efs_loc_host));
561 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
562 sizeof(mask->hdr.dst_addr))) {
566 if (mask->hdr.proto == supp_mask.hdr.proto) {
567 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
568 efx_spec->efs_ip_proto = spec->hdr.proto;
569 } else if (mask->hdr.proto != 0) {
576 rte_flow_error_set(error, EINVAL,
577 RTE_FLOW_ERROR_TYPE_ITEM, item,
578 "Bad mask in the IPV6 pattern item");
583 * Convert TCP item to EFX filter specification.
 * Item specification. Only the source and destination port fields
 * are supported. If the mask is NULL, the default mask will be used.
 * Ranging is not supported.
589 * @param efx_spec[in, out]
590 * EFX filter specification to update.
592 * Perform verbose error reporting if not NULL.
595 sfc_flow_parse_tcp(const struct rte_flow_item *item,
596 efx_filter_spec_t *efx_spec,
597 struct rte_flow_error *error)
600 const struct rte_flow_item_tcp *spec = NULL;
601 const struct rte_flow_item_tcp *mask = NULL;
602 const struct rte_flow_item_tcp supp_mask = {
609 rc = sfc_flow_parse_init(item,
610 (const void **)&spec,
611 (const void **)&mask,
613 &rte_flow_item_tcp_mask,
614 sizeof(struct rte_flow_item_tcp),
620 * Filtering by TCP source and destination ports requires
621 * the appropriate IP_PROTO in hardware filters
623 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
624 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
625 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
626 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
627 rte_flow_error_set(error, EINVAL,
628 RTE_FLOW_ERROR_TYPE_ITEM, item,
629 "IP proto in pattern with TCP item should be appropriate");
/*
 * Source and destination ports are in big-endian byte order in the item
 * and in little-endian in efx_spec, so a byte swap is used
 */
640 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
641 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
642 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
643 } else if (mask->hdr.src_port != 0) {
647 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
648 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
649 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
650 } else if (mask->hdr.dst_port != 0) {
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ITEM, item,
659 "Bad mask in the TCP pattern item");
664 * Convert UDP item to EFX filter specification.
 * Item specification. Only the source and destination port fields
 * are supported. If the mask is NULL, the default mask will be used.
 * Ranging is not supported.
670 * @param efx_spec[in, out]
671 * EFX filter specification to update.
673 * Perform verbose error reporting if not NULL.
676 sfc_flow_parse_udp(const struct rte_flow_item *item,
677 efx_filter_spec_t *efx_spec,
678 struct rte_flow_error *error)
681 const struct rte_flow_item_udp *spec = NULL;
682 const struct rte_flow_item_udp *mask = NULL;
683 const struct rte_flow_item_udp supp_mask = {
690 rc = sfc_flow_parse_init(item,
691 (const void **)&spec,
692 (const void **)&mask,
694 &rte_flow_item_udp_mask,
695 sizeof(struct rte_flow_item_udp),
701 * Filtering by UDP source and destination ports requires
702 * the appropriate IP_PROTO in hardware filters
704 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
705 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
706 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
707 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
708 rte_flow_error_set(error, EINVAL,
709 RTE_FLOW_ERROR_TYPE_ITEM, item,
710 "IP proto in pattern with UDP item should be appropriate");
/*
 * Source and destination ports are in big-endian byte order in the item
 * and in little-endian in efx_spec, so a byte swap is used
 */
721 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
722 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
723 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
724 } else if (mask->hdr.src_port != 0) {
728 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
729 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
730 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
731 } else if (mask->hdr.dst_port != 0) {
738 rte_flow_error_set(error, EINVAL,
739 RTE_FLOW_ERROR_TYPE_ITEM, item,
740 "Bad mask in the UDP pattern item");
745 * Filters for encapsulated packets match based on the EtherType and IP
746 * protocol in the outer frame.
749 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
750 efx_filter_spec_t *efx_spec,
752 struct rte_flow_error *error)
754 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
755 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
756 efx_spec->efs_ip_proto = ip_proto;
757 } else if (efx_spec->efs_ip_proto != ip_proto) {
759 case EFX_IPPROTO_UDP:
760 rte_flow_error_set(error, EINVAL,
761 RTE_FLOW_ERROR_TYPE_ITEM, item,
762 "Outer IP header protocol must be UDP "
763 "in VxLAN/GENEVE pattern");
766 case EFX_IPPROTO_GRE:
767 rte_flow_error_set(error, EINVAL,
768 RTE_FLOW_ERROR_TYPE_ITEM, item,
769 "Outer IP header protocol must be GRE "
774 rte_flow_error_set(error, EINVAL,
775 RTE_FLOW_ERROR_TYPE_ITEM, item,
776 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
782 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
783 rte_flow_error_set(error, EINVAL,
784 RTE_FLOW_ERROR_TYPE_ITEM, item,
785 "Outer frame EtherType in pattern with tunneling "
788 } else if (efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
789 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
790 rte_flow_error_set(error, EINVAL,
791 RTE_FLOW_ERROR_TYPE_ITEM, item,
792 "Outer frame EtherType in pattern with tunneling "
793 "must be IPv4 or IPv6");
801 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
802 const uint8_t *vni_or_vsid_val,
803 const uint8_t *vni_or_vsid_mask,
804 const struct rte_flow_item *item,
805 struct rte_flow_error *error)
807 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
811 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
812 EFX_VNI_OR_VSID_LEN) == 0) {
813 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
814 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
815 EFX_VNI_OR_VSID_LEN);
816 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
817 rte_flow_error_set(error, EINVAL,
818 RTE_FLOW_ERROR_TYPE_ITEM, item,
819 "Unsupported VNI/VSID mask");
827 * Convert VXLAN item to EFX filter specification.
 * Item specification. Only the VXLAN network identifier field is supported.
 * If the mask is NULL, the default mask will be used.
 * Ranging is not supported.
833 * @param efx_spec[in, out]
834 * EFX filter specification to update.
836 * Perform verbose error reporting if not NULL.
839 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
840 efx_filter_spec_t *efx_spec,
841 struct rte_flow_error *error)
844 const struct rte_flow_item_vxlan *spec = NULL;
845 const struct rte_flow_item_vxlan *mask = NULL;
846 const struct rte_flow_item_vxlan supp_mask = {
847 .vni = { 0xff, 0xff, 0xff }
850 rc = sfc_flow_parse_init(item,
851 (const void **)&spec,
852 (const void **)&mask,
854 &rte_flow_item_vxlan_mask,
855 sizeof(struct rte_flow_item_vxlan),
860 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
861 EFX_IPPROTO_UDP, error);
865 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
866 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
871 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
872 mask->vni, item, error);
878 * Convert GENEVE item to EFX filter specification.
 * Item specification. Only the Virtual Network Identifier and protocol
 * type fields are supported, and the protocol type must be Ethernet
 * (0x6558). If the mask is NULL, the default mask will be used.
 * Ranging is not supported.
885 * @param efx_spec[in, out]
886 * EFX filter specification to update.
888 * Perform verbose error reporting if not NULL.
891 sfc_flow_parse_geneve(const struct rte_flow_item *item,
892 efx_filter_spec_t *efx_spec,
893 struct rte_flow_error *error)
896 const struct rte_flow_item_geneve *spec = NULL;
897 const struct rte_flow_item_geneve *mask = NULL;
898 const struct rte_flow_item_geneve supp_mask = {
899 .protocol = RTE_BE16(0xffff),
900 .vni = { 0xff, 0xff, 0xff }
903 rc = sfc_flow_parse_init(item,
904 (const void **)&spec,
905 (const void **)&mask,
907 &rte_flow_item_geneve_mask,
908 sizeof(struct rte_flow_item_geneve),
913 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
914 EFX_IPPROTO_UDP, error);
918 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
919 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
924 if (mask->protocol == supp_mask.protocol) {
925 if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
926 rte_flow_error_set(error, EINVAL,
927 RTE_FLOW_ERROR_TYPE_ITEM, item,
928 "GENEVE encap. protocol must be Ethernet "
929 "(0x6558) in the GENEVE pattern item");
932 } else if (mask->protocol != 0) {
933 rte_flow_error_set(error, EINVAL,
934 RTE_FLOW_ERROR_TYPE_ITEM, item,
935 "Unsupported mask for GENEVE encap. protocol");
939 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
940 mask->vni, item, error);
946 * Convert NVGRE item to EFX filter specification.
 * Item specification. Only the virtual subnet ID field is supported.
 * If the mask is NULL, the default mask will be used.
 * Ranging is not supported.
952 * @param efx_spec[in, out]
953 * EFX filter specification to update.
955 * Perform verbose error reporting if not NULL.
958 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
959 efx_filter_spec_t *efx_spec,
960 struct rte_flow_error *error)
963 const struct rte_flow_item_nvgre *spec = NULL;
964 const struct rte_flow_item_nvgre *mask = NULL;
965 const struct rte_flow_item_nvgre supp_mask = {
966 .tni = { 0xff, 0xff, 0xff }
969 rc = sfc_flow_parse_init(item,
970 (const void **)&spec,
971 (const void **)&mask,
973 &rte_flow_item_nvgre_mask,
974 sizeof(struct rte_flow_item_nvgre),
979 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
980 EFX_IPPROTO_GRE, error);
984 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
985 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
990 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
991 mask->tni, item, error);
996 static const struct sfc_flow_item sfc_flow_items[] = {
998 .type = RTE_FLOW_ITEM_TYPE_VOID,
999 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1000 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1001 .parse = sfc_flow_parse_void,
1004 .type = RTE_FLOW_ITEM_TYPE_ETH,
1005 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1006 .layer = SFC_FLOW_ITEM_L2,
1007 .parse = sfc_flow_parse_eth,
1010 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1011 .prev_layer = SFC_FLOW_ITEM_L2,
1012 .layer = SFC_FLOW_ITEM_L2,
1013 .parse = sfc_flow_parse_vlan,
1016 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1017 .prev_layer = SFC_FLOW_ITEM_L2,
1018 .layer = SFC_FLOW_ITEM_L3,
1019 .parse = sfc_flow_parse_ipv4,
1022 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1023 .prev_layer = SFC_FLOW_ITEM_L2,
1024 .layer = SFC_FLOW_ITEM_L3,
1025 .parse = sfc_flow_parse_ipv6,
1028 .type = RTE_FLOW_ITEM_TYPE_TCP,
1029 .prev_layer = SFC_FLOW_ITEM_L3,
1030 .layer = SFC_FLOW_ITEM_L4,
1031 .parse = sfc_flow_parse_tcp,
1034 .type = RTE_FLOW_ITEM_TYPE_UDP,
1035 .prev_layer = SFC_FLOW_ITEM_L3,
1036 .layer = SFC_FLOW_ITEM_L4,
1037 .parse = sfc_flow_parse_udp,
1040 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1041 .prev_layer = SFC_FLOW_ITEM_L4,
1042 .layer = SFC_FLOW_ITEM_START_LAYER,
1043 .parse = sfc_flow_parse_vxlan,
1046 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1047 .prev_layer = SFC_FLOW_ITEM_L4,
1048 .layer = SFC_FLOW_ITEM_START_LAYER,
1049 .parse = sfc_flow_parse_geneve,
1052 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1053 .prev_layer = SFC_FLOW_ITEM_L3,
1054 .layer = SFC_FLOW_ITEM_START_LAYER,
1055 .parse = sfc_flow_parse_nvgre,
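/*
 * Extending the table (sketch, hypothetical item type): support for a
 * new pattern item only needs a parse callback plus one entry above,
 * for example
 *
 *	{
 *		.type = RTE_FLOW_ITEM_TYPE_SCTP,
 *		.prev_layer = SFC_FLOW_ITEM_L3,
 *		.layer = SFC_FLOW_ITEM_L4,
 *		.parse = sfc_flow_parse_sctp,
 *	},
 *
 * where sfc_flow_parse_sctp would mirror sfc_flow_parse_tcp().
 */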
1060 * Protocol-independent flow API support
1063 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1064 struct rte_flow *flow,
1065 struct rte_flow_error *error)
1068 rte_flow_error_set(error, EINVAL,
1069 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1073 if (attr->group != 0) {
1074 rte_flow_error_set(error, ENOTSUP,
1075 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1076 "Groups are not supported");
1079 if (attr->priority != 0) {
1080 rte_flow_error_set(error, ENOTSUP,
1081 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1082 "Priorities are not supported");
1085 if (attr->egress != 0) {
1086 rte_flow_error_set(error, ENOTSUP,
1087 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1088 "Egress is not supported");
1091 if (attr->ingress == 0) {
1092 rte_flow_error_set(error, ENOTSUP,
1093 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1094 "Only ingress is supported");
1098 flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
1099 flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
/* Get an item from the sfc_flow_items array by its type */
1105 static const struct sfc_flow_item *
1106 sfc_flow_get_item(enum rte_flow_item_type type)
1110 for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
1111 if (sfc_flow_items[i].type == type)
1112 return &sfc_flow_items[i];
1118 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
1119 struct rte_flow *flow,
1120 struct rte_flow_error *error)
1123 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1124 boolean_t is_ifrm = B_FALSE;
1125 const struct sfc_flow_item *item;
1127 if (pattern == NULL) {
1128 rte_flow_error_set(error, EINVAL,
1129 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1134 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1135 item = sfc_flow_get_item(pattern->type);
1137 rte_flow_error_set(error, ENOTSUP,
1138 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1139 "Unsupported pattern item");
/*
 * Omitting one or several protocol layers at the beginning of
 * the pattern is supported
 */
1147 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1148 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1149 item->prev_layer != prev_layer) {
1150 rte_flow_error_set(error, ENOTSUP,
1151 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1152 "Unexpected sequence of pattern items");
1157 * Allow only VOID and ETH pattern items in the inner frame.
1158 * Also check that there is only one tunneling protocol.
1160 switch (item->type) {
1161 case RTE_FLOW_ITEM_TYPE_VOID:
1162 case RTE_FLOW_ITEM_TYPE_ETH:
1165 case RTE_FLOW_ITEM_TYPE_VXLAN:
1166 case RTE_FLOW_ITEM_TYPE_GENEVE:
1167 case RTE_FLOW_ITEM_TYPE_NVGRE:
1169 rte_flow_error_set(error, EINVAL,
1170 RTE_FLOW_ERROR_TYPE_ITEM,
1172 "More than one tunneling protocol");
1180 rte_flow_error_set(error, EINVAL,
1181 RTE_FLOW_ERROR_TYPE_ITEM,
1183 "There is an unsupported pattern item "
1184 "in the inner frame");
1190 rc = item->parse(pattern, &flow->spec, error);
1194 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1195 prev_layer = item->layer;
1202 sfc_flow_parse_queue(struct sfc_adapter *sa,
1203 const struct rte_flow_action_queue *queue,
1204 struct rte_flow *flow)
1206 struct sfc_rxq *rxq;
1208 if (queue->index >= sa->rxq_count)
1211 rxq = sa->rxq_info[queue->index].rxq;
1212 flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
1217 #if EFSYS_OPT_RX_SCALE
1219 sfc_flow_parse_rss(struct sfc_adapter *sa,
1220 const struct rte_flow_action_rss *rss,
1221 struct rte_flow *flow)
1223 unsigned int rxq_sw_index;
1224 struct sfc_rxq *rxq;
1225 unsigned int rxq_hw_index_min;
1226 unsigned int rxq_hw_index_max;
1227 const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
1229 uint8_t *rss_key = NULL;
1230 struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
1236 rxq_sw_index = sa->rxq_count - 1;
1237 rxq = sa->rxq_info[rxq_sw_index].rxq;
1238 rxq_hw_index_min = rxq->hw_index;
1239 rxq_hw_index_max = 0;
1241 for (i = 0; i < rss->num; ++i) {
1242 rxq_sw_index = rss->queue[i];
1244 if (rxq_sw_index >= sa->rxq_count)
1247 rxq = sa->rxq_info[rxq_sw_index].rxq;
1249 if (rxq->hw_index < rxq_hw_index_min)
1250 rxq_hw_index_min = rxq->hw_index;
1252 if (rxq->hw_index > rxq_hw_index_max)
1253 rxq_hw_index_max = rxq->hw_index;
1256 rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
1257 if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
1260 if (rss_conf != NULL) {
1261 if (rss_conf->rss_key_len != sizeof(sa->rss_key))
1264 rss_key = rss_conf->rss_key;
1266 rss_key = sa->rss_key;
1271 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1272 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1273 sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
1274 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
1276 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1277 unsigned int rxq_sw_index = rss->queue[i % rss->num];
1278 struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
1280 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1285 #endif /* EFSYS_OPT_RX_SCALE */
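/*
 * Example (illustrative numbers): if rss->queue is { 3, 4, 5 } and these
 * Rx queues happen to have hardware indices { 7, 8, 9 }, then
 * rxq_hw_index_min is 7 and rss_tbl is filled with the repeating
 * relative sequence 0, 1, 2, 0, 1, 2, ...
 */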
1288 sfc_flow_filter_insert(struct sfc_adapter *sa,
1289 struct rte_flow *flow)
1291 efx_filter_spec_t *spec = &flow->spec;
1293 #if EFSYS_OPT_RX_SCALE
1294 struct sfc_flow_rss *rss = &flow->rss_conf;
1298 unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
1299 rss->rxq_hw_index_min + 1,
1302 rc = efx_rx_scale_context_alloc(sa->nic,
1303 EFX_RX_SCALE_EXCLUSIVE,
1305 &spec->efs_rss_context);
1307 goto fail_scale_context_alloc;
1309 rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
1310 EFX_RX_HASHALG_TOEPLITZ,
1311 rss->rss_hash_types, B_TRUE);
1313 goto fail_scale_mode_set;
1315 rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
1317 sizeof(sa->rss_key));
1319 goto fail_scale_key_set;
1321 spec->efs_dmaq_id = rss->rxq_hw_index_min;
1322 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1325 rc = efx_filter_insert(sa->nic, spec);
1327 goto fail_filter_insert;
/*
 * The scale table is set after filter insertion because the table
 * entries are relative to the base RxQ ID, and the latter is submitted
 * to the HW by means of inserting a filter. Hence, by the time of this
 * request, the HW knows all the information needed to verify the table
 * entries, and the operation will succeed.
 */
1338 rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
1339 rss->rss_tbl, RTE_DIM(rss->rss_tbl));
1341 goto fail_scale_tbl_set;
1347 efx_filter_remove(sa->nic, spec);
1351 fail_scale_mode_set:
1353 efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1355 fail_scale_context_alloc:
1357 #else /* !EFSYS_OPT_RX_SCALE */
1358 return efx_filter_insert(sa->nic, spec);
1359 #endif /* EFSYS_OPT_RX_SCALE */
1363 sfc_flow_filter_remove(struct sfc_adapter *sa,
1364 struct rte_flow *flow)
1366 efx_filter_spec_t *spec = &flow->spec;
1369 rc = efx_filter_remove(sa->nic, spec);
1373 #if EFSYS_OPT_RX_SCALE
1375 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1376 #endif /* EFSYS_OPT_RX_SCALE */
1382 sfc_flow_parse_actions(struct sfc_adapter *sa,
1383 const struct rte_flow_action actions[],
1384 struct rte_flow *flow,
1385 struct rte_flow_error *error)
1388 boolean_t is_specified = B_FALSE;
1390 if (actions == NULL) {
1391 rte_flow_error_set(error, EINVAL,
1392 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1397 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1398 switch (actions->type) {
1399 case RTE_FLOW_ACTION_TYPE_VOID:
1402 case RTE_FLOW_ACTION_TYPE_QUEUE:
1403 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1405 rte_flow_error_set(error, EINVAL,
1406 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1407 "Bad QUEUE action");
1411 is_specified = B_TRUE;
1414 #if EFSYS_OPT_RX_SCALE
1415 case RTE_FLOW_ACTION_TYPE_RSS:
1416 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1418 rte_flow_error_set(error, rc,
1419 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1424 is_specified = B_TRUE;
1426 #endif /* EFSYS_OPT_RX_SCALE */
1429 rte_flow_error_set(error, ENOTSUP,
1430 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1431 "Action is not supported");
1436 if (!is_specified) {
1437 rte_flow_error_set(error, EINVAL,
1438 RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
1439 "Action is unspecified");
1447 sfc_flow_parse(struct rte_eth_dev *dev,
1448 const struct rte_flow_attr *attr,
1449 const struct rte_flow_item pattern[],
1450 const struct rte_flow_action actions[],
1451 struct rte_flow *flow,
1452 struct rte_flow_error *error)
1454 struct sfc_adapter *sa = dev->data->dev_private;
1457 rc = sfc_flow_parse_attr(attr, flow, error);
1459 goto fail_bad_value;
1461 rc = sfc_flow_parse_pattern(pattern, flow, error);
1463 goto fail_bad_value;
1465 rc = sfc_flow_parse_actions(sa, actions, flow, error);
1467 goto fail_bad_value;
1469 if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
1470 rte_flow_error_set(error, ENOTSUP,
1471 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1472 "Flow rule pattern is not supported");
1481 sfc_flow_validate(struct rte_eth_dev *dev,
1482 const struct rte_flow_attr *attr,
1483 const struct rte_flow_item pattern[],
1484 const struct rte_flow_action actions[],
1485 struct rte_flow_error *error)
1487 struct rte_flow flow;
1489 memset(&flow, 0, sizeof(flow));
1491 return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
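/*
 * From the application side (illustrative usage, not driver code), these
 * callbacks are reached through the generic flow API, e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *handle;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		handle = rte_flow_create(port_id, &attr, pattern,
 *					 actions, &err);
 */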
1494 static struct rte_flow *
1495 sfc_flow_create(struct rte_eth_dev *dev,
1496 const struct rte_flow_attr *attr,
1497 const struct rte_flow_item pattern[],
1498 const struct rte_flow_action actions[],
1499 struct rte_flow_error *error)
1501 struct sfc_adapter *sa = dev->data->dev_private;
1502 struct rte_flow *flow = NULL;
1505 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
1507 rte_flow_error_set(error, ENOMEM,
1508 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1509 "Failed to allocate memory");
1513 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
1515 goto fail_bad_value;
1517 TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
1519 sfc_adapter_lock(sa);
1521 if (sa->state == SFC_ADAPTER_STARTED) {
1522 rc = sfc_flow_filter_insert(sa, flow);
1524 rte_flow_error_set(error, rc,
1525 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1526 "Failed to insert filter");
1527 goto fail_filter_insert;
1531 sfc_adapter_unlock(sa);
1536 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1540 sfc_adapter_unlock(sa);
1547 sfc_flow_remove(struct sfc_adapter *sa,
1548 struct rte_flow *flow,
1549 struct rte_flow_error *error)
1553 SFC_ASSERT(sfc_adapter_is_locked(sa));
1555 if (sa->state == SFC_ADAPTER_STARTED) {
1556 rc = sfc_flow_filter_remove(sa, flow);
1558 rte_flow_error_set(error, rc,
1559 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1560 "Failed to destroy flow rule");
1563 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1570 sfc_flow_destroy(struct rte_eth_dev *dev,
1571 struct rte_flow *flow,
1572 struct rte_flow_error *error)
1574 struct sfc_adapter *sa = dev->data->dev_private;
1575 struct rte_flow *flow_ptr;
1578 sfc_adapter_lock(sa);
1580 TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
1581 if (flow_ptr == flow)
1585 rte_flow_error_set(error, rc,
1586 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1587 "Failed to find flow rule to destroy");
1588 goto fail_bad_value;
1591 rc = sfc_flow_remove(sa, flow, error);
1594 sfc_adapter_unlock(sa);
1600 sfc_flow_flush(struct rte_eth_dev *dev,
1601 struct rte_flow_error *error)
1603 struct sfc_adapter *sa = dev->data->dev_private;
1604 struct rte_flow *flow;
1608 sfc_adapter_lock(sa);
1610 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
1611 rc = sfc_flow_remove(sa, flow, error);
1616 sfc_adapter_unlock(sa);
1622 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
1623 struct rte_flow_error *error)
1625 struct sfc_adapter *sa = dev->data->dev_private;
1626 struct sfc_port *port = &sa->port;
1629 sfc_adapter_lock(sa);
1630 if (sa->state != SFC_ADAPTER_INITIALIZED) {
1631 rte_flow_error_set(error, EBUSY,
1632 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1633 NULL, "please close the port first");
1636 port->isolated = (enable) ? B_TRUE : B_FALSE;
1638 sfc_adapter_unlock(sa);
1643 const struct rte_flow_ops sfc_flow_ops = {
1644 .validate = sfc_flow_validate,
1645 .create = sfc_flow_create,
1646 .destroy = sfc_flow_destroy,
1647 .flush = sfc_flow_flush,
1649 .isolate = sfc_flow_isolate,
1653 sfc_flow_init(struct sfc_adapter *sa)
1655 SFC_ASSERT(sfc_adapter_is_locked(sa));
1657 TAILQ_INIT(&sa->filter.flow_list);
1661 sfc_flow_fini(struct sfc_adapter *sa)
1663 struct rte_flow *flow;
1665 SFC_ASSERT(sfc_adapter_is_locked(sa));
1667 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
1668 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1674 sfc_flow_stop(struct sfc_adapter *sa)
1676 struct rte_flow *flow;
1678 SFC_ASSERT(sfc_adapter_is_locked(sa));
1680 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
1681 sfc_flow_filter_remove(sa, flow);
1685 sfc_flow_start(struct sfc_adapter *sa)
1687 struct rte_flow *flow;
1690 sfc_log_init(sa, "entry");
1692 SFC_ASSERT(sfc_adapter_is_locked(sa));
1694 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
1695 rc = sfc_flow_filter_insert(sa, flow);
1700 sfc_log_init(sa, "done");