/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
/*
 * For now, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 * If some required field is unset in the flow rule, then a handful
 * of filter copies will be created to cover all possible values
 * of such a field.
 */
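/*
 * Illustrative example (not an exhaustive description of the driver
 * logic): a pattern like "eth / ipv4 / udp" pins down the EtherType
 * and the IP protocol in a single efx_filter_spec_s, whereas a rule
 * that leaves, say, the destination MAC address class unspecified may
 * be expanded into several filter copies (e.g. one per destination
 * address kind) which together cover all possible values of the
 * unset field.
 */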
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};
typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);
struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;
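/* Check whether the given buffer of the given size contains only zeros */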
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}
/*
 * Validate item and prepare structures spec and mask for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
/*
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
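/*
 * Worked example of the mask rules above (illustrative): for the ETH
 * item, a destination MAC mask of ff:ff:ff:ff:ff:ff (full) or
 * 00:00:00:00:00:00 (empty) is accepted, as is the individual/group
 * bit mask 01:00:00:00:00:00; an arbitrary partial mask such as
 * ff:ff:ff:00:00:00 is rejected with "Bad mask in the ETH pattern item".
 */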
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * If "spec" is not set, could be any Ethernet, but for the inner frame
	 * type of destination MAC must be set
	 */
	if (spec == NULL) {
		if (is_ifrm)
			goto fail_bad_ifrm_dst_mac;
		else
			return 0;
	}

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	} else if (is_ifrm) {
		goto fail_bad_ifrm_dst_mac;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;

fail_bad_ifrm_dst_mac:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Type of destination MAC address in inner frame "
			   "must be set");
	return -rte_errno;
}
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * big-endian byte order in efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * big-endian byte order in efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}
/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}
/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}
/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be set");
		return -rte_errno;
	} else if (efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
		   efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}
/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}
/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported. But protocol type can be only Ethernet (0x6558).
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      efx_filter_spec_t *efx_spec,
		      struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}
/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};
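/*
 * The prev_layer/layer pairs above encode the allowed item order.
 * For instance (illustrative), "eth / ipv4 / udp / vxlan / eth" is
 * a valid sequence: UDP is an L4 item, VXLAN requires prev_layer L4
 * and restarts the sequence at START_LAYER for the inner frame ETH
 * item.
 */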
/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

	return 0;
}
/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}
static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					pattern,
					"There is an unsupported pattern item "
					"in the inner frame");
				return -rte_errno;
			}
			break;
		}

		rc = item->parse(pattern, &flow->spec.template, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}
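/*
 * A QUEUE action rule can be requested, e.g., from testpmd
 * (illustrative command):
 *   flow create 0 ingress pattern eth / ipv4 / end \
 *        actions queue index 2 / end
 */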
#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *rss,
		   struct rte_flow *flow)
{
	unsigned int rxq_sw_index;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
	uint64_t rss_hf;
	uint8_t *rss_key = NULL;
	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
	unsigned int i;

	if (rss->num == 0)
		return -EINVAL;

	rxq_sw_index = sa->rxq_count - 1;
	rxq = sa->rxq_info[rxq_sw_index].rxq;
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < rss->num; ++i) {
		rxq_sw_index = rss->queue[i];

		if (rxq_sw_index >= sa->rxq_count)
			return -EINVAL;

		rxq = sa->rxq_info[rxq_sw_index].rxq;

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
	if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
		return -EINVAL;

	if (rss_conf != NULL) {
		if (rss_conf->rss_key_len != sizeof(sa->rss_key))
			return -EINVAL;

		rss_key = rss_conf->rss_key;
	} else {
		rss_key = sa->rss_key;
	}

	flow->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int rxq_sw_index = rss->queue[i % rss->num];
		struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;

		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
#endif /* EFSYS_OPT_RX_SCALE */
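/*
 * Worked example (illustrative): for an RSS action over queues whose
 * hardware indices are 4..7, rxq_hw_index_min is 4 and rss_tbl holds
 * the offsets 0..3 repeated across the table; the base queue (4) is
 * supplied later via efs_dmaq_id in sfc_flow_filter_insert().
 */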
static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	return sfc_flow_spec_flush(sa, spec, spec->count);
}
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
#if EFSYS_OPT_RX_SCALE
	struct sfc_flow_rss *rss = &flow->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	unsigned int i;
	int rc = 0;

	if (flow->rss) {
		unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
					      rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   EFX_RX_HASHALG_TOEPLITZ,
					   rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  rss->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < flow->spec.count; i++) {
			efx_filter_spec_t *spec = &flow->spec.filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  rss->rss_tbl, RTE_DIM(rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
#else /* !EFSYS_OPT_RX_SCALE */
	return sfc_flow_spec_insert(sa, &flow->spec);
#endif /* EFSYS_OPT_RX_SCALE */
}
static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

#if EFSYS_OPT_RX_SCALE
	if (flow->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &flow->spec.filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}
#endif /* EFSYS_OPT_RX_SCALE */

	return rc;
}
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

#if EFSYS_OPT_RX_SCALE
		case RTE_FLOW_ACTION_TYPE_RSS:
			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;
#endif /* EFSYS_OPT_RX_SCALE */

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	efx_filter_match_flags_t match_flags;
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	/* The match flags are only meaningful once the pattern is parsed */
	match_flags = flow->spec.template.efs_match_flags;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

	/*
	 * At this point, template specification simply becomes the first
	 * fully elaborated spec
	 */
	flow->spec.filters[0] = flow->spec.template;
	flow->spec.count = 1;

	return 0;

fail_bad_value:
	return rc;
}
static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}
static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_remove(sa, flow);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}
static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		port->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}