1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2017-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #include <rte_byteorder.h>
11 #include <rte_tailq.h>
12 #include <rte_common.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_ether.h>
16 #include <rte_flow_driver.h>
22 #include "sfc_filter.h"
25 #include "sfc_dp_rx.h"
27 struct sfc_flow_ops_by_spec {
28 sfc_flow_parse_cb_t *parse;
31 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
33 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
34 .parse = sfc_flow_parse_rte_to_filter,
37 static const struct sfc_flow_ops_by_spec *
38 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
40 struct sfc_flow_spec *spec = &flow->spec;
41 const struct sfc_flow_ops_by_spec *ops = NULL;
44 case SFC_FLOW_SPEC_FILTER:
45 ops = &sfc_flow_ops_filter;
56 * Currently, the filter-based (VNIC) flow API is implemented in such a manner
57 * that each flow rule is converted to one or more hardware filters.
58 * All elements of flow rule (attributes, pattern items, actions)
59 * correspond to one or more fields in the efx_filter_spec_s structure
60 * that is responsible for the hardware filter.
61 * If some required field is unset in the flow rule, then a handful
62 * of filter copies will be created to cover all possible values
63 * of such a field.
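/*
 * Editor's illustration (not driver code): a rule that matches UDP
 * packets but pins down no EtherType cannot be expressed as a single
 * hardware filter, so the template is copied once per possible value,
 * as sfc_flow_set_ethertypes() below does:
 *
 *   template: IP_PROTO=UDP                    (1 specification)
 *   copy 0:   ETHER_TYPE=IPv4 + IP_PROTO=UDP
 *   copy 1:   ETHER_TYPE=IPv6 + IP_PROTO=UDP
 */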
66 enum sfc_flow_item_layers {
67 SFC_FLOW_ITEM_ANY_LAYER,
68 SFC_FLOW_ITEM_START_LAYER,
74 typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
75 efx_filter_spec_t *spec,
76 struct rte_flow_error *error);
78 struct sfc_flow_item {
79 enum rte_flow_item_type type; /* Type of item */
80 enum sfc_flow_item_layers layer; /* Layer of item */
81 enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
82 sfc_flow_item_parse *parse; /* Parsing function */
85 static sfc_flow_item_parse sfc_flow_parse_void;
86 static sfc_flow_item_parse sfc_flow_parse_eth;
87 static sfc_flow_item_parse sfc_flow_parse_vlan;
88 static sfc_flow_item_parse sfc_flow_parse_ipv4;
89 static sfc_flow_item_parse sfc_flow_parse_ipv6;
90 static sfc_flow_item_parse sfc_flow_parse_tcp;
91 static sfc_flow_item_parse sfc_flow_parse_udp;
92 static sfc_flow_item_parse sfc_flow_parse_vxlan;
93 static sfc_flow_item_parse sfc_flow_parse_geneve;
94 static sfc_flow_item_parse sfc_flow_parse_nvgre;
96 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
97 unsigned int filters_count_for_one_val,
98 struct rte_flow_error *error);
100 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
101 efx_filter_spec_t *spec,
102 struct sfc_filter *filter);
104 struct sfc_flow_copy_flag {
105 /* EFX filter specification match flag */
106 efx_filter_match_flags_t flag;
107 /* Number of values of corresponding field */
108 unsigned int vals_count;
109 /* Function to set values in specifications */
110 sfc_flow_spec_set_vals *set_vals;
112 * Function to check that the specification is suitable
113 * for adding this match flag
115 sfc_flow_spec_check *spec_check;
118 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
119 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
120 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
121 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
122 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
123 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
124 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
127 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
132 for (i = 0; i < size; i++)
133 sum |= buf[i];
135 return (sum == 0) ? B_TRUE : B_FALSE;
139 * Validate the item and prepare the "spec" and "mask" structures for parsing
142 sfc_flow_parse_init(const struct rte_flow_item *item,
143 const void **spec_ptr,
144 const void **mask_ptr,
145 const void *supp_mask,
146 const void *def_mask,
148 struct rte_flow_error *error)
157 rte_flow_error_set(error, EINVAL,
158 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
163 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
164 rte_flow_error_set(error, EINVAL,
165 RTE_FLOW_ERROR_TYPE_ITEM, item,
166 "Mask or last is set without spec");
171 * If "mask" is not set, the default mask is used,
172 * but if the default mask is NULL, "mask" must be set
174 if (item->mask == NULL) {
175 if (def_mask == NULL) {
176 rte_flow_error_set(error, EINVAL,
177 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
178 "Mask should be specified");
194 * If field values in "last" are either 0 or equal to the corresponding
195 * values in "spec" then they are ignored
198 !sfc_flow_is_zero(last, size) &&
199 memcmp(last, spec, size) != 0) {
200 rte_flow_error_set(error, ENOTSUP,
201 RTE_FLOW_ERROR_TYPE_ITEM, item,
202 "Ranging is not supported");
206 if (supp_mask == NULL) {
207 rte_flow_error_set(error, EINVAL,
208 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
209 "Supported mask for item should be specified");
213 /* Check that the mask does not request a match on more fields than supp_mask allows */
214 for (i = 0; i < size; i++) {
215 supp = ((const uint8_t *)supp_mask)[i];
217 if (~supp & mask[i]) {
218 rte_flow_error_set(error, ENOTSUP,
219 RTE_FLOW_ERROR_TYPE_ITEM, item,
220 "Item's field is not supported");
233 * Masking is not supported, so masks in items should be either
234 * full or empty (zeroed) and set only for supported fields which
235 * are specified in the supp_mask.
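/*
 * Editor's example (assumed application-side item): the mask below is
 * accepted because every field is either fully set or fully zero,
 * whereas a partial per-byte mask such as ff:ff:ff:00:00:00 on dst
 * would be rejected (the individual/group bit mask for Ethernet,
 * handled in sfc_flow_parse_eth(), is the one exception):
 *
 *   static const struct rte_flow_item_eth eth_mask = {
 *       .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *   };
 */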
239 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
240 __rte_unused efx_filter_spec_t *efx_spec,
241 __rte_unused struct rte_flow_error *error)
247 * Convert Ethernet item to EFX filter specification.
250 * Item specification. Outer frame specification may only comprise
251 * source/destination addresses and Ethertype field.
252 * Inner frame specification may contain destination address only.
253 * Individual/group masks are supported, as well as empty and full masks.
254 * If the mask is NULL, the default mask will be used. Ranging is not supported.
255 * @param efx_spec[in, out]
256 * EFX filter specification to update.
258 * Perform verbose error reporting if not NULL.
261 sfc_flow_parse_eth(const struct rte_flow_item *item,
262 efx_filter_spec_t *efx_spec,
263 struct rte_flow_error *error)
266 const struct rte_flow_item_eth *spec = NULL;
267 const struct rte_flow_item_eth *mask = NULL;
268 const struct rte_flow_item_eth supp_mask = {
269 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
270 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
271 .type = 0xffff,
272 };
273 const struct rte_flow_item_eth ifrm_supp_mask = {
274 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
276 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
277 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
279 const struct rte_flow_item_eth *supp_mask_p;
280 const struct rte_flow_item_eth *def_mask_p;
281 uint8_t *loc_mac = NULL;
282 boolean_t is_ifrm = (efx_spec->efs_encap_type !=
283 EFX_TUNNEL_PROTOCOL_NONE);
286 supp_mask_p = &ifrm_supp_mask;
287 def_mask_p = &ifrm_supp_mask;
288 loc_mac = efx_spec->efs_ifrm_loc_mac;
290 supp_mask_p = &supp_mask;
291 def_mask_p = &rte_flow_item_eth_mask;
292 loc_mac = efx_spec->efs_loc_mac;
295 rc = sfc_flow_parse_init(item,
296 (const void **)&spec,
297 (const void **)&mask,
298 supp_mask_p, def_mask_p,
299 sizeof(struct rte_flow_item_eth),
304 /* If "spec" is not set, could be any Ethernet */
308 if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
309 efx_spec->efs_match_flags |= is_ifrm ?
310 EFX_FILTER_MATCH_IFRM_LOC_MAC :
311 EFX_FILTER_MATCH_LOC_MAC;
312 rte_memcpy(loc_mac, spec->dst.addr_bytes,
314 } else if (memcmp(mask->dst.addr_bytes, ig_mask,
315 EFX_MAC_ADDR_LEN) == 0) {
316 if (rte_is_unicast_ether_addr(&spec->dst))
317 efx_spec->efs_match_flags |= is_ifrm ?
318 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
319 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
321 efx_spec->efs_match_flags |= is_ifrm ?
322 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
323 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
324 } else if (!rte_is_zero_ether_addr(&mask->dst)) {
329 * ifrm_supp_mask ensures that the source address and
330 * ethertype masks are equal to zero in inner frame,
331 * so these fields are filled in only for the outer frame
333 if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
334 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
335 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
337 } else if (!rte_is_zero_ether_addr(&mask->src)) {
342 * Ether type is in big-endian byte order in item and
343 * in little-endian in efx_spec, so a byte swap is used
345 if (mask->type == supp_mask.type) {
346 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
347 efx_spec->efs_ether_type = rte_bswap16(spec->type);
348 } else if (mask->type != 0) {
355 rte_flow_error_set(error, EINVAL,
356 RTE_FLOW_ERROR_TYPE_ITEM, item,
357 "Bad mask in the ETH pattern item");
362 * Convert VLAN item to EFX filter specification.
365 * Item specification. Only VID field is supported.
366 * The mask cannot be NULL. Ranging is not supported.
367 * @param efx_spec[in, out]
368 * EFX filter specification to update.
370 * Perform verbose error reporting if not NULL.
373 sfc_flow_parse_vlan(const struct rte_flow_item *item,
374 efx_filter_spec_t *efx_spec,
375 struct rte_flow_error *error)
379 const struct rte_flow_item_vlan *spec = NULL;
380 const struct rte_flow_item_vlan *mask = NULL;
381 const struct rte_flow_item_vlan supp_mask = {
382 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
383 .inner_type = RTE_BE16(0xffff),
386 rc = sfc_flow_parse_init(item,
387 (const void **)&spec,
388 (const void **)&mask,
391 sizeof(struct rte_flow_item_vlan),
397 * VID is in big-endian byte order in item and
398 * in little-endian in efx_spec, so a byte swap is used.
399 * If two VLAN items are included, the first matches
400 * the outer tag and the next matches the inner tag.
402 if (mask->tci == supp_mask.tci) {
403 /* Apply mask to keep VID only */
404 vid = rte_bswap16(spec->tci & mask->tci);
406 if (!(efx_spec->efs_match_flags &
407 EFX_FILTER_MATCH_OUTER_VID)) {
408 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
409 efx_spec->efs_outer_vid = vid;
410 } else if (!(efx_spec->efs_match_flags &
411 EFX_FILTER_MATCH_INNER_VID)) {
412 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
413 efx_spec->efs_inner_vid = vid;
415 rte_flow_error_set(error, EINVAL,
416 RTE_FLOW_ERROR_TYPE_ITEM, item,
417 "More than two VLAN items");
421 rte_flow_error_set(error, EINVAL,
422 RTE_FLOW_ERROR_TYPE_ITEM, item,
423 "VLAN ID in TCI match is required");
427 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
428 rte_flow_error_set(error, EINVAL,
429 RTE_FLOW_ERROR_TYPE_ITEM, item,
430 "VLAN TPID matching is not supported");
433 if (mask->inner_type == supp_mask.inner_type) {
434 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
435 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
436 } else if (mask->inner_type) {
437 rte_flow_error_set(error, EINVAL,
438 RTE_FLOW_ERROR_TYPE_ITEM, item,
439 "Bad mask for VLAN inner_type");
447 * Convert IPv4 item to EFX filter specification.
450 * Item specification. Only source and destination addresses and
451 * protocol fields are supported. If the mask is NULL, default
452 * mask will be used. Ranging is not supported.
453 * @param efx_spec[in, out]
454 * EFX filter specification to update.
456 * Perform verbose error reporting if not NULL.
459 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
460 efx_filter_spec_t *efx_spec,
461 struct rte_flow_error *error)
464 const struct rte_flow_item_ipv4 *spec = NULL;
465 const struct rte_flow_item_ipv4 *mask = NULL;
466 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
467 const struct rte_flow_item_ipv4 supp_mask = {
468 .hdr = {
469 .src_addr = 0xffffffff,
470 .dst_addr = 0xffffffff,
471 .next_proto_id = 0xff,
472 }
473 };
475 rc = sfc_flow_parse_init(item,
476 (const void **)&spec,
477 (const void **)&mask,
479 &rte_flow_item_ipv4_mask,
480 sizeof(struct rte_flow_item_ipv4),
486 * Filtering by IPv4 source and destination addresses requires
487 * the appropriate ETHER_TYPE in hardware filters
489 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
490 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
491 efx_spec->efs_ether_type = ether_type_ipv4;
492 } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
493 rte_flow_error_set(error, EINVAL,
494 RTE_FLOW_ERROR_TYPE_ITEM, item,
495 "Ethertype in pattern with IPV4 item should be appropriate");
503 * IPv4 addresses are in big-endian byte order in item and in
504 * efx_spec, so no byte swap is needed
506 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
507 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
508 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
509 } else if (mask->hdr.src_addr != 0) {
513 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
514 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
515 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
516 } else if (mask->hdr.dst_addr != 0) {
520 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
521 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
522 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
523 } else if (mask->hdr.next_proto_id != 0) {
530 rte_flow_error_set(error, EINVAL,
531 RTE_FLOW_ERROR_TYPE_ITEM, item,
532 "Bad mask in the IPV4 pattern item");
537 * Convert IPv6 item to EFX filter specification.
540 * Item specification. Only source and destination addresses and
541 * next header fields are supported. If the mask is NULL, default
542 * mask will be used. Ranging is not supported.
543 * @param efx_spec[in, out]
544 * EFX filter specification to update.
546 * Perform verbose error reporting if not NULL.
549 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
550 efx_filter_spec_t *efx_spec,
551 struct rte_flow_error *error)
554 const struct rte_flow_item_ipv6 *spec = NULL;
555 const struct rte_flow_item_ipv6 *mask = NULL;
556 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
557 const struct rte_flow_item_ipv6 supp_mask = {
558 .hdr = {
559 .src_addr = { 0xff, 0xff, 0xff, 0xff,
560 0xff, 0xff, 0xff, 0xff,
561 0xff, 0xff, 0xff, 0xff,
562 0xff, 0xff, 0xff, 0xff },
563 .dst_addr = { 0xff, 0xff, 0xff, 0xff,
564 0xff, 0xff, 0xff, 0xff,
565 0xff, 0xff, 0xff, 0xff,
566 0xff, 0xff, 0xff, 0xff },
567 .proto = 0xff,
568 }
569 };
571 rc = sfc_flow_parse_init(item,
572 (const void **)&spec,
573 (const void **)&mask,
575 &rte_flow_item_ipv6_mask,
576 sizeof(struct rte_flow_item_ipv6),
582 * Filtering by IPv6 source and destination addresses requires
583 * the appropriate ETHER_TYPE in hardware filters
585 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
586 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
587 efx_spec->efs_ether_type = ether_type_ipv6;
588 } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
589 rte_flow_error_set(error, EINVAL,
590 RTE_FLOW_ERROR_TYPE_ITEM, item,
591 "Ethertype in pattern with IPV6 item should be appropriate");
599 * IPv6 addresses are in big-endian byte order in item and in
600 * efx_spec, so no byte swap is needed
602 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
603 sizeof(mask->hdr.src_addr)) == 0) {
604 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
606 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
607 sizeof(spec->hdr.src_addr));
608 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
609 sizeof(efx_spec->efs_rem_host));
610 } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
611 sizeof(mask->hdr.src_addr))) {
615 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
616 sizeof(mask->hdr.dst_addr)) == 0) {
617 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
619 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
620 sizeof(spec->hdr.dst_addr));
621 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
622 sizeof(efx_spec->efs_loc_host));
623 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
624 sizeof(mask->hdr.dst_addr))) {
628 if (mask->hdr.proto == supp_mask.hdr.proto) {
629 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
630 efx_spec->efs_ip_proto = spec->hdr.proto;
631 } else if (mask->hdr.proto != 0) {
638 rte_flow_error_set(error, EINVAL,
639 RTE_FLOW_ERROR_TYPE_ITEM, item,
640 "Bad mask in the IPV6 pattern item");
645 * Convert TCP item to EFX filter specification.
648 * Item specification. Only source and destination ports fields
649 * are supported. If the mask is NULL, default mask will be used.
650 * Ranging is not supported.
651 * @param efx_spec[in, out]
652 * EFX filter specification to update.
654 * Perform verbose error reporting if not NULL.
657 sfc_flow_parse_tcp(const struct rte_flow_item *item,
658 efx_filter_spec_t *efx_spec,
659 struct rte_flow_error *error)
662 const struct rte_flow_item_tcp *spec = NULL;
663 const struct rte_flow_item_tcp *mask = NULL;
664 const struct rte_flow_item_tcp supp_mask = {
665 .hdr = {
666 .src_port = 0xffff,
667 .dst_port = 0xffff,
668 }
669 };
671 rc = sfc_flow_parse_init(item,
672 (const void **)&spec,
673 (const void **)&mask,
675 &rte_flow_item_tcp_mask,
676 sizeof(struct rte_flow_item_tcp),
682 * Filtering by TCP source and destination ports requires
683 * the appropriate IP_PROTO in hardware filters
685 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
686 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
687 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
688 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
689 rte_flow_error_set(error, EINVAL,
690 RTE_FLOW_ERROR_TYPE_ITEM, item,
691 "IP proto in pattern with TCP item should be appropriate");
699 * Source and destination ports are in big-endian byte order in item and
700 * in little-endian in efx_spec, so a byte swap is used
702 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
703 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
704 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
705 } else if (mask->hdr.src_port != 0) {
709 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
710 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
711 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
712 } else if (mask->hdr.dst_port != 0) {
719 rte_flow_error_set(error, EINVAL,
720 RTE_FLOW_ERROR_TYPE_ITEM, item,
721 "Bad mask in the TCP pattern item");
726 * Convert UDP item to EFX filter specification.
729 * Item specification. Only source and destination ports fields
730 * are supported. If the mask is NULL, default mask will be used.
731 * Ranging is not supported.
732 * @param efx_spec[in, out]
733 * EFX filter specification to update.
735 * Perform verbose error reporting if not NULL.
738 sfc_flow_parse_udp(const struct rte_flow_item *item,
739 efx_filter_spec_t *efx_spec,
740 struct rte_flow_error *error)
743 const struct rte_flow_item_udp *spec = NULL;
744 const struct rte_flow_item_udp *mask = NULL;
745 const struct rte_flow_item_udp supp_mask = {
746 .hdr = {
747 .src_port = 0xffff,
748 .dst_port = 0xffff,
749 }
750 };
752 rc = sfc_flow_parse_init(item,
753 (const void **)&spec,
754 (const void **)&mask,
756 &rte_flow_item_udp_mask,
757 sizeof(struct rte_flow_item_udp),
763 * Filtering by UDP source and destination ports requires
764 * the appropriate IP_PROTO in hardware filters
766 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
767 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
768 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
769 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
770 rte_flow_error_set(error, EINVAL,
771 RTE_FLOW_ERROR_TYPE_ITEM, item,
772 "IP proto in pattern with UDP item should be appropriate");
780 * Source and destination ports are in big-endian byte order in item and
781 * in little-endian in efx_spec, so a byte swap is used
783 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
784 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
785 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
786 } else if (mask->hdr.src_port != 0) {
790 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
791 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
792 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
793 } else if (mask->hdr.dst_port != 0) {
800 rte_flow_error_set(error, EINVAL,
801 RTE_FLOW_ERROR_TYPE_ITEM, item,
802 "Bad mask in the UDP pattern item");
807 * Filters for encapsulated packets match based on the EtherType and IP
808 * protocol in the outer frame.
811 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
812 efx_filter_spec_t *efx_spec,
814 struct rte_flow_error *error)
816 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
817 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
818 efx_spec->efs_ip_proto = ip_proto;
819 } else if (efx_spec->efs_ip_proto != ip_proto) {
821 case EFX_IPPROTO_UDP:
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_ITEM, item,
824 "Outer IP header protocol must be UDP "
825 "in VxLAN/GENEVE pattern");
828 case EFX_IPPROTO_GRE:
829 rte_flow_error_set(error, EINVAL,
830 RTE_FLOW_ERROR_TYPE_ITEM, item,
831 "Outer IP header protocol must be GRE "
836 rte_flow_error_set(error, EINVAL,
837 RTE_FLOW_ERROR_TYPE_ITEM, item,
838 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
844 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
845 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
846 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
847 rte_flow_error_set(error, EINVAL,
848 RTE_FLOW_ERROR_TYPE_ITEM, item,
849 "Outer frame EtherType in pattern with tunneling "
850 "must be IPv4 or IPv6");
858 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
859 const uint8_t *vni_or_vsid_val,
860 const uint8_t *vni_or_vsid_mask,
861 const struct rte_flow_item *item,
862 struct rte_flow_error *error)
864 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
868 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
869 EFX_VNI_OR_VSID_LEN) == 0) {
870 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
871 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
872 EFX_VNI_OR_VSID_LEN);
873 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
874 rte_flow_error_set(error, EINVAL,
875 RTE_FLOW_ERROR_TYPE_ITEM, item,
876 "Unsupported VNI/VSID mask");
884 * Convert VXLAN item to EFX filter specification.
887 * Item specification. Only VXLAN network identifier field is supported.
888 * If the mask is NULL, default mask will be used.
889 * Ranging is not supported.
890 * @param efx_spec[in, out]
891 * EFX filter specification to update.
893 * Perform verbose error reporting if not NULL.
896 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
897 efx_filter_spec_t *efx_spec,
898 struct rte_flow_error *error)
901 const struct rte_flow_item_vxlan *spec = NULL;
902 const struct rte_flow_item_vxlan *mask = NULL;
903 const struct rte_flow_item_vxlan supp_mask = {
904 .vni = { 0xff, 0xff, 0xff }
907 rc = sfc_flow_parse_init(item,
908 (const void **)&spec,
909 (const void **)&mask,
911 &rte_flow_item_vxlan_mask,
912 sizeof(struct rte_flow_item_vxlan),
917 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
918 EFX_IPPROTO_UDP, error);
922 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
923 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
928 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
929 mask->vni, item, error);
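/*
 * Editor's sketch of a complete VXLAN pattern (outer frame items
 * first, optional inner ETH after the tunnel item), with a
 * hypothetical VNI of 42 carried big-endian in the 3-byte array:
 *
 *   ETH / IPV4 / UDP / VXLAN / ETH / END
 *
 *   static const struct rte_flow_item_vxlan vxlan_spec = {
 *       .vni = { 0x00, 0x00, 0x2a },
 *   };
 */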
935 * Convert GENEVE item to EFX filter specification.
938 * Item specification. Only Virtual Network Identifier and protocol type
939 * fields are supported, but the protocol type can only be Ethernet (0x6558).
940 * If the mask is NULL, default mask will be used.
941 * Ranging is not supported.
942 * @param efx_spec[in, out]
943 * EFX filter specification to update.
945 * Perform verbose error reporting if not NULL.
948 sfc_flow_parse_geneve(const struct rte_flow_item *item,
949 efx_filter_spec_t *efx_spec,
950 struct rte_flow_error *error)
953 const struct rte_flow_item_geneve *spec = NULL;
954 const struct rte_flow_item_geneve *mask = NULL;
955 const struct rte_flow_item_geneve supp_mask = {
956 .protocol = RTE_BE16(0xffff),
957 .vni = { 0xff, 0xff, 0xff }
960 rc = sfc_flow_parse_init(item,
961 (const void **)&spec,
962 (const void **)&mask,
964 &rte_flow_item_geneve_mask,
965 sizeof(struct rte_flow_item_geneve),
970 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
971 EFX_IPPROTO_UDP, error);
975 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
976 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
981 if (mask->protocol == supp_mask.protocol) {
982 if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
983 rte_flow_error_set(error, EINVAL,
984 RTE_FLOW_ERROR_TYPE_ITEM, item,
985 "GENEVE encap. protocol must be Ethernet "
986 "(0x6558) in the GENEVE pattern item");
989 } else if (mask->protocol != 0) {
990 rte_flow_error_set(error, EINVAL,
991 RTE_FLOW_ERROR_TYPE_ITEM, item,
992 "Unsupported mask for GENEVE encap. protocol");
996 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
997 mask->vni, item, error);
1003 * Convert NVGRE item to EFX filter specification.
1006 * Item specification. Only virtual subnet ID field is supported.
1007 * If the mask is NULL, default mask will be used.
1008 * Ranging is not supported.
1009 * @param efx_spec[in, out]
1010 * EFX filter specification to update.
1012 * Perform verbose error reporting if not NULL.
1015 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
1016 efx_filter_spec_t *efx_spec,
1017 struct rte_flow_error *error)
1020 const struct rte_flow_item_nvgre *spec = NULL;
1021 const struct rte_flow_item_nvgre *mask = NULL;
1022 const struct rte_flow_item_nvgre supp_mask = {
1023 .tni = { 0xff, 0xff, 0xff }
1026 rc = sfc_flow_parse_init(item,
1027 (const void **)&spec,
1028 (const void **)&mask,
1030 &rte_flow_item_nvgre_mask,
1031 sizeof(struct rte_flow_item_nvgre),
1036 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1037 EFX_IPPROTO_GRE, error);
1041 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1042 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1047 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1048 mask->tni, item, error);
1053 static const struct sfc_flow_item sfc_flow_items[] = {
1055 .type = RTE_FLOW_ITEM_TYPE_VOID,
1056 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1057 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1058 .parse = sfc_flow_parse_void,
1061 .type = RTE_FLOW_ITEM_TYPE_ETH,
1062 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1063 .layer = SFC_FLOW_ITEM_L2,
1064 .parse = sfc_flow_parse_eth,
1067 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1068 .prev_layer = SFC_FLOW_ITEM_L2,
1069 .layer = SFC_FLOW_ITEM_L2,
1070 .parse = sfc_flow_parse_vlan,
1073 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1074 .prev_layer = SFC_FLOW_ITEM_L2,
1075 .layer = SFC_FLOW_ITEM_L3,
1076 .parse = sfc_flow_parse_ipv4,
1079 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1080 .prev_layer = SFC_FLOW_ITEM_L2,
1081 .layer = SFC_FLOW_ITEM_L3,
1082 .parse = sfc_flow_parse_ipv6,
1085 .type = RTE_FLOW_ITEM_TYPE_TCP,
1086 .prev_layer = SFC_FLOW_ITEM_L3,
1087 .layer = SFC_FLOW_ITEM_L4,
1088 .parse = sfc_flow_parse_tcp,
1091 .type = RTE_FLOW_ITEM_TYPE_UDP,
1092 .prev_layer = SFC_FLOW_ITEM_L3,
1093 .layer = SFC_FLOW_ITEM_L4,
1094 .parse = sfc_flow_parse_udp,
1097 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1098 .prev_layer = SFC_FLOW_ITEM_L4,
1099 .layer = SFC_FLOW_ITEM_START_LAYER,
1100 .parse = sfc_flow_parse_vxlan,
1103 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1104 .prev_layer = SFC_FLOW_ITEM_L4,
1105 .layer = SFC_FLOW_ITEM_START_LAYER,
1106 .parse = sfc_flow_parse_geneve,
1109 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1110 .prev_layer = SFC_FLOW_ITEM_L3,
1111 .layer = SFC_FLOW_ITEM_START_LAYER,
1112 .parse = sfc_flow_parse_nvgre,
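/*
 * Editor's note: the prev_layer/layer pairs above encode the accepted
 * item order. Sequences the table admits include (leading layers may
 * be omitted, as checked in sfc_flow_parse_pattern() below):
 *
 *   ETH / VLAN / VLAN / IPV4 / TCP
 *   ETH / IPV4 / UDP / VXLAN / ETH
 *   IPV6 / TCP
 */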
1117 * Protocol-independent flow API support
1120 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1121 struct rte_flow *flow,
1122 struct rte_flow_error *error)
1124 struct sfc_flow_spec *spec = &flow->spec;
1125 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1128 rte_flow_error_set(error, EINVAL,
1129 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1133 if (attr->group != 0) {
1134 rte_flow_error_set(error, ENOTSUP,
1135 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1136 "Groups are not supported");
1139 if (attr->egress != 0) {
1140 rte_flow_error_set(error, ENOTSUP,
1141 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1142 "Egress is not supported");
1145 if (attr->ingress == 0) {
1146 rte_flow_error_set(error, ENOTSUP,
1147 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1148 "Ingress is compulsory");
1151 if (attr->transfer == 0) {
1152 if (attr->priority != 0) {
1153 rte_flow_error_set(error, ENOTSUP,
1154 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1155 attr, "Priorities are unsupported");
1158 spec->type = SFC_FLOW_SPEC_FILTER;
1159 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
1160 spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1162 rte_flow_error_set(error, ENOTSUP,
1163 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1164 "Transfer is not supported");
1171 /* Get item from array sfc_flow_items */
1172 static const struct sfc_flow_item *
1173 sfc_flow_get_item(enum rte_flow_item_type type)
1177 for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
1178 if (sfc_flow_items[i].type == type)
1179 return &sfc_flow_items[i];
1185 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
1186 struct rte_flow *flow,
1187 struct rte_flow_error *error)
1190 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1191 boolean_t is_ifrm = B_FALSE;
1192 const struct sfc_flow_item *item;
1193 struct sfc_flow_spec *spec = &flow->spec;
1194 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1196 if (pattern == NULL) {
1197 rte_flow_error_set(error, EINVAL,
1198 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1203 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1204 item = sfc_flow_get_item(pattern->type);
1206 rte_flow_error_set(error, ENOTSUP,
1207 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1208 "Unsupported pattern item");
1213 * Omitting one or several protocol layers at the beginning
1214 * of pattern is supported
1216 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1217 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1218 item->prev_layer != prev_layer) {
1219 rte_flow_error_set(error, ENOTSUP,
1220 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1221 "Unexpected sequence of pattern items");
1226 * Allow only VOID and ETH pattern items in the inner frame.
1227 * Also check that there is only one tunneling protocol.
1229 switch (item->type) {
1230 case RTE_FLOW_ITEM_TYPE_VOID:
1231 case RTE_FLOW_ITEM_TYPE_ETH:
1234 case RTE_FLOW_ITEM_TYPE_VXLAN:
1235 case RTE_FLOW_ITEM_TYPE_GENEVE:
1236 case RTE_FLOW_ITEM_TYPE_NVGRE:
1238 rte_flow_error_set(error, EINVAL,
1239 RTE_FLOW_ERROR_TYPE_ITEM,
1241 "More than one tunneling protocol");
1249 rte_flow_error_set(error, EINVAL,
1250 RTE_FLOW_ERROR_TYPE_ITEM,
1252 "There is an unsupported pattern item "
1253 "in the inner frame");
1259 rc = item->parse(pattern, &spec_filter->template, error);
1263 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1264 prev_layer = item->layer;
1271 sfc_flow_parse_queue(struct sfc_adapter *sa,
1272 const struct rte_flow_action_queue *queue,
1273 struct rte_flow *flow)
1275 struct sfc_flow_spec *spec = &flow->spec;
1276 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1277 struct sfc_rxq *rxq;
1279 if (queue->index >= sfc_sa2shared(sa)->rxq_count)
1282 rxq = &sa->rxq_ctrl[queue->index];
1283 spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1289 sfc_flow_parse_rss(struct sfc_adapter *sa,
1290 const struct rte_flow_action_rss *action_rss,
1291 struct rte_flow *flow)
1293 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1294 struct sfc_rss *rss = &sas->rss;
1295 unsigned int rxq_sw_index;
1296 struct sfc_rxq *rxq;
1297 unsigned int rxq_hw_index_min;
1298 unsigned int rxq_hw_index_max;
1299 efx_rx_hash_type_t efx_hash_types;
1300 const uint8_t *rss_key;
1301 struct sfc_flow_spec *spec = &flow->spec;
1302 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1303 struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1306 if (action_rss->queue_num == 0)
1309 rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1310 rxq = &sa->rxq_ctrl[rxq_sw_index];
1311 rxq_hw_index_min = rxq->hw_index;
1312 rxq_hw_index_max = 0;
1314 for (i = 0; i < action_rss->queue_num; ++i) {
1315 rxq_sw_index = action_rss->queue[i];
1317 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1320 rxq = &sa->rxq_ctrl[rxq_sw_index];
1322 if (rxq->hw_index < rxq_hw_index_min)
1323 rxq_hw_index_min = rxq->hw_index;
1325 if (rxq->hw_index > rxq_hw_index_max)
1326 rxq_hw_index_max = rxq->hw_index;
1329 switch (action_rss->func) {
1330 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1331 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1337 if (action_rss->level)
1341 * Dummy RSS action with only one queue and no specific settings
1342 * for hash types and key does not require dedicated RSS context
1343 * and may be simplified to single queue action.
1345 if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1346 action_rss->key_len == 0) {
1347 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1351 if (action_rss->types) {
1354 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1362 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1363 efx_hash_types |= rss->hf_map[i].efx;
1366 if (action_rss->key_len) {
1367 if (action_rss->key_len != sizeof(rss->key))
1370 rss_key = action_rss->key;
1375 spec_filter->rss = B_TRUE;
1377 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1378 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1379 sfc_rss_conf->rss_hash_types = efx_hash_types;
1380 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1382 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1383 unsigned int nb_queues = action_rss->queue_num;
1384 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1385 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1387 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
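/*
 * Editor's worked example (assuming a queue's hardware index equals
 * its software index): an RSS action over queues {5, 7} yields
 * rxq_hw_index_min = 5 and rxq_hw_index_max = 7, and rss_tbl is
 * filled with the relative offsets {0, 2, 0, 2, ...}.
 *
 *   const uint16_t queues[] = { 5, 7 };
 *   const struct rte_flow_action_rss rss = {
 *       .types = ETH_RSS_IP,
 *       .queue_num = 2,
 *       .queue = queues,
 *   };
 */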
1394 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1395 unsigned int filters_count)
1397 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1401 for (i = 0; i < filters_count; i++) {
1404 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1405 if (ret == 0 && rc != 0) {
1406 sfc_err(sa, "failed to remove filter specification "
1407 "(rc = %d)", rc);
1416 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1418 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1422 for (i = 0; i < spec_filter->count; i++) {
1423 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1425 sfc_flow_spec_flush(sa, spec, i);
1434 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1436 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1438 return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1442 sfc_flow_filter_insert(struct sfc_adapter *sa,
1443 struct rte_flow *flow)
1445 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1446 struct sfc_rss *rss = &sas->rss;
1447 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1448 struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1449 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1453 if (spec_filter->rss) {
1454 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1455 flow_rss->rxq_hw_index_min + 1,
1458 rc = efx_rx_scale_context_alloc(sa->nic,
1459 EFX_RX_SCALE_EXCLUSIVE,
1463 goto fail_scale_context_alloc;
1465 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1467 flow_rss->rss_hash_types, B_TRUE);
1469 goto fail_scale_mode_set;
1471 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1475 goto fail_scale_key_set;
1478 * At this point, fully elaborated filter specifications
1479 * have been produced from the template. To make sure that
1480 * RSS behaviour is consistent between them, set the same
1481 * RSS context value everywhere.
1483 for (i = 0; i < spec_filter->count; i++) {
1484 efx_filter_spec_t *spec = &spec_filter->filters[i];
1486 spec->efs_rss_context = efs_rss_context;
1487 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1488 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1492 rc = sfc_flow_spec_insert(sa, &flow->spec);
1494 goto fail_filter_insert;
1496 if (spec_filter->rss) {
1498 * Scale table is set after filter insertion because
1499 * the table entries are relative to the base RxQ ID
1500 * and the latter is submitted to the HW by means of
1501 * inserting a filter, so by the time of the request
1502 * the HW knows all the information needed to verify
1503 * the table entries, and the operation will succeed
1505 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1507 RTE_DIM(flow_rss->rss_tbl));
1509 goto fail_scale_tbl_set;
1515 sfc_flow_spec_remove(sa, &flow->spec);
1519 fail_scale_mode_set:
1520 if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1521 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1523 fail_scale_context_alloc:
1528 sfc_flow_filter_remove(struct sfc_adapter *sa,
1529 struct rte_flow *flow)
1531 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1534 rc = sfc_flow_spec_remove(sa, &flow->spec);
1538 if (spec_filter->rss) {
1540 * All specifications for a given flow rule have the same RSS
1541 * context, so that RSS context value is taken from the first
1542 * filter specification
1544 efx_filter_spec_t *spec = &spec_filter->filters[0];
1546 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1553 sfc_flow_parse_mark(struct sfc_adapter *sa,
1554 const struct rte_flow_action_mark *mark,
1555 struct rte_flow *flow)
1557 struct sfc_flow_spec *spec = &flow->spec;
1558 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1559 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1561 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1564 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1565 spec_filter->template.efs_mark = mark->id;
1571 sfc_flow_parse_actions(struct sfc_adapter *sa,
1572 const struct rte_flow_action actions[],
1573 struct rte_flow *flow,
1574 struct rte_flow_error *error)
1577 struct sfc_flow_spec *spec = &flow->spec;
1578 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1579 const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1580 uint32_t actions_set = 0;
1581 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1582 (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1583 (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1584 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1585 (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1587 if (actions == NULL) {
1588 rte_flow_error_set(error, EINVAL,
1589 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1594 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1595 RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1597 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1598 switch (actions->type) {
1599 case RTE_FLOW_ACTION_TYPE_VOID:
1600 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1604 case RTE_FLOW_ACTION_TYPE_QUEUE:
1605 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1607 if ((actions_set & fate_actions_mask) != 0)
1608 goto fail_fate_actions;
1610 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1612 rte_flow_error_set(error, EINVAL,
1613 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1614 "Bad QUEUE action");
1619 case RTE_FLOW_ACTION_TYPE_RSS:
1620 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1622 if ((actions_set & fate_actions_mask) != 0)
1623 goto fail_fate_actions;
1625 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1627 rte_flow_error_set(error, -rc,
1628 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1634 case RTE_FLOW_ACTION_TYPE_DROP:
1635 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1637 if ((actions_set & fate_actions_mask) != 0)
1638 goto fail_fate_actions;
1640 spec_filter->template.efs_dmaq_id =
1641 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1644 case RTE_FLOW_ACTION_TYPE_FLAG:
1645 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1647 if ((actions_set & mark_actions_mask) != 0)
1648 goto fail_actions_overlap;
1650 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1651 rte_flow_error_set(error, ENOTSUP,
1652 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1653 "FLAG action is not supported on the current Rx datapath");
1657 spec_filter->template.efs_flags |=
1658 EFX_FILTER_FLAG_ACTION_FLAG;
1661 case RTE_FLOW_ACTION_TYPE_MARK:
1662 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1664 if ((actions_set & mark_actions_mask) != 0)
1665 goto fail_actions_overlap;
1667 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1668 rte_flow_error_set(error, ENOTSUP,
1669 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1670 "MARK action is not supported on the current Rx datapath");
1674 rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1676 rte_flow_error_set(error, rc,
1677 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1684 rte_flow_error_set(error, ENOTSUP,
1685 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1686 "Action is not supported");
1690 actions_set |= (1UL << actions->type);
1692 #undef SFC_BUILD_SET_OVERFLOW
1694 /* When fate is unknown, drop traffic. */
1695 if ((actions_set & fate_actions_mask) == 0) {
1696 spec_filter->template.efs_dmaq_id =
1697 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1703 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1704 "Cannot combine several fate-deciding actions, "
1705 "choose between QUEUE, RSS or DROP");
1708 fail_actions_overlap:
1709 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1710 "Overlapping actions are not supported");
1715 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1716 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1717 * specifications after copying.
1719 * @param spec[in, out]
1720 * SFC flow specification to update.
1721 * @param filters_count_for_one_val[in]
1722 * How many specifications should have the same match flag; this is
1723 * the number of specifications before copying.
1725 * Perform verbose error reporting if not NULL.
1728 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1729 unsigned int filters_count_for_one_val,
1730 struct rte_flow_error *error)
1733 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1734 static const efx_filter_match_flags_t vals[] = {
1735 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1736 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1739 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1740 rte_flow_error_set(error, EINVAL,
1741 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1742 "Number of specifications is incorrect while copying "
1743 "by unknown destination flags");
1747 for (i = 0; i < spec_filter->count; i++) {
1748 /* The check above ensures that divisor can't be zero here */
1749 spec_filter->filters[i].efs_match_flags |=
1750 vals[i / filters_count_for_one_val];
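/*
 * Editor's worked example: with filters_count_for_one_val == 2 and
 * spec_filter->count == 4, vals[i / 2] gives filters 0-1
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and filters 2-3
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
 */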
1757 * Check that the following conditions are met:
1758 * - the list of supported filters has a filter
1759 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1760 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1761 * be inserted.
1764 * The match flags of filter.
1766 * Specification to be supplemented.
1768 * SFC filter with list of supported filters.
1771 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1772 __rte_unused efx_filter_spec_t *spec,
1773 struct sfc_filter *filter)
1776 efx_filter_match_flags_t match_mcast_dst;
1779 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1780 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1781 for (i = 0; i < filter->supported_match_num; i++) {
1782 if (match_mcast_dst == filter->supported_match[i])
1790 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1791 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1792 * specifications after copying.
1794 * @param spec[in, out]
1795 * SFC flow specification to update.
1796 * @param filters_count_for_one_val[in]
1797 * How many specifications should have the same EtherType value; this is
1798 * the number of specifications before copying.
1800 * Perform verbose error reporting if not NULL.
1803 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1804 unsigned int filters_count_for_one_val,
1805 struct rte_flow_error *error)
1808 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1809 static const uint16_t vals[] = {
1810 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1813 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1814 rte_flow_error_set(error, EINVAL,
1815 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1816 "Number of specifications is incorrect "
1817 "while copying by Ethertype");
1821 for (i = 0; i < spec_filter->count; i++) {
1822 spec_filter->filters[i].efs_match_flags |=
1823 EFX_FILTER_MATCH_ETHER_TYPE;
1826 * The check above ensures that
1827 * filters_count_for_one_val is not 0
1829 spec_filter->filters[i].efs_ether_type =
1830 vals[i / filters_count_for_one_val];
1837 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1838 * in the same specifications after copying.
1840 * @param spec[in, out]
1841 * SFC flow specification to update.
1842 * @param filters_count_for_one_val[in]
1843 * How many specifications should have the same match flag; this is
1844 * the number of specifications before copying.
1846 * Perform verbose error reporting if not NULL.
1849 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1850 unsigned int filters_count_for_one_val,
1851 struct rte_flow_error *error)
1853 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1856 if (filters_count_for_one_val != spec_filter->count) {
1857 rte_flow_error_set(error, EINVAL,
1858 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1859 "Number of specifications is incorrect "
1860 "while copying by outer VLAN ID");
1864 for (i = 0; i < spec_filter->count; i++) {
1865 spec_filter->filters[i].efs_match_flags |=
1866 EFX_FILTER_MATCH_OUTER_VID;
1868 spec_filter->filters[i].efs_outer_vid = 0;
1875 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1876 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1877 * specifications after copying.
1879 * @param spec[in, out]
1880 * SFC flow specification to update.
1881 * @param filters_count_for_one_val[in]
1882 * How many specifications should have the same match flag; this is
1883 * the number of specifications before copying.
1885 * Perform verbose error reporting if not NULL.
1888 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1889 unsigned int filters_count_for_one_val,
1890 struct rte_flow_error *error)
1893 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1894 static const efx_filter_match_flags_t vals[] = {
1895 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1896 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1899 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1900 rte_flow_error_set(error, EINVAL,
1901 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1902 "Number of specifications is incorrect while copying "
1903 "by inner frame unknown destination flags");
1907 for (i = 0; i < spec_filter->count; i++) {
1908 /* The check above ensures that divisor can't be zero here */
1909 spec_filter->filters[i].efs_match_flags |=
1910 vals[i / filters_count_for_one_val];
1917 * Check that the following conditions are met:
1918 * - the specification corresponds to a filter for encapsulated traffic
1919 * - the list of supported filters has a filter
1920 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1921 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1922 * be inserted.
1925 * The match flags of filter.
1927 * Specification to be supplemented.
1929 * SFC filter with list of supported filters.
1932 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1933 efx_filter_spec_t *spec,
1934 struct sfc_filter *filter)
1937 efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1938 efx_filter_match_flags_t match_mcast_dst;
1940 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1944 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1945 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1946 for (i = 0; i < filter->supported_match_num; i++) {
1947 if (match_mcast_dst == filter->supported_match[i])
1955 * Check that the list of supported filters has a filter that differs
1956 * from @p match in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
1957 * in this case, that filter will be used and the
1958 * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
1961 * The match flags of filter.
1963 * Specification to be supplemented.
1965 * SFC filter with list of supported filters.
1968 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1969 __rte_unused efx_filter_spec_t *spec,
1970 struct sfc_filter *filter)
1973 efx_filter_match_flags_t match_without_vid =
1974 match & ~EFX_FILTER_MATCH_OUTER_VID;
1976 for (i = 0; i < filter->supported_match_num; i++) {
1977 if (match_without_vid == filter->supported_match[i])
1985 * Match flags that can be automatically added to filters.
1986 * Selecting the last minimum when searching for the copy flag ensures that the
1987 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
1988 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
1989 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
1990 * filters.
1992 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1994 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1996 .set_vals = sfc_flow_set_unknown_dst_flags,
1997 .spec_check = sfc_flow_check_unknown_dst_flags,
2000 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2002 .set_vals = sfc_flow_set_ethertypes,
2006 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2008 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2009 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2012 .flag = EFX_FILTER_MATCH_OUTER_VID,
2014 .set_vals = sfc_flow_set_outer_vid_flag,
2015 .spec_check = sfc_flow_check_outer_vid_flag,
2019 /* Get item from array sfc_flow_copy_flags */
2020 static const struct sfc_flow_copy_flag *
2021 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2025 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2026 if (sfc_flow_copy_flags[i].flag == flag)
2027 return &sfc_flow_copy_flags[i];
2034 * Make copies of the specifications, set match flag and values
2035 * of the field that corresponds to it.
2037 * @param spec[in, out]
2038 * SFC flow specification to update.
2040 * The match flag to add.
2042 * Perform verbose error reporting if not NULL.
2045 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2046 efx_filter_match_flags_t flag,
2047 struct rte_flow_error *error)
2050 unsigned int new_filters_count;
2051 unsigned int filters_count_for_one_val;
2052 const struct sfc_flow_copy_flag *copy_flag;
2053 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2056 copy_flag = sfc_flow_get_copy_flag(flag);
2057 if (copy_flag == NULL) {
2058 rte_flow_error_set(error, ENOTSUP,
2059 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2060 "Unsupported spec field for copying");
2064 new_filters_count = spec_filter->count * copy_flag->vals_count;
2065 if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2066 rte_flow_error_set(error, EINVAL,
2067 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2068 "Too much EFX specifications in the flow rule");
2072 /* Copy filters specifications */
2073 for (i = spec_filter->count; i < new_filters_count; i++) {
2074 spec_filter->filters[i] =
2075 spec_filter->filters[i - spec_filter->count];
2078 filters_count_for_one_val = spec_filter->count;
2079 spec_filter->count = new_filters_count;
2081 rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2089 * Check that the given set of match flags missing in the original filter spec
2090 * could be covered by adding spec copies which specify the corresponding
2091 * flags and packet field values to match.
2093 * @param miss_flags[in]
2094 * Flags that the spec is missing relative to the supported filter.
2096 * Specification to be supplemented.
2101 * Number of specifications after copying, or 0 if the flags cannot be added.
2104 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2105 efx_filter_spec_t *spec,
2106 struct sfc_filter *filter)
2109 efx_filter_match_flags_t copy_flags = 0;
2110 efx_filter_match_flags_t flag;
2111 efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2112 sfc_flow_spec_check *check;
2113 unsigned int multiplier = 1;
2115 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2116 flag = sfc_flow_copy_flags[i].flag;
2117 check = sfc_flow_copy_flags[i].spec_check;
2118 if ((flag & miss_flags) == flag) {
2119 if (check != NULL && (!check(match, spec, filter)))
2123 multiplier *= sfc_flow_copy_flags[i].vals_count;
2127 if (copy_flags == miss_flags)
2128 return multiplier;
2129
2130 return 0;
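/*
 * Editor's worked example: if miss_flags is
 * EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and
 * both flags pass their spec_check, the multiplier is
 * 2 (EtherType values) * 2 (ucast/mcast flags) = 4 specifications.
 */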
2134 * Attempt to supplement the specification template to the minimally
2135 * supported set of match flags. To do this, it is necessary to copy
2136 * the specifications, filling them with the values of fields that
2137 * correspond to the missing flags.
2138 * The necessary and sufficient filter is built from the fewest number
2139 * of copies which could be made to cover the minimally required set
2140 * of match flags.
2144 * @param spec[in, out]
2145 * SFC flow specification to update.
2147 * Perform verbose error reporting if not NULL.
2150 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2151 struct sfc_flow_spec *spec,
2152 struct rte_flow_error *error)
2154 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2155 struct sfc_filter *filter = &sa->filter;
2156 efx_filter_match_flags_t miss_flags;
2157 efx_filter_match_flags_t min_miss_flags = 0;
2158 efx_filter_match_flags_t match;
2159 unsigned int min_multiplier = UINT_MAX;
2160 unsigned int multiplier;
2164 match = spec_filter->template.efs_match_flags;
2165 for (i = 0; i < filter->supported_match_num; i++) {
2166 if ((match & filter->supported_match[i]) == match) {
2167 miss_flags = filter->supported_match[i] & (~match);
2168 multiplier = sfc_flow_check_missing_flags(miss_flags,
2169 &spec_filter->template, filter);
2170 if (multiplier > 0) {
2171 if (multiplier <= min_multiplier) {
2172 min_multiplier = multiplier;
2173 min_miss_flags = miss_flags;
2179 if (min_multiplier == UINT_MAX) {
2180 rte_flow_error_set(error, ENOTSUP,
2181 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2182 "The flow rule pattern is unsupported");
2186 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2187 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2189 if ((flag & min_miss_flags) == flag) {
2190 rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2200 * Check that a set of match flags is covered by a filter. The filter is
2201 * described by match flags with the ability to add OUTER_VID and INNER_VID
2202 * flags.
2204 * @param match_flags[in]
2205 * Set of match flags.
2206 * @param flags_pattern[in]
2207 * Pattern of filter match flags.
2210 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2211 efx_filter_match_flags_t flags_pattern)
2213 if ((match_flags & flags_pattern) != flags_pattern)
2216 switch (match_flags & ~flags_pattern) {
2218 case EFX_FILTER_MATCH_OUTER_VID:
2219 case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2227 * Check whether the spec maps to a hardware filter which is known to be
2228 * ineffective despite being valid.
2231 * SFC filter with list of supported filters.
2233 * SFC flow specification.
2236 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2237 struct sfc_flow_spec *spec)
2240 uint16_t ether_type;
2242 efx_filter_match_flags_t match_flags;
2243 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2245 for (i = 0; i < spec_filter->count; i++) {
2246 match_flags = spec_filter->filters[i].efs_match_flags;
2248 if (sfc_flow_is_match_with_vids(match_flags,
2249 EFX_FILTER_MATCH_ETHER_TYPE) ||
2250 sfc_flow_is_match_with_vids(match_flags,
2251 EFX_FILTER_MATCH_ETHER_TYPE |
2252 EFX_FILTER_MATCH_LOC_MAC)) {
2253 ether_type = spec_filter->filters[i].efs_ether_type;
2254 if (filter->supports_ip_proto_or_addr_filter &&
2255 (ether_type == EFX_ETHER_TYPE_IPV4 ||
2256 ether_type == EFX_ETHER_TYPE_IPV6))
2258 } else if (sfc_flow_is_match_with_vids(match_flags,
2259 EFX_FILTER_MATCH_ETHER_TYPE |
2260 EFX_FILTER_MATCH_IP_PROTO) ||
2261 sfc_flow_is_match_with_vids(match_flags,
2262 EFX_FILTER_MATCH_ETHER_TYPE |
2263 EFX_FILTER_MATCH_IP_PROTO |
2264 EFX_FILTER_MATCH_LOC_MAC)) {
2265 ip_proto = spec_filter->filters[i].efs_ip_proto;
2266 if (filter->supports_rem_or_local_port_filter &&
2267 (ip_proto == EFX_IPPROTO_TCP ||
2268 ip_proto == EFX_IPPROTO_UDP))
2277 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2278 struct rte_flow *flow,
2279 struct rte_flow_error *error)
2281 struct sfc_flow_spec *spec = &flow->spec;
2282 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2283 efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2284 efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2287 /* Initialize the first filter spec with template */
2288 spec_filter->filters[0] = *spec_tmpl;
2289 spec_filter->count = 1;
2291 if (!sfc_filter_is_match_supported(sa, match_flags)) {
2292 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2297 if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2298 rte_flow_error_set(error, ENOTSUP,
2299 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2300 "The flow rule pattern is unsupported");
2308 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2309 const struct rte_flow_item pattern[],
2310 const struct rte_flow_action actions[],
2311 struct rte_flow *flow,
2312 struct rte_flow_error *error)
2314 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2317 rc = sfc_flow_parse_pattern(pattern, flow, error);
2319 goto fail_bad_value;
2321 rc = sfc_flow_parse_actions(sa, actions, flow, error);
2323 goto fail_bad_value;
2325 rc = sfc_flow_validate_match_flags(sa, flow, error);
2327 goto fail_bad_value;
2336 sfc_flow_parse(struct rte_eth_dev *dev,
2337 const struct rte_flow_attr *attr,
2338 const struct rte_flow_item pattern[],
2339 const struct rte_flow_action actions[],
2340 struct rte_flow *flow,
2341 struct rte_flow_error *error)
2343 const struct sfc_flow_ops_by_spec *ops;
2346 rc = sfc_flow_parse_attr(attr, flow, error);
2350 ops = sfc_flow_get_ops_by_spec(flow);
2351 if (ops == NULL || ops->parse == NULL) {
2352 rte_flow_error_set(error, ENOTSUP,
2353 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2354 "No backend to handle this flow");
2358 return ops->parse(dev, pattern, actions, flow, error);
2361 static struct rte_flow *
2362 sfc_flow_zmalloc(struct rte_flow_error *error)
2364 struct rte_flow *flow;
2366 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2368 rte_flow_error_set(error, ENOMEM,
2369 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2370 "Failed to allocate memory");
2377 sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
2383 sfc_flow_validate(struct rte_eth_dev *dev,
2384 const struct rte_flow_attr *attr,
2385 const struct rte_flow_item pattern[],
2386 const struct rte_flow_action actions[],
2387 struct rte_flow_error *error)
2389 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2390 struct rte_flow *flow;
2393 flow = sfc_flow_zmalloc(error);
2397 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2399 sfc_flow_free(sa, flow);
2404 static struct rte_flow *
2405 sfc_flow_create(struct rte_eth_dev *dev,
2406 const struct rte_flow_attr *attr,
2407 const struct rte_flow_item pattern[],
2408 const struct rte_flow_action actions[],
2409 struct rte_flow_error *error)
2411 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2412 struct rte_flow *flow = NULL;
2415 flow = sfc_flow_zmalloc(error);
2419 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2421 goto fail_bad_value;
2423 sfc_adapter_lock(sa);
2425 TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2427 if (sa->state == SFC_ADAPTER_STARTED) {
2428 rc = sfc_flow_filter_insert(sa, flow);
2430 rte_flow_error_set(error, rc,
2431 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2432 "Failed to insert filter");
2433 goto fail_filter_insert;
2437 sfc_adapter_unlock(sa);
2442 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2445 sfc_flow_free(sa, flow);
2446 sfc_adapter_unlock(sa);
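/*
 * Editor's example of reaching this entry point through the generic
 * API (attr/pattern/actions as sketched in the examples above;
 * port_id is hypothetical):
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 *   if (f == NULL)
 *       printf("flow create failed: %s\n",
 *              err.message != NULL ? err.message : "(no message)");
 */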
2453 sfc_flow_remove(struct sfc_adapter *sa,
2454 struct rte_flow *flow,
2455 struct rte_flow_error *error)
2459 SFC_ASSERT(sfc_adapter_is_locked(sa));
2461 if (sa->state == SFC_ADAPTER_STARTED) {
2462 rc = sfc_flow_filter_remove(sa, flow);
2464 rte_flow_error_set(error, rc,
2465 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2466 "Failed to destroy flow rule");
2469 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2470 sfc_flow_free(sa, flow);
2476 sfc_flow_destroy(struct rte_eth_dev *dev,
2477 struct rte_flow *flow,
2478 struct rte_flow_error *error)
2480 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2481 struct rte_flow *flow_ptr;
2484 sfc_adapter_lock(sa);
2486 TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2487 if (flow_ptr == flow)
2491 rte_flow_error_set(error, rc,
2492 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2493 "Failed to find flow rule to destroy");
2494 goto fail_bad_value;
2497 rc = sfc_flow_remove(sa, flow, error);
2500 sfc_adapter_unlock(sa);
2506 sfc_flow_flush(struct rte_eth_dev *dev,
2507 struct rte_flow_error *error)
2509 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2510 struct rte_flow *flow;
2514 sfc_adapter_lock(sa);
2516 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2517 rc = sfc_flow_remove(sa, flow, error);
2522 sfc_adapter_unlock(sa);
2528 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2529 struct rte_flow_error *error)
2531 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2534 sfc_adapter_lock(sa);
2535 if (sa->state != SFC_ADAPTER_INITIALIZED) {
2536 rte_flow_error_set(error, EBUSY,
2537 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2538 NULL, "please close the port first");
2541 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2543 sfc_adapter_unlock(sa);
2548 const struct rte_flow_ops sfc_flow_ops = {
2549 .validate = sfc_flow_validate,
2550 .create = sfc_flow_create,
2551 .destroy = sfc_flow_destroy,
2552 .flush = sfc_flow_flush,
2554 .isolate = sfc_flow_isolate,
2558 sfc_flow_init(struct sfc_adapter *sa)
2560 SFC_ASSERT(sfc_adapter_is_locked(sa));
2562 TAILQ_INIT(&sa->flow_list);
2566 sfc_flow_fini(struct sfc_adapter *sa)
2568 struct rte_flow *flow;
2570 SFC_ASSERT(sfc_adapter_is_locked(sa));
2572 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2573 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2574 sfc_flow_free(sa, flow);
2579 sfc_flow_stop(struct sfc_adapter *sa)
2581 struct rte_flow *flow;
2583 SFC_ASSERT(sfc_adapter_is_locked(sa));
2585 TAILQ_FOREACH(flow, &sa->flow_list, entries)
2586 sfc_flow_filter_remove(sa, flow);
2590 sfc_flow_start(struct sfc_adapter *sa)
2592 struct rte_flow *flow;
2595 sfc_log_init(sa, "entry");
2597 SFC_ASSERT(sfc_adapter_is_locked(sa));
2599 TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2600 rc = sfc_flow_filter_insert(sa, flow);
2605 sfc_log_init(sa, "done");