1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2017-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #include <rte_byteorder.h>
11 #include <rte_tailq.h>
12 #include <rte_common.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_ether.h>
16 #include <rte_flow_driver.h>
22 #include "sfc_filter.h"
25 #include "sfc_dp_rx.h"
27 struct sfc_flow_ops_by_spec {
28 sfc_flow_parse_cb_t *parse;
29 sfc_flow_insert_cb_t *insert;
30 sfc_flow_remove_cb_t *remove;
33 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
34 static sfc_flow_insert_cb_t sfc_flow_filter_insert;
35 static sfc_flow_remove_cb_t sfc_flow_filter_remove;
37 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
38 .parse = sfc_flow_parse_rte_to_filter,
39 .insert = sfc_flow_filter_insert,
40 .remove = sfc_flow_filter_remove,
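/*
 * Flow specs of different types (currently only SFC_FLOW_SPEC_FILTER) are
 * dispatched to their backend through the ops table above; the generic
 * entry points below look the ops up by the spec type of a given flow.
 */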
43 static const struct sfc_flow_ops_by_spec *
44 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
46 struct sfc_flow_spec *spec = &flow->spec;
47 const struct sfc_flow_ops_by_spec *ops = NULL;
50 case SFC_FLOW_SPEC_FILTER:
51 ops = &sfc_flow_ops_filter;
62 * Currently, filter-based (VNIC) flow API is implemented in such a manner
63 * that each flow rule is converted to one or more hardware filters.
64 * All elements of a flow rule (attributes, pattern items, actions)
65 * correspond to one or more fields in the efx_filter_spec_s structure
66 * that describes the hardware filter.
67 * If some required field is unset in the flow rule, then a handful
68 * of filter copies is created to cover all possible values of that field.
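/*
 * For example, if the hardware requires EFX_FILTER_MATCH_ETHER_TYPE but the
 * rule leaves the EtherType unspecified, the specification is duplicated with
 * EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6 values
 * (see sfc_flow_set_ethertypes() below).
 */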
72 static sfc_flow_item_parse sfc_flow_parse_void;
73 static sfc_flow_item_parse sfc_flow_parse_eth;
74 static sfc_flow_item_parse sfc_flow_parse_vlan;
75 static sfc_flow_item_parse sfc_flow_parse_ipv4;
76 static sfc_flow_item_parse sfc_flow_parse_ipv6;
77 static sfc_flow_item_parse sfc_flow_parse_tcp;
78 static sfc_flow_item_parse sfc_flow_parse_udp;
79 static sfc_flow_item_parse sfc_flow_parse_vxlan;
80 static sfc_flow_item_parse sfc_flow_parse_geneve;
81 static sfc_flow_item_parse sfc_flow_parse_nvgre;
83 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
84 unsigned int filters_count_for_one_val,
85 struct rte_flow_error *error);
87 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
88 efx_filter_spec_t *spec,
89 struct sfc_filter *filter);
91 struct sfc_flow_copy_flag {
92 /* EFX filter specification match flag */
93 efx_filter_match_flags_t flag;
94 /* Number of values of corresponding field */
95 unsigned int vals_count;
96 /* Function to set values in specifications */
97 sfc_flow_spec_set_vals *set_vals;
99 * Function to check that the specification is suitable
100 * for adding this match flag
102 sfc_flow_spec_check *spec_check;
105 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
106 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
107 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
108 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
109 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
110 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
111 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
114 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
119 for (i = 0; i < size; i++)
122 return (sum == 0) ? B_TRUE : B_FALSE;
126 * Validate item and prepare structures spec and mask for parsing
129 sfc_flow_parse_init(const struct rte_flow_item *item,
130 const void **spec_ptr,
131 const void **mask_ptr,
132 const void *supp_mask,
133 const void *def_mask,
135 struct rte_flow_error *error)
144 rte_flow_error_set(error, EINVAL,
145 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
150 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
151 rte_flow_error_set(error, EINVAL,
152 RTE_FLOW_ERROR_TYPE_ITEM, item,
153 "Mask or last is set without spec");
158 * If "mask" is not set, default mask is used,
159 * but if default mask is NULL, "mask" should be set
161 if (item->mask == NULL) {
162 if (def_mask == NULL) {
163 rte_flow_error_set(error, EINVAL,
164 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
165 "Mask should be specified");
181 * If field values in "last" are either 0 or equal to the corresponding
182 * values in "spec", they are ignored
185 !sfc_flow_is_zero(last, size) &&
186 memcmp(last, spec, size) != 0) {
187 rte_flow_error_set(error, ENOTSUP,
188 RTE_FLOW_ERROR_TYPE_ITEM, item,
189 "Ranging is not supported");
193 if (supp_mask == NULL) {
194 rte_flow_error_set(error, EINVAL,
195 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
196 "Supported mask for item should be specified");
200 /* Check that mask does not ask for more match than supp_mask */
201 for (i = 0; i < size; i++) {
202 supp = ((const uint8_t *)supp_mask)[i];
204 if (~supp & mask[i]) {
205 rte_flow_error_set(error, ENOTSUP,
206 RTE_FLOW_ERROR_TYPE_ITEM, item,
207 "Item's field is not supported");
220 * Partial masking is not supported, so masks in items should be either
221 * full or empty (zeroed) and set only for the supported fields which
222 * are specified in the supp_mask.
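/*
 * For instance, an Ethernet destination mask may be all-ones (exact match),
 * the individual/group bit alone, or all-zeroes; a partial mask such as
 * ff:ff:ff:00:00:00 is rejected by sfc_flow_parse_eth() below.
 */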
226 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
227 __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
228 __rte_unused struct rte_flow_error *error)
234 * Convert Ethernet item to EFX filter specification.
237 * Item specification. Outer frame specification may only comprise
238 * source/destination addresses and Ethertype field.
239 * Inner frame specification may contain destination address only.
240 * The individual/group mask is supported, as well as empty and full masks.
241 * If the mask is NULL, the default mask will be used. Ranging is not supported.
242 * @param efx_spec[in, out]
243 * EFX filter specification to update.
245 * Perform verbose error reporting if not NULL.
248 sfc_flow_parse_eth(const struct rte_flow_item *item,
249 struct sfc_flow_parse_ctx *parse_ctx,
250 struct rte_flow_error *error)
253 efx_filter_spec_t *efx_spec = parse_ctx->filter;
254 const struct rte_flow_item_eth *spec = NULL;
255 const struct rte_flow_item_eth *mask = NULL;
256 const struct rte_flow_item_eth supp_mask = {
257 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
258 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
261 const struct rte_flow_item_eth ifrm_supp_mask = {
262 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
264 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
265 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
267 const struct rte_flow_item_eth *supp_mask_p;
268 const struct rte_flow_item_eth *def_mask_p;
269 uint8_t *loc_mac = NULL;
270 boolean_t is_ifrm = (efx_spec->efs_encap_type !=
271 EFX_TUNNEL_PROTOCOL_NONE);
274 supp_mask_p = &ifrm_supp_mask;
275 def_mask_p = &ifrm_supp_mask;
276 loc_mac = efx_spec->efs_ifrm_loc_mac;
278 supp_mask_p = &supp_mask;
279 def_mask_p = &rte_flow_item_eth_mask;
280 loc_mac = efx_spec->efs_loc_mac;
283 rc = sfc_flow_parse_init(item,
284 (const void **)&spec,
285 (const void **)&mask,
286 supp_mask_p, def_mask_p,
287 sizeof(struct rte_flow_item_eth),
292 /* If "spec" is not set, could be any Ethernet */
296 if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
297 efx_spec->efs_match_flags |= is_ifrm ?
298 EFX_FILTER_MATCH_IFRM_LOC_MAC :
299 EFX_FILTER_MATCH_LOC_MAC;
300 rte_memcpy(loc_mac, spec->dst.addr_bytes,
302 } else if (memcmp(mask->dst.addr_bytes, ig_mask,
303 EFX_MAC_ADDR_LEN) == 0) {
304 if (rte_is_unicast_ether_addr(&spec->dst))
305 efx_spec->efs_match_flags |= is_ifrm ?
306 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
307 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
309 efx_spec->efs_match_flags |= is_ifrm ?
310 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
311 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
312 } else if (!rte_is_zero_ether_addr(&mask->dst)) {
317 * ifrm_supp_mask ensures that the source address and
318 * ethertype masks are equal to zero in inner frame,
319 * so these fields are filled in only for the outer frame
321 if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
322 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
323 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
325 } else if (!rte_is_zero_ether_addr(&mask->src)) {
330 * Ether type is in big-endian byte order in item and
331 * in little-endian in efx_spec, so byte swap is used
333 if (mask->type == supp_mask.type) {
334 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
335 efx_spec->efs_ether_type = rte_bswap16(spec->type);
336 } else if (mask->type != 0) {
343 rte_flow_error_set(error, EINVAL,
344 RTE_FLOW_ERROR_TYPE_ITEM, item,
345 "Bad mask in the ETH pattern item");
350 * Convert VLAN item to EFX filter specification.
353 * Item specification. Only VID field is supported.
354 * The mask cannot be NULL. Ranging is not supported.
355 * @param efx_spec[in, out]
356 * EFX filter specification to update.
358 * Perform verbose error reporting if not NULL.
361 sfc_flow_parse_vlan(const struct rte_flow_item *item,
362 struct sfc_flow_parse_ctx *parse_ctx,
363 struct rte_flow_error *error)
367 efx_filter_spec_t *efx_spec = parse_ctx->filter;
368 const struct rte_flow_item_vlan *spec = NULL;
369 const struct rte_flow_item_vlan *mask = NULL;
370 const struct rte_flow_item_vlan supp_mask = {
371 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
372 .inner_type = RTE_BE16(0xffff),
375 rc = sfc_flow_parse_init(item,
376 (const void **)&spec,
377 (const void **)&mask,
380 sizeof(struct rte_flow_item_vlan),
386 * VID is in big-endian byte order in item and
387 * in little-endian in efx_spec, so byte swap is used.
388 * If two VLAN items are included, the first matches
389 * the outer tag and the next matches the inner tag.
391 if (mask->tci == supp_mask.tci) {
392 /* Apply mask to keep VID only */
393 vid = rte_bswap16(spec->tci & mask->tci);
395 if (!(efx_spec->efs_match_flags &
396 EFX_FILTER_MATCH_OUTER_VID)) {
397 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
398 efx_spec->efs_outer_vid = vid;
399 } else if (!(efx_spec->efs_match_flags &
400 EFX_FILTER_MATCH_INNER_VID)) {
401 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
402 efx_spec->efs_inner_vid = vid;
404 rte_flow_error_set(error, EINVAL,
405 RTE_FLOW_ERROR_TYPE_ITEM, item,
406 "More than two VLAN items");
410 rte_flow_error_set(error, EINVAL,
411 RTE_FLOW_ERROR_TYPE_ITEM, item,
412 "VLAN ID in TCI match is required");
416 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
417 rte_flow_error_set(error, EINVAL,
418 RTE_FLOW_ERROR_TYPE_ITEM, item,
419 "VLAN TPID matching is not supported");
422 if (mask->inner_type == supp_mask.inner_type) {
423 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
424 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
425 } else if (mask->inner_type) {
426 rte_flow_error_set(error, EINVAL,
427 RTE_FLOW_ERROR_TYPE_ITEM, item,
428 "Bad mask for VLAN inner_type");
436 * Convert IPv4 item to EFX filter specification.
439 * Item specification. Only source and destination addresses and
440 * protocol fields are supported. If the mask is NULL, default
441 * mask will be used. Ranging is not supported.
442 * @param efx_spec[in, out]
443 * EFX filter specification to update.
445 * Perform verbose error reporting if not NULL.
448 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
449 struct sfc_flow_parse_ctx *parse_ctx,
450 struct rte_flow_error *error)
453 efx_filter_spec_t *efx_spec = parse_ctx->filter;
454 const struct rte_flow_item_ipv4 *spec = NULL;
455 const struct rte_flow_item_ipv4 *mask = NULL;
456 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
457 const struct rte_flow_item_ipv4 supp_mask = {
459 .src_addr = 0xffffffff,
460 .dst_addr = 0xffffffff,
461 .next_proto_id = 0xff,
465 rc = sfc_flow_parse_init(item,
466 (const void **)&spec,
467 (const void **)&mask,
469 &rte_flow_item_ipv4_mask,
470 sizeof(struct rte_flow_item_ipv4),
476 * Filtering by IPv4 source and destination addresses requires
477 * the appropriate ETHER_TYPE in hardware filters
479 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
480 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
481 efx_spec->efs_ether_type = ether_type_ipv4;
482 } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
483 rte_flow_error_set(error, EINVAL,
484 RTE_FLOW_ERROR_TYPE_ITEM, item,
485 "Ethertype in pattern with IPV4 item should be appropriate");
493 * IPv4 addresses are in big-endian byte order both in item and in efx_spec
496 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
497 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
498 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
499 } else if (mask->hdr.src_addr != 0) {
503 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
504 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
505 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
506 } else if (mask->hdr.dst_addr != 0) {
510 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
511 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
512 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
513 } else if (mask->hdr.next_proto_id != 0) {
520 rte_flow_error_set(error, EINVAL,
521 RTE_FLOW_ERROR_TYPE_ITEM, item,
522 "Bad mask in the IPV4 pattern item");
527 * Convert IPv6 item to EFX filter specification.
530 * Item specification. Only source and destination addresses and
531 * next header fields are supported. If the mask is NULL, default
532 * mask will be used. Ranging is not supported.
533 * @param efx_spec[in, out]
534 * EFX filter specification to update.
536 * Perform verbose error reporting if not NULL.
539 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
540 struct sfc_flow_parse_ctx *parse_ctx,
541 struct rte_flow_error *error)
544 efx_filter_spec_t *efx_spec = parse_ctx->filter;
545 const struct rte_flow_item_ipv6 *spec = NULL;
546 const struct rte_flow_item_ipv6 *mask = NULL;
547 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
548 const struct rte_flow_item_ipv6 supp_mask = {
550 .src_addr = { 0xff, 0xff, 0xff, 0xff,
551 0xff, 0xff, 0xff, 0xff,
552 0xff, 0xff, 0xff, 0xff,
553 0xff, 0xff, 0xff, 0xff },
554 .dst_addr = { 0xff, 0xff, 0xff, 0xff,
555 0xff, 0xff, 0xff, 0xff,
556 0xff, 0xff, 0xff, 0xff,
557 0xff, 0xff, 0xff, 0xff },
562 rc = sfc_flow_parse_init(item,
563 (const void **)&spec,
564 (const void **)&mask,
566 &rte_flow_item_ipv6_mask,
567 sizeof(struct rte_flow_item_ipv6),
573 * Filtering by IPv6 source and destination addresses requires
574 * the appropriate ETHER_TYPE in hardware filters
576 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
577 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
578 efx_spec->efs_ether_type = ether_type_ipv6;
579 } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
580 rte_flow_error_set(error, EINVAL,
581 RTE_FLOW_ERROR_TYPE_ITEM, item,
582 "Ethertype in pattern with IPV6 item should be appropriate");
590 * IPv6 addresses are in big-endian byte order both in item and in efx_spec
593 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
594 sizeof(mask->hdr.src_addr)) == 0) {
595 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
597 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
598 sizeof(spec->hdr.src_addr));
599 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
600 sizeof(efx_spec->efs_rem_host));
601 } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
602 sizeof(mask->hdr.src_addr))) {
606 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
607 sizeof(mask->hdr.dst_addr)) == 0) {
608 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
610 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
611 sizeof(spec->hdr.dst_addr));
612 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
613 sizeof(efx_spec->efs_loc_host));
614 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
615 sizeof(mask->hdr.dst_addr))) {
619 if (mask->hdr.proto == supp_mask.hdr.proto) {
620 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
621 efx_spec->efs_ip_proto = spec->hdr.proto;
622 } else if (mask->hdr.proto != 0) {
629 rte_flow_error_set(error, EINVAL,
630 RTE_FLOW_ERROR_TYPE_ITEM, item,
631 "Bad mask in the IPV6 pattern item");
636 * Convert TCP item to EFX filter specification.
639 * Item specification. Only source and destination ports fields
640 * are supported. If the mask is NULL, default mask will be used.
641 * Ranging is not supported.
642 * @param efx_spec[in, out]
643 * EFX filter specification to update.
645 * Perform verbose error reporting if not NULL.
648 sfc_flow_parse_tcp(const struct rte_flow_item *item,
649 struct sfc_flow_parse_ctx *parse_ctx,
650 struct rte_flow_error *error)
653 efx_filter_spec_t *efx_spec = parse_ctx->filter;
654 const struct rte_flow_item_tcp *spec = NULL;
655 const struct rte_flow_item_tcp *mask = NULL;
656 const struct rte_flow_item_tcp supp_mask = {
663 rc = sfc_flow_parse_init(item,
664 (const void **)&spec,
665 (const void **)&mask,
667 &rte_flow_item_tcp_mask,
668 sizeof(struct rte_flow_item_tcp),
674 * Filtering by TCP source and destination ports requires
675 * the appropriate IP_PROTO in hardware filters
677 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
678 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
679 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
680 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
681 rte_flow_error_set(error, EINVAL,
682 RTE_FLOW_ERROR_TYPE_ITEM, item,
683 "IP proto in pattern with TCP item should be appropriate");
691 * Source and destination ports are in big-endian byte order in item and
692 * in little-endian in efx_spec, so byte swap is used
694 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
695 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
696 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
697 } else if (mask->hdr.src_port != 0) {
701 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
702 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
703 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
704 } else if (mask->hdr.dst_port != 0) {
711 rte_flow_error_set(error, EINVAL,
712 RTE_FLOW_ERROR_TYPE_ITEM, item,
713 "Bad mask in the TCP pattern item");
718 * Convert UDP item to EFX filter specification.
721 * Item specification. Only source and destination ports fields
722 * are supported. If the mask is NULL, default mask will be used.
723 * Ranging is not supported.
724 * @param efx_spec[in, out]
725 * EFX filter specification to update.
727 * Perform verbose error reporting if not NULL.
730 sfc_flow_parse_udp(const struct rte_flow_item *item,
731 struct sfc_flow_parse_ctx *parse_ctx,
732 struct rte_flow_error *error)
735 efx_filter_spec_t *efx_spec = parse_ctx->filter;
736 const struct rte_flow_item_udp *spec = NULL;
737 const struct rte_flow_item_udp *mask = NULL;
738 const struct rte_flow_item_udp supp_mask = {
745 rc = sfc_flow_parse_init(item,
746 (const void **)&spec,
747 (const void **)&mask,
749 &rte_flow_item_udp_mask,
750 sizeof(struct rte_flow_item_udp),
756 * Filtering by UDP source and destination ports requires
757 * the appropriate IP_PROTO in hardware filters
759 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
760 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
761 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
762 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
763 rte_flow_error_set(error, EINVAL,
764 RTE_FLOW_ERROR_TYPE_ITEM, item,
765 "IP proto in pattern with UDP item should be appropriate");
773 * Source and destination ports are in big-endian byte order in item and
774 * in little-endian in efx_spec, so byte swap is used
776 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
777 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
778 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
779 } else if (mask->hdr.src_port != 0) {
783 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
784 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
785 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
786 } else if (mask->hdr.dst_port != 0) {
793 rte_flow_error_set(error, EINVAL,
794 RTE_FLOW_ERROR_TYPE_ITEM, item,
795 "Bad mask in the UDP pattern item");
800 * Filters for encapsulated packets match based on the EtherType and IP
801 * protocol in the outer frame.
804 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
805 efx_filter_spec_t *efx_spec,
807 struct rte_flow_error *error)
809 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
810 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
811 efx_spec->efs_ip_proto = ip_proto;
812 } else if (efx_spec->efs_ip_proto != ip_proto) {
814 case EFX_IPPROTO_UDP:
815 rte_flow_error_set(error, EINVAL,
816 RTE_FLOW_ERROR_TYPE_ITEM, item,
817 "Outer IP header protocol must be UDP "
818 "in VxLAN/GENEVE pattern");
821 case EFX_IPPROTO_GRE:
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_ITEM, item,
824 "Outer IP header protocol must be GRE "
829 rte_flow_error_set(error, EINVAL,
830 RTE_FLOW_ERROR_TYPE_ITEM, item,
831 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
837 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
838 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
839 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
840 rte_flow_error_set(error, EINVAL,
841 RTE_FLOW_ERROR_TYPE_ITEM, item,
842 "Outer frame EtherType in pattern with tunneling "
843 "must be IPv4 or IPv6");
851 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
852 const uint8_t *vni_or_vsid_val,
853 const uint8_t *vni_or_vsid_mask,
854 const struct rte_flow_item *item,
855 struct rte_flow_error *error)
857 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
861 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
862 EFX_VNI_OR_VSID_LEN) == 0) {
863 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
864 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
865 EFX_VNI_OR_VSID_LEN);
866 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
867 rte_flow_error_set(error, EINVAL,
868 RTE_FLOW_ERROR_TYPE_ITEM, item,
869 "Unsupported VNI/VSID mask");
877 * Convert VXLAN item to EFX filter specification.
880 * Item specification. Only VXLAN network identifier field is supported.
881 * If the mask is NULL, default mask will be used.
882 * Ranging is not supported.
883 * @param efx_spec[in, out]
884 * EFX filter specification to update.
886 * Perform verbose error reporting if not NULL.
889 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
890 struct sfc_flow_parse_ctx *parse_ctx,
891 struct rte_flow_error *error)
894 efx_filter_spec_t *efx_spec = parse_ctx->filter;
895 const struct rte_flow_item_vxlan *spec = NULL;
896 const struct rte_flow_item_vxlan *mask = NULL;
897 const struct rte_flow_item_vxlan supp_mask = {
898 .vni = { 0xff, 0xff, 0xff }
901 rc = sfc_flow_parse_init(item,
902 (const void **)&spec,
903 (const void **)&mask,
905 &rte_flow_item_vxlan_mask,
906 sizeof(struct rte_flow_item_vxlan),
911 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
912 EFX_IPPROTO_UDP, error);
916 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
917 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
922 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
923 mask->vni, item, error);
929 * Convert GENEVE item to EFX filter specification.
932 * Item specification. Only Virtual Network Identifier and protocol type
933 * fields are supported. However, the protocol type can only be Ethernet (0x6558).
934 * If the mask is NULL, default mask will be used.
935 * Ranging is not supported.
936 * @param efx_spec[in, out]
937 * EFX filter specification to update.
939 * Perform verbose error reporting if not NULL.
942 sfc_flow_parse_geneve(const struct rte_flow_item *item,
943 struct sfc_flow_parse_ctx *parse_ctx,
944 struct rte_flow_error *error)
947 efx_filter_spec_t *efx_spec = parse_ctx->filter;
948 const struct rte_flow_item_geneve *spec = NULL;
949 const struct rte_flow_item_geneve *mask = NULL;
950 const struct rte_flow_item_geneve supp_mask = {
951 .protocol = RTE_BE16(0xffff),
952 .vni = { 0xff, 0xff, 0xff }
955 rc = sfc_flow_parse_init(item,
956 (const void **)&spec,
957 (const void **)&mask,
959 &rte_flow_item_geneve_mask,
960 sizeof(struct rte_flow_item_geneve),
965 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
966 EFX_IPPROTO_UDP, error);
970 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
971 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
976 if (mask->protocol == supp_mask.protocol) {
977 if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
978 rte_flow_error_set(error, EINVAL,
979 RTE_FLOW_ERROR_TYPE_ITEM, item,
980 "GENEVE encap. protocol must be Ethernet "
981 "(0x6558) in the GENEVE pattern item");
984 } else if (mask->protocol != 0) {
985 rte_flow_error_set(error, EINVAL,
986 RTE_FLOW_ERROR_TYPE_ITEM, item,
987 "Unsupported mask for GENEVE encap. protocol");
991 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
992 mask->vni, item, error);
998 * Convert NVGRE item to EFX filter specification.
1001 * Item specification. Only virtual subnet ID field is supported.
1002 * If the mask is NULL, default mask will be used.
1003 * Ranging is not supported.
1004 * @param efx_spec[in, out]
1005 * EFX filter specification to update.
1007 * Perform verbose error reporting if not NULL.
1010 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
1011 struct sfc_flow_parse_ctx *parse_ctx,
1012 struct rte_flow_error *error)
1015 efx_filter_spec_t *efx_spec = parse_ctx->filter;
1016 const struct rte_flow_item_nvgre *spec = NULL;
1017 const struct rte_flow_item_nvgre *mask = NULL;
1018 const struct rte_flow_item_nvgre supp_mask = {
1019 .tni = { 0xff, 0xff, 0xff }
1022 rc = sfc_flow_parse_init(item,
1023 (const void **)&spec,
1024 (const void **)&mask,
1026 &rte_flow_item_nvgre_mask,
1027 sizeof(struct rte_flow_item_nvgre),
1032 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1033 EFX_IPPROTO_GRE, error);
1037 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1038 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1043 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1044 mask->tni, item, error);
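/*
 * All three tunnel parsers above follow the same sequence: validate the
 * outer-frame match flags, set efs_encap_type together with
 * EFX_FILTER_MATCH_ENCAP_TYPE, and match on the 24-bit VNI/VSID when the item
 * supplies a full mask for it (GENEVE additionally requires the encapsulated
 * protocol to be Ethernet).
 */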
1049 static const struct sfc_flow_item sfc_flow_items[] = {
1051 .type = RTE_FLOW_ITEM_TYPE_VOID,
1052 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1053 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1054 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1055 .parse = sfc_flow_parse_void,
1058 .type = RTE_FLOW_ITEM_TYPE_ETH,
1059 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1060 .layer = SFC_FLOW_ITEM_L2,
1061 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1062 .parse = sfc_flow_parse_eth,
1065 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1066 .prev_layer = SFC_FLOW_ITEM_L2,
1067 .layer = SFC_FLOW_ITEM_L2,
1068 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1069 .parse = sfc_flow_parse_vlan,
1072 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1073 .prev_layer = SFC_FLOW_ITEM_L2,
1074 .layer = SFC_FLOW_ITEM_L3,
1075 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1076 .parse = sfc_flow_parse_ipv4,
1079 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1080 .prev_layer = SFC_FLOW_ITEM_L2,
1081 .layer = SFC_FLOW_ITEM_L3,
1082 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1083 .parse = sfc_flow_parse_ipv6,
1086 .type = RTE_FLOW_ITEM_TYPE_TCP,
1087 .prev_layer = SFC_FLOW_ITEM_L3,
1088 .layer = SFC_FLOW_ITEM_L4,
1089 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1090 .parse = sfc_flow_parse_tcp,
1093 .type = RTE_FLOW_ITEM_TYPE_UDP,
1094 .prev_layer = SFC_FLOW_ITEM_L3,
1095 .layer = SFC_FLOW_ITEM_L4,
1096 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1097 .parse = sfc_flow_parse_udp,
1100 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1101 .prev_layer = SFC_FLOW_ITEM_L4,
1102 .layer = SFC_FLOW_ITEM_START_LAYER,
1103 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1104 .parse = sfc_flow_parse_vxlan,
1107 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1108 .prev_layer = SFC_FLOW_ITEM_L4,
1109 .layer = SFC_FLOW_ITEM_START_LAYER,
1110 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1111 .parse = sfc_flow_parse_geneve,
1114 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1115 .prev_layer = SFC_FLOW_ITEM_L3,
1116 .layer = SFC_FLOW_ITEM_START_LAYER,
1117 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1118 .parse = sfc_flow_parse_nvgre,
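/*
 * The prev_layer/layer fields in the table above enforce item ordering in
 * sfc_flow_parse_pattern(): e.g. TCP (prev_layer = L3) may not follow an item
 * whose layer differs from L3, while omitting leading layers is still allowed
 * because the initial previous layer is ANY. Tunnel items reset the layer to
 * START so that an inner Ethernet item may follow them.
 */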
1123 * Protocol-independent flow API support
1126 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1127 struct rte_flow *flow,
1128 struct rte_flow_error *error)
1130 struct sfc_flow_spec *spec = &flow->spec;
1131 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1134 rte_flow_error_set(error, EINVAL,
1135 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1139 if (attr->group != 0) {
1140 rte_flow_error_set(error, ENOTSUP,
1141 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1142 "Groups are not supported");
1145 if (attr->egress != 0) {
1146 rte_flow_error_set(error, ENOTSUP,
1147 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1148 "Egress is not supported");
1151 if (attr->ingress == 0) {
1152 rte_flow_error_set(error, ENOTSUP,
1153 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1154 "Ingress is compulsory");
1157 if (attr->transfer == 0) {
1158 if (attr->priority != 0) {
1159 rte_flow_error_set(error, ENOTSUP,
1160 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1161 attr, "Priorities are unsupported");
1164 spec->type = SFC_FLOW_SPEC_FILTER;
1165 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
1166 spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1167 spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
1169 rte_flow_error_set(error, ENOTSUP,
1170 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
1171 "Transfer is not supported");
1178 /* Get item from array sfc_flow_items */
1179 static const struct sfc_flow_item *
1180 sfc_flow_get_item(const struct sfc_flow_item *items,
1181 unsigned int nb_items,
1182 enum rte_flow_item_type type)
1186 for (i = 0; i < nb_items; i++)
1187 if (items[i].type == type)
1194 sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
1195 unsigned int nb_flow_items,
1196 const struct rte_flow_item pattern[],
1197 struct sfc_flow_parse_ctx *parse_ctx,
1198 struct rte_flow_error *error)
1201 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1202 boolean_t is_ifrm = B_FALSE;
1203 const struct sfc_flow_item *item;
1205 if (pattern == NULL) {
1206 rte_flow_error_set(error, EINVAL,
1207 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1212 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1213 item = sfc_flow_get_item(flow_items, nb_flow_items,
1216 rte_flow_error_set(error, ENOTSUP,
1217 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1218 "Unsupported pattern item");
1223 * Omitting one or several protocol layers at the beginning
1224 * of the pattern is supported
1226 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1227 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1228 item->prev_layer != prev_layer) {
1229 rte_flow_error_set(error, ENOTSUP,
1230 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1231 "Unexpected sequence of pattern items");
1236 * Allow only VOID and ETH pattern items in the inner frame.
1237 * Also check that there is only one tunneling protocol.
1239 switch (item->type) {
1240 case RTE_FLOW_ITEM_TYPE_VOID:
1241 case RTE_FLOW_ITEM_TYPE_ETH:
1244 case RTE_FLOW_ITEM_TYPE_VXLAN:
1245 case RTE_FLOW_ITEM_TYPE_GENEVE:
1246 case RTE_FLOW_ITEM_TYPE_NVGRE:
1248 rte_flow_error_set(error, EINVAL,
1249 RTE_FLOW_ERROR_TYPE_ITEM,
1251 "More than one tunneling protocol");
1259 rte_flow_error_set(error, EINVAL,
1260 RTE_FLOW_ERROR_TYPE_ITEM,
1262 "There is an unsupported pattern item "
1263 "in the inner frame");
1269 if (parse_ctx->type != item->ctx_type) {
1270 rte_flow_error_set(error, EINVAL,
1271 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1272 "Parse context type mismatch");
1276 rc = item->parse(pattern, parse_ctx, error);
1280 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1281 prev_layer = item->layer;
1288 sfc_flow_parse_queue(struct sfc_adapter *sa,
1289 const struct rte_flow_action_queue *queue,
1290 struct rte_flow *flow)
1292 struct sfc_flow_spec *spec = &flow->spec;
1293 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1294 struct sfc_rxq *rxq;
1296 if (queue->index >= sfc_sa2shared(sa)->rxq_count)
1299 rxq = &sa->rxq_ctrl[queue->index];
1300 spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1306 sfc_flow_parse_rss(struct sfc_adapter *sa,
1307 const struct rte_flow_action_rss *action_rss,
1308 struct rte_flow *flow)
1310 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1311 struct sfc_rss *rss = &sas->rss;
1312 unsigned int rxq_sw_index;
1313 struct sfc_rxq *rxq;
1314 unsigned int rxq_hw_index_min;
1315 unsigned int rxq_hw_index_max;
1316 efx_rx_hash_type_t efx_hash_types;
1317 const uint8_t *rss_key;
1318 struct sfc_flow_spec *spec = &flow->spec;
1319 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1320 struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
1323 if (action_rss->queue_num == 0)
1326 rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
1327 rxq = &sa->rxq_ctrl[rxq_sw_index];
1328 rxq_hw_index_min = rxq->hw_index;
1329 rxq_hw_index_max = 0;
1331 for (i = 0; i < action_rss->queue_num; ++i) {
1332 rxq_sw_index = action_rss->queue[i];
1334 if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
1337 rxq = &sa->rxq_ctrl[rxq_sw_index];
1339 if (rxq->hw_index < rxq_hw_index_min)
1340 rxq_hw_index_min = rxq->hw_index;
1342 if (rxq->hw_index > rxq_hw_index_max)
1343 rxq_hw_index_max = rxq->hw_index;
1346 switch (action_rss->func) {
1347 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1348 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1354 if (action_rss->level)
1358 * A dummy RSS action with only one queue and no specific settings
1359 * for hash types and key does not require a dedicated RSS context
1360 * and may be simplified to a single queue action.
1362 if (action_rss->queue_num == 1 && action_rss->types == 0 &&
1363 action_rss->key_len == 0) {
1364 spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
1368 if (action_rss->types) {
1371 rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
1379 for (i = 0; i < rss->hf_map_nb_entries; ++i)
1380 efx_hash_types |= rss->hf_map[i].efx;
1383 if (action_rss->key_len) {
1384 if (action_rss->key_len != sizeof(rss->key))
1387 rss_key = action_rss->key;
1392 spec_filter->rss = B_TRUE;
1394 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1395 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1396 sfc_rss_conf->rss_hash_types = efx_hash_types;
1397 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
1399 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1400 unsigned int nb_queues = action_rss->queue_num;
1401 unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
1402 struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
1404 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
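/*
 * The indirection table entries above are stored relative to
 * rxq_hw_index_min; e.g. RSS over queues with hardware indices 3..5
 * yields the entries 0, 1, 2 repeated across the table.
 */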
1411 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1412 unsigned int filters_count)
1414 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1418 for (i = 0; i < filters_count; i++) {
1421 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1422 if (ret == 0 && rc != 0) {
1423 sfc_err(sa, "failed to remove filter specification "
1433 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1435 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1439 for (i = 0; i < spec_filter->count; i++) {
1440 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1442 sfc_flow_spec_flush(sa, spec, i);
1451 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1453 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1455 return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1459 sfc_flow_filter_insert(struct sfc_adapter *sa,
1460 struct rte_flow *flow)
1462 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1463 struct sfc_rss *rss = &sas->rss;
1464 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1465 struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
1466 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1470 if (spec_filter->rss) {
1471 unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
1472 flow_rss->rxq_hw_index_min + 1,
1475 rc = efx_rx_scale_context_alloc(sa->nic,
1476 EFX_RX_SCALE_EXCLUSIVE,
1480 goto fail_scale_context_alloc;
1482 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1484 flow_rss->rss_hash_types, B_TRUE);
1486 goto fail_scale_mode_set;
1488 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1492 goto fail_scale_key_set;
1495 * At this point, fully elaborated filter specifications
1496 * have been produced from the template. To make sure that
1497 * RSS behaviour is consistent between them, set the same
1498 * RSS context value everywhere.
1500 for (i = 0; i < spec_filter->count; i++) {
1501 efx_filter_spec_t *spec = &spec_filter->filters[i];
1503 spec->efs_rss_context = efs_rss_context;
1504 spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
1505 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1509 rc = sfc_flow_spec_insert(sa, &flow->spec);
1511 goto fail_filter_insert;
1513 if (spec_filter->rss) {
1515 * Scale table is set after filter insertion because
1516 * the table entries are relative to the base RxQ ID
1517 * and the latter is submitted to the HW by means of
1518 * inserting a filter, so by the time of the request
1519 * the HW knows all the information needed to verify
1520 * the table entries, and the operation will succeed
1522 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1524 RTE_DIM(flow_rss->rss_tbl));
1526 goto fail_scale_tbl_set;
1532 sfc_flow_spec_remove(sa, &flow->spec);
1536 fail_scale_mode_set:
1537 if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1538 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1540 fail_scale_context_alloc:
1545 sfc_flow_filter_remove(struct sfc_adapter *sa,
1546 struct rte_flow *flow)
1548 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1551 rc = sfc_flow_spec_remove(sa, &flow->spec);
1555 if (spec_filter->rss) {
1557 * All specifications for a given flow rule have the same RSS
1558 * context, so the RSS context value is taken from the first
1559 * filter specification
1561 efx_filter_spec_t *spec = &spec_filter->filters[0];
1563 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1570 sfc_flow_parse_mark(struct sfc_adapter *sa,
1571 const struct rte_flow_action_mark *mark,
1572 struct rte_flow *flow)
1574 struct sfc_flow_spec *spec = &flow->spec;
1575 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1576 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1578 if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
1581 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1582 spec_filter->template.efs_mark = mark->id;
1588 sfc_flow_parse_actions(struct sfc_adapter *sa,
1589 const struct rte_flow_action actions[],
1590 struct rte_flow *flow,
1591 struct rte_flow_error *error)
1594 struct sfc_flow_spec *spec = &flow->spec;
1595 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1596 const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1597 uint32_t actions_set = 0;
1598 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1599 (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1600 (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1601 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1602 (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1604 if (actions == NULL) {
1605 rte_flow_error_set(error, EINVAL,
1606 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1611 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
1612 RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
1614 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1615 switch (actions->type) {
1616 case RTE_FLOW_ACTION_TYPE_VOID:
1617 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1621 case RTE_FLOW_ACTION_TYPE_QUEUE:
1622 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1624 if ((actions_set & fate_actions_mask) != 0)
1625 goto fail_fate_actions;
1627 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1629 rte_flow_error_set(error, EINVAL,
1630 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1631 "Bad QUEUE action");
1636 case RTE_FLOW_ACTION_TYPE_RSS:
1637 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1639 if ((actions_set & fate_actions_mask) != 0)
1640 goto fail_fate_actions;
1642 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1644 rte_flow_error_set(error, -rc,
1645 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1651 case RTE_FLOW_ACTION_TYPE_DROP:
1652 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1654 if ((actions_set & fate_actions_mask) != 0)
1655 goto fail_fate_actions;
1657 spec_filter->template.efs_dmaq_id =
1658 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1661 case RTE_FLOW_ACTION_TYPE_FLAG:
1662 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1664 if ((actions_set & mark_actions_mask) != 0)
1665 goto fail_actions_overlap;
1667 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1668 rte_flow_error_set(error, ENOTSUP,
1669 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1670 "FLAG action is not supported on the current Rx datapath");
1674 spec_filter->template.efs_flags |=
1675 EFX_FILTER_FLAG_ACTION_FLAG;
1678 case RTE_FLOW_ACTION_TYPE_MARK:
1679 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1681 if ((actions_set & mark_actions_mask) != 0)
1682 goto fail_actions_overlap;
1684 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1685 rte_flow_error_set(error, ENOTSUP,
1686 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1687 "MARK action is not supported on the current Rx datapath");
1691 rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1693 rte_flow_error_set(error, rc,
1694 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1701 rte_flow_error_set(error, ENOTSUP,
1702 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1703 "Action is not supported");
1707 actions_set |= (1UL << actions->type);
1709 #undef SFC_BUILD_SET_OVERFLOW
1711 /* When fate is unknown, drop traffic. */
1712 if ((actions_set & fate_actions_mask) == 0) {
1713 spec_filter->template.efs_dmaq_id =
1714 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1720 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1721 "Cannot combine several fate-deciding actions, "
1722 "choose between QUEUE, RSS or DROP");
1725 fail_actions_overlap:
1726 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1727 "Overlapping actions are not supported");
1732 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1733 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1734 * specifications after copying.
1736 * @param spec[in, out]
1737 * SFC flow specification to update.
1738 * @param filters_count_for_one_val[in]
1739 * How many specifications should have the same match flag; this is the
1740 * number of specifications before copying.
1742 * Perform verbose error reporting if not NULL.
1745 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1746 unsigned int filters_count_for_one_val,
1747 struct rte_flow_error *error)
1750 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1751 static const efx_filter_match_flags_t vals[] = {
1752 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1753 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1756 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1757 rte_flow_error_set(error, EINVAL,
1758 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1759 "Number of specifications is incorrect while copying "
1760 "by unknown destination flags");
1764 for (i = 0; i < spec_filter->count; i++) {
1765 /* The check above ensures that divisor can't be zero here */
1766 spec_filter->filters[i].efs_match_flags |=
1767 vals[i / filters_count_for_one_val];
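/*
 * For example, with a single original specification
 * (filters_count_for_one_val == 1) the first copy gets
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST and the second one gets
 * EFX_FILTER_MATCH_UNKNOWN_MCAST_DST.
 */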
1774 * Check that the following conditions are met:
1775 * - the list of supported filters has a filter
1776 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1777 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1781 * The match flags of filter.
1783 * Specification to be supplemented.
1785 * SFC filter with list of supported filters.
1788 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1789 __rte_unused efx_filter_spec_t *spec,
1790 struct sfc_filter *filter)
1793 efx_filter_match_flags_t match_mcast_dst;
1796 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1797 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1798 for (i = 0; i < filter->supported_match_num; i++) {
1799 if (match_mcast_dst == filter->supported_match[i])
1807 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1808 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1809 * specifications after copying.
1811 * @param spec[in, out]
1812 * SFC flow specification to update.
1813 * @param filters_count_for_one_val[in]
1814 * How many specifications should have the same EtherType value; this is the
1815 * number of specifications before copying.
1817 * Perform verbose error reporting if not NULL.
1820 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1821 unsigned int filters_count_for_one_val,
1822 struct rte_flow_error *error)
1825 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1826 static const uint16_t vals[] = {
1827 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1830 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1831 rte_flow_error_set(error, EINVAL,
1832 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1833 "Number of specifications is incorrect "
1834 "while copying by Ethertype");
1838 for (i = 0; i < spec_filter->count; i++) {
1839 spec_filter->filters[i].efs_match_flags |=
1840 EFX_FILTER_MATCH_ETHER_TYPE;
1843 * The check above ensures that
1844 * filters_count_for_one_val is not 0
1846 spec_filter->filters[i].efs_ether_type =
1847 vals[i / filters_count_for_one_val];
1854 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1855 * in the same specifications after copying.
1857 * @param spec[in, out]
1858 * SFC flow specification to update.
1859 * @param filters_count_for_one_val[in]
1860 * How many specifications should have the same match flag; this is the
1861 * number of specifications before copying.
1863 * Perform verbose error reporting if not NULL.
1866 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1867 unsigned int filters_count_for_one_val,
1868 struct rte_flow_error *error)
1870 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1873 if (filters_count_for_one_val != spec_filter->count) {
1874 rte_flow_error_set(error, EINVAL,
1875 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1876 "Number of specifications is incorrect "
1877 "while copying by outer VLAN ID");
1881 for (i = 0; i < spec_filter->count; i++) {
1882 spec_filter->filters[i].efs_match_flags |=
1883 EFX_FILTER_MATCH_OUTER_VID;
1885 spec_filter->filters[i].efs_outer_vid = 0;
1892 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1893 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1894 * specifications after copying.
1896 * @param spec[in, out]
1897 * SFC flow specification to update.
1898 * @param filters_count_for_one_val[in]
1899 * How many specifications should have the same match flag; this is the
1900 * number of specifications before copying.
1902 * Perform verbose error reporting if not NULL.
1905 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1906 unsigned int filters_count_for_one_val,
1907 struct rte_flow_error *error)
1910 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1911 static const efx_filter_match_flags_t vals[] = {
1912 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1913 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1916 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1917 rte_flow_error_set(error, EINVAL,
1918 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1919 "Number of specifications is incorrect while copying "
1920 "by inner frame unknown destination flags");
1924 for (i = 0; i < spec_filter->count; i++) {
1925 /* The check above ensures that divisor can't be zero here */
1926 spec_filter->filters[i].efs_match_flags |=
1927 vals[i / filters_count_for_one_val];
1934 * Check that the following conditions are met:
1935 * - the specification corresponds to a filter for encapsulated traffic
1936 * - the list of supported filters has a filter
1937 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1938 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1942 * The match flags of filter.
1944 * Specification to be supplemented.
1946 * SFC filter with list of supported filters.
1949 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1950 efx_filter_spec_t *spec,
1951 struct sfc_filter *filter)
1954 efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1955 efx_filter_match_flags_t match_mcast_dst;
1957 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1961 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1962 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1963 for (i = 0; i < filter->supported_match_num; i++) {
1964 if (match_mcast_dst == filter->supported_match[i])
1972 * Check that the list of supported filters has a filter that differs
1973 * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID;
1974 * in this case that filter will be used and the flag
1975 * EFX_FILTER_MATCH_OUTER_VID is not needed.
1978 * The match flags of filter.
1980 * Specification to be supplemented.
1982 * SFC filter with list of supported filters.
1985 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
1986 __rte_unused efx_filter_spec_t *spec,
1987 struct sfc_filter *filter)
1990 efx_filter_match_flags_t match_without_vid =
1991 match & ~EFX_FILTER_MATCH_OUTER_VID;
1993 for (i = 0; i < filter->supported_match_num; i++) {
1994 if (match_without_vid == filter->supported_match[i])
2002 * Match flags that can be automatically added to filters.
2003 * Selecting the last minimum when searching for the copy flag ensures that the
2004 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2005 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
2006 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported filters.
2009 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2011 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2013 .set_vals = sfc_flow_set_unknown_dst_flags,
2014 .spec_check = sfc_flow_check_unknown_dst_flags,
2017 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2019 .set_vals = sfc_flow_set_ethertypes,
2023 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2025 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2026 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2029 .flag = EFX_FILTER_MATCH_OUTER_VID,
2031 .set_vals = sfc_flow_set_outer_vid_flag,
2032 .spec_check = sfc_flow_check_outer_vid_flag,
2036 /* Get item from array sfc_flow_copy_flags */
2037 static const struct sfc_flow_copy_flag *
2038 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2042 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2043 if (sfc_flow_copy_flags[i].flag == flag)
2044 return &sfc_flow_copy_flags[i];
2051 * Make copies of the specifications, set the match flag and the values
2052 * of the field that corresponds to it.
2054 * @param spec[in, out]
2055 * SFC flow specification to update.
2057 * The match flag to add.
2059 * Perform verbose error reporting if not NULL.
2062 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2063 efx_filter_match_flags_t flag,
2064 struct rte_flow_error *error)
2067 unsigned int new_filters_count;
2068 unsigned int filters_count_for_one_val;
2069 const struct sfc_flow_copy_flag *copy_flag;
2070 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2073 copy_flag = sfc_flow_get_copy_flag(flag);
2074 if (copy_flag == NULL) {
2075 rte_flow_error_set(error, ENOTSUP,
2076 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2077 "Unsupported spec field for copying");
2081 new_filters_count = spec_filter->count * copy_flag->vals_count;
2082 if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2083 rte_flow_error_set(error, EINVAL,
2084 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2085 "Too much EFX specifications in the flow rule");
2089 /* Copy filters specifications */
2090 for (i = spec_filter->count; i < new_filters_count; i++) {
2091 spec_filter->filters[i] =
2092 spec_filter->filters[i - spec_filter->count];
2095 filters_count_for_one_val = spec_filter->count;
2096 spec_filter->count = new_filters_count;
2098 rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
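/*
 * For instance, adding EFX_FILTER_MATCH_ETHER_TYPE (two possible values) to a
 * spec holding one filter duplicates it, after which
 * sfc_flow_set_ethertypes() assigns IPv4 to the first filter and IPv6 to the
 * second one.
 */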
2106 * Check that the given set of match flags missing in the original filter spec
2107 * could be covered by adding spec copies which specify the corresponding
2108 * flags and packet field values to match.
2110 * @param miss_flags[in]
2111 * Flags that must be added to reach a supported filter.
2113 * Specification to be supplemented.
2118 * Number of specifications after copying, or 0 if the flags cannot be added.
2121 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2122 efx_filter_spec_t *spec,
2123 struct sfc_filter *filter)
2126 efx_filter_match_flags_t copy_flags = 0;
2127 efx_filter_match_flags_t flag;
2128 efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2129 sfc_flow_spec_check *check;
2130 unsigned int multiplier = 1;
2132 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2133 flag = sfc_flow_copy_flags[i].flag;
2134 check = sfc_flow_copy_flags[i].spec_check;
2135 if ((flag & miss_flags) == flag) {
2136 if (check != NULL && (!check(match, spec, filter)))
2140 multiplier *= sfc_flow_copy_flags[i].vals_count;
2144 if (copy_flags == miss_flags)
2151 * Attempt to supplement the specification template to the minimally
2152 * supported set of match flags. To do this, it is necessary to copy
2153 * the specifications, filling them with the values of fields that
2154 * correspond to the missing flags.
2155 * The necessary and sufficient filter is built from the smallest number
2156 * of copies which could be made to cover the minimally required set of match flags.
2161 * @param spec[in, out]
2162 * SFC flow specification to update.
2164 * Perform verbose error reporting if not NULL.
2167 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2168 struct sfc_flow_spec *spec,
2169 struct rte_flow_error *error)
2171 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2172 struct sfc_filter *filter = &sa->filter;
2173 efx_filter_match_flags_t miss_flags;
2174 efx_filter_match_flags_t min_miss_flags = 0;
2175 efx_filter_match_flags_t match;
2176 unsigned int min_multiplier = UINT_MAX;
2177 unsigned int multiplier;
2181 match = spec_filter->template.efs_match_flags;
2182 for (i = 0; i < filter->supported_match_num; i++) {
2183 if ((match & filter->supported_match[i]) == match) {
2184 miss_flags = filter->supported_match[i] & (~match);
2185 multiplier = sfc_flow_check_missing_flags(miss_flags,
2186 &spec_filter->template, filter);
2187 if (multiplier > 0) {
2188 if (multiplier <= min_multiplier) {
2189 min_multiplier = multiplier;
2190 min_miss_flags = miss_flags;
2196 if (min_multiplier == UINT_MAX) {
2197 rte_flow_error_set(error, ENOTSUP,
2198 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2199 "The flow rule pattern is unsupported");
2203 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2204 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2206 if ((flag & min_miss_flags) == flag) {
2207 rc = sfc_flow_spec_add_match_flag(spec, flag, error);
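/*
 * In other words, among all supported match sets that are supersets of the
 * template, the one requiring the smallest total number of filter copies
 * (the product of vals_count over the missing flags) is selected, and the
 * corresponding copy flags are then applied in table order.
 */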
2217 * Check that the set of match flags is referred to by a filter. The filter is
2218 * described by match flags with the ability to add OUTER_VID and INNER_VID flags.
2221 * @param match_flags[in]
2222 * Set of match flags.
2223 * @param flags_pattern[in]
2224 * Pattern of filter match flags.
2227 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2228 efx_filter_match_flags_t flags_pattern)
2230 if ((match_flags & flags_pattern) != flags_pattern)
2233 switch (match_flags & ~flags_pattern) {
2235 case EFX_FILTER_MATCH_OUTER_VID:
2236 case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2244 * Check whether the spec maps to a hardware filter which is known to be
2245 * ineffective despite being valid.
2248 * SFC filter with list of supported filters.
2250 * SFC flow specification.
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
				  struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;

	for (i = 0; i < spec_filter->count; i++) {
		match_flags = spec_filter->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec_filter->filters[i].efs_ether_type;
			if (filter->supports_ip_proto_or_addr_filter &&
			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
			     ether_type == EFX_ETHER_TYPE_IPV6))
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec_filter->filters[i].efs_ip_proto;
			if (filter->supports_rem_or_local_port_filter &&
			    (ip_proto == EFX_IPPROTO_TCP ||
			     ip_proto == EFX_IPPROTO_UDP))
				return B_TRUE;
		}
	}

	return B_FALSE;
}

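/*
 * Expand the filter template into the spec's filter array, supplementing the
 * match flags if the template alone is not supported by the hardware, and
 * reject combinations known to be ineffective.
 */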
static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	efx_filter_spec_t *spec_tmpl = &spec_filter->template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	spec_filter->filters[0] = *spec_tmpl;
	spec_filter->count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow *flow,
			     struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_flow_spec *spec = &flow->spec;
	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
	struct sfc_flow_parse_ctx ctx;
	int rc;

	ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
	ctx.filter = &spec_filter->template;

	rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items),
				    pattern, &ctx, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}

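/* Parse the rule attributes, then dispatch to the backend chosen by spec type */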
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		return rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->parse == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	return ops->parse(dev, pattern, actions, flow, error);
}

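/* Allocate a zeroed flow rule, reporting ENOMEM through the rte_flow error */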
static struct rte_flow *
sfc_flow_zmalloc(struct rte_flow_error *error)
{
	struct rte_flow *flow;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
	}

	return flow;
}

static void
sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
{
	rte_free(flow);
}

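/* Program the parsed flow rule into hardware via the backend insert callback */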
static int
sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->insert == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	rc = ops->insert(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to insert the flow rule");
	}

	return rc;
}

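/* Withdraw the flow rule from hardware via the backend remove callback */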
static int
sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct sfc_flow_ops_by_spec *ops;
	int rc;

	ops = sfc_flow_get_ops_by_spec(flow);
	if (ops == NULL || ops->remove == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "No backend to handle this flow");
		return -rte_errno;
	}

	rc = ops->remove(sa, flow);
	if (rc != 0) {
		rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Failed to remove the flow rule");
	}

	return rc;
}

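/* rte_flow validate callback: parse the rule into a temporary flow and free it */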
static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		return -rte_errno;

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);

	sfc_flow_free(sa, flow);

	return rc;
}

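/*
 * rte_flow create callback: parse the rule, link it into the adapter's
 * flow list and, if the adapter is started, program it into hardware.
 */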
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow = NULL;
	int rc;

	flow = sfc_flow_zmalloc(error);
	if (flow == NULL)
		goto fail_no_mem;

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_insert(sa, flow, error);
		if (rc != 0)
			goto fail_flow_insert;
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_flow_insert:
	TAILQ_REMOVE(&sa->flow_list, flow, entries);

fail_bad_value:
	sfc_flow_free(sa, flow);
	sfc_adapter_unlock(sa);

fail_no_mem:
	return NULL;
}

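/*
 * rte_flow destroy callback: look the handle up in the flow list, remove it
 * from hardware if the adapter is started, then unlink and free it.
 */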
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	if (sa->state == SFC_ADAPTER_STARTED)
		rc = sfc_flow_remove(sa, flow, error);

	TAILQ_REMOVE(&sa->flow_list, flow, entries);
	sfc_flow_free(sa, flow);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

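/* rte_flow flush callback: remove and free all flow rules of the port */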
static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			int rc;

			rc = sfc_flow_remove(sa, flow, error);
			if (rc != 0)
				ret = rc;
		}

		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

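/*
 * rte_flow isolate callback: only allowed while the port is closed
 * (adapter in the initialized state); the mode is recorded in the
 * shared adapter state.
 */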
static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

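/* Generic flow API operations exported by the driver */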
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};

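/* Initialize the per-adapter list of flow rules */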
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->flow_list);
}

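/* Free all flow rules kept in the adapter's flow list */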
void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->flow_list, flow, entries);
		sfc_flow_free(sa, flow);
	}
}

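/* Remove all flow rules from hardware on port stop; keep them listed for restart */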
void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->flow_list, entries)
		sfc_flow_remove(sa, flow, NULL);
}

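/* Re-insert all listed flow rules into hardware on port start */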
int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->flow_list, entries) {
		rc = sfc_flow_insert(sa, flow, NULL);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}