1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2017-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #include <rte_byteorder.h>
11 #include <rte_tailq.h>
12 #include <rte_common.h>
13 #include <ethdev_driver.h>
14 #include <rte_ether.h>
16 #include <rte_flow_driver.h>
21 #include "sfc_debug.h"
23 #include "sfc_filter.h"
25 #include "sfc_flow_rss.h"
26 #include "sfc_flow_tunnel.h"
28 #include "sfc_dp_rx.h"
29 #include "sfc_mae_counter.h"
30 #include "sfc_switch.h"
/*
 * Per-backend vtable of flow-rule operations. A rule is serviced either by
 * the filter-based (VNIC) backend or by the MAE backend; the table chosen
 * depends on the flow spec type (see sfc_flow_get_ops_by_spec() below).
 */
32 struct sfc_flow_ops_by_spec {
/* Convert an rte_flow rule into the backend-specific specification */
33 sfc_flow_parse_cb_t *parse;
/* Verify a parsed specification (may be NULL for a backend) */
34 sfc_flow_verify_cb_t *verify;
/* Release backend resources held by a parsed specification */
35 sfc_flow_cleanup_cb_t *cleanup;
/* Insert / remove the rule into/from the hardware */
36 sfc_flow_insert_cb_t *insert;
37 sfc_flow_remove_cb_t *remove;
/* Query rule state, e.g. counters (may be NULL for a backend) */
38 sfc_flow_query_cb_t *query;
/* Forward declarations of the filter-based (VNIC) backend callbacks */
41 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
42 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
43 static sfc_flow_insert_cb_t sfc_flow_filter_insert;
44 static sfc_flow_remove_cb_t sfc_flow_filter_remove;
45 static sfc_flow_cleanup_cb_t sfc_flow_cleanup;
/*
 * Ops for the filter-based (VNIC) backend.
 * NOTE(review): .verify and .query are not set here — presumably left NULL
 * (unsupported) for this backend; confirm at the call sites.
 */
47 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
48 .parse = sfc_flow_parse_rte_to_filter,
50 .cleanup = sfc_flow_cleanup,
51 .insert = sfc_flow_filter_insert,
52 .remove = sfc_flow_filter_remove,
/*
 * Ops for the MAE (match-action engine, transfer rules) backend.
 * All callbacks, including verify and query, are provided by sfc_mae.
 */
56 static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
57 .parse = sfc_flow_parse_rte_to_mae,
58 .verify = sfc_mae_flow_verify,
59 .cleanup = sfc_mae_flow_cleanup,
60 .insert = sfc_mae_flow_insert,
61 .remove = sfc_mae_flow_remove,
62 .query = sfc_mae_flow_query,
/*
 * Select the ops table matching the flow's spec type.
 * Returns NULL when the spec type is not recognized (ops is initialized
 * to NULL and only overwritten in a matching case).
 */
65 static const struct sfc_flow_ops_by_spec *
66 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
68 struct sfc_flow_spec *spec = &flow->spec;
69 const struct sfc_flow_ops_by_spec *ops = NULL;
/* dispatch on spec->type */
72 case SFC_FLOW_SPEC_FILTER:
73 ops = &sfc_flow_ops_filter;
75 case SFC_FLOW_SPEC_MAE:
76 ops = &sfc_flow_ops_mae;
87 * Currently, filter-based (VNIC) flow API is implemented in such a manner
88 * that each flow rule is converted to one or more hardware filters.
89 * All elements of flow rule (attributes, pattern items, actions)
90 * correspond to one or more fields in the efx_filter_spec_s structure
91 * that is responsible for the hardware filter.
92 * If some required field is unset in the flow rule, then a handful
93 * of filter copies will be created to cover all possible values
/*
 * Forward declarations: one parser per supported pattern item type.
 * Each converts its rte_flow item into fields of an efx_filter_spec_t.
 */
97 static sfc_flow_item_parse sfc_flow_parse_void;
98 static sfc_flow_item_parse sfc_flow_parse_eth;
99 static sfc_flow_item_parse sfc_flow_parse_vlan;
100 static sfc_flow_item_parse sfc_flow_parse_ipv4;
101 static sfc_flow_item_parse sfc_flow_parse_ipv6;
102 static sfc_flow_item_parse sfc_flow_parse_tcp;
103 static sfc_flow_item_parse sfc_flow_parse_udp;
104 static sfc_flow_item_parse sfc_flow_parse_vxlan;
105 static sfc_flow_item_parse sfc_flow_parse_geneve;
106 static sfc_flow_item_parse sfc_flow_parse_nvgre;
107 static sfc_flow_item_parse sfc_flow_parse_pppoex;
/*
 * Setter: fill in the possible values of one unspecified match field
 * across the generated filter copies (see sfc_flow_copy_flag below).
 */
109 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
110 unsigned int filters_count_for_one_val,
111 struct rte_flow_error *error);
/* Predicate: may this match flag be added to the given filter spec? */
113 typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
114 efx_filter_spec_t *spec,
115 struct sfc_filter *filter);
/*
 * Describes how to expand a rule into several hardware filter copies
 * when a required match field was left unspecified by the flow rule.
 */
117 struct sfc_flow_copy_flag {
118 /* EFX filter specification match flag */
119 efx_filter_match_flags_t flag;
120 /* Number of values of corresponding field */
121 unsigned int vals_count;
122 /* Function to set values in specifications */
123 sfc_flow_spec_set_vals *set_vals;
125 * Function to check that the specification is suitable
126 * for adding this match flag
128 sfc_flow_spec_check *spec_check;
/* Forward declarations of the per-flag setter/check implementations */
131 static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
132 static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
133 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
134 static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
135 static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
136 static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
137 static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
/*
 * Return B_TRUE iff all @size bytes of @buf are zero.
 * Implemented by OR-accumulating the bytes into 'sum' (declared in an
 * elided line) — a zero accumulator proves the buffer is all-zero.
 */
140 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
145 for (i = 0; i < size; i++)
148 return (sum == 0) ? B_TRUE : B_FALSE;
152 * Validate item and prepare structures spec and mask for parsing
/*
 * Common front-end for all item parsers: checks spec/mask/last sanity,
 * substitutes the default mask when the item carries none, rejects
 * ranging ("last"), and verifies the mask asks for no more match than
 * supp_mask supports. On success *spec_ptr/*mask_ptr point at the data
 * the caller should interpret; on failure a negative errno is reported
 * via rte_flow_error_set() (return statements are elided in this paste).
 */
155 sfc_flow_parse_init(const struct rte_flow_item *item,
156 const void **spec_ptr,
157 const void **mask_ptr,
158 const void *supp_mask,
159 const void *def_mask,
161 struct rte_flow_error *error)
/* NULL item is a caller bug — report it rather than dereference */
170 rte_flow_error_set(error, EINVAL,
171 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
/* "last"/"mask" are qualifiers of "spec"; meaningless without it */
176 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
177 rte_flow_error_set(error, EINVAL,
178 RTE_FLOW_ERROR_TYPE_ITEM, item,
179 "Mask or last is set without spec");
184 * If "mask" is not set, default mask is used,
185 * but if default mask is NULL, "mask" should be set
187 if (item->mask == NULL) {
188 if (def_mask == NULL) {
189 rte_flow_error_set(error, EINVAL,
190 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
191 "Mask should be specified");
207 * If field values in "last" are either 0 or equal to the corresponding
208 * values in "spec" then they are ignored
/* Any other "last" value would define a range, which HW filters lack */
211 !sfc_flow_is_zero(last, size) &&
212 memcmp(last, spec, size) != 0) {
213 rte_flow_error_set(error, ENOTSUP,
214 RTE_FLOW_ERROR_TYPE_ITEM, item,
215 "Ranging is not supported");
219 if (supp_mask == NULL) {
220 rte_flow_error_set(error, EINVAL,
221 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
222 "Supported mask for item should be specified");
226 /* Check that mask does not ask for more match than supp_mask */
227 for (i = 0; i < size; i++) {
228 supp = ((const uint8_t *)supp_mask)[i];
/* a mask bit set outside the supported mask cannot be honoured */
230 if (~supp & mask[i]) {
231 rte_flow_error_set(error, ENOTSUP,
232 RTE_FLOW_ERROR_TYPE_ITEM, item,
233 "Item's field is not supported");
/* VOID pattern item: matches nothing, constrains nothing — accept as-is. */
252 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
253 __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
254 __rte_unused struct rte_flow_error *error)
260 * Convert Ethernet item to EFX filter specification.
263 * Item specification. Outer frame specification may only comprise
264 * source/destination addresses and Ethertype field.
265 * Inner frame specification may contain destination address only.
266 * There is support for individual/group mask as well as for empty and full.
267 * If the mask is NULL, default mask will be used. Ranging is not supported.
268 * @param efx_spec[in, out]
269 * EFX filter specification to update.
271 * Perform verbose error reporting if not NULL.
274 sfc_flow_parse_eth(const struct rte_flow_item *item,
275 struct sfc_flow_parse_ctx *parse_ctx,
276 struct rte_flow_error *error)
279 efx_filter_spec_t *efx_spec = parse_ctx->filter;
280 const struct rte_flow_item_eth *spec = NULL;
281 const struct rte_flow_item_eth *mask = NULL;
/* Outer frame: dst/src MAC (and EtherType, in an elided field) */
282 const struct rte_flow_item_eth supp_mask = {
283 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
284 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
/* Inner frame: destination MAC only */
287 const struct rte_flow_item_eth ifrm_supp_mask = {
288 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
/* Individual/group bit of the destination address */
290 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
291 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
293 const struct rte_flow_item_eth *supp_mask_p;
294 const struct rte_flow_item_eth *def_mask_p;
295 uint8_t *loc_mac = NULL;
/* Non-NONE encap type means this ETH item describes the inner frame */
296 boolean_t is_ifrm = (efx_spec->efs_encap_type !=
297 EFX_TUNNEL_PROTOCOL_NONE);
/* Inner frame branch (condition line elided in this paste) */
300 supp_mask_p = &ifrm_supp_mask;
301 def_mask_p = &ifrm_supp_mask;
302 loc_mac = efx_spec->efs_ifrm_loc_mac;
304 supp_mask_p = &supp_mask;
305 def_mask_p = &rte_flow_item_eth_mask;
306 loc_mac = efx_spec->efs_loc_mac;
309 rc = sfc_flow_parse_init(item,
310 (const void **)&spec,
311 (const void **)&mask,
312 supp_mask_p, def_mask_p,
313 sizeof(struct rte_flow_item_eth),
318 /* If "spec" is not set, could be any Ethernet */
/* Full dst mask -> exact match on the (inner or outer) local MAC */
322 if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
323 efx_spec->efs_match_flags |= is_ifrm ?
324 EFX_FILTER_MATCH_IFRM_LOC_MAC :
325 EFX_FILTER_MATCH_LOC_MAC;
326 rte_memcpy(loc_mac, spec->dst.addr_bytes,
/* I/G-bit-only mask -> match "any unknown unicast/multicast dst" */
328 } else if (memcmp(mask->dst.addr_bytes, ig_mask,
329 EFX_MAC_ADDR_LEN) == 0) {
330 if (rte_is_unicast_ether_addr(&spec->dst))
331 efx_spec->efs_match_flags |= is_ifrm ?
332 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
333 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
335 efx_spec->efs_match_flags |= is_ifrm ?
336 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
337 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
/* Any other non-zero dst mask is unsupported (falls to fail path) */
338 } else if (!rte_is_zero_ether_addr(&mask->dst)) {
343 * ifrm_supp_mask ensures that the source address and
344 * ethertype masks are equal to zero in inner frame,
345 * so these fields are filled in only for the outer frame
347 if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
348 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
349 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
351 } else if (!rte_is_zero_ether_addr(&mask->src)) {
356 * Ether type is in big-endian byte order in item and
357 * in little-endian in efx_spec, so byte swap is used
359 if (mask->type == supp_mask.type) {
360 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
361 efx_spec->efs_ether_type = rte_bswap16(spec->type);
362 } else if (mask->type != 0) {
/* common failure exit for all partial-mask cases above */
369 rte_flow_error_set(error, EINVAL,
370 RTE_FLOW_ERROR_TYPE_ITEM, item,
371 "Bad mask in the ETH pattern item");
376 * Convert VLAN item to EFX filter specification.
379 * Item specification. Only VID field is supported.
380 * The mask can not be NULL. Ranging is not supported.
381 * @param efx_spec[in, out]
382 * EFX filter specification to update.
384 * Perform verbose error reporting if not NULL.
387 sfc_flow_parse_vlan(const struct rte_flow_item *item,
388 struct sfc_flow_parse_ctx *parse_ctx,
389 struct rte_flow_error *error)
393 efx_filter_spec_t *efx_spec = parse_ctx->filter;
394 const struct rte_flow_item_vlan *spec = NULL;
395 const struct rte_flow_item_vlan *mask = NULL;
/* Only the 12 VID bits of TCI and the inner EtherType are supported */
396 const struct rte_flow_item_vlan supp_mask = {
397 .tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
398 .inner_type = RTE_BE16(0xffff),
/* def_mask is NULL here (elided arg): an explicit mask is mandatory */
401 rc = sfc_flow_parse_init(item,
402 (const void **)&spec,
403 (const void **)&mask,
406 sizeof(struct rte_flow_item_vlan),
412 * VID is in big-endian byte order in item and
413 * in little-endian in efx_spec, so byte swap is used.
414 * If two VLAN items are included, the first matches
415 * the outer tag and the next matches the inner tag.
417 if (mask->tci == supp_mask.tci) {
418 /* Apply mask to keep VID only */
419 vid = rte_bswap16(spec->tci & mask->tci);
/* First VLAN item -> outer VID, second -> inner VID, third -> error */
421 if (!(efx_spec->efs_match_flags &
422 EFX_FILTER_MATCH_OUTER_VID)) {
423 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
424 efx_spec->efs_outer_vid = vid;
425 } else if (!(efx_spec->efs_match_flags &
426 EFX_FILTER_MATCH_INNER_VID)) {
427 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
428 efx_spec->efs_inner_vid = vid;
430 rte_flow_error_set(error, EINVAL,
431 RTE_FLOW_ERROR_TYPE_ITEM, item,
432 "More than two VLAN items");
436 rte_flow_error_set(error, EINVAL,
437 RTE_FLOW_ERROR_TYPE_ITEM, item,
438 "VLAN ID in TCI match is required");
/* An EtherType match set before a VLAN item would be the TPID —
 * TPID matching is not supported by the hardware filters */
442 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
443 rte_flow_error_set(error, EINVAL,
444 RTE_FLOW_ERROR_TYPE_ITEM, item,
445 "VLAN TPID matching is not supported");
448 if (mask->inner_type == supp_mask.inner_type) {
449 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
450 efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
451 } else if (mask->inner_type) {
452 rte_flow_error_set(error, EINVAL,
453 RTE_FLOW_ERROR_TYPE_ITEM, item,
454 "Bad mask for VLAN inner_type");
462 * Convert IPv4 item to EFX filter specification.
465 * Item specification. Only source and destination addresses and
466 * protocol fields are supported. If the mask is NULL, default
467 * mask will be used. Ranging is not supported.
468 * @param efx_spec[in, out]
469 * EFX filter specification to update.
471 * Perform verbose error reporting if not NULL.
474 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
475 struct sfc_flow_parse_ctx *parse_ctx,
476 struct rte_flow_error *error)
479 efx_filter_spec_t *efx_spec = parse_ctx->filter;
480 const struct rte_flow_item_ipv4 *spec = NULL;
481 const struct rte_flow_item_ipv4 *mask = NULL;
/* efx_spec keeps EtherType little-endian, hence cpu_to_le_16 */
482 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
483 const struct rte_flow_item_ipv4 supp_mask = {
485 .src_addr = 0xffffffff,
486 .dst_addr = 0xffffffff,
487 .next_proto_id = 0xff,
491 rc = sfc_flow_parse_init(item,
492 (const void **)&spec,
493 (const void **)&mask,
495 &rte_flow_item_ipv4_mask,
496 sizeof(struct rte_flow_item_ipv4),
502 * Filtering by IPv4 source and destination addresses requires
503 * the appropriate ETHER_TYPE in hardware filters
505 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
506 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
507 efx_spec->efs_ether_type = ether_type_ipv4;
/* An earlier item pinned a different EtherType — contradiction */
508 } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
509 rte_flow_error_set(error, EINVAL,
510 RTE_FLOW_ERROR_TYPE_ITEM, item,
511 "Ethertype in pattern with IPV4 item should be appropriate");
519 * IPv4 addresses are in big-endian byte order in item and in
/* ...efx_spec as well, so values are copied without a byte swap */
522 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
523 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
524 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
525 } else if (mask->hdr.src_addr != 0) {
529 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
530 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
531 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
532 } else if (mask->hdr.dst_addr != 0) {
536 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
537 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
538 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
539 } else if (mask->hdr.next_proto_id != 0) {
/* common failure exit for all partial-mask cases above */
546 rte_flow_error_set(error, EINVAL,
547 RTE_FLOW_ERROR_TYPE_ITEM, item,
548 "Bad mask in the IPV4 pattern item");
553 * Convert IPv6 item to EFX filter specification.
556 * Item specification. Only source and destination addresses and
557 * next header fields are supported. If the mask is NULL, default
558 * mask will be used. Ranging is not supported.
559 * @param efx_spec[in, out]
560 * EFX filter specification to update.
562 * Perform verbose error reporting if not NULL.
565 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
566 struct sfc_flow_parse_ctx *parse_ctx,
567 struct rte_flow_error *error)
570 efx_filter_spec_t *efx_spec = parse_ctx->filter;
571 const struct rte_flow_item_ipv6 *spec = NULL;
572 const struct rte_flow_item_ipv6 *mask = NULL;
/* efx_spec keeps EtherType little-endian, hence cpu_to_le_16 */
573 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
574 const struct rte_flow_item_ipv6 supp_mask = {
576 .src_addr = { 0xff, 0xff, 0xff, 0xff,
577 0xff, 0xff, 0xff, 0xff,
578 0xff, 0xff, 0xff, 0xff,
579 0xff, 0xff, 0xff, 0xff },
580 .dst_addr = { 0xff, 0xff, 0xff, 0xff,
581 0xff, 0xff, 0xff, 0xff,
582 0xff, 0xff, 0xff, 0xff,
583 0xff, 0xff, 0xff, 0xff },
588 rc = sfc_flow_parse_init(item,
589 (const void **)&spec,
590 (const void **)&mask,
592 &rte_flow_item_ipv6_mask,
593 sizeof(struct rte_flow_item_ipv6),
599 * Filtering by IPv6 source and destination addresses requires
600 * the appropriate ETHER_TYPE in hardware filters
602 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
603 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
604 efx_spec->efs_ether_type = ether_type_ipv6;
/* An earlier item pinned a different EtherType — contradiction */
605 } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
606 rte_flow_error_set(error, EINVAL,
607 RTE_FLOW_ERROR_TYPE_ITEM, item,
608 "Ethertype in pattern with IPV6 item should be appropriate");
616 * IPv6 addresses are in big-endian byte order in item and in
/* ...efx_spec as well, so 16-byte values are copied without a swap */
619 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
620 sizeof(mask->hdr.src_addr)) == 0) {
621 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
/* compile-time guard: efx host field must hold a full IPv6 address */
623 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
624 sizeof(spec->hdr.src_addr));
625 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
626 sizeof(efx_spec->efs_rem_host));
627 } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
628 sizeof(mask->hdr.src_addr))) {
632 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
633 sizeof(mask->hdr.dst_addr)) == 0) {
634 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
636 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
637 sizeof(spec->hdr.dst_addr));
638 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
639 sizeof(efx_spec->efs_loc_host));
640 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
641 sizeof(mask->hdr.dst_addr))) {
645 if (mask->hdr.proto == supp_mask.hdr.proto) {
646 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
647 efx_spec->efs_ip_proto = spec->hdr.proto;
648 } else if (mask->hdr.proto != 0) {
/* common failure exit for all partial-mask cases above */
655 rte_flow_error_set(error, EINVAL,
656 RTE_FLOW_ERROR_TYPE_ITEM, item,
657 "Bad mask in the IPV6 pattern item");
662 * Convert TCP item to EFX filter specification.
665 * Item specification. Only source and destination ports fields
666 * are supported. If the mask is NULL, default mask will be used.
667 * Ranging is not supported.
668 * @param efx_spec[in, out]
669 * EFX filter specification to update.
671 * Perform verbose error reporting if not NULL.
674 sfc_flow_parse_tcp(const struct rte_flow_item *item,
675 struct sfc_flow_parse_ctx *parse_ctx,
676 struct rte_flow_error *error)
679 efx_filter_spec_t *efx_spec = parse_ctx->filter;
680 const struct rte_flow_item_tcp *spec = NULL;
681 const struct rte_flow_item_tcp *mask = NULL;
/* supported mask covers src/dst ports (initializers elided in paste) */
682 const struct rte_flow_item_tcp supp_mask = {
689 rc = sfc_flow_parse_init(item,
690 (const void **)&spec,
691 (const void **)&mask,
693 &rte_flow_item_tcp_mask,
694 sizeof(struct rte_flow_item_tcp),
700 * Filtering by TCP source and destination ports requires
701 * the appropriate IP_PROTO in hardware filters
703 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
704 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
705 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
/* An earlier item pinned a different IP proto — contradiction */
706 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
707 rte_flow_error_set(error, EINVAL,
708 RTE_FLOW_ERROR_TYPE_ITEM, item,
709 "IP proto in pattern with TCP item should be appropriate");
717 * Source and destination ports are in big-endian byte order in item and
718 * in little-endian in efx_spec, so byte swap is used
720 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
721 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
722 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
723 } else if (mask->hdr.src_port != 0) {
727 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
728 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
729 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
730 } else if (mask->hdr.dst_port != 0) {
/* common failure exit for all partial-mask cases above */
737 rte_flow_error_set(error, EINVAL,
738 RTE_FLOW_ERROR_TYPE_ITEM, item,
739 "Bad mask in the TCP pattern item");
744 * Convert UDP item to EFX filter specification.
747 * Item specification. Only source and destination ports fields
748 * are supported. If the mask is NULL, default mask will be used.
749 * Ranging is not supported.
750 * @param efx_spec[in, out]
751 * EFX filter specification to update.
753 * Perform verbose error reporting if not NULL.
756 sfc_flow_parse_udp(const struct rte_flow_item *item,
757 struct sfc_flow_parse_ctx *parse_ctx,
758 struct rte_flow_error *error)
761 efx_filter_spec_t *efx_spec = parse_ctx->filter;
762 const struct rte_flow_item_udp *spec = NULL;
763 const struct rte_flow_item_udp *mask = NULL;
/* supported mask covers src/dst ports (initializers elided in paste) */
764 const struct rte_flow_item_udp supp_mask = {
771 rc = sfc_flow_parse_init(item,
772 (const void **)&spec,
773 (const void **)&mask,
775 &rte_flow_item_udp_mask,
776 sizeof(struct rte_flow_item_udp),
782 * Filtering by UDP source and destination ports requires
783 * the appropriate IP_PROTO in hardware filters
785 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
786 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
787 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
/* An earlier item pinned a different IP proto — contradiction */
788 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
789 rte_flow_error_set(error, EINVAL,
790 RTE_FLOW_ERROR_TYPE_ITEM, item,
791 "IP proto in pattern with UDP item should be appropriate");
799 * Source and destination ports are in big-endian byte order in item and
800 * in little-endian in efx_spec, so byte swap is used
802 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
803 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
804 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
805 } else if (mask->hdr.src_port != 0) {
809 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
810 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
811 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
812 } else if (mask->hdr.dst_port != 0) {
/* common failure exit for all partial-mask cases above */
819 rte_flow_error_set(error, EINVAL,
820 RTE_FLOW_ERROR_TYPE_ITEM, item,
821 "Bad mask in the UDP pattern item");
826 * Filters for encapsulated packets match based on the EtherType and IP
827 * protocol in the outer frame.
/*
 * Require the outer-frame IP protocol (@ip_proto) demanded by the tunnel
 * item being parsed, and sanity-check any already-pinned outer EtherType.
 * @ip_proto is UDP for VxLAN/GENEVE and GRE for NVGRE (see callers).
 */
830 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
831 efx_filter_spec_t *efx_spec,
833 struct rte_flow_error *error)
835 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
836 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
837 efx_spec->efs_ip_proto = ip_proto;
/* Conflict: report an error message specific to the expected proto */
838 } else if (efx_spec->efs_ip_proto != ip_proto) {
840 case EFX_IPPROTO_UDP:
841 rte_flow_error_set(error, EINVAL,
842 RTE_FLOW_ERROR_TYPE_ITEM, item,
843 "Outer IP header protocol must be UDP "
844 "in VxLAN/GENEVE pattern");
847 case EFX_IPPROTO_GRE:
848 rte_flow_error_set(error, EINVAL,
849 RTE_FLOW_ERROR_TYPE_ITEM, item,
850 "Outer IP header protocol must be GRE "
855 rte_flow_error_set(error, EINVAL,
856 RTE_FLOW_ERROR_TYPE_ITEM, item,
857 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
/* Outer EtherType, when already matched, must be IPv4 or IPv6 */
863 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
864 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
865 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
866 rte_flow_error_set(error, EINVAL,
867 RTE_FLOW_ERROR_TYPE_ITEM, item,
868 "Outer frame EtherType in pattern with tunneling "
869 "must be IPv4 or IPv6");
/*
 * Apply a VNI (VxLAN/GENEVE) or VSID (NVGRE) value+mask to the EFX spec.
 * Only a full mask (exact match) or an all-zero mask (no match) is
 * accepted; anything in between cannot be expressed by the hardware.
 */
877 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
878 const uint8_t *vni_or_vsid_val,
879 const uint8_t *vni_or_vsid_mask,
880 const struct rte_flow_item *item,
881 struct rte_flow_error *error)
/* full mask (byte values elided in this paste) */
883 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
887 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
888 EFX_VNI_OR_VSID_LEN) == 0) {
889 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
890 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
891 EFX_VNI_OR_VSID_LEN);
892 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
893 rte_flow_error_set(error, EINVAL,
894 RTE_FLOW_ERROR_TYPE_ITEM, item,
895 "Unsupported VNI/VSID mask");
903 * Convert VXLAN item to EFX filter specification.
906 * Item specification. Only VXLAN network identifier field is supported.
907 * If the mask is NULL, default mask will be used.
908 * Ranging is not supported.
909 * @param efx_spec[in, out]
910 * EFX filter specification to update.
912 * Perform verbose error reporting if not NULL.
915 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
916 struct sfc_flow_parse_ctx *parse_ctx,
917 struct rte_flow_error *error)
920 efx_filter_spec_t *efx_spec = parse_ctx->filter;
921 const struct rte_flow_item_vxlan *spec = NULL;
922 const struct rte_flow_item_vxlan *mask = NULL;
/* Only the 24-bit VNI may be matched */
923 const struct rte_flow_item_vxlan supp_mask = {
924 .vni = { 0xff, 0xff, 0xff }
927 rc = sfc_flow_parse_init(item,
928 (const void **)&spec,
929 (const void **)&mask,
931 &rte_flow_item_vxlan_mask,
932 sizeof(struct rte_flow_item_vxlan),
/* VxLAN runs over UDP: pin outer IP proto and check outer EtherType */
937 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
938 EFX_IPPROTO_UDP, error);
942 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
943 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
948 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
949 mask->vni, item, error);
955 * Convert GENEVE item to EFX filter specification.
958 * Item specification. Only Virtual Network Identifier and protocol type
959 * fields are supported. But protocol type can be only Ethernet (0x6558).
960 * If the mask is NULL, default mask will be used.
961 * Ranging is not supported.
962 * @param efx_spec[in, out]
963 * EFX filter specification to update.
965 * Perform verbose error reporting if not NULL.
968 sfc_flow_parse_geneve(const struct rte_flow_item *item,
969 struct sfc_flow_parse_ctx *parse_ctx,
970 struct rte_flow_error *error)
973 efx_filter_spec_t *efx_spec = parse_ctx->filter;
974 const struct rte_flow_item_geneve *spec = NULL;
975 const struct rte_flow_item_geneve *mask = NULL;
/* Matchable fields: encapsulated protocol type and the 24-bit VNI */
976 const struct rte_flow_item_geneve supp_mask = {
977 .protocol = RTE_BE16(0xffff),
978 .vni = { 0xff, 0xff, 0xff }
981 rc = sfc_flow_parse_init(item,
982 (const void **)&spec,
983 (const void **)&mask,
985 &rte_flow_item_geneve_mask,
986 sizeof(struct rte_flow_item_geneve),
/* GENEVE runs over UDP: pin outer IP proto and check outer EtherType */
991 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
992 EFX_IPPROTO_UDP, error);
996 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
997 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
/* Encapsulated protocol, when matched, must be Transparent Eth Bridging */
1002 if (mask->protocol == supp_mask.protocol) {
1003 if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
1004 rte_flow_error_set(error, EINVAL,
1005 RTE_FLOW_ERROR_TYPE_ITEM, item,
1006 "GENEVE encap. protocol must be Ethernet "
1007 "(0x6558) in the GENEVE pattern item");
1010 } else if (mask->protocol != 0) {
1011 rte_flow_error_set(error, EINVAL,
1012 RTE_FLOW_ERROR_TYPE_ITEM, item,
1013 "Unsupported mask for GENEVE encap. protocol");
1017 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
1018 mask->vni, item, error);
1024 * Convert NVGRE item to EFX filter specification.
1027 * Item specification. Only virtual subnet ID field is supported.
1028 * If the mask is NULL, default mask will be used.
1029 * Ranging is not supported.
1030 * @param efx_spec[in, out]
1031 * EFX filter specification to update.
1033 * Perform verbose error reporting if not NULL.
1036 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
1037 struct sfc_flow_parse_ctx *parse_ctx,
1038 struct rte_flow_error *error)
1041 efx_filter_spec_t *efx_spec = parse_ctx->filter;
1042 const struct rte_flow_item_nvgre *spec = NULL;
1043 const struct rte_flow_item_nvgre *mask = NULL;
/* Only the 24-bit TNI (virtual subnet ID) may be matched */
1044 const struct rte_flow_item_nvgre supp_mask = {
1045 .tni = { 0xff, 0xff, 0xff }
1048 rc = sfc_flow_parse_init(item,
1049 (const void **)&spec,
1050 (const void **)&mask,
1052 &rte_flow_item_nvgre_mask,
1053 sizeof(struct rte_flow_item_nvgre),
/* NVGRE runs over GRE: pin outer IP proto and check outer EtherType */
1058 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
1059 EFX_IPPROTO_GRE, error);
1063 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
1064 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
/* TNI occupies the same EFX field as a VxLAN/GENEVE VNI */
1069 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
1070 mask->tni, item, error);
1076 * Convert PPPoEx item to EFX filter specification.
1079 * Item specification.
1080 * Matching on PPPoEx fields is not supported.
1081 * This item can only be used to set or validate the EtherType filter.
1082 * Only zero masks are allowed.
1083 * Ranging is not supported.
1084 * @param efx_spec[in, out]
1085 * EFX filter specification to update.
1087 * Perform verbose error reporting if not NULL.
1090 sfc_flow_parse_pppoex(const struct rte_flow_item *item,
1091 struct sfc_flow_parse_ctx *parse_ctx,
1092 struct rte_flow_error *error)
1094 efx_filter_spec_t *efx_spec = parse_ctx->filter;
1095 const struct rte_flow_item_pppoe *spec = NULL;
1096 const struct rte_flow_item_pppoe *mask = NULL;
/* Both masks are all-zero: no PPPoE field matching is possible */
1097 const struct rte_flow_item_pppoe supp_mask = {};
1098 const struct rte_flow_item_pppoe def_mask = {};
1099 uint16_t ether_type;
1102 rc = sfc_flow_parse_init(item,
1103 (const void **)&spec,
1104 (const void **)&mask,
1107 sizeof(struct rte_flow_item_pppoe),
/* Item type itself selects discovery vs session stage EtherType */
1112 if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
1113 ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
1115 ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;
1117 if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
/* Validate against a previously-set EtherType (e.g. from an ETH item) */
1118 if (efx_spec->efs_ether_type != ether_type) {
1119 rte_flow_error_set(error, EINVAL,
1120 RTE_FLOW_ERROR_TYPE_ITEM, item,
1121 "Invalid EtherType for a PPPoE flow item");
1125 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
1126 efx_spec->efs_ether_type = ether_type;
/*
 * Table of supported pattern items for the filter-based backend.
 * 'prev_layer'/'layer' encode the allowed ordering of items in a pattern;
 * tunnel items reset 'layer' to START_LAYER so the inner frame can be
 * described starting from L2 again.
 */
1132 static const struct sfc_flow_item sfc_flow_items[] = {
1134 .type = RTE_FLOW_ITEM_TYPE_VOID,
/* VOID may appear anywhere and does not advance the layer */
1136 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
1137 .layer = SFC_FLOW_ITEM_ANY_LAYER,
1138 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1139 .parse = sfc_flow_parse_void,
1142 .type = RTE_FLOW_ITEM_TYPE_ETH,
1144 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1145 .layer = SFC_FLOW_ITEM_L2,
1146 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1147 .parse = sfc_flow_parse_eth,
1150 .type = RTE_FLOW_ITEM_TYPE_VLAN,
/* VLAN stays at L2: multiple VLAN items may follow an ETH item */
1152 .prev_layer = SFC_FLOW_ITEM_L2,
1153 .layer = SFC_FLOW_ITEM_L2,
1154 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1155 .parse = sfc_flow_parse_vlan,
1158 .type = RTE_FLOW_ITEM_TYPE_PPPOED,
1160 .prev_layer = SFC_FLOW_ITEM_L2,
1161 .layer = SFC_FLOW_ITEM_L2,
1162 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1163 .parse = sfc_flow_parse_pppoex,
1166 .type = RTE_FLOW_ITEM_TYPE_PPPOES,
1168 .prev_layer = SFC_FLOW_ITEM_L2,
1169 .layer = SFC_FLOW_ITEM_L2,
1170 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
/* PPPOED and PPPOES share one parser; it inspects item->type */
1171 .parse = sfc_flow_parse_pppoex,
1174 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1176 .prev_layer = SFC_FLOW_ITEM_L2,
1177 .layer = SFC_FLOW_ITEM_L3,
1178 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1179 .parse = sfc_flow_parse_ipv4,
1182 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1184 .prev_layer = SFC_FLOW_ITEM_L2,
1185 .layer = SFC_FLOW_ITEM_L3,
1186 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1187 .parse = sfc_flow_parse_ipv6,
1190 .type = RTE_FLOW_ITEM_TYPE_TCP,
1192 .prev_layer = SFC_FLOW_ITEM_L3,
1193 .layer = SFC_FLOW_ITEM_L4,
1194 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1195 .parse = sfc_flow_parse_tcp,
1198 .type = RTE_FLOW_ITEM_TYPE_UDP,
1200 .prev_layer = SFC_FLOW_ITEM_L3,
1201 .layer = SFC_FLOW_ITEM_L4,
1202 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1203 .parse = sfc_flow_parse_udp,
1206 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
/* VxLAN follows UDP (L4) and restarts layering for the inner frame */
1208 .prev_layer = SFC_FLOW_ITEM_L4,
1209 .layer = SFC_FLOW_ITEM_START_LAYER,
1210 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1211 .parse = sfc_flow_parse_vxlan,
1214 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1216 .prev_layer = SFC_FLOW_ITEM_L4,
1217 .layer = SFC_FLOW_ITEM_START_LAYER,
1218 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1219 .parse = sfc_flow_parse_geneve,
1222 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
/* NVGRE follows IP directly (GRE has no L4 item in the pattern) */
1224 .prev_layer = SFC_FLOW_ITEM_L3,
1225 .layer = SFC_FLOW_ITEM_START_LAYER,
1226 .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
1227 .parse = sfc_flow_parse_nvgre,
1232 * Protocol-independent flow API support
/*
 * Parse flow attributes and decide which backend services the rule:
 * non-transfer rules go to the filter-based (VNIC) backend, transfer
 * rules go to the MAE backend. Initializes the corresponding spec.
 */
1235 sfc_flow_parse_attr(struct sfc_adapter *sa,
1236 const struct rte_flow_attr *attr,
1237 struct rte_flow *flow,
1238 struct rte_flow_error *error)
1240 struct sfc_flow_spec *spec = &flow->spec;
1241 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1242 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
1243 struct sfc_mae *mae = &sa->mae;
/* NULL attr check (condition line elided in this paste) */
1246 rte_flow_error_set(error, EINVAL,
1247 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1251 if (attr->group != 0) {
1252 rte_flow_error_set(error, ENOTSUP,
1253 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1254 "Groups are not supported");
/* Egress is only meaningful for transfer (MAE) rules */
1257 if (attr->egress != 0 && attr->transfer == 0) {
1258 rte_flow_error_set(error, ENOTSUP,
1259 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1260 "Egress is not supported");
1263 if (attr->ingress == 0 && attr->transfer == 0) {
1264 rte_flow_error_set(error, ENOTSUP,
1265 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1266 "Ingress is compulsory");
/* Non-transfer rule -> filter-based backend */
1269 if (attr->transfer == 0) {
1270 if (attr->priority != 0) {
1271 rte_flow_error_set(error, ENOTSUP,
1272 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1273 attr, "Priorities are unsupported");
1276 spec->type = SFC_FLOW_SPEC_FILTER;
1277 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
1278 spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1279 spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
/* Transfer rule -> MAE backend; requires admin MAE status */
1281 if (mae->status != SFC_MAE_STATUS_ADMIN) {
1282 rte_flow_error_set(error, ENOTSUP,
1283 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1284 attr, "Transfer is not supported");
1287 if (attr->priority > mae->nb_action_rule_prios_max) {
1288 rte_flow_error_set(error, ENOTSUP,
1289 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1290 attr, "Unsupported priority level");
1293 spec->type = SFC_FLOW_SPEC_MAE;
1294 spec_mae->priority = attr->priority;
/* Parsed later; mark resources as not-yet-allocated */
1295 spec_mae->match_spec = NULL;
1296 spec_mae->action_set = NULL;
1297 spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
1303 /* Get item from array sfc_flow_items */
1304 static const struct sfc_flow_item *
1305 sfc_flow_get_item(const struct sfc_flow_item *items,
1306 unsigned int nb_items,
1307 enum rte_flow_item_type type)
1311 for (i = 0; i < nb_items; i++)
1312 if (items[i].type == type)
1319 sfc_flow_parse_pattern(struct sfc_adapter *sa,
1320 const struct sfc_flow_item *flow_items,
1321 unsigned int nb_flow_items,
1322 const struct rte_flow_item pattern[],
1323 struct sfc_flow_parse_ctx *parse_ctx,
1324 struct rte_flow_error *error)
1327 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1328 boolean_t is_ifrm = B_FALSE;
1329 const struct sfc_flow_item *item;
1331 if (pattern == NULL) {
1332 rte_flow_error_set(error, EINVAL,
1333 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1338 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1339 item = sfc_flow_get_item(flow_items, nb_flow_items,
1342 rte_flow_error_set(error, ENOTSUP,
1343 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1344 "Unsupported pattern item");
1349 * Omitting one or several protocol layers at the beginning
1350 * of pattern is supported
1352 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1353 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1354 item->prev_layer != prev_layer) {
1355 rte_flow_error_set(error, ENOTSUP,
1356 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1357 "Unexpected sequence of pattern items");
1362 * Allow only VOID and ETH pattern items in the inner frame.
1363 * Also check that there is only one tunneling protocol.
1365 switch (item->type) {
1366 case RTE_FLOW_ITEM_TYPE_VOID:
1367 case RTE_FLOW_ITEM_TYPE_ETH:
1370 case RTE_FLOW_ITEM_TYPE_VXLAN:
1371 case RTE_FLOW_ITEM_TYPE_GENEVE:
1372 case RTE_FLOW_ITEM_TYPE_NVGRE:
1374 rte_flow_error_set(error, EINVAL,
1375 RTE_FLOW_ERROR_TYPE_ITEM,
1377 "More than one tunneling protocol");
1384 if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
1386 rte_flow_error_set(error, EINVAL,
1387 RTE_FLOW_ERROR_TYPE_ITEM,
1389 "There is an unsupported pattern item "
1390 "in the inner frame");
1396 if (parse_ctx->type != item->ctx_type) {
1397 rte_flow_error_set(error, EINVAL,
1398 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1399 "Parse context type mismatch");
1403 rc = item->parse(pattern, parse_ctx, error);
1405 sfc_err(sa, "failed to parse item %s: %s",
1406 item->name, strerror(-rc));
1410 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1411 prev_layer = item->layer;
1418 sfc_flow_parse_queue(struct sfc_adapter *sa,
1419 const struct rte_flow_action_queue *queue,
1420 struct rte_flow *flow)
1422 struct sfc_flow_spec *spec = &flow->spec;
1423 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1424 struct sfc_rxq *rxq;
1425 struct sfc_rxq_info *rxq_info;
1427 if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
1430 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
1431 spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1433 rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
1435 if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
1436 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1437 struct sfc_rss *ethdev_rss = &sas->rss;
1439 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1440 spec_filter->rss_ctx = ðdev_rss->dummy_ctx;
1447 sfc_flow_parse_rss(struct sfc_adapter *sa,
1448 const struct rte_flow_action_rss *action_rss,
1449 struct rte_flow *flow)
1451 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1452 struct sfc_flow_rss_conf conf;
1453 uint16_t sw_qid_min;
1454 struct sfc_rxq *rxq;
1457 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1459 rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
1463 rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
1464 spec_filter->template.efs_dmaq_id = rxq->hw_index;
1466 spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
1468 if (spec_filter->rss_ctx != NULL)
1471 rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
1472 &spec_filter->rss_ctx);
1480 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1481 unsigned int filters_count)
1483 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1487 for (i = 0; i < filters_count; i++) {
1490 rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
1491 if (ret == 0 && rc != 0) {
1492 sfc_err(sa, "failed to remove filter specification "
1502 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1504 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1508 for (i = 0; i < spec_filter->count; i++) {
1509 rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
1511 sfc_flow_spec_flush(sa, spec, i);
1520 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1522 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1524 return sfc_flow_spec_flush(sa, spec, spec_filter->count);
1528 sfc_flow_filter_insert(struct sfc_adapter *sa,
1529 struct rte_flow *flow)
1531 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1532 struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
1535 rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
1537 goto fail_rss_ctx_program;
1539 if (rss_ctx != NULL) {
1543 * At this point, fully elaborated filter specifications
1544 * have been produced from the template. To make sure that
1545 * RSS behaviour is consistent between them, set the same
1546 * RSS context value everywhere.
1548 for (i = 0; i < spec_filter->count; i++) {
1549 efx_filter_spec_t *spec = &spec_filter->filters[i];
1551 spec->efs_rss_context = rss_ctx->nic_handle;
1555 rc = sfc_flow_spec_insert(sa, &flow->spec);
1557 goto fail_filter_insert;
1562 sfc_flow_rss_ctx_terminate(sa, rss_ctx);
1564 fail_rss_ctx_program:
1569 sfc_flow_filter_remove(struct sfc_adapter *sa,
1570 struct rte_flow *flow)
1572 struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
1575 rc = sfc_flow_spec_remove(sa, &flow->spec);
1579 sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
1585 sfc_flow_parse_mark(struct sfc_adapter *sa,
1586 const struct rte_flow_action_mark *mark,
1587 struct rte_flow *flow)
1589 struct sfc_flow_spec *spec = &flow->spec;
1590 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1591 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1594 mark_max = encp->enc_filter_action_mark_max;
1595 if (sfc_ft_is_active(sa))
1596 mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
1598 if (mark == NULL || mark->id > mark_max)
1601 spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
1602 spec_filter->template.efs_mark = mark->id;
1608 sfc_flow_parse_actions(struct sfc_adapter *sa,
1609 const struct rte_flow_action actions[],
1610 struct rte_flow *flow,
1611 struct rte_flow_error *error)
1614 struct sfc_flow_spec *spec = &flow->spec;
1615 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1616 const unsigned int dp_rx_features = sa->priv.dp_rx->features;
1617 const uint64_t rx_metadata = sa->negotiated_rx_metadata;
1618 uint32_t actions_set = 0;
1619 const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
1620 (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
1621 (1UL << RTE_FLOW_ACTION_TYPE_DROP);
1622 const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
1623 (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
1625 if (actions == NULL) {
1626 rte_flow_error_set(error, EINVAL,
1627 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1632 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1633 switch (actions->type) {
1634 case RTE_FLOW_ACTION_TYPE_VOID:
1635 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
1639 case RTE_FLOW_ACTION_TYPE_QUEUE:
1640 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
1642 if ((actions_set & fate_actions_mask) != 0)
1643 goto fail_fate_actions;
1645 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1647 rte_flow_error_set(error, EINVAL,
1648 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1649 "Bad QUEUE action");
1654 case RTE_FLOW_ACTION_TYPE_RSS:
1655 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
1657 if ((actions_set & fate_actions_mask) != 0)
1658 goto fail_fate_actions;
1660 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1662 rte_flow_error_set(error, -rc,
1663 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1669 case RTE_FLOW_ACTION_TYPE_DROP:
1670 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
1672 if ((actions_set & fate_actions_mask) != 0)
1673 goto fail_fate_actions;
1675 spec_filter->template.efs_dmaq_id =
1676 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1679 case RTE_FLOW_ACTION_TYPE_FLAG:
1680 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
1682 if ((actions_set & mark_actions_mask) != 0)
1683 goto fail_actions_overlap;
1685 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
1686 rte_flow_error_set(error, ENOTSUP,
1687 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1688 "FLAG action is not supported on the current Rx datapath");
1690 } else if ((rx_metadata &
1691 RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
1692 rte_flow_error_set(error, ENOTSUP,
1693 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1694 "flag delivery has not been negotiated");
1698 spec_filter->template.efs_flags |=
1699 EFX_FILTER_FLAG_ACTION_FLAG;
1702 case RTE_FLOW_ACTION_TYPE_MARK:
1703 SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
1705 if ((actions_set & mark_actions_mask) != 0)
1706 goto fail_actions_overlap;
1708 if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
1709 rte_flow_error_set(error, ENOTSUP,
1710 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1711 "MARK action is not supported on the current Rx datapath");
1713 } else if ((rx_metadata &
1714 RTE_ETH_RX_METADATA_USER_MARK) == 0) {
1715 rte_flow_error_set(error, ENOTSUP,
1716 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1717 "mark delivery has not been negotiated");
1721 rc = sfc_flow_parse_mark(sa, actions->conf, flow);
1723 rte_flow_error_set(error, rc,
1724 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1731 rte_flow_error_set(error, ENOTSUP,
1732 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1733 "Action is not supported");
1737 actions_set |= (1UL << actions->type);
1740 /* When fate is unknown, drop traffic. */
1741 if ((actions_set & fate_actions_mask) == 0) {
1742 spec_filter->template.efs_dmaq_id =
1743 EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
1749 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1750 "Cannot combine several fate-deciding actions, "
1751 "choose between QUEUE, RSS or DROP");
1754 fail_actions_overlap:
1755 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
1756 "Overlapping actions are not supported");
1761 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
1762 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
1763 * specifications after copying.
1765 * @param spec[in, out]
1766 * SFC flow specification to update.
1767 * @param filters_count_for_one_val[in]
1768 * How many specifications should have the same match flag, what is the
1769 * number of specifications before copying.
1771 * Perform verbose error reporting if not NULL.
1774 sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
1775 unsigned int filters_count_for_one_val,
1776 struct rte_flow_error *error)
1779 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1780 static const efx_filter_match_flags_t vals[] = {
1781 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
1782 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
1785 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1786 rte_flow_error_set(error, EINVAL,
1787 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1788 "Number of specifications is incorrect while copying "
1789 "by unknown destination flags");
1793 for (i = 0; i < spec_filter->count; i++) {
1794 /* The check above ensures that divisor can't be zero here */
1795 spec_filter->filters[i].efs_match_flags |=
1796 vals[i / filters_count_for_one_val];
1803 * Check that the following conditions are met:
1804 * - the list of supported filters has a filter
1805 * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
1806 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
1810 * The match flags of filter.
1812 * Specification to be supplemented.
1814 * SFC filter with list of supported filters.
1817 sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
1818 __rte_unused efx_filter_spec_t *spec,
1819 struct sfc_filter *filter)
1822 efx_filter_match_flags_t match_mcast_dst;
1825 (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
1826 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
1827 for (i = 0; i < filter->supported_match_num; i++) {
1828 if (match_mcast_dst == filter->supported_match[i])
1836 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
1837 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
1838 * specifications after copying.
1840 * @param spec[in, out]
1841 * SFC flow specification to update.
1842 * @param filters_count_for_one_val[in]
1843 * How many specifications should have the same EtherType value, what is the
1844 * number of specifications before copying.
1846 * Perform verbose error reporting if not NULL.
1849 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1850 unsigned int filters_count_for_one_val,
1851 struct rte_flow_error *error)
1854 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1855 static const uint16_t vals[] = {
1856 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1859 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1860 rte_flow_error_set(error, EINVAL,
1861 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1862 "Number of specifications is incorrect "
1863 "while copying by Ethertype");
1867 for (i = 0; i < spec_filter->count; i++) {
1868 spec_filter->filters[i].efs_match_flags |=
1869 EFX_FILTER_MATCH_ETHER_TYPE;
1872 * The check above ensures that
1873 * filters_count_for_one_val is not 0
1875 spec_filter->filters[i].efs_ether_type =
1876 vals[i / filters_count_for_one_val];
1883 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
1884 * in the same specifications after copying.
1886 * @param spec[in, out]
1887 * SFC flow specification to update.
1888 * @param filters_count_for_one_val[in]
1889 * How many specifications should have the same match flag, what is the
1890 * number of specifications before copying.
1892 * Perform verbose error reporting if not NULL.
1895 sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
1896 unsigned int filters_count_for_one_val,
1897 struct rte_flow_error *error)
1899 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1902 if (filters_count_for_one_val != spec_filter->count) {
1903 rte_flow_error_set(error, EINVAL,
1904 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1905 "Number of specifications is incorrect "
1906 "while copying by outer VLAN ID");
1910 for (i = 0; i < spec_filter->count; i++) {
1911 spec_filter->filters[i].efs_match_flags |=
1912 EFX_FILTER_MATCH_OUTER_VID;
1914 spec_filter->filters[i].efs_outer_vid = 0;
1921 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
1922 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
1923 * specifications after copying.
1925 * @param spec[in, out]
1926 * SFC flow specification to update.
1927 * @param filters_count_for_one_val[in]
1928 * How many specifications should have the same match flag, what is the
1929 * number of specifications before copying.
1931 * Perform verbose error reporting if not NULL.
1934 sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
1935 unsigned int filters_count_for_one_val,
1936 struct rte_flow_error *error)
1939 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
1940 static const efx_filter_match_flags_t vals[] = {
1941 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
1942 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
1945 if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
1946 rte_flow_error_set(error, EINVAL,
1947 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1948 "Number of specifications is incorrect while copying "
1949 "by inner frame unknown destination flags");
1953 for (i = 0; i < spec_filter->count; i++) {
1954 /* The check above ensures that divisor can't be zero here */
1955 spec_filter->filters[i].efs_match_flags |=
1956 vals[i / filters_count_for_one_val];
1963 * Check that the following conditions are met:
1964 * - the specification corresponds to a filter for encapsulated traffic
1965 * - the list of supported filters has a filter
1966 * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
1967 * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
1971 * The match flags of filter.
1973 * Specification to be supplemented.
1975 * SFC filter with list of supported filters.
1978 sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
1979 efx_filter_spec_t *spec,
1980 struct sfc_filter *filter)
1983 efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
1984 efx_filter_match_flags_t match_mcast_dst;
1986 if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
1990 (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
1991 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
1992 for (i = 0; i < filter->supported_match_num; i++) {
1993 if (match_mcast_dst == filter->supported_match[i])
2001 * Check that the list of supported filters has a filter that differs
2002 * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID
2003 * in this case that filter will be used and the flag
2004 * EFX_FILTER_MATCH_OUTER_VID is not needed.
2007 * The match flags of filter.
2009 * Specification to be supplemented.
2011 * SFC filter with list of supported filters.
2014 sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
2015 __rte_unused efx_filter_spec_t *spec,
2016 struct sfc_filter *filter)
2019 efx_filter_match_flags_t match_without_vid =
2020 match & ~EFX_FILTER_MATCH_OUTER_VID;
2022 for (i = 0; i < filter->supported_match_num; i++) {
2023 if (match_without_vid == filter->supported_match[i])
2031 * Match flags that can be automatically added to filters.
2032 * Selecting the last minimum when searching for the copy flag ensures that the
2033 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
2034 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
2035 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
2038 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
2040 .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
2042 .set_vals = sfc_flow_set_unknown_dst_flags,
2043 .spec_check = sfc_flow_check_unknown_dst_flags,
2046 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
2048 .set_vals = sfc_flow_set_ethertypes,
2052 .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
2054 .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
2055 .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
2058 .flag = EFX_FILTER_MATCH_OUTER_VID,
2060 .set_vals = sfc_flow_set_outer_vid_flag,
2061 .spec_check = sfc_flow_check_outer_vid_flag,
2065 /* Get item from array sfc_flow_copy_flags */
2066 static const struct sfc_flow_copy_flag *
2067 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
2071 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2072 if (sfc_flow_copy_flags[i].flag == flag)
2073 return &sfc_flow_copy_flags[i];
2080 * Make copies of the specifications, set match flag and values
2081 * of the field that corresponds to it.
2083 * @param spec[in, out]
2084 * SFC flow specification to update.
2086 * The match flag to add.
2088 * Perform verbose error reporting if not NULL.
2091 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
2092 efx_filter_match_flags_t flag,
2093 struct rte_flow_error *error)
2096 unsigned int new_filters_count;
2097 unsigned int filters_count_for_one_val;
2098 const struct sfc_flow_copy_flag *copy_flag;
2099 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2102 copy_flag = sfc_flow_get_copy_flag(flag);
2103 if (copy_flag == NULL) {
2104 rte_flow_error_set(error, ENOTSUP,
2105 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2106 "Unsupported spec field for copying");
2110 new_filters_count = spec_filter->count * copy_flag->vals_count;
2111 if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
2112 rte_flow_error_set(error, EINVAL,
2113 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2114 "Too much EFX specifications in the flow rule");
2118 /* Copy filters specifications */
2119 for (i = spec_filter->count; i < new_filters_count; i++) {
2120 spec_filter->filters[i] =
2121 spec_filter->filters[i - spec_filter->count];
2124 filters_count_for_one_val = spec_filter->count;
2125 spec_filter->count = new_filters_count;
2127 rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
2135 * Check that the given set of match flags missing in the original filter spec
2136 * could be covered by adding spec copies which specify the corresponding
2137 * flags and packet field values to match.
2139 * @param miss_flags[in]
2140 * Flags that are missing until the supported filter.
2142 * Specification to be supplemented.
2147 * Number of specifications after copy or 0, if the flags can not be added.
2150 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
2151 efx_filter_spec_t *spec,
2152 struct sfc_filter *filter)
2155 efx_filter_match_flags_t copy_flags = 0;
2156 efx_filter_match_flags_t flag;
2157 efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
2158 sfc_flow_spec_check *check;
2159 unsigned int multiplier = 1;
2161 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2162 flag = sfc_flow_copy_flags[i].flag;
2163 check = sfc_flow_copy_flags[i].spec_check;
2164 if ((flag & miss_flags) == flag) {
2165 if (check != NULL && (!check(match, spec, filter)))
2169 multiplier *= sfc_flow_copy_flags[i].vals_count;
2173 if (copy_flags == miss_flags)
2180 * Attempt to supplement the specification template to the minimally
2181 * supported set of match flags. To do this, it is necessary to copy
2182 * the specifications, filling them with the values of fields that
2183 * correspond to the missing flags.
2184 * The necessary and sufficient filter is built from the fewest number
2185 * of copies which could be made to cover the minimally required set
2190 * @param spec[in, out]
2191 * SFC flow specification to update.
2193 * Perform verbose error reporting if not NULL.
2196 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
2197 struct sfc_flow_spec *spec,
2198 struct rte_flow_error *error)
2200 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2201 struct sfc_filter *filter = &sa->filter;
2202 efx_filter_match_flags_t miss_flags;
2203 efx_filter_match_flags_t min_miss_flags = 0;
2204 efx_filter_match_flags_t match;
2205 unsigned int min_multiplier = UINT_MAX;
2206 unsigned int multiplier;
2210 match = spec_filter->template.efs_match_flags;
2211 for (i = 0; i < filter->supported_match_num; i++) {
2212 if ((match & filter->supported_match[i]) == match) {
2213 miss_flags = filter->supported_match[i] & (~match);
2214 multiplier = sfc_flow_check_missing_flags(miss_flags,
2215 &spec_filter->template, filter);
2216 if (multiplier > 0) {
2217 if (multiplier <= min_multiplier) {
2218 min_multiplier = multiplier;
2219 min_miss_flags = miss_flags;
2225 if (min_multiplier == UINT_MAX) {
2226 rte_flow_error_set(error, ENOTSUP,
2227 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2228 "The flow rule pattern is unsupported");
2232 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
2233 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
2235 if ((flag & min_miss_flags) == flag) {
2236 rc = sfc_flow_spec_add_match_flag(spec, flag, error);
2246 * Check that set of match flags is referred to by a filter. Filter is
2247 * described by match flags with the ability to add OUTER_VID and INNER_VID
2250 * @param match_flags[in]
2251 * Set of match flags.
2252 * @param flags_pattern[in]
2253 * Pattern of filter match flags.
2256 sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
2257 efx_filter_match_flags_t flags_pattern)
2259 if ((match_flags & flags_pattern) != flags_pattern)
2262 switch (match_flags & ~flags_pattern) {
2264 case EFX_FILTER_MATCH_OUTER_VID:
2265 case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
2273 * Check whether the spec maps to a hardware filter which is known to be
2274 * ineffective despite being valid.
2277 * SFC filter with list of supported filters.
2279 * SFC flow specification.
2282 sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
2283 struct sfc_flow_spec *spec)
2286 uint16_t ether_type;
2288 efx_filter_match_flags_t match_flags;
2289 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2291 for (i = 0; i < spec_filter->count; i++) {
2292 match_flags = spec_filter->filters[i].efs_match_flags;
2294 if (sfc_flow_is_match_with_vids(match_flags,
2295 EFX_FILTER_MATCH_ETHER_TYPE) ||
2296 sfc_flow_is_match_with_vids(match_flags,
2297 EFX_FILTER_MATCH_ETHER_TYPE |
2298 EFX_FILTER_MATCH_LOC_MAC)) {
2299 ether_type = spec_filter->filters[i].efs_ether_type;
2300 if (filter->supports_ip_proto_or_addr_filter &&
2301 (ether_type == EFX_ETHER_TYPE_IPV4 ||
2302 ether_type == EFX_ETHER_TYPE_IPV6))
2304 } else if (sfc_flow_is_match_with_vids(match_flags,
2305 EFX_FILTER_MATCH_ETHER_TYPE |
2306 EFX_FILTER_MATCH_IP_PROTO) ||
2307 sfc_flow_is_match_with_vids(match_flags,
2308 EFX_FILTER_MATCH_ETHER_TYPE |
2309 EFX_FILTER_MATCH_IP_PROTO |
2310 EFX_FILTER_MATCH_LOC_MAC)) {
2311 ip_proto = spec_filter->filters[i].efs_ip_proto;
2312 if (filter->supports_rem_or_local_port_filter &&
2313 (ip_proto == EFX_IPPROTO_TCP ||
2314 ip_proto == EFX_IPPROTO_UDP))
2323 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
2324 struct rte_flow *flow,
2325 struct rte_flow_error *error)
2327 struct sfc_flow_spec *spec = &flow->spec;
2328 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2329 efx_filter_spec_t *spec_tmpl = &spec_filter->template;
2330 efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
2333 /* Initialize the first filter spec with template */
2334 spec_filter->filters[0] = *spec_tmpl;
2335 spec_filter->count = 1;
2337 if (!sfc_filter_is_match_supported(sa, match_flags)) {
2338 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
2343 if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
2344 rte_flow_error_set(error, ENOTSUP,
2345 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2346 "The flow rule pattern is unsupported");
2354 sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
2355 const struct rte_flow_item pattern[],
2356 const struct rte_flow_action actions[],
2357 struct rte_flow *flow,
2358 struct rte_flow_error *error)
2360 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2361 struct sfc_flow_spec *spec = &flow->spec;
2362 struct sfc_flow_spec_filter *spec_filter = &spec->filter;
2363 struct sfc_flow_parse_ctx ctx;
2366 ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
2367 ctx.filter = &spec_filter->template;
2369 rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
2370 pattern, &ctx, error);
2372 goto fail_bad_value;
2374 rc = sfc_flow_parse_actions(sa, actions, flow, error);
2376 goto fail_bad_value;
2378 rc = sfc_flow_validate_match_flags(sa, flow, error);
2380 goto fail_bad_value;
2389 sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
2390 const struct rte_flow_item pattern[],
2391 const struct rte_flow_action actions[],
2392 struct rte_flow *flow,
2393 struct rte_flow_error *error)
2395 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2396 struct sfc_flow_spec *spec = &flow->spec;
2397 struct sfc_flow_spec_mae *spec_mae = &spec->mae;
2401 * If the flow is meant to be a TUNNEL rule in a FT context,
2402 * preparse its actions and save its properties in spec_mae.
2404 rc = sfc_ft_tunnel_rule_detect(sa, actions, spec_mae, error);
2408 rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
2412 if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
2414 * By design, this flow should be represented solely by the
2415 * outer rule. But the HW/FW hasn't got support for setting
2416 * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
2417 * does it support outer rule counters. As a workaround, an
2418 * action rule of lower priority is used to do the job.
2420 * So don't skip sfc_mae_rule_parse_actions() below.
2424 rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
2428 if (spec_mae->ft_ctx != NULL) {
2429 if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL)
2430 spec_mae->ft_ctx->tunnel_rule_is_set = B_TRUE;
2432 ++(spec_mae->ft_ctx->refcnt);
2438 /* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
2439 spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
2440 spec_mae->ft_ctx = NULL;
2446 sfc_flow_parse(struct rte_eth_dev *dev,
2447 const struct rte_flow_attr *attr,
2448 const struct rte_flow_item pattern[],
2449 const struct rte_flow_action actions[],
2450 struct rte_flow *flow,
2451 struct rte_flow_error *error)
2453 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2454 const struct sfc_flow_ops_by_spec *ops;
2457 rc = sfc_flow_parse_attr(sa, attr, flow, error);
2461 ops = sfc_flow_get_ops_by_spec(flow);
2462 if (ops == NULL || ops->parse == NULL) {
2463 rte_flow_error_set(error, ENOTSUP,
2464 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2465 "No backend to handle this flow");
2469 return ops->parse(dev, pattern, actions, flow, error);
2472 static struct rte_flow *
2473 sfc_flow_zmalloc(struct rte_flow_error *error)
2475 struct rte_flow *flow;
2477 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
2479 rte_flow_error_set(error, ENOMEM,
2480 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2481 "Failed to allocate memory");
2488 sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
2490 const struct sfc_flow_ops_by_spec *ops;
2492 ops = sfc_flow_get_ops_by_spec(flow);
2493 if (ops != NULL && ops->cleanup != NULL)
2494 ops->cleanup(sa, flow);
2500 sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
2501 struct rte_flow_error *error)
2503 const struct sfc_flow_ops_by_spec *ops;
2506 ops = sfc_flow_get_ops_by_spec(flow);
2507 if (ops == NULL || ops->insert == NULL) {
2508 rte_flow_error_set(error, ENOTSUP,
2509 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2510 "No backend to handle this flow");
2514 rc = ops->insert(sa, flow);
2516 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2517 NULL, "Failed to insert the flow rule");
2524 sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
2525 struct rte_flow_error *error)
2527 const struct sfc_flow_ops_by_spec *ops;
2530 ops = sfc_flow_get_ops_by_spec(flow);
2531 if (ops == NULL || ops->remove == NULL) {
2532 rte_flow_error_set(error, ENOTSUP,
2533 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2534 "No backend to handle this flow");
2538 rc = ops->remove(sa, flow);
2540 rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2541 NULL, "Failed to remove the flow rule");
2548 sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
2549 struct rte_flow_error *error)
2551 const struct sfc_flow_ops_by_spec *ops;
2554 ops = sfc_flow_get_ops_by_spec(flow);
2556 rte_flow_error_set(error, ENOTSUP,
2557 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2558 "No backend to handle this flow");
2562 if (ops->verify != NULL) {
2563 SFC_ASSERT(sfc_adapter_is_locked(sa));
2564 rc = ops->verify(sa, flow);
2568 rte_flow_error_set(error, rc,
2569 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2570 "Failed to verify flow validity with FW");
2578 sfc_flow_validate(struct rte_eth_dev *dev,
2579 const struct rte_flow_attr *attr,
2580 const struct rte_flow_item pattern[],
2581 const struct rte_flow_action actions[],
2582 struct rte_flow_error *error)
2584 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2585 struct rte_flow *flow;
2588 flow = sfc_flow_zmalloc(error);
2592 sfc_adapter_lock(sa);
2594 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2596 rc = sfc_flow_verify(sa, flow, error);
2598 sfc_flow_free(sa, flow);
2600 sfc_adapter_unlock(sa);
2605 static struct rte_flow *
2606 sfc_flow_create(struct rte_eth_dev *dev,
2607 const struct rte_flow_attr *attr,
2608 const struct rte_flow_item pattern[],
2609 const struct rte_flow_action actions[],
2610 struct rte_flow_error *error)
2612 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2613 struct rte_flow *flow = NULL;
2616 flow = sfc_flow_zmalloc(error);
2620 sfc_adapter_lock(sa);
2622 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
2624 goto fail_bad_value;
2626 TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
2628 if (sa->state == SFC_ETHDEV_STARTED) {
2629 rc = sfc_flow_insert(sa, flow, error);
2631 goto fail_flow_insert;
2634 sfc_adapter_unlock(sa);
2639 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2642 sfc_flow_free(sa, flow);
2643 sfc_adapter_unlock(sa);
2650 sfc_flow_destroy(struct rte_eth_dev *dev,
2651 struct rte_flow *flow,
2652 struct rte_flow_error *error)
2654 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2655 struct rte_flow *flow_ptr;
2658 sfc_adapter_lock(sa);
2660 TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
2661 if (flow_ptr == flow)
2665 rte_flow_error_set(error, rc,
2666 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2667 "Failed to find flow rule to destroy");
2668 goto fail_bad_value;
2671 if (sa->state == SFC_ETHDEV_STARTED)
2672 rc = sfc_flow_remove(sa, flow, error);
2674 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2675 sfc_flow_free(sa, flow);
2678 sfc_adapter_unlock(sa);
2684 sfc_flow_flush(struct rte_eth_dev *dev,
2685 struct rte_flow_error *error)
2687 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2688 struct rte_flow *flow;
2691 sfc_adapter_lock(sa);
2693 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2694 if (sa->state == SFC_ETHDEV_STARTED) {
2697 rc = sfc_flow_remove(sa, flow, error);
2702 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2703 sfc_flow_free(sa, flow);
2706 sfc_adapter_unlock(sa);
2712 sfc_flow_query(struct rte_eth_dev *dev,
2713 struct rte_flow *flow,
2714 const struct rte_flow_action *action,
2716 struct rte_flow_error *error)
2718 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2719 const struct sfc_flow_ops_by_spec *ops;
2722 sfc_adapter_lock(sa);
2724 ops = sfc_flow_get_ops_by_spec(flow);
2725 if (ops == NULL || ops->query == NULL) {
2726 ret = rte_flow_error_set(error, ENOTSUP,
2727 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2728 "No backend to handle this flow");
2729 goto fail_no_backend;
2732 if (sa->state != SFC_ETHDEV_STARTED) {
2733 ret = rte_flow_error_set(error, EINVAL,
2734 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2735 "Can't query the flow: the adapter is not started");
2736 goto fail_not_started;
2739 ret = ops->query(dev, flow, action, data, error);
2743 sfc_adapter_unlock(sa);
2750 sfc_adapter_unlock(sa);
2755 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
2756 struct rte_flow_error *error)
2758 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2761 sfc_adapter_lock(sa);
2762 if (sa->state != SFC_ETHDEV_INITIALIZED) {
2763 rte_flow_error_set(error, EBUSY,
2764 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2765 NULL, "please close the port first");
2768 sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
2770 sfc_adapter_unlock(sa);
2776 sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
2777 uint16_t *transfer_proxy_port,
2778 struct rte_flow_error *error)
2780 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2783 ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
2784 transfer_proxy_port);
2786 return rte_flow_error_set(error, ret,
2787 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2794 const struct rte_flow_ops sfc_flow_ops = {
2795 .validate = sfc_flow_validate,
2796 .create = sfc_flow_create,
2797 .destroy = sfc_flow_destroy,
2798 .flush = sfc_flow_flush,
2799 .query = sfc_flow_query,
2800 .isolate = sfc_flow_isolate,
2801 .tunnel_decap_set = sfc_ft_decap_set,
2802 .tunnel_match = sfc_ft_match,
2803 .tunnel_action_decap_release = sfc_ft_action_decap_release,
2804 .tunnel_item_release = sfc_ft_item_release,
2805 .get_restore_info = sfc_ft_get_restore_info,
2806 .pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
2810 sfc_flow_init(struct sfc_adapter *sa)
2812 SFC_ASSERT(sfc_adapter_is_locked(sa));
2814 TAILQ_INIT(&sa->flow_list);
2818 sfc_flow_fini(struct sfc_adapter *sa)
2820 struct rte_flow *flow;
2822 SFC_ASSERT(sfc_adapter_is_locked(sa));
2824 while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
2825 TAILQ_REMOVE(&sa->flow_list, flow, entries);
2826 sfc_flow_free(sa, flow);
2831 sfc_flow_stop(struct sfc_adapter *sa)
2833 struct rte_flow *flow;
2835 SFC_ASSERT(sfc_adapter_is_locked(sa));
2837 TAILQ_FOREACH(flow, &sa->flow_list, entries)
2838 sfc_flow_remove(sa, flow, NULL);
2841 * MAE counter service is not stopped on flow rule remove to avoid
2842 * extra work. Make sure that it is stopped here.
2844 sfc_mae_counter_stop(sa);
2848 sfc_flow_start(struct sfc_adapter *sa)
2850 struct rte_flow *flow;
2853 sfc_log_init(sa, "entry");
2855 SFC_ASSERT(sfc_adapter_is_locked(sa));
2857 sfc_ft_counters_reset(sa);
2859 TAILQ_FOREACH(flow, &sa->flow_list, entries) {
2860 rc = sfc_flow_insert(sa, flow, NULL);
2865 sfc_log_init(sa, "done");
2872 sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
2877 sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);