1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2017-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
10 #include <rte_tailq.h>
11 #include <rte_common.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_eth_ctrl.h>
14 #include <rte_ether.h>
16 #include <rte_flow_driver.h>
22 #include "sfc_filter.h"
27 * At the moment, flow API is implemented in such a manner that each
28 * flow rule is converted to one or more hardware filters.
29 * All elements of flow rule (attributes, pattern items, actions)
30 * correspond to one or more fields in the efx_filter_spec_s structure
31 * that is responsible for the hardware filter.
32 * If some required field is unset in the flow rule, then a handful
33 * of filter copies will be created to cover all possible values
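 * of such a field. For example (an illustrative case, realised by
 * sfc_flow_set_ethertypes() below): a specification that lacks a
 * required EtherType match is duplicated into one copy matching
 * IPv4 and one matching IPv6.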
37 enum sfc_flow_item_layers {
38 SFC_FLOW_ITEM_ANY_LAYER,
39 SFC_FLOW_ITEM_START_LAYER,
45 typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
46 efx_filter_spec_t *spec,
47 struct rte_flow_error *error);
49 struct sfc_flow_item {
50 enum rte_flow_item_type type; /* Type of item */
51 enum sfc_flow_item_layers layer; /* Layer of item */
52 enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
53 sfc_flow_item_parse *parse; /* Parsing function */
56 static sfc_flow_item_parse sfc_flow_parse_void;
57 static sfc_flow_item_parse sfc_flow_parse_eth;
58 static sfc_flow_item_parse sfc_flow_parse_vlan;
59 static sfc_flow_item_parse sfc_flow_parse_ipv4;
60 static sfc_flow_item_parse sfc_flow_parse_ipv6;
61 static sfc_flow_item_parse sfc_flow_parse_tcp;
62 static sfc_flow_item_parse sfc_flow_parse_udp;
63 static sfc_flow_item_parse sfc_flow_parse_vxlan;
64 static sfc_flow_item_parse sfc_flow_parse_geneve;
65 static sfc_flow_item_parse sfc_flow_parse_nvgre;
67 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
68 unsigned int filters_count_for_one_val,
69 struct rte_flow_error *error);
71 struct sfc_flow_copy_flag {
72 /* EFX filter specification match flag */
73 efx_filter_match_flags_t flag;
74 /* Number of values of corresponding field */
75 unsigned int vals_count;
76 /* Function to set values in specifications */
77 sfc_flow_spec_set_vals *set_vals;
80 static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
83 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
88 for (i = 0; i < size; i++)
91 return (sum == 0) ? B_TRUE : B_FALSE;
95 * Validate the item and prepare the spec and mask structures for parsing
98 sfc_flow_parse_init(const struct rte_flow_item *item,
99 const void **spec_ptr,
100 const void **mask_ptr,
101 const void *supp_mask,
102 const void *def_mask,
104 struct rte_flow_error *error)
114 rte_flow_error_set(error, EINVAL,
115 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
120 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
121 rte_flow_error_set(error, EINVAL,
122 RTE_FLOW_ERROR_TYPE_ITEM, item,
123 "Mask or last is set without spec");
128 * If "mask" is not set, the default mask is used,
129 * but if the default mask is NULL, "mask" must be set
131 if (item->mask == NULL) {
132 if (def_mask == NULL) {
133 rte_flow_error_set(error, EINVAL,
134 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
135 "Mask should be specified");
151 * If field values in "last" are either 0 or equal to the corresponding
152 * values in "spec" then they are ignored
155 !sfc_flow_is_zero(last, size) &&
156 memcmp(last, spec, size) != 0) {
157 rte_flow_error_set(error, ENOTSUP,
158 RTE_FLOW_ERROR_TYPE_ITEM, item,
159 "Ranging is not supported");
163 if (supp_mask == NULL) {
164 rte_flow_error_set(error, EINVAL,
165 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
166 "Supported mask for item should be specified");
170 /* Check that mask and spec do not ask for more match than supp_mask allows */
171 for (i = 0; i < size; i++) {
172 match = spec[i] | mask[i];
173 supp = ((const uint8_t *)supp_mask)[i];
175 if ((match | supp) != supp) {
176 rte_flow_error_set(error, ENOTSUP,
177 RTE_FLOW_ERROR_TYPE_ITEM, item,
178 "Item's field is not supported");
191 * Arbitrary (partial) masking is not supported, so masks in items should
192 * be either full or empty (zeroed) and set only for supported fields,
193 * which are specified in the supp_mask.
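 * For instance (illustrative numbers): with a supported mask byte of
 * 0x0f, an item byte with spec = 0x03 and mask = 0x0f passes the check
 * in sfc_flow_parse_init(), whereas mask = 0xff fails it, since
 * ((spec | mask) | supp_mask) != supp_mask there, i.e. the item asks
 * to match bits that the hardware filter cannot match on.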
197 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
198 __rte_unused efx_filter_spec_t *efx_spec,
199 __rte_unused struct rte_flow_error *error)
205 * Convert Ethernet item to EFX filter specification.
208 * Item specification. Outer frame specification may only comprise
209 * source/destination addresses and Ethertype field.
210 * Inner frame specification may contain destination address only.
211 * There is support for the individual/group (I/G) bit mask as well as for empty and full masks.
212 * If the mask is NULL, default mask will be used. Ranging is not supported.
213 * @param efx_spec[in, out]
214 * EFX filter specification to update.
216 * Perform verbose error reporting if not NULL.
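 * An illustrative sketch (the MAC address below is made up):
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .dst.addr_bytes = { 0x00, 0x0f, 0x53, 0x00, 0x00, 0x01 }
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
 *   };
 *
 * Such an item sets EFX_FILTER_MATCH_LOC_MAC (or its IFRM counterpart
 * inside a tunnel) and copies the address into the specification,
 * while the individual/group mask 01:00:00:00:00:00 selects the
 * unknown unicast or multicast destination filter instead.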
219 sfc_flow_parse_eth(const struct rte_flow_item *item,
220 efx_filter_spec_t *efx_spec,
221 struct rte_flow_error *error)
224 const struct rte_flow_item_eth *spec = NULL;
225 const struct rte_flow_item_eth *mask = NULL;
226 const struct rte_flow_item_eth supp_mask = {
227 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
228 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
231 const struct rte_flow_item_eth ifrm_supp_mask = {
232 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
234 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
235 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
237 const struct rte_flow_item_eth *supp_mask_p;
238 const struct rte_flow_item_eth *def_mask_p;
239 uint8_t *loc_mac = NULL;
240 boolean_t is_ifrm = (efx_spec->efs_encap_type !=
241 EFX_TUNNEL_PROTOCOL_NONE);
244 supp_mask_p = &ifrm_supp_mask;
245 def_mask_p = &ifrm_supp_mask;
246 loc_mac = efx_spec->efs_ifrm_loc_mac;
248 supp_mask_p = &supp_mask;
249 def_mask_p = &rte_flow_item_eth_mask;
250 loc_mac = efx_spec->efs_loc_mac;
253 rc = sfc_flow_parse_init(item,
254 (const void **)&spec,
255 (const void **)&mask,
256 supp_mask_p, def_mask_p,
257 sizeof(struct rte_flow_item_eth),
262 /* If "spec" is not set, could be any Ethernet */
266 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
267 efx_spec->efs_match_flags |= is_ifrm ?
268 EFX_FILTER_MATCH_IFRM_LOC_MAC :
269 EFX_FILTER_MATCH_LOC_MAC;
270 rte_memcpy(loc_mac, spec->dst.addr_bytes,
272 } else if (memcmp(mask->dst.addr_bytes, ig_mask,
273 EFX_MAC_ADDR_LEN) == 0) {
274 if (is_unicast_ether_addr(&spec->dst))
275 efx_spec->efs_match_flags |= is_ifrm ?
276 EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
277 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
279 efx_spec->efs_match_flags |= is_ifrm ?
280 EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
281 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
282 } else if (!is_zero_ether_addr(&mask->dst)) {
287 * ifrm_supp_mask ensures that the source address and
288 * ethertype masks are equal to zero in the inner frame,
289 * so these fields are filled in only for the outer frame
291 if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
292 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
293 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
295 } else if (!is_zero_ether_addr(&mask->src)) {
300 * Ether type is in big-endian byte order in item and
301 * in little-endian in efx_spec, so byte swap is used
303 if (mask->type == supp_mask.type) {
304 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
305 efx_spec->efs_ether_type = rte_bswap16(spec->type);
306 } else if (mask->type != 0) {
313 rte_flow_error_set(error, EINVAL,
314 RTE_FLOW_ERROR_TYPE_ITEM, item,
315 "Bad mask in the ETH pattern item");
320 * Convert VLAN item to EFX filter specification.
323 * Item specification. Only VID field is supported.
324 * The mask cannot be NULL. Ranging is not supported.
325 * @param efx_spec[in, out]
326 * EFX filter specification to update.
328 * Perform verbose error reporting if not NULL.
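 * An illustrative note: in a pattern such as ETH / VLAN / VLAN, the
 * first VLAN item is matched as EFX_FILTER_MATCH_OUTER_VID and the
 * second one as EFX_FILTER_MATCH_INNER_VID; a third VLAN item is
 * rejected.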
331 sfc_flow_parse_vlan(const struct rte_flow_item *item,
332 efx_filter_spec_t *efx_spec,
333 struct rte_flow_error *error)
337 const struct rte_flow_item_vlan *spec = NULL;
338 const struct rte_flow_item_vlan *mask = NULL;
339 const struct rte_flow_item_vlan supp_mask = {
340 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
343 rc = sfc_flow_parse_init(item,
344 (const void **)&spec,
345 (const void **)&mask,
348 sizeof(struct rte_flow_item_vlan),
354 * VID is in big-endian byte order in item and
355 * in little-endian in efx_spec, so byte swap is used.
356 * If two VLAN items are included, the first matches
357 * the outer tag and the next matches the inner tag.
359 if (mask->tci == supp_mask.tci) {
360 vid = rte_bswap16(spec->tci);
362 if (!(efx_spec->efs_match_flags &
363 EFX_FILTER_MATCH_OUTER_VID)) {
364 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
365 efx_spec->efs_outer_vid = vid;
366 } else if (!(efx_spec->efs_match_flags &
367 EFX_FILTER_MATCH_INNER_VID)) {
368 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
369 efx_spec->efs_inner_vid = vid;
371 rte_flow_error_set(error, EINVAL,
372 RTE_FLOW_ERROR_TYPE_ITEM, item,
373 "More than two VLAN items");
377 rte_flow_error_set(error, EINVAL,
378 RTE_FLOW_ERROR_TYPE_ITEM, item,
379 "VLAN ID in TCI match is required");
387 * Convert IPv4 item to EFX filter specification.
390 * Item specification. Only source and destination addresses and
391 * protocol fields are supported. If the mask is NULL, default
392 * mask will be used. Ranging is not supported.
393 * @param efx_spec[in, out]
394 * EFX filter specification to update.
396 * Perform verbose error reporting if not NULL.
399 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
400 efx_filter_spec_t *efx_spec,
401 struct rte_flow_error *error)
404 const struct rte_flow_item_ipv4 *spec = NULL;
405 const struct rte_flow_item_ipv4 *mask = NULL;
406 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
407 const struct rte_flow_item_ipv4 supp_mask = {
409 .src_addr = 0xffffffff,
410 .dst_addr = 0xffffffff,
411 .next_proto_id = 0xff,
415 rc = sfc_flow_parse_init(item,
416 (const void **)&spec,
417 (const void **)&mask,
419 &rte_flow_item_ipv4_mask,
420 sizeof(struct rte_flow_item_ipv4),
426 * Filtering by IPv4 source and destination addresses requires
427 * the appropriate ETHER_TYPE in hardware filters
429 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
430 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
431 efx_spec->efs_ether_type = ether_type_ipv4;
432 } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
433 rte_flow_error_set(error, EINVAL,
434 RTE_FLOW_ERROR_TYPE_ITEM, item,
435 "Ethertype in pattern with IPV4 item should be appropriate");
443 * IPv4 addresses are in big-endian byte order in item and in
446 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
447 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
448 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
449 } else if (mask->hdr.src_addr != 0) {
453 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
454 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
455 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
456 } else if (mask->hdr.dst_addr != 0) {
460 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
461 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
462 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
463 } else if (mask->hdr.next_proto_id != 0) {
470 rte_flow_error_set(error, EINVAL,
471 RTE_FLOW_ERROR_TYPE_ITEM, item,
472 "Bad mask in the IPV4 pattern item");
477 * Convert IPv6 item to EFX filter specification.
480 * Item specification. Only source and destination addresses and
481 * next header fields are supported. If the mask is NULL, default
482 * mask will be used. Ranging is not supported.
483 * @param efx_spec[in, out]
484 * EFX filter specification to update.
486 * Perform verbose error reporting if not NULL.
489 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
490 efx_filter_spec_t *efx_spec,
491 struct rte_flow_error *error)
494 const struct rte_flow_item_ipv6 *spec = NULL;
495 const struct rte_flow_item_ipv6 *mask = NULL;
496 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
497 const struct rte_flow_item_ipv6 supp_mask = {
499 .src_addr = { 0xff, 0xff, 0xff, 0xff,
500 0xff, 0xff, 0xff, 0xff,
501 0xff, 0xff, 0xff, 0xff,
502 0xff, 0xff, 0xff, 0xff },
503 .dst_addr = { 0xff, 0xff, 0xff, 0xff,
504 0xff, 0xff, 0xff, 0xff,
505 0xff, 0xff, 0xff, 0xff,
506 0xff, 0xff, 0xff, 0xff },
511 rc = sfc_flow_parse_init(item,
512 (const void **)&spec,
513 (const void **)&mask,
515 &rte_flow_item_ipv6_mask,
516 sizeof(struct rte_flow_item_ipv6),
522 * Filtering by IPv6 source and destination addresses requires
523 * the appropriate ETHER_TYPE in hardware filters
525 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
526 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
527 efx_spec->efs_ether_type = ether_type_ipv6;
528 } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
529 rte_flow_error_set(error, EINVAL,
530 RTE_FLOW_ERROR_TYPE_ITEM, item,
531 "Ethertype in pattern with IPV6 item should be appropriate");
539 * IPv6 addresses are in big-endian byte order in item and in
542 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
543 sizeof(mask->hdr.src_addr)) == 0) {
544 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
546 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
547 sizeof(spec->hdr.src_addr));
548 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
549 sizeof(efx_spec->efs_rem_host));
550 } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
551 sizeof(mask->hdr.src_addr))) {
555 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
556 sizeof(mask->hdr.dst_addr)) == 0) {
557 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
559 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
560 sizeof(spec->hdr.dst_addr));
561 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
562 sizeof(efx_spec->efs_loc_host));
563 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
564 sizeof(mask->hdr.dst_addr))) {
568 if (mask->hdr.proto == supp_mask.hdr.proto) {
569 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
570 efx_spec->efs_ip_proto = spec->hdr.proto;
571 } else if (mask->hdr.proto != 0) {
578 rte_flow_error_set(error, EINVAL,
579 RTE_FLOW_ERROR_TYPE_ITEM, item,
580 "Bad mask in the IPV6 pattern item");
585 * Convert TCP item to EFX filter specification.
588 * Item specification. Only source and destination ports fields
589 * are supported. If the mask is NULL, default mask will be used.
590 * Ranging is not supported.
591 * @param efx_spec[in, out]
592 * EFX filter specification to update.
594 * Perform verbose error reporting if not NULL.
597 sfc_flow_parse_tcp(const struct rte_flow_item *item,
598 efx_filter_spec_t *efx_spec,
599 struct rte_flow_error *error)
602 const struct rte_flow_item_tcp *spec = NULL;
603 const struct rte_flow_item_tcp *mask = NULL;
604 const struct rte_flow_item_tcp supp_mask = {
611 rc = sfc_flow_parse_init(item,
612 (const void **)&spec,
613 (const void **)&mask,
615 &rte_flow_item_tcp_mask,
616 sizeof(struct rte_flow_item_tcp),
622 * Filtering by TCP source and destination ports requires
623 * the appropriate IP_PROTO in hardware filters
625 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
626 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
627 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
628 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
629 rte_flow_error_set(error, EINVAL,
630 RTE_FLOW_ERROR_TYPE_ITEM, item,
631 "IP proto in pattern with TCP item should be appropriate");
639 * Source and destination ports are in big-endian byte order in item and
640 * in little-endian in efx_spec, so byte swap is used
642 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
643 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
644 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
645 } else if (mask->hdr.src_port != 0) {
649 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
650 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
651 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
652 } else if (mask->hdr.dst_port != 0) {
659 rte_flow_error_set(error, EINVAL,
660 RTE_FLOW_ERROR_TYPE_ITEM, item,
661 "Bad mask in the TCP pattern item");
666 * Convert UDP item to EFX filter specification.
669 * Item specification. Only source and destination ports fields
670 * are supported. If the mask is NULL, default mask will be used.
671 * Ranging is not supported.
672 * @param efx_spec[in, out]
673 * EFX filter specification to update.
675 * Perform verbose error reporting if not NULL.
678 sfc_flow_parse_udp(const struct rte_flow_item *item,
679 efx_filter_spec_t *efx_spec,
680 struct rte_flow_error *error)
683 const struct rte_flow_item_udp *spec = NULL;
684 const struct rte_flow_item_udp *mask = NULL;
685 const struct rte_flow_item_udp supp_mask = {
692 rc = sfc_flow_parse_init(item,
693 (const void **)&spec,
694 (const void **)&mask,
696 &rte_flow_item_udp_mask,
697 sizeof(struct rte_flow_item_udp),
703 * Filtering by UDP source and destination ports requires
704 * the appropriate IP_PROTO in hardware filters
706 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
707 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
708 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
709 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
710 rte_flow_error_set(error, EINVAL,
711 RTE_FLOW_ERROR_TYPE_ITEM, item,
712 "IP proto in pattern with UDP item should be appropriate");
720 * Source and destination ports are in big-endian byte order in item and
721 * in little-endian in efx_spec, so byte swap is used
723 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
724 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
725 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
726 } else if (mask->hdr.src_port != 0) {
730 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
731 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
732 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
733 } else if (mask->hdr.dst_port != 0) {
740 rte_flow_error_set(error, EINVAL,
741 RTE_FLOW_ERROR_TYPE_ITEM, item,
742 "Bad mask in the UDP pattern item");
747 * Filters for encapsulated packets match based on the EtherType and IP
748 * protocol in the outer frame.
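 * For example (illustrative): VXLAN and GENEVE items require the outer
 * IP protocol to be UDP (EFX_IPPROTO_UDP), while NVGRE requires GRE
 * (EFX_IPPROTO_GRE); a pattern that pins the outer IP protocol to any
 * other value is rejected by this function.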
751 sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
752 efx_filter_spec_t *efx_spec,
754 struct rte_flow_error *error)
756 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
757 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
758 efx_spec->efs_ip_proto = ip_proto;
759 } else if (efx_spec->efs_ip_proto != ip_proto) {
761 case EFX_IPPROTO_UDP:
762 rte_flow_error_set(error, EINVAL,
763 RTE_FLOW_ERROR_TYPE_ITEM, item,
764 "Outer IP header protocol must be UDP "
765 "in VxLAN/GENEVE pattern");
768 case EFX_IPPROTO_GRE:
769 rte_flow_error_set(error, EINVAL,
770 RTE_FLOW_ERROR_TYPE_ITEM, item,
771 "Outer IP header protocol must be GRE "
776 rte_flow_error_set(error, EINVAL,
777 RTE_FLOW_ERROR_TYPE_ITEM, item,
778 "Only VxLAN/GENEVE/NVGRE tunneling patterns "
784 if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
785 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
786 efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
787 rte_flow_error_set(error, EINVAL,
788 RTE_FLOW_ERROR_TYPE_ITEM, item,
789 "Outer frame EtherType in pattern with tunneling "
790 "must be IPv4 or IPv6");
798 sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
799 const uint8_t *vni_or_vsid_val,
800 const uint8_t *vni_or_vsid_mask,
801 const struct rte_flow_item *item,
802 struct rte_flow_error *error)
804 const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
808 if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
809 EFX_VNI_OR_VSID_LEN) == 0) {
810 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
811 rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
812 EFX_VNI_OR_VSID_LEN);
813 } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
814 rte_flow_error_set(error, EINVAL,
815 RTE_FLOW_ERROR_TYPE_ITEM, item,
816 "Unsupported VNI/VSID mask");
824 * Convert VXLAN item to EFX filter specification.
827 * Item specification. Only VXLAN network identifier field is supported.
828 * If the mask is NULL, default mask will be used.
829 * Ranging is not supported.
830 * @param efx_spec[in, out]
831 * EFX filter specification to update.
833 * Perform verbose error reporting if not NULL.
836 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
837 efx_filter_spec_t *efx_spec,
838 struct rte_flow_error *error)
841 const struct rte_flow_item_vxlan *spec = NULL;
842 const struct rte_flow_item_vxlan *mask = NULL;
843 const struct rte_flow_item_vxlan supp_mask = {
844 .vni = { 0xff, 0xff, 0xff }
847 rc = sfc_flow_parse_init(item,
848 (const void **)&spec,
849 (const void **)&mask,
851 &rte_flow_item_vxlan_mask,
852 sizeof(struct rte_flow_item_vxlan),
857 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
858 EFX_IPPROTO_UDP, error);
862 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
863 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
868 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
869 mask->vni, item, error);
875 * Convert GENEVE item to EFX filter specification.
878 * Item specification. Only Virtual Network Identifier and protocol type
879 * fields are supported. However, the protocol type may only be Ethernet (0x6558).
880 * If the mask is NULL, default mask will be used.
881 * Ranging is not supported.
882 * @param efx_spec[in, out]
883 * EFX filter specification to update.
885 * Perform verbose error reporting if not NULL.
888 sfc_flow_parse_geneve(const struct rte_flow_item *item,
889 efx_filter_spec_t *efx_spec,
890 struct rte_flow_error *error)
893 const struct rte_flow_item_geneve *spec = NULL;
894 const struct rte_flow_item_geneve *mask = NULL;
895 const struct rte_flow_item_geneve supp_mask = {
896 .protocol = RTE_BE16(0xffff),
897 .vni = { 0xff, 0xff, 0xff }
900 rc = sfc_flow_parse_init(item,
901 (const void **)&spec,
902 (const void **)&mask,
904 &rte_flow_item_geneve_mask,
905 sizeof(struct rte_flow_item_geneve),
910 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
911 EFX_IPPROTO_UDP, error);
915 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
916 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
921 if (mask->protocol == supp_mask.protocol) {
922 if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
923 rte_flow_error_set(error, EINVAL,
924 RTE_FLOW_ERROR_TYPE_ITEM, item,
925 "GENEVE encap. protocol must be Ethernet "
926 "(0x6558) in the GENEVE pattern item");
929 } else if (mask->protocol != 0) {
930 rte_flow_error_set(error, EINVAL,
931 RTE_FLOW_ERROR_TYPE_ITEM, item,
932 "Unsupported mask for GENEVE encap. protocol");
936 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
937 mask->vni, item, error);
943 * Convert NVGRE item to EFX filter specification.
946 * Item specification. Only virtual subnet ID field is supported.
947 * If the mask is NULL, default mask will be used.
948 * Ranging is not supported.
949 * @param efx_spec[in, out]
950 * EFX filter specification to update.
952 * Perform verbose error reporting if not NULL.
955 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
956 efx_filter_spec_t *efx_spec,
957 struct rte_flow_error *error)
960 const struct rte_flow_item_nvgre *spec = NULL;
961 const struct rte_flow_item_nvgre *mask = NULL;
962 const struct rte_flow_item_nvgre supp_mask = {
963 .tni = { 0xff, 0xff, 0xff }
966 rc = sfc_flow_parse_init(item,
967 (const void **)&spec,
968 (const void **)&mask,
970 &rte_flow_item_nvgre_mask,
971 sizeof(struct rte_flow_item_nvgre),
976 rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
977 EFX_IPPROTO_GRE, error);
981 efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
982 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
987 rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
988 mask->tni, item, error);
993 static const struct sfc_flow_item sfc_flow_items[] = {
995 .type = RTE_FLOW_ITEM_TYPE_VOID,
996 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
997 .layer = SFC_FLOW_ITEM_ANY_LAYER,
998 .parse = sfc_flow_parse_void,
1001 .type = RTE_FLOW_ITEM_TYPE_ETH,
1002 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
1003 .layer = SFC_FLOW_ITEM_L2,
1004 .parse = sfc_flow_parse_eth,
1007 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1008 .prev_layer = SFC_FLOW_ITEM_L2,
1009 .layer = SFC_FLOW_ITEM_L2,
1010 .parse = sfc_flow_parse_vlan,
1013 .type = RTE_FLOW_ITEM_TYPE_IPV4,
1014 .prev_layer = SFC_FLOW_ITEM_L2,
1015 .layer = SFC_FLOW_ITEM_L3,
1016 .parse = sfc_flow_parse_ipv4,
1019 .type = RTE_FLOW_ITEM_TYPE_IPV6,
1020 .prev_layer = SFC_FLOW_ITEM_L2,
1021 .layer = SFC_FLOW_ITEM_L3,
1022 .parse = sfc_flow_parse_ipv6,
1025 .type = RTE_FLOW_ITEM_TYPE_TCP,
1026 .prev_layer = SFC_FLOW_ITEM_L3,
1027 .layer = SFC_FLOW_ITEM_L4,
1028 .parse = sfc_flow_parse_tcp,
1031 .type = RTE_FLOW_ITEM_TYPE_UDP,
1032 .prev_layer = SFC_FLOW_ITEM_L3,
1033 .layer = SFC_FLOW_ITEM_L4,
1034 .parse = sfc_flow_parse_udp,
1037 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
1038 .prev_layer = SFC_FLOW_ITEM_L4,
1039 .layer = SFC_FLOW_ITEM_START_LAYER,
1040 .parse = sfc_flow_parse_vxlan,
1043 .type = RTE_FLOW_ITEM_TYPE_GENEVE,
1044 .prev_layer = SFC_FLOW_ITEM_L4,
1045 .layer = SFC_FLOW_ITEM_START_LAYER,
1046 .parse = sfc_flow_parse_geneve,
1049 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
1050 .prev_layer = SFC_FLOW_ITEM_L3,
1051 .layer = SFC_FLOW_ITEM_START_LAYER,
1052 .parse = sfc_flow_parse_nvgre,
1057 * Protocol-independent flow API support
1060 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
1061 struct rte_flow *flow,
1062 struct rte_flow_error *error)
1065 rte_flow_error_set(error, EINVAL,
1066 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1070 if (attr->group != 0) {
1071 rte_flow_error_set(error, ENOTSUP,
1072 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
1073 "Groups are not supported");
1076 if (attr->priority != 0) {
1077 rte_flow_error_set(error, ENOTSUP,
1078 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
1079 "Priorities are not supported");
1082 if (attr->egress != 0) {
1083 rte_flow_error_set(error, ENOTSUP,
1084 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
1085 "Egress is not supported");
1088 if (attr->ingress == 0) {
1089 rte_flow_error_set(error, ENOTSUP,
1090 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
1091 "Only ingress is supported");
1095 flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
1096 flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1101 /* Get item from array sfc_flow_items */
1102 static const struct sfc_flow_item *
1103 sfc_flow_get_item(enum rte_flow_item_type type)
1107 for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
1108 if (sfc_flow_items[i].type == type)
1109 return &sfc_flow_items[i];
1115 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
1116 struct rte_flow *flow,
1117 struct rte_flow_error *error)
1120 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
1121 boolean_t is_ifrm = B_FALSE;
1122 const struct sfc_flow_item *item;
1124 if (pattern == NULL) {
1125 rte_flow_error_set(error, EINVAL,
1126 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
1131 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
1132 item = sfc_flow_get_item(pattern->type);
1134 rte_flow_error_set(error, ENOTSUP,
1135 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1136 "Unsupported pattern item");
1141 * Omitting one or several protocol layers at the beginning
1142 * of the pattern is supported
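 * (e.g. a pattern may start directly with an IPV4 item). An
 * illustrative full sequence is ETH / IPV4 / UDP / VXLAN / ETH,
 * where the trailing ETH item is parsed against the inner frame.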
1144 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1145 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
1146 item->prev_layer != prev_layer) {
1147 rte_flow_error_set(error, ENOTSUP,
1148 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
1149 "Unexpected sequence of pattern items");
1154 * Allow only VOID and ETH pattern items in the inner frame.
1155 * Also check that there is only one tunneling protocol.
1157 switch (item->type) {
1158 case RTE_FLOW_ITEM_TYPE_VOID:
1159 case RTE_FLOW_ITEM_TYPE_ETH:
1162 case RTE_FLOW_ITEM_TYPE_VXLAN:
1163 case RTE_FLOW_ITEM_TYPE_GENEVE:
1164 case RTE_FLOW_ITEM_TYPE_NVGRE:
1166 rte_flow_error_set(error, EINVAL,
1167 RTE_FLOW_ERROR_TYPE_ITEM,
1169 "More than one tunneling protocol");
1177 rte_flow_error_set(error, EINVAL,
1178 RTE_FLOW_ERROR_TYPE_ITEM,
1180 "There is an unsupported pattern item "
1181 "in the inner frame");
1187 rc = item->parse(pattern, &flow->spec.template, error);
1191 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
1192 prev_layer = item->layer;
1199 sfc_flow_parse_queue(struct sfc_adapter *sa,
1200 const struct rte_flow_action_queue *queue,
1201 struct rte_flow *flow)
1203 struct sfc_rxq *rxq;
1205 if (queue->index >= sa->rxq_count)
1208 rxq = sa->rxq_info[queue->index].rxq;
1209 flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
1214 #if EFSYS_OPT_RX_SCALE
1216 sfc_flow_parse_rss(struct sfc_adapter *sa,
1217 const struct rte_flow_action_rss *rss,
1218 struct rte_flow *flow)
1220 unsigned int rxq_sw_index;
1221 struct sfc_rxq *rxq;
1222 unsigned int rxq_hw_index_min;
1223 unsigned int rxq_hw_index_max;
1224 const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
1226 uint8_t *rss_key = NULL;
1227 struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
1233 rxq_sw_index = sa->rxq_count - 1;
1234 rxq = sa->rxq_info[rxq_sw_index].rxq;
1235 rxq_hw_index_min = rxq->hw_index;
1236 rxq_hw_index_max = 0;
1238 for (i = 0; i < rss->num; ++i) {
1239 rxq_sw_index = rss->queue[i];
1241 if (rxq_sw_index >= sa->rxq_count)
1244 rxq = sa->rxq_info[rxq_sw_index].rxq;
1246 if (rxq->hw_index < rxq_hw_index_min)
1247 rxq_hw_index_min = rxq->hw_index;
1249 if (rxq->hw_index > rxq_hw_index_max)
1250 rxq_hw_index_max = rxq->hw_index;
1253 rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
1254 if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
1257 if (rss_conf != NULL) {
1258 if (rss_conf->rss_key_len != sizeof(sa->rss_key))
1261 rss_key = rss_conf->rss_key;
1263 rss_key = sa->rss_key;
1268 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
1269 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
1270 sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
1271 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
1273 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
1274 unsigned int rxq_sw_index = rss->queue[i % rss->num];
1275 struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
1277 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
1282 #endif /* EFSYS_OPT_RX_SCALE */
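/*
 * A note on sfc_flow_parse_rss() above, with illustrative queue
 * numbers: if the RSS action references queues whose hardware indices
 * are 4..7, then rxq_hw_index_min is 4, the filters' base DMA queue is
 * set to 4 by sfc_flow_filter_insert(), and the indirection table
 * holds the relative offsets 0..3.
 */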
1285 sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
1286 unsigned int filters_count)
1291 for (i = 0; i < filters_count; i++) {
1294 rc = efx_filter_remove(sa->nic, &spec->filters[i]);
1295 if (ret == 0 && rc != 0) {
1296 sfc_err(sa, "failed to remove filter specification "
1306 sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1311 for (i = 0; i < spec->count; i++) {
1312 rc = efx_filter_insert(sa->nic, &spec->filters[i]);
1314 sfc_flow_spec_flush(sa, spec, i);
1323 sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
1325 return sfc_flow_spec_flush(sa, spec, spec->count);
1329 sfc_flow_filter_insert(struct sfc_adapter *sa,
1330 struct rte_flow *flow)
1332 #if EFSYS_OPT_RX_SCALE
1333 struct sfc_flow_rss *rss = &flow->rss_conf;
1334 uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
1339 unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
1340 rss->rxq_hw_index_min + 1,
1343 rc = efx_rx_scale_context_alloc(sa->nic,
1344 EFX_RX_SCALE_EXCLUSIVE,
1348 goto fail_scale_context_alloc;
1350 rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
1351 EFX_RX_HASHALG_TOEPLITZ,
1352 rss->rss_hash_types, B_TRUE);
1354 goto fail_scale_mode_set;
1356 rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
1358 sizeof(sa->rss_key));
1360 goto fail_scale_key_set;
1363 * At this point, fully elaborated filter specifications
1364 * have been produced from the template. To make sure that
1365 * RSS behaviour is consistent between them, set the same
1366 * RSS context value everywhere.
1368 for (i = 0; i < flow->spec.count; i++) {
1369 efx_filter_spec_t *spec = &flow->spec.filters[i];
1371 spec->efs_rss_context = efs_rss_context;
1372 spec->efs_dmaq_id = rss->rxq_hw_index_min;
1373 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
1377 rc = sfc_flow_spec_insert(sa, &flow->spec);
1379 goto fail_filter_insert;
1383 * The scale table is set after filter insertion because
1384 * the table entries are relative to the base RxQ ID,
1385 * and the latter is submitted to the HW by means of
1386 * inserting a filter. By the time of the request,
1387 * the HW therefore knows all the information needed to
1388 * verify the table entries, and the operation will succeed
1390 rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
1391 rss->rss_tbl, RTE_DIM(rss->rss_tbl));
1393 goto fail_scale_tbl_set;
1399 sfc_flow_spec_remove(sa, &flow->spec);
1403 fail_scale_mode_set:
1404 if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
1405 efx_rx_scale_context_free(sa->nic, efs_rss_context);
1407 fail_scale_context_alloc:
1409 #else /* !EFSYS_OPT_RX_SCALE */
1410 return sfc_flow_spec_insert(sa, &flow->spec);
1411 #endif /* EFSYS_OPT_RX_SCALE */
1415 sfc_flow_filter_remove(struct sfc_adapter *sa,
1416 struct rte_flow *flow)
1420 rc = sfc_flow_spec_remove(sa, &flow->spec);
1424 #if EFSYS_OPT_RX_SCALE
1427 * All specifications for a given flow rule have the same RSS
1428 * context, so the RSS context value is taken from the first
1429 * filter specification
1431 efx_filter_spec_t *spec = &flow->spec.filters[0];
1433 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1435 #endif /* EFSYS_OPT_RX_SCALE */
1441 sfc_flow_parse_actions(struct sfc_adapter *sa,
1442 const struct rte_flow_action actions[],
1443 struct rte_flow *flow,
1444 struct rte_flow_error *error)
1447 boolean_t is_specified = B_FALSE;
1449 if (actions == NULL) {
1450 rte_flow_error_set(error, EINVAL,
1451 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1456 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1457 switch (actions->type) {
1458 case RTE_FLOW_ACTION_TYPE_VOID:
1461 case RTE_FLOW_ACTION_TYPE_QUEUE:
1462 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1464 rte_flow_error_set(error, EINVAL,
1465 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1466 "Bad QUEUE action");
1470 is_specified = B_TRUE;
1473 #if EFSYS_OPT_RX_SCALE
1474 case RTE_FLOW_ACTION_TYPE_RSS:
1475 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1477 rte_flow_error_set(error, rc,
1478 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1483 is_specified = B_TRUE;
1485 #endif /* EFSYS_OPT_RX_SCALE */
1488 rte_flow_error_set(error, ENOTSUP,
1489 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1490 "Action is not supported");
1495 if (!is_specified) {
1496 rte_flow_error_set(error, EINVAL,
1497 RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
1498 "Action is unspecified");
1506 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag, and set the
1507 * EFX_ETHER_TYPE_IPV4 and EFX_ETHER_TYPE_IPV6 values of the
1508 * corresponding field in the specification copies.
1510 * @param spec[in, out]
1511 * SFC flow specification to update.
1512 * @param filters_count_for_one_val[in]
1513 * How many specifications should have the same EtherType value; this is
1514 * equal to the number of specifications before copying.
1516 * Perform verbose error reporting if not NULL.
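 * An illustrative example: with one original specification
 * (filters_count_for_one_val == 1), spec->count must be 2 on entry;
 * filters[0] is then set to match EFX_ETHER_TYPE_IPV4 and filters[1]
 * to match EFX_ETHER_TYPE_IPV6.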
1519 sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
1520 unsigned int filters_count_for_one_val,
1521 struct rte_flow_error *error)
1524 static const uint16_t vals[] = {
1525 EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
1528 if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
1529 rte_flow_error_set(error, EINVAL,
1530 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1531 "Number of specifications is incorrect "
1532 "while copying by Ethertype");
1536 for (i = 0; i < spec->count; i++) {
1537 spec->filters[i].efs_match_flags |=
1538 EFX_FILTER_MATCH_ETHER_TYPE;
1541 * The check above ensures that
1542 * filters_count_for_one_val is not 0
1544 spec->filters[i].efs_ether_type =
1545 vals[i / filters_count_for_one_val];
1551 /* Match flags that can be automatically added to filters */
1552 static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
1554 .flag = EFX_FILTER_MATCH_ETHER_TYPE,
1556 .set_vals = sfc_flow_set_ethertypes,
1560 /* Get item from array sfc_flow_copy_flags */
1561 static const struct sfc_flow_copy_flag *
1562 sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
1566 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1567 if (sfc_flow_copy_flags[i].flag == flag)
1568 return &sfc_flow_copy_flags[i];
1575 * Make copies of the specifications, and set the match flag and the
1576 * values of the field that corresponds to it.
1578 * @param spec[in, out]
1579 * SFC flow specification to update.
1581 * The match flag to add.
1583 * Perform verbose error reporting if not NULL.
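 * A worked example (illustrative): adding EFX_FILTER_MATCH_ETHER_TYPE,
 * which has two possible values, to a spec holding two filters yields
 * four filters; filters[2] and filters[3] become copies of filters[0]
 * and filters[1], after which set_vals() assigns IPv4 to the first
 * pair and IPv6 to the second.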
1586 sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
1587 efx_filter_match_flags_t flag,
1588 struct rte_flow_error *error)
1591 unsigned int new_filters_count;
1592 unsigned int filters_count_for_one_val;
1593 const struct sfc_flow_copy_flag *copy_flag;
1596 copy_flag = sfc_flow_get_copy_flag(flag);
1597 if (copy_flag == NULL) {
1598 rte_flow_error_set(error, ENOTSUP,
1599 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1600 "Unsupported spec field for copying");
1604 new_filters_count = spec->count * copy_flag->vals_count;
1605 if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
1606 rte_flow_error_set(error, EINVAL,
1607 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1608 "Too much EFX specifications in the flow rule");
1612 /* Copy filter specifications */
1613 for (i = spec->count; i < new_filters_count; i++)
1614 spec->filters[i] = spec->filters[i - spec->count];
1616 filters_count_for_one_val = spec->count;
1617 spec->count = new_filters_count;
1619 rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
1627 * Check that the given set of match flags missing in the original filter spec
1628 * could be covered by adding spec copies which specify the corresponding
1629 * flags and packet field values to match.
1631 * @param miss_flags[in]
1632 * Match flags that are missing from the spec but required by a supported filter.
1635 * Number of specifications after copying, or 0 if the flags cannot be added.
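 * For example (illustrative): if the only missing flag is
 * EFX_FILTER_MATCH_ETHER_TYPE, the returned multiplier is 2, since
 * that flag is covered by copies for two EtherType values (IPv4 and
 * IPv6).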
1638 sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags)
1641 efx_filter_match_flags_t copy_flags = 0;
1642 efx_filter_match_flags_t flag;
1643 unsigned int multiplier = 1;
1645 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1646 flag = sfc_flow_copy_flags[i].flag;
1647 if ((flag & miss_flags) == flag) {
1649 multiplier *= sfc_flow_copy_flags[i].vals_count;
1653 if (copy_flags == miss_flags)
1660 * Attempt to supplement the specification template up to the minimally
1661 * supported set of match flags. To do this, it is necessary to copy
1662 * the specifications, filling them with the values of fields that
1663 * correspond to the missing flags.
1664 * The necessary and sufficient filter is built from the fewest number
1665 * of copies which could be made to cover the minimally required set
1670 * @param spec[in, out]
1671 * SFC flow specification to update.
1673 * Perform verbose error reporting if not NULL.
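 * A hedged example (hypothetical hardware capabilities): if the
 * template matches only on a local MAC while the hardware supports
 * only LOC_MAC | ETHER_TYPE filters, the missing ETHER_TYPE flag gives
 * a multiplier of 2, and the template is expanded into an IPv4 copy
 * and an IPv6 copy.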
1676 sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
1677 struct sfc_flow_spec *spec,
1678 struct rte_flow_error *error)
1680 struct sfc_filter *filter = &sa->filter;
1681 efx_filter_match_flags_t miss_flags;
1682 efx_filter_match_flags_t min_miss_flags = 0;
1683 efx_filter_match_flags_t match;
1684 unsigned int min_multiplier = UINT_MAX;
1685 unsigned int multiplier;
1689 match = spec->template.efs_match_flags;
1690 for (i = 0; i < filter->supported_match_num; i++) {
1691 if ((match & filter->supported_match[i]) == match) {
1692 miss_flags = filter->supported_match[i] & (~match);
1693 multiplier = sfc_flow_check_missing_flags(miss_flags);
1694 if (multiplier > 0) {
1695 if (multiplier <= min_multiplier) {
1696 min_multiplier = multiplier;
1697 min_miss_flags = miss_flags;
1703 if (min_multiplier == UINT_MAX) {
1704 rte_flow_error_set(error, ENOTSUP,
1705 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1706 "Flow rule pattern is not supported");
1710 for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
1711 efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
1713 if ((flag & min_miss_flags) == flag) {
1714 rc = sfc_flow_spec_add_match_flag(spec, flag, error);
1724 sfc_flow_validate_match_flags(struct sfc_adapter *sa,
1725 struct rte_flow *flow,
1726 struct rte_flow_error *error)
1728 efx_filter_spec_t *spec_tmpl = &flow->spec.template;
1729 efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
1732 /* Initialize the first filter spec with template */
1733 flow->spec.filters[0] = *spec_tmpl;
1734 flow->spec.count = 1;
1736 if (!sfc_filter_is_match_supported(sa, match_flags)) {
1737 rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
1746 sfc_flow_parse(struct rte_eth_dev *dev,
1747 const struct rte_flow_attr *attr,
1748 const struct rte_flow_item pattern[],
1749 const struct rte_flow_action actions[],
1750 struct rte_flow *flow,
1751 struct rte_flow_error *error)
1753 struct sfc_adapter *sa = dev->data->dev_private;
1756 rc = sfc_flow_parse_attr(attr, flow, error);
1758 goto fail_bad_value;
1760 rc = sfc_flow_parse_pattern(pattern, flow, error);
1762 goto fail_bad_value;
1764 rc = sfc_flow_parse_actions(sa, actions, flow, error);
1766 goto fail_bad_value;
1768 rc = sfc_flow_validate_match_flags(sa, flow, error);
1770 goto fail_bad_value;
1779 sfc_flow_validate(struct rte_eth_dev *dev,
1780 const struct rte_flow_attr *attr,
1781 const struct rte_flow_item pattern[],
1782 const struct rte_flow_action actions[],
1783 struct rte_flow_error *error)
1785 struct rte_flow flow;
1787 memset(&flow, 0, sizeof(flow));
1789 return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
1792 static struct rte_flow *
1793 sfc_flow_create(struct rte_eth_dev *dev,
1794 const struct rte_flow_attr *attr,
1795 const struct rte_flow_item pattern[],
1796 const struct rte_flow_action actions[],
1797 struct rte_flow_error *error)
1799 struct sfc_adapter *sa = dev->data->dev_private;
1800 struct rte_flow *flow = NULL;
1803 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
1805 rte_flow_error_set(error, ENOMEM,
1806 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1807 "Failed to allocate memory");
1811 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
1813 goto fail_bad_value;
1815 TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
1817 sfc_adapter_lock(sa);
1819 if (sa->state == SFC_ADAPTER_STARTED) {
1820 rc = sfc_flow_filter_insert(sa, flow);
1822 rte_flow_error_set(error, rc,
1823 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1824 "Failed to insert filter");
1825 goto fail_filter_insert;
1829 sfc_adapter_unlock(sa);
1834 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1838 sfc_adapter_unlock(sa);
1845 sfc_flow_remove(struct sfc_adapter *sa,
1846 struct rte_flow *flow,
1847 struct rte_flow_error *error)
1851 SFC_ASSERT(sfc_adapter_is_locked(sa));
1853 if (sa->state == SFC_ADAPTER_STARTED) {
1854 rc = sfc_flow_filter_remove(sa, flow);
1856 rte_flow_error_set(error, rc,
1857 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1858 "Failed to destroy flow rule");
1861 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1868 sfc_flow_destroy(struct rte_eth_dev *dev,
1869 struct rte_flow *flow,
1870 struct rte_flow_error *error)
1872 struct sfc_adapter *sa = dev->data->dev_private;
1873 struct rte_flow *flow_ptr;
1876 sfc_adapter_lock(sa);
1878 TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
1879 if (flow_ptr == flow)
1883 rte_flow_error_set(error, rc,
1884 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1885 "Failed to find flow rule to destroy");
1886 goto fail_bad_value;
1889 rc = sfc_flow_remove(sa, flow, error);
1892 sfc_adapter_unlock(sa);
1898 sfc_flow_flush(struct rte_eth_dev *dev,
1899 struct rte_flow_error *error)
1901 struct sfc_adapter *sa = dev->data->dev_private;
1902 struct rte_flow *flow;
1906 sfc_adapter_lock(sa);
1908 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
1909 rc = sfc_flow_remove(sa, flow, error);
1914 sfc_adapter_unlock(sa);
1920 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
1921 struct rte_flow_error *error)
1923 struct sfc_adapter *sa = dev->data->dev_private;
1924 struct sfc_port *port = &sa->port;
1927 sfc_adapter_lock(sa);
1928 if (sa->state != SFC_ADAPTER_INITIALIZED) {
1929 rte_flow_error_set(error, EBUSY,
1930 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1931 NULL, "please close the port first");
1934 port->isolated = (enable) ? B_TRUE : B_FALSE;
1936 sfc_adapter_unlock(sa);
1941 const struct rte_flow_ops sfc_flow_ops = {
1942 .validate = sfc_flow_validate,
1943 .create = sfc_flow_create,
1944 .destroy = sfc_flow_destroy,
1945 .flush = sfc_flow_flush,
1947 .isolate = sfc_flow_isolate,
1951 sfc_flow_init(struct sfc_adapter *sa)
1953 SFC_ASSERT(sfc_adapter_is_locked(sa));
1955 TAILQ_INIT(&sa->filter.flow_list);
1959 sfc_flow_fini(struct sfc_adapter *sa)
1961 struct rte_flow *flow;
1963 SFC_ASSERT(sfc_adapter_is_locked(sa));
1965 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
1966 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1972 sfc_flow_stop(struct sfc_adapter *sa)
1974 struct rte_flow *flow;
1976 SFC_ASSERT(sfc_adapter_is_locked(sa));
1978 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
1979 sfc_flow_filter_remove(sa, flow);
1983 sfc_flow_start(struct sfc_adapter *sa)
1985 struct rte_flow *flow;
1988 sfc_log_init(sa, "entry");
1990 SFC_ASSERT(sfc_adapter_is_locked(sa));
1992 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
1993 rc = sfc_flow_filter_insert(sa, flow);
1998 sfc_log_init(sa, "done");