/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
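
/*
 * Illustrative sketch, not part of the driver: a flow rule an application
 * might pass to rte_flow_create() on a port driven by this PMD. The ETH,
 * IPV4 and TCP items below are parsed by the sfc_flow_parse_*() functions
 * into a single efx_filter_spec_t. The queue index (0) and TCP port (80)
 * are hypothetical values.
 */
#if 0
static struct rte_flow *
sfc_flow_example_create(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	/* Match TCP destination port 80; a full mask means exact match */
	const struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(80) },
	};
	const struct rte_flow_item_tcp tcp_mask = {
		.hdr = { .dst_port = 0xffff },
	};
	/* ETH and IPV4 items without spec match any MAC/IPv4 addresses */
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{
			.type = RTE_FLOW_ITEM_TYPE_TCP,
			.spec = &tcp_spec,
			.mask = &tcp_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(dev->data->port_id, &attr, pattern,
			       actions, error);
}
#endif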

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate item and prepare structures spec and mask for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * little-endian in efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * little-endian in efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item
	 * and in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern != NULL &&
	       pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		rc = item->parse(pattern, &flow->spec, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	return 0;
}
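
/*
 * Illustrative worked examples, not part of the driver, of how the layer
 * checks above apply:
 *
 *   ETH / IPV4 / TCP / END   -> accepted (L2 -> L3 -> L4)
 *   IPV4 / UDP / END         -> accepted, since prev_layer starts as
 *       ANY_LAYER, leading protocol layers may be omitted
 *   ETH / TCP / END          -> rejected ("Unexpected sequence of pattern
 *       items"): TCP requires the previous item to be an L3 one
 *   ETH / VLAN / VLAN / IPV4 -> accepted; VLAN stays at L2, so up to two
 *       VLAN items may follow ETH before an L3 item
 */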

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	memset(&flow->spec, 0, sizeof(flow->spec));

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

fail_bad_value:
	return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					   "Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}
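
/*
 * Illustrative sketch, not part of the driver: a typical rule lifecycle
 * from an application's point of view, using the generic rte_flow API
 * calls that land in the callbacks in this file. The port_id, attr,
 * pattern and actions variables are hypothetical.
 */
#if 0
	struct rte_flow_error error;
	struct rte_flow *handle;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) == 0) {
		handle = rte_flow_create(port_id, &attr, pattern, actions,
					 &error);
		/* ... traffic is steered while the rule exists ... */
		rte_flow_destroy(port_id, handle, &error);
	}
#endif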

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_remove(sa->nic, &flow->spec);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
};
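
/*
 * Illustrative sketch, not part of this file: the ops table above is
 * exposed to applications through the ethdev filter control callback for
 * RTE_ETH_FILTER_GENERIC, roughly as sfc_ethdev.c does. The function name
 * below is an assumption used only for illustration.
 */
#if 0
static int
example_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg)
{
	RTE_SET_USED(dev);

	if (filter_type == RTE_ETH_FILTER_GENERIC) {
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		/* Hand the generic flow API ops table to rte_flow */
		*(const void **)arg = &sfc_flow_ops;
		return 0;
	}

	return -ENOTSUP;
}
#endif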

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		efx_filter_remove(sa->nic, &flow->spec);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}