/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
/*
 * For now, the flow API is implemented in such a manner that each
 * flow rule is converted to a single hardware filter.
 * All elements of a flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
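/*
 * For example (illustrative only): a pattern of ETH / IPV4 / TCP items
 * with a QUEUE action sets EFX_FILTER_MATCH_ETHER_TYPE,
 * EFX_FILTER_MATCH_IP_PROTO and, if addresses or ports are specified,
 * the corresponding host/port match flags, while the QUEUE action
 * fills in efs_dmaq_id.
 */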
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};
42 typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
43 efx_filter_spec_t *spec,
44 struct rte_flow_error *error);
46 struct sfc_flow_item {
47 enum rte_flow_item_type type; /* Type of item */
48 enum sfc_flow_item_layers layer; /* Layer of item */
49 enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
53 static sfc_flow_item_parse sfc_flow_parse_void;
54 static sfc_flow_item_parse sfc_flow_parse_eth;
55 static sfc_flow_item_parse sfc_flow_parse_vlan;
56 static sfc_flow_item_parse sfc_flow_parse_ipv4;
57 static sfc_flow_item_parse sfc_flow_parse_ipv6;
58 static sfc_flow_item_parse sfc_flow_parse_tcp;
59 static sfc_flow_item_parse sfc_flow_parse_udp;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}
/*
 * Validate item and prepare structures spec and mask for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
93 rte_flow_error_set(error, EINVAL,
94 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
99 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
100 rte_flow_error_set(error, EINVAL,
101 RTE_FLOW_ERROR_TYPE_ITEM, item,
102 "Mask or last is set without spec");
	/*
	 * If "mask" is not set, the default mask is used,
	 * but if the default mask is NULL, "mask" must be set
	 */
110 if (item->mask == NULL) {
111 if (def_mask == NULL) {
112 rte_flow_error_set(error, EINVAL,
113 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
114 "Mask should be specified");
118 mask = (const uint8_t *)def_mask;
120 mask = (const uint8_t *)item->mask;
123 spec = (const uint8_t *)item->spec;
124 last = (const uint8_t *)item->last;
	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
135 memcmp(last, spec, size) != 0) {
136 rte_flow_error_set(error, ENOTSUP,
137 RTE_FLOW_ERROR_TYPE_ITEM, item,
138 "Ranging is not supported");
142 if (supp_mask == NULL) {
143 rte_flow_error_set(error, EINVAL,
144 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
145 "Supported mask for item should be specified");
	/* Check that mask and spec do not ask for more match than supp_mask */
150 for (i = 0; i < size; i++) {
151 match = spec[i] | mask[i];
152 supp = ((const uint8_t *)supp_mask)[i];
154 if ((match | supp) != supp) {
155 rte_flow_error_set(error, ENOTSUP,
156 RTE_FLOW_ERROR_TYPE_ITEM, item,
157 "Item's field is not supported");
/*
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. In addition to full and
 *   empty masks of destination address, individual/group mask is
 *   also supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
198 sfc_flow_parse_eth(const struct rte_flow_item *item,
199 efx_filter_spec_t *efx_spec,
200 struct rte_flow_error *error)
203 const struct rte_flow_item_eth *spec = NULL;
204 const struct rte_flow_item_eth *mask = NULL;
205 const struct rte_flow_item_eth supp_mask = {
206 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
207 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
210 const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
211 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
214 rc = sfc_flow_parse_init(item,
215 (const void **)&spec,
216 (const void **)&mask,
218 &rte_flow_item_eth_mask,
219 sizeof(struct rte_flow_item_eth),
	/* If "spec" is not set, the item can match any Ethernet frame */
	if (spec == NULL)
		return 0;
228 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
229 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
230 rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
232 } else if (memcmp(mask->dst.addr_bytes, ig_mask,
233 EFX_MAC_ADDR_LEN) == 0) {
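		/*
		 * The individual/group mask (01:00:00:00:00:00) matches only
		 * the multicast bit of the destination MAC: unicast
		 * destinations map to the unknown unicast filter, all other
		 * destinations to the unknown multicast filter.
		 */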
234 if (is_unicast_ether_addr(&spec->dst))
235 efx_spec->efs_match_flags |=
236 EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
238 efx_spec->efs_match_flags |=
239 EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
240 } else if (!is_zero_ether_addr(&mask->dst)) {
244 if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
245 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
246 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
248 } else if (!is_zero_ether_addr(&mask->src)) {
	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
256 if (mask->type == supp_mask.type) {
257 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
258 efx_spec->efs_ether_type = rte_bswap16(spec->type);
259 } else if (mask->type != 0) {
266 rte_flow_error_set(error, EINVAL,
267 RTE_FLOW_ERROR_TYPE_ITEM, item,
268 "Bad mask in the ETH pattern item");
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
284 sfc_flow_parse_vlan(const struct rte_flow_item *item,
285 efx_filter_spec_t *efx_spec,
286 struct rte_flow_error *error)
290 const struct rte_flow_item_vlan *spec = NULL;
291 const struct rte_flow_item_vlan *mask = NULL;
292 const struct rte_flow_item_vlan supp_mask = {
293 .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
296 rc = sfc_flow_parse_init(item,
297 (const void **)&spec,
298 (const void **)&mask,
301 sizeof(struct rte_flow_item_vlan),
	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
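	/*
	 * Only the 12 VID bits of the TCI can be matched: the supported mask
	 * is ETH_VLAN_ID_MAX (0x0fff), so PCP and DEI bits must be zero in
	 * the mask.
	 */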
312 if (mask->tci == supp_mask.tci) {
313 vid = rte_bswap16(spec->tci);
315 if (!(efx_spec->efs_match_flags &
316 EFX_FILTER_MATCH_OUTER_VID)) {
317 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
318 efx_spec->efs_outer_vid = vid;
319 } else if (!(efx_spec->efs_match_flags &
320 EFX_FILTER_MATCH_INNER_VID)) {
321 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
322 efx_spec->efs_inner_vid = vid;
324 rte_flow_error_set(error, EINVAL,
325 RTE_FLOW_ERROR_TYPE_ITEM, item,
326 "More than two VLAN items");
330 rte_flow_error_set(error, EINVAL,
331 RTE_FLOW_ERROR_TYPE_ITEM, item,
332 "VLAN ID in TCI match is required");
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
352 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
353 efx_filter_spec_t *efx_spec,
354 struct rte_flow_error *error)
357 const struct rte_flow_item_ipv4 *spec = NULL;
358 const struct rte_flow_item_ipv4 *mask = NULL;
359 const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
360 const struct rte_flow_item_ipv4 supp_mask = {
362 .src_addr = 0xffffffff,
363 .dst_addr = 0xffffffff,
364 .next_proto_id = 0xff,
368 rc = sfc_flow_parse_init(item,
369 (const void **)&spec,
370 (const void **)&mask,
372 &rte_flow_item_ipv4_mask,
373 sizeof(struct rte_flow_item_ipv4),
	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
382 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
383 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
384 efx_spec->efs_ether_type = ether_type_ipv4;
385 } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
386 rte_flow_error_set(error, EINVAL,
387 RTE_FLOW_ERROR_TYPE_ITEM, item,
388 "Ethertype in pattern with IPV4 item should be appropriate");
	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec, so no byte swap is needed
	 */
399 if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
400 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
401 efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
402 } else if (mask->hdr.src_addr != 0) {
406 if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
407 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
408 efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
409 } else if (mask->hdr.dst_addr != 0) {
413 if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
414 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
415 efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
416 } else if (mask->hdr.next_proto_id != 0) {
423 rte_flow_error_set(error, EINVAL,
424 RTE_FLOW_ERROR_TYPE_ITEM, item,
425 "Bad mask in the IPV4 pattern item");
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, the default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
442 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
443 efx_filter_spec_t *efx_spec,
444 struct rte_flow_error *error)
447 const struct rte_flow_item_ipv6 *spec = NULL;
448 const struct rte_flow_item_ipv6 *mask = NULL;
449 const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
450 const struct rte_flow_item_ipv6 supp_mask = {
452 .src_addr = { 0xff, 0xff, 0xff, 0xff,
453 0xff, 0xff, 0xff, 0xff,
454 0xff, 0xff, 0xff, 0xff,
455 0xff, 0xff, 0xff, 0xff },
456 .dst_addr = { 0xff, 0xff, 0xff, 0xff,
457 0xff, 0xff, 0xff, 0xff,
458 0xff, 0xff, 0xff, 0xff,
459 0xff, 0xff, 0xff, 0xff },
464 rc = sfc_flow_parse_init(item,
465 (const void **)&spec,
466 (const void **)&mask,
468 &rte_flow_item_ipv6_mask,
469 sizeof(struct rte_flow_item_ipv6),
	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
478 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
479 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
480 efx_spec->efs_ether_type = ether_type_ipv6;
481 } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
482 rte_flow_error_set(error, EINVAL,
483 RTE_FLOW_ERROR_TYPE_ITEM, item,
484 "Ethertype in pattern with IPV6 item should be appropriate");
	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec, so no byte swap is needed
	 */
495 if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
496 sizeof(mask->hdr.src_addr)) == 0) {
497 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
499 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
500 sizeof(spec->hdr.src_addr));
501 rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
502 sizeof(efx_spec->efs_rem_host));
503 } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
504 sizeof(mask->hdr.src_addr))) {
508 if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
509 sizeof(mask->hdr.dst_addr)) == 0) {
510 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
512 RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
513 sizeof(spec->hdr.dst_addr));
514 rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
515 sizeof(efx_spec->efs_loc_host));
516 } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
517 sizeof(mask->hdr.dst_addr))) {
521 if (mask->hdr.proto == supp_mask.hdr.proto) {
522 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
523 efx_spec->efs_ip_proto = spec->hdr.proto;
524 } else if (mask->hdr.proto != 0) {
531 rte_flow_error_set(error, EINVAL,
532 RTE_FLOW_ERROR_TYPE_ITEM, item,
533 "Bad mask in the IPV6 pattern item");
/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
550 sfc_flow_parse_tcp(const struct rte_flow_item *item,
551 efx_filter_spec_t *efx_spec,
552 struct rte_flow_error *error)
555 const struct rte_flow_item_tcp *spec = NULL;
556 const struct rte_flow_item_tcp *mask = NULL;
557 const struct rte_flow_item_tcp supp_mask = {
564 rc = sfc_flow_parse_init(item,
565 (const void **)&spec,
566 (const void **)&mask,
568 &rte_flow_item_tcp_mask,
569 sizeof(struct rte_flow_item_tcp),
	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
578 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
579 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
580 efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
581 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
582 rte_flow_error_set(error, EINVAL,
583 RTE_FLOW_ERROR_TYPE_ITEM, item,
584 "IP proto in pattern with TCP item should be appropriate");
	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
595 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
596 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
597 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
598 } else if (mask->hdr.src_port != 0) {
602 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
603 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
604 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
605 } else if (mask->hdr.dst_port != 0) {
612 rte_flow_error_set(error, EINVAL,
613 RTE_FLOW_ERROR_TYPE_ITEM, item,
614 "Bad mask in the TCP pattern item");
/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, the default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
631 sfc_flow_parse_udp(const struct rte_flow_item *item,
632 efx_filter_spec_t *efx_spec,
633 struct rte_flow_error *error)
636 const struct rte_flow_item_udp *spec = NULL;
637 const struct rte_flow_item_udp *mask = NULL;
638 const struct rte_flow_item_udp supp_mask = {
645 rc = sfc_flow_parse_init(item,
646 (const void **)&spec,
647 (const void **)&mask,
649 &rte_flow_item_udp_mask,
650 sizeof(struct rte_flow_item_udp),
	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
659 if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
660 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
661 efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
662 } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
663 rte_flow_error_set(error, EINVAL,
664 RTE_FLOW_ERROR_TYPE_ITEM, item,
665 "IP proto in pattern with UDP item should be appropriate");
	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
676 if (mask->hdr.src_port == supp_mask.hdr.src_port) {
677 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
678 efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
679 } else if (mask->hdr.src_port != 0) {
683 if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
684 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
685 efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
686 } else if (mask->hdr.dst_port != 0) {
693 rte_flow_error_set(error, EINVAL,
694 RTE_FLOW_ERROR_TYPE_ITEM, item,
695 "Bad mask in the UDP pattern item");
699 static const struct sfc_flow_item sfc_flow_items[] = {
701 .type = RTE_FLOW_ITEM_TYPE_VOID,
702 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
703 .layer = SFC_FLOW_ITEM_ANY_LAYER,
704 .parse = sfc_flow_parse_void,
707 .type = RTE_FLOW_ITEM_TYPE_ETH,
708 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
709 .layer = SFC_FLOW_ITEM_L2,
710 .parse = sfc_flow_parse_eth,
713 .type = RTE_FLOW_ITEM_TYPE_VLAN,
714 .prev_layer = SFC_FLOW_ITEM_L2,
715 .layer = SFC_FLOW_ITEM_L2,
716 .parse = sfc_flow_parse_vlan,
719 .type = RTE_FLOW_ITEM_TYPE_IPV4,
720 .prev_layer = SFC_FLOW_ITEM_L2,
721 .layer = SFC_FLOW_ITEM_L3,
722 .parse = sfc_flow_parse_ipv4,
725 .type = RTE_FLOW_ITEM_TYPE_IPV6,
726 .prev_layer = SFC_FLOW_ITEM_L2,
727 .layer = SFC_FLOW_ITEM_L3,
728 .parse = sfc_flow_parse_ipv6,
731 .type = RTE_FLOW_ITEM_TYPE_TCP,
732 .prev_layer = SFC_FLOW_ITEM_L3,
733 .layer = SFC_FLOW_ITEM_L4,
734 .parse = sfc_flow_parse_tcp,
737 .type = RTE_FLOW_ITEM_TYPE_UDP,
738 .prev_layer = SFC_FLOW_ITEM_L3,
739 .layer = SFC_FLOW_ITEM_L4,
740 .parse = sfc_flow_parse_udp,
/*
 * Protocol-independent flow API support
 */
static int
748 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
749 struct rte_flow *flow,
750 struct rte_flow_error *error)
753 rte_flow_error_set(error, EINVAL,
754 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
758 if (attr->group != 0) {
759 rte_flow_error_set(error, ENOTSUP,
760 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
761 "Groups are not supported");
764 if (attr->priority != 0) {
765 rte_flow_error_set(error, ENOTSUP,
766 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
767 "Priorities are not supported");
770 if (attr->egress != 0) {
771 rte_flow_error_set(error, ENOTSUP,
772 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
773 "Egress is not supported");
776 if (attr->ingress == 0) {
777 rte_flow_error_set(error, ENOTSUP,
778 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
779 "Only ingress is supported");
783 flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
784 flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
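	/*
	 * The default RSS context set here is overridden in
	 * sfc_flow_filter_insert() if an RSS action allocates an exclusive
	 * context for the flow.
	 */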
789 /* Get item from array sfc_flow_items */
790 static const struct sfc_flow_item *
791 sfc_flow_get_item(enum rte_flow_item_type type)
795 for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
796 if (sfc_flow_items[i].type == type)
797 return &sfc_flow_items[i];
803 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
804 struct rte_flow *flow,
805 struct rte_flow_error *error)
808 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
809 const struct sfc_flow_item *item;
811 if (pattern == NULL) {
812 rte_flow_error_set(error, EINVAL,
813 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
818 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
819 item = sfc_flow_get_item(pattern->type);
821 rte_flow_error_set(error, ENOTSUP,
822 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
823 "Unsupported pattern item");
		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
831 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
832 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
833 item->prev_layer != prev_layer) {
834 rte_flow_error_set(error, ENOTSUP,
835 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
836 "Unexpected sequence of pattern items");
840 rc = item->parse(pattern, &flow->spec, error);
844 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
845 prev_layer = item->layer;
852 sfc_flow_parse_queue(struct sfc_adapter *sa,
853 const struct rte_flow_action_queue *queue,
854 struct rte_flow *flow)
858 if (queue->index >= sa->rxq_count)
861 rxq = sa->rxq_info[queue->index].rxq;
862 flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
867 #if EFSYS_OPT_RX_SCALE
869 sfc_flow_parse_rss(struct sfc_adapter *sa,
870 const struct rte_flow_action_rss *rss,
871 struct rte_flow *flow)
873 unsigned int rxq_sw_index;
875 unsigned int rxq_hw_index_min;
876 unsigned int rxq_hw_index_max;
877 const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
879 uint8_t *rss_key = NULL;
880 struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
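	/*
	 * The minimum hardware Rx queue index starts at the last queue's
	 * index and the maximum at 0; the loop over the RSS queue list below
	 * narrows them to the actual range used by the action.
	 */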
886 rxq_sw_index = sa->rxq_count - 1;
887 rxq = sa->rxq_info[rxq_sw_index].rxq;
888 rxq_hw_index_min = rxq->hw_index;
889 rxq_hw_index_max = 0;
891 for (i = 0; i < rss->num; ++i) {
892 rxq_sw_index = rss->queue[i];
894 if (rxq_sw_index >= sa->rxq_count)
897 rxq = sa->rxq_info[rxq_sw_index].rxq;
899 if (rxq->hw_index < rxq_hw_index_min)
900 rxq_hw_index_min = rxq->hw_index;
902 if (rxq->hw_index > rxq_hw_index_max)
903 rxq_hw_index_max = rxq->hw_index;
906 rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
907 if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
910 if (rss_conf != NULL) {
911 if (rss_conf->rss_key_len != sizeof(sa->rss_key))
914 rss_key = rss_conf->rss_key;
916 rss_key = sa->rss_key;
921 sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
922 sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
923 sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
924 rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
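	/*
	 * Illustrative example: with RSS queues whose hardware indices are
	 * 2..5, rxq_hw_index_min is 2 and each rss_tbl entry holds
	 * hw_index - 2, i.e. the values 0..3 repeated across the table.
	 */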
926 for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
927 unsigned int rxq_sw_index = rss->queue[i % rss->num];
928 struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
930 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
935 #endif /* EFSYS_OPT_RX_SCALE */
938 sfc_flow_filter_insert(struct sfc_adapter *sa,
939 struct rte_flow *flow)
941 efx_filter_spec_t *spec = &flow->spec;
943 #if EFSYS_OPT_RX_SCALE
944 struct sfc_flow_rss *rss = &flow->rss_conf;
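	/*
	 * For an RSS flow, an exclusive RSS context sized to the hardware
	 * Rx queue index range is allocated, the filter is pointed at the
	 * lowest queue in the range and EFX_FILTER_FLAG_RX_RSS is set so
	 * that the indirection table spreads packets across the range.
	 */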
948 unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
949 rss->rxq_hw_index_min + 1,
952 rc = efx_rx_scale_context_alloc(sa->nic,
953 EFX_RX_SCALE_EXCLUSIVE,
955 &spec->efs_rss_context);
957 goto fail_scale_context_alloc;
959 rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
960 EFX_RX_HASHALG_TOEPLITZ,
961 rss->rss_hash_types, B_TRUE);
963 goto fail_scale_mode_set;
965 rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
967 sizeof(sa->rss_key));
969 goto fail_scale_key_set;
971 spec->efs_dmaq_id = rss->rxq_hw_index_min;
972 spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
975 rc = efx_filter_insert(sa->nic, spec);
977 goto fail_filter_insert;
	/*
	 * Scale table is set after filter insertion because
	 * the table entries are relative to the base RxQ ID
	 * and the latter is submitted to the HW by means of
	 * inserting a filter, so by the time of the request
	 * the HW knows all the information needed to verify
	 * the table entries, and the operation will succeed
	 */
988 rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
989 rss->rss_tbl, RTE_DIM(rss->rss_tbl));
991 goto fail_scale_tbl_set;
997 efx_filter_remove(sa->nic, spec);
1001 fail_scale_mode_set:
1003 efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1005 fail_scale_context_alloc:
1007 #else /* !EFSYS_OPT_RX_SCALE */
1008 return efx_filter_insert(sa->nic, spec);
1009 #endif /* EFSYS_OPT_RX_SCALE */
1013 sfc_flow_filter_remove(struct sfc_adapter *sa,
1014 struct rte_flow *flow)
1016 efx_filter_spec_t *spec = &flow->spec;
1019 rc = efx_filter_remove(sa->nic, spec);
1023 #if EFSYS_OPT_RX_SCALE
1025 rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
1026 #endif /* EFSYS_OPT_RX_SCALE */
1032 sfc_flow_parse_actions(struct sfc_adapter *sa,
1033 const struct rte_flow_action actions[],
1034 struct rte_flow *flow,
1035 struct rte_flow_error *error)
1038 boolean_t is_specified = B_FALSE;
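	/*
	 * A QUEUE or RSS fate action must be present: VOID actions are
	 * skipped and any unsupported action type is rejected below.
	 */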
1040 if (actions == NULL) {
1041 rte_flow_error_set(error, EINVAL,
1042 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
1047 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1048 switch (actions->type) {
1049 case RTE_FLOW_ACTION_TYPE_VOID:
1052 case RTE_FLOW_ACTION_TYPE_QUEUE:
1053 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
1055 rte_flow_error_set(error, EINVAL,
1056 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1057 "Bad QUEUE action");
1061 is_specified = B_TRUE;
1064 #if EFSYS_OPT_RX_SCALE
1065 case RTE_FLOW_ACTION_TYPE_RSS:
1066 rc = sfc_flow_parse_rss(sa, actions->conf, flow);
1068 rte_flow_error_set(error, rc,
1069 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1074 is_specified = B_TRUE;
1076 #endif /* EFSYS_OPT_RX_SCALE */
1079 rte_flow_error_set(error, ENOTSUP,
1080 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1081 "Action is not supported");
1086 if (!is_specified) {
1087 rte_flow_error_set(error, EINVAL,
1088 RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
1089 "Action is unspecified");
1097 sfc_flow_parse(struct rte_eth_dev *dev,
1098 const struct rte_flow_attr *attr,
1099 const struct rte_flow_item pattern[],
1100 const struct rte_flow_action actions[],
1101 struct rte_flow *flow,
1102 struct rte_flow_error *error)
1104 struct sfc_adapter *sa = dev->data->dev_private;
1107 memset(&flow->spec, 0, sizeof(flow->spec));
1109 rc = sfc_flow_parse_attr(attr, flow, error);
1111 goto fail_bad_value;
1113 rc = sfc_flow_parse_pattern(pattern, flow, error);
1115 goto fail_bad_value;
1117 rc = sfc_flow_parse_actions(sa, actions, flow, error);
1119 goto fail_bad_value;
1121 if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
1122 rte_flow_error_set(error, ENOTSUP,
1123 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1124 "Flow rule pattern is not supported");
1133 sfc_flow_validate(struct rte_eth_dev *dev,
1134 const struct rte_flow_attr *attr,
1135 const struct rte_flow_item pattern[],
1136 const struct rte_flow_action actions[],
1137 struct rte_flow_error *error)
1139 struct rte_flow flow;
1141 return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
1144 static struct rte_flow *
1145 sfc_flow_create(struct rte_eth_dev *dev,
1146 const struct rte_flow_attr *attr,
1147 const struct rte_flow_item pattern[],
1148 const struct rte_flow_action actions[],
1149 struct rte_flow_error *error)
1151 struct sfc_adapter *sa = dev->data->dev_private;
1152 struct rte_flow *flow = NULL;
1155 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
1157 rte_flow_error_set(error, ENOMEM,
1158 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1159 "Failed to allocate memory");
1163 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
1165 goto fail_bad_value;
1167 TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
1169 sfc_adapter_lock(sa);
1171 if (sa->state == SFC_ADAPTER_STARTED) {
1172 rc = sfc_flow_filter_insert(sa, flow);
1174 rte_flow_error_set(error, rc,
1175 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1176 "Failed to insert filter");
1177 goto fail_filter_insert;
1181 sfc_adapter_unlock(sa);
1186 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1190 sfc_adapter_unlock(sa);
1197 sfc_flow_remove(struct sfc_adapter *sa,
1198 struct rte_flow *flow,
1199 struct rte_flow_error *error)
1203 SFC_ASSERT(sfc_adapter_is_locked(sa));
1205 if (sa->state == SFC_ADAPTER_STARTED) {
1206 rc = sfc_flow_filter_remove(sa, flow);
1208 rte_flow_error_set(error, rc,
1209 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1210 "Failed to destroy flow rule");
1213 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1220 sfc_flow_destroy(struct rte_eth_dev *dev,
1221 struct rte_flow *flow,
1222 struct rte_flow_error *error)
1224 struct sfc_adapter *sa = dev->data->dev_private;
1225 struct rte_flow *flow_ptr;
1228 sfc_adapter_lock(sa);
1230 TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
1231 if (flow_ptr == flow)
1235 rte_flow_error_set(error, rc,
1236 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1237 "Failed to find flow rule to destroy");
1238 goto fail_bad_value;
1241 rc = sfc_flow_remove(sa, flow, error);
1244 sfc_adapter_unlock(sa);
1250 sfc_flow_flush(struct rte_eth_dev *dev,
1251 struct rte_flow_error *error)
1253 struct sfc_adapter *sa = dev->data->dev_private;
1254 struct rte_flow *flow;
1258 sfc_adapter_lock(sa);
1260 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
1261 rc = sfc_flow_remove(sa, flow, error);
1266 sfc_adapter_unlock(sa);
1272 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
1273 struct rte_flow_error *error)
1275 struct sfc_adapter *sa = dev->data->dev_private;
1276 struct sfc_port *port = &sa->port;
1279 sfc_adapter_lock(sa);
1280 if (sa->state != SFC_ADAPTER_INITIALIZED) {
1281 rte_flow_error_set(error, EBUSY,
1282 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1283 NULL, "please close the port first");
1286 port->isolated = (enable) ? B_TRUE : B_FALSE;
1288 sfc_adapter_unlock(sa);
1293 const struct rte_flow_ops sfc_flow_ops = {
1294 .validate = sfc_flow_validate,
1295 .create = sfc_flow_create,
1296 .destroy = sfc_flow_destroy,
1297 .flush = sfc_flow_flush,
1299 .isolate = sfc_flow_isolate,
1303 sfc_flow_init(struct sfc_adapter *sa)
1305 SFC_ASSERT(sfc_adapter_is_locked(sa));
1307 TAILQ_INIT(&sa->filter.flow_list);
1311 sfc_flow_fini(struct sfc_adapter *sa)
1313 struct rte_flow *flow;
1315 SFC_ASSERT(sfc_adapter_is_locked(sa));
1317 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
1318 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
1324 sfc_flow_stop(struct sfc_adapter *sa)
1326 struct rte_flow *flow;
1328 SFC_ASSERT(sfc_adapter_is_locked(sa));
1330 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
1331 sfc_flow_filter_remove(sa, flow);
1335 sfc_flow_start(struct sfc_adapter *sa)
1337 struct rte_flow *flow;
1340 sfc_log_init(sa, "entry");
1342 SFC_ASSERT(sfc_adapter_is_locked(sa));
1344 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
1345 rc = sfc_flow_filter_insert(sa, flow);
1350 sfc_log_init(sa, "done");