/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"

/*
 * Currently, the flow API is implemented in such a manner that each
 * flow rule is converted to one or more hardware filters.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 * If some required field is unset in the flow rule, then a handful
 * of filter copies will be created to cover all possible values
 * of such a field.
 */
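
/*
 * Illustrative sketch (not driver logic; the counts below are an example
 * only, the real expansion is driven by sfc_flow_copy_flags): a rule that
 * matches "eth / ipv4" does not pin down the destination MAC, so on
 * hardware that only accepts fully-qualified filters the single template
 * may be expanded into several efx_filter_spec_s copies:
 *
 *   template: ETHER_TYPE=IPv4                        (1 specification)
 *   copies:   ETHER_TYPE=IPv4 + UNKNOWN_UCAST_DST
 *             ETHER_TYPE=IPv4 + UNKNOWN_MCAST_DST    (2 specifications)
 */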

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
	SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
static sfc_flow_item_parse sfc_flow_parse_vxlan;
static sfc_flow_item_parse sfc_flow_parse_geneve;
static sfc_flow_item_parse sfc_flow_parse_nvgre;

typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
				     unsigned int filters_count_for_one_val,
				     struct rte_flow_error *error);

typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
					efx_filter_spec_t *spec,
					struct sfc_filter *filter);

struct sfc_flow_copy_flag {
	/* EFX filter specification match flag */
	efx_filter_match_flags_t flag;
	/* Number of values of corresponding field */
	unsigned int vals_count;
	/* Function to set values in specifications */
	sfc_flow_spec_set_vals *set_vals;
	/*
	 * Function to check that the specification is suitable
	 * for adding this match flag
	 */
	sfc_flow_spec_check *spec_check;
};

static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	unsigned int i;
	uint8_t sum = 0;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate item and prepare structures spec and mask for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = def_mask;
	} else {
		mask = item->mask;
	}

	spec = item->spec;
	last = item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Protocol parsers.
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */
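
/*
 * Illustrative example (values chosen for clarity, not driver code): for
 * an IPv4 item the only accepted per-field masks are all-ones or all-zeros,
 * e.g. hdr.src_addr mask 0xffffffff (match exactly) or 0 (ignore); a prefix
 * mask such as 0xffffff00 is rejected by sfc_flow_parse_init() because it
 * asks for more match than the corresponding supp_mask byte allows.
 */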

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Outer frame specification may only comprise
 *   source/destination addresses and Ethertype field.
 *   Inner frame specification may contain destination address only.
 *   There is support for individual/group mask as well as for empty and full.
 *   If the mask is NULL, default mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const struct rte_flow_item_eth ifrm_supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const struct rte_flow_item_eth *supp_mask_p;
	const struct rte_flow_item_eth *def_mask_p;
	uint8_t *loc_mac = NULL;
	boolean_t is_ifrm = (efx_spec->efs_encap_type !=
		EFX_TUNNEL_PROTOCOL_NONE);

	if (is_ifrm) {
		supp_mask_p = &ifrm_supp_mask;
		def_mask_p = &ifrm_supp_mask;
		loc_mac = efx_spec->efs_ifrm_loc_mac;
	} else {
		supp_mask_p = &supp_mask;
		def_mask_p = &rte_flow_item_eth_mask;
		loc_mac = efx_spec->efs_loc_mac;
	}

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 supp_mask_p, def_mask_p,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= is_ifrm ?
			EFX_FILTER_MATCH_IFRM_LOC_MAC :
			EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (rte_is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |= is_ifrm ?
				EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!rte_is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	/*
	 * ifrm_supp_mask ensures that the source address and
	 * ethertype masks are equal to zero in inner frame,
	 * so these fields are filled in only for the outer frame
	 */
	if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!rte_is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
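
/*
 * Usage sketch (hypothetical addresses, for explanation only): an ETH item
 * with spec dst = 00:53:00:00:00:01 and the default (full) mask sets
 * EFX_FILTER_MATCH_LOC_MAC and copies the address into efs_loc_mac, while
 * a dst mask of 01:00:00:00:00:00 selects only the individual/group bit
 * and maps to the UNKNOWN_{UCAST,MCAST}_DST match flags instead.
 */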

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
		.inner_type = RTE_BE16(0xffff),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
		return -rte_errno;
	}
	if (mask->inner_type == supp_mask.inner_type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
	} else if (mask->inner_type) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Bad mask for VLAN inner_type");
		return -rte_errno;
	}

	return 0;
}
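
/*
 * Illustrative pattern (testpmd-like syntax, shown only to explain the
 * ordering rule above):
 *   eth / vlan vid is 10 / vlan vid is 20
 * The first VLAN item sets EFX_FILTER_MATCH_OUTER_VID (efs_outer_vid = 10),
 * the second sets EFX_FILTER_MATCH_INNER_VID (efs_inner_vid = 20), and a
 * third VLAN item is rejected with "More than two VLAN items".
 */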

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * little-endian in efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * little-endian in efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_tcp *spec = NULL;
	const struct rte_flow_item_tcp *mask = NULL;
	const struct rte_flow_item_tcp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_tcp_mask,
				 sizeof(struct rte_flow_item_tcp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by TCP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with TCP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the TCP pattern item");
	return -rte_errno;
}

/**
 * Convert UDP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_udp(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_udp *spec = NULL;
	const struct rte_flow_item_udp *mask = NULL;
	const struct rte_flow_item_udp supp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_udp_mask,
				 sizeof(struct rte_flow_item_udp),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by UDP source and destination ports requires
	 * the appropriate IP_PROTO in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IP proto in pattern with UDP item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * Source and destination ports are in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
	} else if (mask->hdr.src_port != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
	} else if (mask->hdr.dst_port != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the UDP pattern item");
	return -rte_errno;
}

/*
 * Filters for encapsulated packets match based on the EtherType and IP
 * protocol in the outer frame.
 */
static int
sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
					efx_filter_spec_t *efx_spec,
					uint8_t ip_proto,
					struct rte_flow_error *error)
{
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = ip_proto;
	} else if (efx_spec->efs_ip_proto != ip_proto) {
		switch (ip_proto) {
		case EFX_IPPROTO_UDP:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be UDP "
				"in VxLAN/GENEVE pattern");
			return -rte_errno;

		case EFX_IPPROTO_GRE:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Outer IP header protocol must be GRE "
				"in NVGRE pattern");
			return -rte_errno;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Only VxLAN/GENEVE/NVGRE tunneling patterns "
				"are supported");
			return -rte_errno;
		}
	}

	if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
	    efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Outer frame EtherType in pattern with tunneling "
			"must be IPv4 or IPv6");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
				  const uint8_t *vni_or_vsid_val,
				  const uint8_t *vni_or_vsid_mask,
				  const struct rte_flow_item *item,
				  struct rte_flow_error *error)
{
	const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
		0xff, 0xff, 0xff
	};

	if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
		   EFX_VNI_OR_VSID_LEN) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
		rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
			   EFX_VNI_OR_VSID_LEN);
	} else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported VNI/VSID mask");
		return -rte_errno;
	}

	return 0;
}
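
/*
 * Sketch of the expected layout (an assumption based on the 3-byte VNI/VSID
 * fields of rte_flow_item_vxlan/geneve/nvgre): VNI 0x123456 arrives as the
 * byte array { 0x12, 0x34, 0x56 }, and only the full mask
 * { 0xff, 0xff, 0xff } or an all-zero mask is accepted here.
 */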

/**
 * Convert VXLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VXLAN network identifier field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vxlan(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_vxlan *spec = NULL;
	const struct rte_flow_item_vxlan *mask = NULL;
	const struct rte_flow_item_vxlan supp_mask = {
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_vxlan_mask,
				 sizeof(struct rte_flow_item_vxlan),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert GENEVE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only Virtual Network Identifier and protocol type
 *   fields are supported. The protocol type can only be Ethernet (0x6558).
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_geneve(const struct rte_flow_item *item,
		      efx_filter_spec_t *efx_spec,
		      struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_geneve *spec = NULL;
	const struct rte_flow_item_geneve *mask = NULL;
	const struct rte_flow_item_geneve supp_mask = {
		.protocol = RTE_BE16(0xffff),
		.vni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_geneve_mask,
				 sizeof(struct rte_flow_item_geneve),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_UDP, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	if (mask->protocol == supp_mask.protocol) {
		if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"GENEVE encap. protocol must be Ethernet "
				"(0x6558) in the GENEVE pattern item");
			return -rte_errno;
		}
	} else if (mask->protocol != 0) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Unsupported mask for GENEVE encap. protocol");
		return -rte_errno;
	}

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
					       mask->vni, item, error);

	return rc;
}

/**
 * Convert NVGRE item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only virtual subnet ID field is supported.
 *   If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_nvgre(const struct rte_flow_item *item,
		     efx_filter_spec_t *efx_spec,
		     struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_nvgre *spec = NULL;
	const struct rte_flow_item_nvgre *mask = NULL;
	const struct rte_flow_item_nvgre supp_mask = {
		.tni = { 0xff, 0xff, 0xff }
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_nvgre_mask,
				 sizeof(struct rte_flow_item_nvgre),
				 error);
	if (rc != 0)
		return rc;

	rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
						     EFX_IPPROTO_GRE, error);
	if (rc != 0)
		return rc;

	efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
	efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;

	if (spec == NULL)
		return 0;

	rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
					       mask->tni, item, error);

	return rc;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_tcp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_L4,
		.parse = sfc_flow_parse_udp,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_vxlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_GENEVE,
		.prev_layer = SFC_FLOW_ITEM_L4,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_geneve,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.prev_layer = SFC_FLOW_ITEM_L3,
		.layer = SFC_FLOW_ITEM_START_LAYER,
		.parse = sfc_flow_parse_nvgre,
	},
};

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->transfer != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
				   "Transfer is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	boolean_t is_ifrm = B_FALSE;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		/*
		 * Allow only VOID and ETH pattern items in the inner frame.
		 * Also check that there is only one tunneling protocol.
		 */
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_ETH:
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   pattern,
						   "More than one tunneling protocol");
				return -rte_errno;
			}
			is_ifrm = B_TRUE;
			break;

		default:
			if (is_ifrm) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   pattern,
						   "There is an unsupported pattern item "
						   "in the inner frame");
				return -rte_errno;
			}
			break;
		}

		rc = item->parse(pattern, &flow->spec.template, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sfc_sa2shared(sa)->rxq_count)
		return -EINVAL;

	rxq = &sa->rxq_ctrl[queue->index];
	flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}

static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
		   const struct rte_flow_action_rss *action_rss,
		   struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	unsigned int rxq_sw_index;
	struct sfc_rxq *rxq;
	unsigned int rxq_hw_index_min;
	unsigned int rxq_hw_index_max;
	efx_rx_hash_type_t efx_hash_types;
	const uint8_t *rss_key;
	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
	unsigned int i;

	if (action_rss->queue_num == 0)
		return -EINVAL;

	rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
	rxq = &sa->rxq_ctrl[rxq_sw_index];
	rxq_hw_index_min = rxq->hw_index;
	rxq_hw_index_max = 0;

	for (i = 0; i < action_rss->queue_num; ++i) {
		rxq_sw_index = action_rss->queue[i];

		if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
			return -EINVAL;

		rxq = &sa->rxq_ctrl[rxq_sw_index];

		if (rxq->hw_index < rxq_hw_index_min)
			rxq_hw_index_min = rxq->hw_index;

		if (rxq->hw_index > rxq_hw_index_max)
			rxq_hw_index_max = rxq->hw_index;
	}

	switch (action_rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		break;
	default:
		return -EINVAL;
	}

	if (action_rss->level)
		return -EINVAL;

	/*
	 * Dummy RSS action with only one queue and no specific settings
	 * for hash types and key does not require dedicated RSS context
	 * and may be simplified to single queue action.
	 */
	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
	    action_rss->key_len == 0) {
		flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
		return 0;
	}

	if (action_rss->types) {
		int rc;

		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
					  &efx_hash_types);
		if (rc != 0)
			return -rc;
	} else {
		efx_hash_types = 0;
		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			efx_hash_types |= rss->hf_map[i].efx;
	}

	if (action_rss->key_len) {
		if (action_rss->key_len != sizeof(rss->key))
			return -EINVAL;

		rss_key = action_rss->key;
	} else {
		rss_key = rss->key;
	}

	flow->rss = B_TRUE;

	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
	sfc_rss_conf->rss_hash_types = efx_hash_types;
	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));

	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
		unsigned int nb_queues = action_rss->queue_num;
		unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
		struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];

		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
	}

	return 0;
}
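
/*
 * Worked example (hypothetical queue layout): if the RSS action names
 * queues whose hardware indices are { 4, 5, 6 }, rxq_hw_index_min is 4,
 * so rss_tbl is filled with the relative entries 0, 1, 2, 0, 1, 2, ...
 * round-robin over all RTE_DIM(sfc_rss_conf->rss_tbl) slots.
 */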

static int
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
		    unsigned int filters_count)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < filters_count; i++) {
		int rc;

		rc = efx_filter_remove(sa->nic, &spec->filters[i]);
		if (ret == 0 && rc != 0) {
			sfc_err(sa, "failed to remove filter specification "
				"(rc = %d)", rc);
			ret = rc;
		}
	}

	return ret;
}

static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < spec->count; i++) {
		rc = efx_filter_insert(sa->nic, &spec->filters[i]);
		if (rc != 0) {
			sfc_flow_spec_flush(sa, spec, i);
			break;
		}
	}

	return rc;
}

static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
	return sfc_flow_spec_flush(sa, spec, spec->count);
}

static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_flow_rss *flow_rss = &flow->rss_conf;
	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
	unsigned int i;
	int rc = 0;

	if (flow->rss) {
		unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
					      flow_rss->rxq_hw_index_min + 1,
					      EFX_MAXRSS);

		rc = efx_rx_scale_context_alloc(sa->nic,
						EFX_RX_SCALE_EXCLUSIVE,
						rss_spread,
						&efs_rss_context);
		if (rc != 0)
			goto fail_scale_context_alloc;

		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
					   rss->hash_alg,
					   flow_rss->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;

		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
					  flow_rss->rss_key,
					  sizeof(rss->key));
		if (rc != 0)
			goto fail_scale_key_set;

		/*
		 * At this point, fully elaborated filter specifications
		 * have been produced from the template. To make sure that
		 * RSS behaviour is consistent between them, set the same
		 * RSS context value everywhere.
		 */
		for (i = 0; i < flow->spec.count; i++) {
			efx_filter_spec_t *spec = &flow->spec.filters[i];

			spec->efs_rss_context = efs_rss_context;
			spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
		}
	}

	rc = sfc_flow_spec_insert(sa, &flow->spec);
	if (rc != 0)
		goto fail_filter_insert;

	if (flow->rss) {
		/*
		 * Scale table is set after filter insertion because
		 * the table entries are relative to the base RxQ ID
		 * and the latter is submitted to the HW by means of
		 * inserting a filter, so by the time of the request
		 * the HW knows all the information needed to verify
		 * the table entries, and the operation will succeed
		 */
		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
					  flow_rss->rss_tbl,
					  RTE_DIM(flow_rss->rss_tbl));
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	return 0;

fail_scale_tbl_set:
	sfc_flow_spec_remove(sa, &flow->spec);

fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
		efx_rx_scale_context_free(sa->nic, efs_rss_context);

fail_scale_context_alloc:
	return rc;
}

static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
		       struct rte_flow *flow)
{
	int rc = 0;

	rc = sfc_flow_spec_remove(sa, &flow->spec);
	if (rc != 0)
		return rc;

	if (flow->rss) {
		/*
		 * All specifications for a given flow rule have the same RSS
		 * context, so that RSS context value is taken from the first
		 * filter specification
		 */
		efx_filter_spec_t *spec = &flow->spec.filters[0];

		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
	}

	return rc;
}

static int
sfc_flow_parse_mark(struct sfc_adapter *sa,
		    const struct rte_flow_action_mark *mark,
		    struct rte_flow *flow)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
		return EINVAL;

	flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	flow->spec.template.efs_mark = mark->id;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	const unsigned int dp_rx_features = sa->priv.dp_rx->features;
	uint32_t actions_set = 0;
	const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
					   (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
					   (1UL << RTE_FLOW_ACTION_TYPE_DROP);
	const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
					   (1UL << RTE_FLOW_ACTION_TYPE_FLAG);

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
	RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
					       actions_set);
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			rc = sfc_flow_parse_rss(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, -rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad RSS action");
				return -rte_errno;
			}
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
					       actions_set);
			if ((actions_set & fate_actions_mask) != 0)
				goto fail_fate_actions;

			flow->spec.template.efs_dmaq_id =
				EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"FLAG action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			flow->spec.template.efs_flags |=
				EFX_FILTER_FLAG_ACTION_FLAG;
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
					       actions_set);
			if ((actions_set & mark_actions_mask) != 0)
				goto fail_actions_overlap;

			if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
				rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"MARK action is not supported on the current Rx datapath");
				return -rte_errno;
			}

			rc = sfc_flow_parse_mark(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad MARK action");
				return -rte_errno;
			}
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}

		actions_set |= (1UL << actions->type);
	}
#undef SFC_BUILD_SET_OVERFLOW

	/* When fate is unknown, drop traffic. */
	if ((actions_set & fate_actions_mask) == 0) {
		flow->spec.template.efs_dmaq_id =
			EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
	}

	return 0;

fail_fate_actions:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Cannot combine several fate-deciding actions, "
			   "choose between QUEUE, RSS or DROP");
	return -rte_errno;

fail_actions_overlap:
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Overlapping actions are not supported");
	return -rte_errno;
}
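
/*
 * Illustrative action combinations (not driver code): QUEUE + MARK is
 * accepted (one fate action plus one mark action), QUEUE + DROP fails with
 * "Cannot combine several fate-deciding actions", MARK + FLAG fails as
 * overlapping, and an action list with no fate action at all falls through
 * to the drop DMA queue.
 */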

/**
 * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
 * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
			       unsigned int filters_count_for_one_val,
			       struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}
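
/*
 * Worked example (values chosen for illustration): if spec->count is 4 and
 * filters_count_for_one_val is 2, then filters 0-1 get UNKNOWN_UCAST_DST
 * and filters 2-3 get UNKNOWN_MCAST_DST, i.e. vals[i / 2] for i = 0..3.
 */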

/**
 * Check that the following conditions are met:
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
				 __rte_unused efx_filter_spec_t *spec,
				 struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_mcast_dst;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
 * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same EtherType value, i.e. the
 *   number of specifications before copying.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
			unsigned int filters_count_for_one_val,
			struct rte_flow_error *error)
{
	unsigned int i;
	static const uint16_t vals[] = {
		EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by Ethertype");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		spec->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_ETHER_TYPE;

		/*
		 * The check above ensures that
		 * filters_count_for_one_val is not 0
		 */
		spec->filters[i].efs_ether_type =
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
 * in the same specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
			    unsigned int filters_count_for_one_val,
			    struct rte_flow_error *error)
{
	unsigned int i;

	if (filters_count_for_one_val != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect "
			"while copying by outer VLAN ID");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		spec->filters[i].efs_match_flags |=
			EFX_FILTER_MATCH_OUTER_VID;

		spec->filters[i].efs_outer_vid = 0;
	}

	return 0;
}

/**
 * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
 * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
 * specifications after copying.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param filters_count_for_one_val[in]
 *   How many specifications should have the same match flag, i.e. the
 *   number of specifications before copying.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
				    unsigned int filters_count_for_one_val,
				    struct rte_flow_error *error)
{
	unsigned int i;
	static const efx_filter_match_flags_t vals[] = {
		EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
	};

	if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Number of specifications is incorrect while copying "
			"by inner frame unknown destination flags");
		return -rte_errno;
	}

	for (i = 0; i < spec->count; i++) {
		/* The check above ensures that divisor can't be zero here */
		spec->filters[i].efs_match_flags |=
			vals[i / filters_count_for_one_val];
	}

	return 0;
}

/**
 * Check that the following conditions are met:
 * - the specification corresponds to a filter for encapsulated traffic
 * - the list of supported filters has a filter
 *   with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
 *   EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
 *   be inserted.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
				      efx_filter_spec_t *spec,
				      struct sfc_filter *filter)
{
	unsigned int i;
	efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
	efx_filter_match_flags_t match_mcast_dst;

	if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
		return B_FALSE;

	match_mcast_dst =
		(match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
		EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_mcast_dst == filter->supported_match[i])
			return B_TRUE;
	}

	return B_FALSE;
}

/**
 * Check that the list of supported filters has a filter that differs
 * from @p match in that it has no flag EFX_FILTER_MATCH_OUTER_VID;
 * in this case that filter will be used and the flag
 * EFX_FILTER_MATCH_OUTER_VID is not needed.
 *
 * @param match[in]
 *   The match flags of filter.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 */
static boolean_t
sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
			      __rte_unused efx_filter_spec_t *spec,
			      struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t match_without_vid =
		match & ~EFX_FILTER_MATCH_OUTER_VID;

	for (i = 0; i < filter->supported_match_num; i++) {
		if (match_without_vid == filter->supported_match[i])
			return B_FALSE;
	}

	return B_TRUE;
}

/*
 * Match flags that can be automatically added to filters.
 * Selecting the last minimum when searching for the copy flag ensures that the
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
 * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
 * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
 * filters.
 */
static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
	{
		.flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_unknown_dst_flags,
		.spec_check = sfc_flow_check_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_ETHER_TYPE,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ethertypes,
		.spec_check = NULL,
	},
	{
		.flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
		.vals_count = 2,
		.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
		.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
	},
	{
		.flag = EFX_FILTER_MATCH_OUTER_VID,
		.vals_count = 1,
		.set_vals = sfc_flow_set_outer_vid_flag,
		.spec_check = sfc_flow_check_outer_vid_flag,
	},
};

/* Get item from array sfc_flow_copy_flags */
static const struct sfc_flow_copy_flag *
sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		if (sfc_flow_copy_flags[i].flag == flag)
			return &sfc_flow_copy_flags[i];
	}

	return NULL;
}

/**
 * Make copies of the specifications, set the match flag and the values
 * of the field that corresponds to it.
 *
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param flag[in]
 *   The match flag to add.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
			     efx_filter_match_flags_t flag,
			     struct rte_flow_error *error)
{
	unsigned int i;
	unsigned int new_filters_count;
	unsigned int filters_count_for_one_val;
	const struct sfc_flow_copy_flag *copy_flag;
	int rc;

	copy_flag = sfc_flow_get_copy_flag(flag);
	if (copy_flag == NULL) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Unsupported spec field for copying");
		return -rte_errno;
	}

	new_filters_count = spec->count * copy_flag->vals_count;
	if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Too many EFX specifications in the flow rule");
		return -rte_errno;
	}

	/* Copy filter specifications */
	for (i = spec->count; i < new_filters_count; i++)
		spec->filters[i] = spec->filters[i - spec->count];

	filters_count_for_one_val = spec->count;
	spec->count = new_filters_count;

	rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
	if (rc != 0)
		return rc;

	return 0;
}
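
/*
 * Worked example (illustrative only): starting from 2 specifications and
 * adding EFX_FILTER_MATCH_ETHER_TYPE (vals_count = 2) yields 4: filters
 * 2-3 are copies of filters 0-1, after which set_vals() assigns IPv4 to
 * the first half and IPv6 to the second half.
 */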

/**
 * Check that the given set of match flags missing in the original filter spec
 * could be covered by adding spec copies which specify the corresponding
 * flags and packet field values to match.
 *
 * @param miss_flags[in]
 *   Match flags that the supported filter has but the specification lacks.
 * @param spec[in]
 *   Specification to be supplemented.
 * @param filter[in]
 *   SFC filter with list of supported filters.
 *
 * @return
 *   Number of specifications after copy or 0, if the flags can not be added.
 */
static unsigned int
sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
			     efx_filter_spec_t *spec,
			     struct sfc_filter *filter)
{
	unsigned int i;
	efx_filter_match_flags_t copy_flags = 0;
	efx_filter_match_flags_t flag;
	efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
	sfc_flow_spec_check *check;
	unsigned int multiplier = 1;

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		flag = sfc_flow_copy_flags[i].flag;
		check = sfc_flow_copy_flags[i].spec_check;
		if ((flag & miss_flags) == flag) {
			if (check != NULL && (!check(match, spec, filter)))
				break;

			copy_flags |= flag;
			multiplier *= sfc_flow_copy_flags[i].vals_count;
		}
	}

	if (copy_flags == miss_flags)
		return multiplier;

	return 0;
}

/**
 * Attempt to supplement the specification template to the minimally
 * supported set of match flags. To do this, it is necessary to copy
 * the specifications, filling them with the values of fields that
 * correspond to the missing flags.
 * The necessary and sufficient filter is built from the fewest number
 * of copies which could be made to cover the minimally required set
 * of match flags.
 *
 * @param sa[in]
 *   SFC adapter.
 * @param spec[in, out]
 *   SFC flow specification to update.
 * @param error[in]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
			       struct sfc_flow_spec *spec,
			       struct rte_flow_error *error)
{
	struct sfc_filter *filter = &sa->filter;
	efx_filter_match_flags_t miss_flags;
	efx_filter_match_flags_t min_miss_flags = 0;
	efx_filter_match_flags_t match;
	unsigned int min_multiplier = UINT_MAX;
	unsigned int multiplier;
	unsigned int i;
	int rc;

	match = spec->template.efs_match_flags;
	for (i = 0; i < filter->supported_match_num; i++) {
		if ((match & filter->supported_match[i]) == match) {
			miss_flags = filter->supported_match[i] & (~match);
			multiplier = sfc_flow_check_missing_flags(miss_flags,
				&spec->template, filter);
			if (multiplier > 0) {
				if (multiplier <= min_multiplier) {
					min_multiplier = multiplier;
					min_miss_flags = miss_flags;
				}
			}
		}
	}

	if (min_multiplier == UINT_MAX) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
		efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;

		if ((flag & min_miss_flags) == flag) {
			rc = sfc_flow_spec_add_match_flag(spec, flag, error);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
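
/*
 * Worked example (hypothetical supported-filter list): suppose the template
 * matches only LOC_MAC, and the hardware supports LOC_MAC + ETHER_TYPE
 * (missing ETHER_TYPE, multiplier 2) as well as LOC_MAC + OUTER_VID
 * (missing OUTER_VID, multiplier 1). The latter has the smaller multiplier,
 * so the spec is completed with a single OUTER_VID = 0 copy.
 */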

/**
 * Check that a set of match flags is referred to by a filter. The filter is
 * described by match flags with the ability to add OUTER_VID and INNER_VID
 * flags.
 *
 * @param match_flags[in]
 *   Set of match flags.
 * @param flags_pattern[in]
 *   Pattern of filter match flags.
 */
static boolean_t
sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
			    efx_filter_match_flags_t flags_pattern)
{
	if ((match_flags & flags_pattern) != flags_pattern)
		return B_FALSE;

	switch (match_flags & ~flags_pattern) {
	case 0:
	case EFX_FILTER_MATCH_OUTER_VID:
	case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
		return B_TRUE;
	default:
		return B_FALSE;
	}
}

/**
 * Check whether the spec maps to a hardware filter which is known to be
 * ineffective despite being valid.
 *
 * @param filter[in]
 *   SFC filter with list of supported filters.
 * @param spec[in]
 *   SFC flow specification.
 */
static boolean_t
sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
				  struct sfc_flow_spec *spec)
{
	unsigned int i;
	uint16_t ether_type;
	uint8_t ip_proto;
	efx_filter_match_flags_t match_flags;

	for (i = 0; i < spec->count; i++) {
		match_flags = spec->filters[i].efs_match_flags;

		if (sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE) ||
		    sfc_flow_is_match_with_vids(match_flags,
						EFX_FILTER_MATCH_ETHER_TYPE |
						EFX_FILTER_MATCH_LOC_MAC)) {
			ether_type = spec->filters[i].efs_ether_type;
			if (filter->supports_ip_proto_or_addr_filter &&
			    (ether_type == EFX_ETHER_TYPE_IPV4 ||
			     ether_type == EFX_ETHER_TYPE_IPV6))
				return B_TRUE;
		} else if (sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO) ||
			   sfc_flow_is_match_with_vids(match_flags,
				EFX_FILTER_MATCH_ETHER_TYPE |
				EFX_FILTER_MATCH_IP_PROTO |
				EFX_FILTER_MATCH_LOC_MAC)) {
			ip_proto = spec->filters[i].efs_ip_proto;
			if (filter->supports_rem_or_local_port_filter &&
			    (ip_proto == EFX_IPPROTO_TCP ||
			     ip_proto == EFX_IPPROTO_UDP))
				return B_TRUE;
		}
	}

	return B_FALSE;
}

static int
sfc_flow_validate_match_flags(struct sfc_adapter *sa,
			      struct rte_flow *flow,
			      struct rte_flow_error *error)
{
	efx_filter_spec_t *spec_tmpl = &flow->spec.template;
	efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
	int rc;

	/* Initialize the first filter spec with template */
	flow->spec.filters[0] = *spec_tmpl;
	flow->spec.count = 1;

	if (!sfc_filter_is_match_supported(sa, match_flags)) {
		rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
		if (rc != 0)
			return rc;
	}

	if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "The flow rule pattern is unsupported");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_validate_match_flags(sa, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	return 0;

fail_bad_value:
	return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	memset(&flow, 0, sizeof(flow));

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}
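
/*
 * Application-side usage sketch (public rte_flow API, shown only for
 * context; "port_id" and the pattern are hypothetical):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * Both calls are dispatched to sfc_flow_validate()/sfc_flow_create()
 * through sfc_flow_ops below.
 */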

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	/* The flow list is protected by the adapter lock */
	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = sfc_flow_filter_remove(sa, flow);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_flow *flow;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		int rc = sfc_flow_remove(sa, flow, error);

		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

static int
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int ret = 0;

	sfc_adapter_lock(sa);
	if (sa->state != SFC_ADAPTER_INITIALIZED) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "please close the port first");
		ret = -rte_errno;
	} else {
		sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
	}
	sfc_adapter_unlock(sa);

	return ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
	.isolate = sfc_flow_isolate,
};

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		sfc_flow_filter_remove(sa, flow);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = sfc_flow_filter_insert(sa, flow);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}