/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_malloc.h>

#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * For now, the flow API is implemented in such a manner that each
 * flow rule is converted to a single hardware filter.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
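
/*
 * Illustrative example (editor's sketch, assuming testpmd-style flow
 * syntax): a rule such as
 *   flow create 0 ingress pattern eth / ipv4 / tcp dst is 80 / end
 *     actions queue index 3 / end
 * is collapsed into a single efx_filter_spec_t, roughly:
 *   efs_match_flags = EFX_FILTER_MATCH_ETHER_TYPE |
 *                     EFX_FILTER_MATCH_IP_PROTO |
 *                     EFX_FILTER_MATCH_LOC_PORT;
 *   efs_ether_type  = EFX_ETHER_TYPE_IPV4;
 *   efs_ip_proto    = EFX_IPPROTO_TCP;
 *   efs_loc_port    = 80;
 *   efs_dmaq_id     = hardware index of Rx queue 3;
 */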

enum sfc_flow_item_layers {
        SFC_FLOW_ITEM_ANY_LAYER,
        SFC_FLOW_ITEM_START_LAYER,
        SFC_FLOW_ITEM_L2,
        SFC_FLOW_ITEM_L3,
        SFC_FLOW_ITEM_L4,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
                                  efx_filter_spec_t *spec,
                                  struct rte_flow_error *error);

struct sfc_flow_item {
        enum rte_flow_item_type type;           /* Type of item */
        enum sfc_flow_item_layers layer;        /* Layer of item */
        enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
        sfc_flow_item_parse *parse;             /* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
        uint8_t sum = 0;
        unsigned int i;

        for (i = 0; i < size; i++)
                sum |= buf[i];

        return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare "spec" and "mask" structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
                    const void *supp_mask,
                    const void *def_mask,
                    unsigned int size,
                    struct rte_flow_error *error)
{
        const uint8_t *spec;
        const uint8_t *mask;
        const uint8_t *last;
        uint8_t match;
        uint8_t supp;
        unsigned int i;

        if (item == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "NULL item");
                return -rte_errno;
        }

        if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Mask or last is set without spec");
                return -rte_errno;
        }

        /*
         * If "mask" is not set, the default mask is used,
         * but if the default mask is NULL, "mask" must be set
         */
        if (item->mask == NULL) {
                if (def_mask == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                           "Mask should be specified");
                        return -rte_errno;
                }

                mask = (const uint8_t *)def_mask;
        } else {
                mask = (const uint8_t *)item->mask;
        }

        spec = (const uint8_t *)item->spec;
        last = (const uint8_t *)item->last;

        if (spec == NULL)
                goto exit;

        /*
         * If field values in "last" are either 0 or equal to the
         * corresponding values in "spec", then they are ignored
         */
        if (last != NULL &&
            !sfc_flow_is_zero(last, size) &&
            memcmp(last, spec, size) != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Ranging is not supported");
                return -rte_errno;
        }

        if (supp_mask == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Supported mask for the item should be specified");
                return -rte_errno;
        }

        /* Check that mask and spec do not ask for more match than supp_mask */
        for (i = 0; i < size; i++) {
                match = spec[i] | mask[i];
                supp = ((const uint8_t *)supp_mask)[i];

                if ((match | supp) != supp) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "Item's field is not supported");
                        return -rte_errno;
                }
        }

exit:
        *spec_ptr = spec;
        *mask_ptr = mask;
        return 0;
}

/*
 * Protocol parsers.
 * Partial masking is not supported: masks in items must be either
 * full or empty (zeroed), and may be set only for the supported fields
 * specified in the corresponding supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
                    __rte_unused efx_filter_spec_t *efx_spec,
                    __rte_unused struct rte_flow_error *error)
{
        return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                .type = 0xffff,
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_eth_mask,
                                 sizeof(struct rte_flow_item_eth),
                                 error);
        if (rc != 0)
                return rc;

        /* If "spec" is not set, could be any Ethernet */
        if (spec == NULL)
                return 0;

        if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
                rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->dst)) {
                goto fail_bad_mask;
        }

        if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
                rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                           EFX_MAC_ADDR_LEN);
        } else if (!is_zero_ether_addr(&mask->src)) {
                goto fail_bad_mask;
        }

        /*
         * Ether type is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->type == supp_mask.type) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = rte_bswap16(spec->type);
        } else if (mask->type != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the ETH pattern item");
        return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only the VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        uint16_t vid;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
                .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 NULL,
                                 sizeof(struct rte_flow_item_vlan),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * VID is in big-endian byte order in the item and
         * in little-endian in efx_spec, so a byte swap is needed.
         * If two VLAN items are included, the first matches
         * the outer tag and the next matches the inner tag.
         */
        if (mask->tci == supp_mask.tci) {
                vid = rte_bswap16(spec->tci);

                if (!(efx_spec->efs_match_flags &
                      EFX_FILTER_MATCH_OUTER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
                        efx_spec->efs_outer_vid = vid;
                } else if (!(efx_spec->efs_match_flags &
                             EFX_FILTER_MATCH_INNER_VID)) {
                        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
                        efx_spec->efs_inner_vid = vid;
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                           "More than two VLAN items");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "VLAN ID in TCI match is required");
                return -rte_errno;
        }

        return 0;
}

/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
        const struct rte_flow_item_ipv4 supp_mask = {
                .hdr = {
                        .src_addr = 0xffffffff,
                        .dst_addr = 0xffffffff,
                        .next_proto_id = 0xff,
                },
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv4_mask,
                                 sizeof(struct rte_flow_item_ipv4),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv4 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv4;
        } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV4 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv4 addresses are in big-endian byte order in both the item and
         * efx_spec, so they are used as is, without a byte swap
         */
        if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
                efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
        } else if (mask->hdr.src_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
                efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
        } else if (mask->hdr.dst_addr != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
        } else if (mask->hdr.next_proto_id != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV4 pattern item");
        return -rte_errno;
}

/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
        const struct rte_flow_item_ipv6 supp_mask = {
                .hdr = {
                        .src_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .dst_addr = { 0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff,
                                      0xff, 0xff, 0xff, 0xff },
                        .proto = 0xff,
                },
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_ipv6_mask,
                                 sizeof(struct rte_flow_item_ipv6),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by IPv6 source and destination addresses requires
         * the appropriate ETHER_TYPE in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
                efx_spec->efs_ether_type = ether_type_ipv6;
        } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "Ethertype in pattern with IPV6 item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * IPv6 addresses are in big-endian byte order in both the item and
         * efx_spec, so they are copied as is, without a byte swap
         */
        if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
                   sizeof(mask->hdr.src_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
                                 sizeof(spec->hdr.src_addr));
                rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
                           sizeof(efx_spec->efs_rem_host));
        } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
                                     sizeof(mask->hdr.src_addr))) {
                goto fail_bad_mask;
        }

        if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
                   sizeof(mask->hdr.dst_addr)) == 0) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

                RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
                                 sizeof(spec->hdr.dst_addr));
                rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
                           sizeof(efx_spec->efs_loc_host));
        } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
                                     sizeof(mask->hdr.dst_addr))) {
                goto fail_bad_mask;
        }

        if (mask->hdr.proto == supp_mask.hdr.proto) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = spec->hdr.proto;
        } else if (mask->hdr.proto != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the IPV6 pattern item");
        return -rte_errno;
}

/**
 * Convert TCP item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination ports fields
 *   are supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_tcp(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
        int rc;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
                .hdr = {
                        .src_port = 0xffff,
                        .dst_port = 0xffff,
                },
        };

        rc = sfc_flow_parse_init(item,
                                 (const void **)&spec,
                                 (const void **)&mask,
                                 &supp_mask,
                                 &rte_flow_item_tcp_mask,
                                 sizeof(struct rte_flow_item_tcp),
                                 error);
        if (rc != 0)
                return rc;

        /*
         * Filtering by TCP source and destination ports requires
         * the appropriate IP_PROTO in hardware filters
         */
        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
                efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
        } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM, item,
                        "IP proto in pattern with TCP item should be appropriate");
                return -rte_errno;
        }

        if (spec == NULL)
                return 0;

        /*
         * Source and destination ports are in big-endian byte order in the
         * item and in little-endian in efx_spec, so a byte swap is needed
         */
        if (mask->hdr.src_port == supp_mask.hdr.src_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
                efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
        } else if (mask->hdr.src_port != 0) {
                goto fail_bad_mask;
        }

        if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
                efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
                efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
        } else if (mask->hdr.dst_port != 0) {
                goto fail_bad_mask;
        }

        return 0;

fail_bad_mask:
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "Bad mask in the TCP pattern item");
        return -rte_errno;
}

static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
                .parse = sfc_flow_parse_vlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
                .parse = sfc_flow_parse_tcp,
        },
};

/*
 * Protocol-independent flow API support
 */

static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
{
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                   "NULL attribute");
                return -rte_errno;
        }
        if (attr->group != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
        if (attr->priority != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
                                   "Priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress != 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
        if (attr->ingress == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
                                   "Only ingress is supported");
                return -rte_errno;
        }

        flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
        flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

        return 0;
}
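
/*
 * Illustrative example (editor's sketch): only "group 0 priority 0 ingress"
 * attributes pass the checks above, e.g. the testpmd prefix
 * "flow create 0 ingress ..."; "egress" or a non-zero group/priority is
 * rejected with ENOTSUP.
 */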

/* Get an item from the sfc_flow_items array by type */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
                if (sfc_flow_items[i].type == type)
                        return &sfc_flow_items[i];

        return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        const struct sfc_flow_item *item;

        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
                                   "NULL pattern");
                return -rte_errno;
        }

        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
                item = sfc_flow_get_item(pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unsupported pattern item");
                        return -rte_errno;
                }

                /*
                 * Omitting one or several protocol layers at the beginning
                 * of the pattern is supported
                 */
                if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
                    item->prev_layer != prev_layer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
                                           "Unexpected sequence of pattern items");
                        return -rte_errno;
                }

                rc = item->parse(pattern, &flow->spec, error);
                if (rc != 0)
                        return rc;

                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
        }

        return 0;
}
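
/*
 * Illustrative example (editor's sketch): "ipv4 / end" alone is accepted
 * because leading layers may be omitted, and "eth / vlan / vlan / end" is
 * accepted because VLAN stays on L2; "tcp / ipv4 / end" is rejected as an
 * unexpected sequence since TCP requires the previous item to be L3.
 */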

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
                     const struct rte_flow_action_queue *queue,
                     struct rte_flow *flow)
{
        struct sfc_rxq *rxq;

        if (queue->index >= sa->rxq_count)
                return -EINVAL;

        rxq = sa->rxq_info[queue->index].rxq;
        flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

        return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       struct rte_flow *flow,
                       struct rte_flow_error *error)
{
        int rc;
        boolean_t is_specified = B_FALSE;

        if (actions == NULL) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
                                   "NULL actions");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;

                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        rc = sfc_flow_parse_queue(sa, actions->conf, flow);
                        if (rc != 0) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                        "Bad QUEUE action");
                                return -rte_errno;
                        }

                        is_specified = B_TRUE;
                        break;

                default:
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Action is not supported");
                        return -rte_errno;
                }
        }

        if (!is_specified) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
                                   "Action is unspecified");
                return -rte_errno;
        }

        return 0;
}

static int
sfc_flow_parse(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_flow *flow,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        memset(&flow->spec, 0, sizeof(flow->spec));

        rc = sfc_flow_parse_attr(attr, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_pattern(pattern, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        rc = sfc_flow_parse_actions(sa, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Flow rule pattern is not supported");
                return -rte_errno;
        }

fail_bad_value:
        return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct rte_flow flow;

        return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow = NULL;
        int rc;

        flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
        if (flow == NULL) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Failed to allocate memory");
                goto fail_no_mem;
        }

        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;

        sfc_adapter_lock(sa);

        TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = efx_filter_insert(sa->nic, &flow->spec);
                if (rc != 0) {
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to insert filter");
                        goto fail_filter_insert;
                }
        }

        sfc_adapter_unlock(sa);

        return flow;

fail_filter_insert:
        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        sfc_adapter_unlock(sa);

fail_bad_value:
        rte_free(flow);

fail_no_mem:
        return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int rc = 0;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = efx_filter_remove(sa->nic, &flow->spec);
                if (rc != 0)
                        rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Failed to destroy flow rule");
        }

        TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
        rte_free(flow);

        return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow_ptr;
        int rc = EINVAL;

        sfc_adapter_lock(sa);

        TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
                if (flow_ptr == flow)
                        rc = 0;
        }
        if (rc != 0) {
                rte_flow_error_set(error, rc,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to find flow rule to destroy");
                goto fail_bad_value;
        }

        rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
        sfc_adapter_unlock(sa);

        return rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_flow *flow;
        int rc = 0;
        int ret = 0;

        sfc_adapter_lock(sa);

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                rc = sfc_flow_remove(sa, flow, error);
                if (rc != 0)
                        ret = rc;
        }

        sfc_adapter_unlock(sa);

        return -ret;
}

const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
};
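
/*
 * Illustrative example (editor's sketch): applications reach these
 * callbacks through the generic rte_flow API, e.g.
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 * which ethdev dispatches to sfc_flow_create() via the .create pointer
 * above.
 */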

void
sfc_flow_init(struct sfc_adapter *sa)
{
        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
                rte_free(flow);
        }
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
        struct rte_flow *flow;

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
                efx_filter_remove(sa->nic, &flow->spec);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
        struct rte_flow *flow;
        int rc = 0;

        sfc_log_init(sa, "entry");

        SFC_ASSERT(sfc_adapter_is_locked(sa));

        TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
                rc = efx_filter_insert(sa->nic, &flow->spec);
                if (rc != 0)
                        goto fail_bad_flow;
        }

        sfc_log_init(sa, "done");

fail_bad_flow:
        return rc;
}
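
/*
 * Note on the lifecycle (editor's summary of the code above): rules created
 * while the port is stopped are only stored in filter.flow_list;
 * sfc_flow_start() inserts them into hardware on port start and
 * sfc_flow_stop() withdraws them on stop, while sfc_flow_fini() releases
 * the software list on device close.
 */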