/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
/*
 * At the moment, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */
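/*
 * For example, a rule with an ETH item (fully masked destination MAC)
 * and a QUEUE action is turned into a single filter specification with
 * the EFX_FILTER_MATCH_LOC_MAC match flag set and the hardware index of
 * the chosen Rx queue as the delivery target (illustrative; see the
 * item parsers and sfc_flow_parse_queue() below).
 */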
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
};
typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);
struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	unsigned int i;
	uint8_t sum = 0;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}
/*
 * Validate the item and prepare the "spec" and "mask" structures
 * for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}
124 * If "mask" is not set, default mask is used,
125 * but if default mask is NULL, "mask" should be set
127 if (item->mask == NULL) {
128 if (def_mask == NULL) {
129 rte_flow_error_set(error, EINVAL,
130 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
131 "Mask should be specified");
135 mask = (const uint8_t *)def_mask;
137 mask = (const uint8_t *)item->mask;
140 spec = (const uint8_t *)item->spec;
141 last = (const uint8_t *)item->last;
	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec", then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}
	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for the item should be specified");
		return -rte_errno;
	}
	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
/*
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */
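/*
 * For example, an all-ones Ethernet destination mask selects matching
 * on the address, an all-zeros mask ignores it, and any partial mask
 * is rejected (illustrative; see sfc_flow_parse_eth() below).
 */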
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;
	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}
	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so a byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;
	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so a byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;
	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;
	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;
fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param error[out]
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv6_mask,
				 sizeof(struct rte_flow_item_ipv6),
				 error);
	if (rc != 0)
		return rc;
	/*
	 * Filtering by IPv6 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv6;
	} else if (efx_spec->efs_ether_type != ether_type_ipv6) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV6 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;
	/*
	 * IPv6 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
		   sizeof(mask->hdr.src_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
				 sizeof(spec->hdr.src_addr));
		rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
			   sizeof(efx_spec->efs_rem_host));
	} else if (!sfc_flow_is_zero(mask->hdr.src_addr,
				     sizeof(mask->hdr.src_addr))) {
		goto fail_bad_mask;
	}

	if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
		   sizeof(mask->hdr.dst_addr)) == 0) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;

		RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
				 sizeof(spec->hdr.dst_addr));
		rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
			   sizeof(efx_spec->efs_loc_host));
	} else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
				     sizeof(mask->hdr.dst_addr))) {
		goto fail_bad_mask;
	}

	if (mask->hdr.proto == supp_mask.hdr.proto) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.proto;
	} else if (mask->hdr.proto != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV6 pattern item");
	return -rte_errno;
}
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv6,
	},
};
/*
 * Protocol-independent flow API support
 */
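/*
 * Validate the flow rule attributes: only the default group and
 * priority are allowed and the rule must be an ingress rule.
 */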
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

	return 0;
}
/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}
static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		rc = item->parse(pattern, &flow->spec, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}
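/* Map the QUEUE action to the hardware index of the chosen Rx queue */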
static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}
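/*
 * Convert the actions to the filter delivery settings: a QUEUE action
 * is required and VOID actions are skipped; anything else is rejected.
 */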
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	memset(&flow->spec, 0, sizeof(flow->spec));

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

	return 0;

fail_bad_value:
	return rc;
}
static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}
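/*
 * Parse the rule, add it to the flow list and, if the adapter is
 * started, insert the corresponding filter into the hardware.
 */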
static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	/* The flow list is only modified under the adapter lock */
	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}
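/*
 * Remove the flow rule: withdraw the filter from the hardware if the
 * adapter is started and drop the rule from the flow list. The caller
 * must hold the adapter lock.
 */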
static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_remove(sa->nic, &flow->spec);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}
static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
};
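/*
 * These callbacks are exposed to applications through the generic
 * filter control entry point (RTE_ETH_FILTER_GENERIC), which is
 * handled elsewhere in the driver.
 */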
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}
void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}
void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		efx_filter_remove(sa->nic, &flow->spec);
}
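/* Re-insert the filters for all flow rules when the adapter starts */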
int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}