diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 2ddde6168c..cb802d7991 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2017-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2017-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
@@ -18,12 +18,47 @@
 #include "efx.h"
 
 #include "sfc.h"
+#include "sfc_debug.h"
 #include "sfc_rx.h"
 #include "sfc_filter.h"
 #include "sfc_flow.h"
 #include "sfc_log.h"
 #include "sfc_dp_rx.h"
 
+struct sfc_flow_ops_by_spec {
+	sfc_flow_parse_cb_t	*parse;
+	sfc_flow_insert_cb_t	*insert;
+	sfc_flow_remove_cb_t	*remove;
+};
+
+static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
+static sfc_flow_insert_cb_t sfc_flow_filter_insert;
+static sfc_flow_remove_cb_t sfc_flow_filter_remove;
+
+static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
+	.parse = sfc_flow_parse_rte_to_filter,
+	.insert = sfc_flow_filter_insert,
+	.remove = sfc_flow_filter_remove,
+};
+
+static const struct sfc_flow_ops_by_spec *
+sfc_flow_get_ops_by_spec(struct rte_flow *flow)
+{
+	struct sfc_flow_spec *spec = &flow->spec;
+	const struct sfc_flow_ops_by_spec *ops = NULL;
+
+	switch (spec->type) {
+	case SFC_FLOW_SPEC_FILTER:
+		ops = &sfc_flow_ops_filter;
+		break;
+	default:
+		SFC_ASSERT(false);
+		break;
+	}
+
+	return ops;
+}
+
 /*
  * Currently, filter-based (VNIC) flow API is implemented in such a manner
  * that each flow rule is converted to one or more hardware filters.
@@ -35,25 +70,6 @@
  * of such a field.
*/ -enum sfc_flow_item_layers { - SFC_FLOW_ITEM_ANY_LAYER, - SFC_FLOW_ITEM_START_LAYER, - SFC_FLOW_ITEM_L2, - SFC_FLOW_ITEM_L3, - SFC_FLOW_ITEM_L4, -}; - -typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item, - efx_filter_spec_t *spec, - struct rte_flow_error *error); - -struct sfc_flow_item { - enum rte_flow_item_type type; /* Type of item */ - enum sfc_flow_item_layers layer; /* Layer of item */ - enum sfc_flow_item_layers prev_layer; /* Previous layer of item */ - sfc_flow_item_parse *parse; /* Parsing function */ -}; - static sfc_flow_item_parse sfc_flow_parse_void; static sfc_flow_item_parse sfc_flow_parse_eth; static sfc_flow_item_parse sfc_flow_parse_vlan; @@ -110,7 +126,7 @@ sfc_flow_is_zero(const uint8_t *buf, unsigned int size) /* * Validate item and prepare structures spec and mask for parsing */ -static int +int sfc_flow_parse_init(const struct rte_flow_item *item, const void **spec_ptr, const void **mask_ptr, @@ -209,7 +225,7 @@ exit: static int sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, - __rte_unused efx_filter_spec_t *efx_spec, + __rte_unused struct sfc_flow_parse_ctx *parse_ctx, __rte_unused struct rte_flow_error *error) { return 0; @@ -231,10 +247,11 @@ sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, */ static int sfc_flow_parse_eth(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_eth *spec = NULL; const struct rte_flow_item_eth *mask = NULL; const struct rte_flow_item_eth supp_mask = { @@ -343,11 +360,12 @@ fail_bad_mask: */ static int sfc_flow_parse_vlan(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; uint16_t vid; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_vlan *spec = NULL; const struct rte_flow_item_vlan *mask = NULL; const struct rte_flow_item_vlan supp_mask = { @@ -429,10 +447,11 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item, */ static int sfc_flow_parse_ipv4(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_ipv4 *spec = NULL; const struct rte_flow_item_ipv4 *mask = NULL; const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4); @@ -519,10 +538,11 @@ fail_bad_mask: */ static int sfc_flow_parse_ipv6(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_ipv6 *spec = NULL; const struct rte_flow_item_ipv6 *mask = NULL; const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6); @@ -627,10 +647,11 @@ fail_bad_mask: */ static int sfc_flow_parse_tcp(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_tcp *spec = NULL; const struct rte_flow_item_tcp *mask = NULL; const struct rte_flow_item_tcp supp_mask = { @@ -708,10 +729,11 @@ fail_bad_mask: */ static int sfc_flow_parse_udp(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, 
struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_udp *spec = NULL; const struct rte_flow_item_udp *mask = NULL; const struct rte_flow_item_udp supp_mask = { @@ -866,10 +888,11 @@ sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, */ static int sfc_flow_parse_vxlan(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_vxlan *spec = NULL; const struct rte_flow_item_vxlan *mask = NULL; const struct rte_flow_item_vxlan supp_mask = { @@ -918,10 +941,11 @@ sfc_flow_parse_vxlan(const struct rte_flow_item *item, */ static int sfc_flow_parse_geneve(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_geneve *spec = NULL; const struct rte_flow_item_geneve *mask = NULL; const struct rte_flow_item_geneve supp_mask = { @@ -985,10 +1009,11 @@ sfc_flow_parse_geneve(const struct rte_flow_item *item, */ static int sfc_flow_parse_nvgre(const struct rte_flow_item *item, - efx_filter_spec_t *efx_spec, + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; + efx_filter_spec_t *efx_spec = parse_ctx->filter; const struct rte_flow_item_nvgre *spec = NULL; const struct rte_flow_item_nvgre *mask = NULL; const struct rte_flow_item_nvgre supp_mask = { @@ -1027,60 +1052,70 @@ static const struct sfc_flow_item sfc_flow_items[] = { .type = RTE_FLOW_ITEM_TYPE_VOID, .prev_layer = SFC_FLOW_ITEM_ANY_LAYER, .layer = SFC_FLOW_ITEM_ANY_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_void, }, { .type = RTE_FLOW_ITEM_TYPE_ETH, .prev_layer = SFC_FLOW_ITEM_START_LAYER, .layer = SFC_FLOW_ITEM_L2, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_eth, }, { .type = RTE_FLOW_ITEM_TYPE_VLAN, .prev_layer = SFC_FLOW_ITEM_L2, .layer = SFC_FLOW_ITEM_L2, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_vlan, }, { .type = RTE_FLOW_ITEM_TYPE_IPV4, .prev_layer = SFC_FLOW_ITEM_L2, .layer = SFC_FLOW_ITEM_L3, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_ipv4, }, { .type = RTE_FLOW_ITEM_TYPE_IPV6, .prev_layer = SFC_FLOW_ITEM_L2, .layer = SFC_FLOW_ITEM_L3, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_ipv6, }, { .type = RTE_FLOW_ITEM_TYPE_TCP, .prev_layer = SFC_FLOW_ITEM_L3, .layer = SFC_FLOW_ITEM_L4, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_tcp, }, { .type = RTE_FLOW_ITEM_TYPE_UDP, .prev_layer = SFC_FLOW_ITEM_L3, .layer = SFC_FLOW_ITEM_L4, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_udp, }, { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .prev_layer = SFC_FLOW_ITEM_L4, .layer = SFC_FLOW_ITEM_START_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_vxlan, }, { .type = RTE_FLOW_ITEM_TYPE_GENEVE, .prev_layer = SFC_FLOW_ITEM_L4, .layer = SFC_FLOW_ITEM_START_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_geneve, }, { .type = RTE_FLOW_ITEM_TYPE_NVGRE, .prev_layer = SFC_FLOW_ITEM_L3, .layer = SFC_FLOW_ITEM_START_LAYER, + .ctx_type = SFC_FLOW_PARSE_CTX_FILTER, .parse = sfc_flow_parse_nvgre, }, }; @@ -1108,62 +1143,65 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr, "Groups are not supported"); return -rte_errno; } - if (attr->priority != 0) 
{ - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, - "Priorities are not supported"); - return -rte_errno; - } if (attr->egress != 0) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, "Egress is not supported"); return -rte_errno; } - if (attr->transfer != 0) { + if (attr->ingress == 0) { rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, - "Transfer is not supported"); + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, + "Ingress is compulsory"); return -rte_errno; } - if (attr->ingress == 0) { + if (attr->transfer == 0) { + if (attr->priority != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Priorities are unsupported"); + return -rte_errno; + } + spec->type = SFC_FLOW_SPEC_FILTER; + spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX; + spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL; + } else { rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, - "Only ingress is supported"); + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, + "Transfer is not supported"); return -rte_errno; } - spec->type = SFC_FLOW_SPEC_FILTER; - spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX; - spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; - return 0; } /* Get item from array sfc_flow_items */ static const struct sfc_flow_item * -sfc_flow_get_item(enum rte_flow_item_type type) +sfc_flow_get_item(const struct sfc_flow_item *items, + unsigned int nb_items, + enum rte_flow_item_type type) { unsigned int i; - for (i = 0; i < RTE_DIM(sfc_flow_items); i++) - if (sfc_flow_items[i].type == type) - return &sfc_flow_items[i]; + for (i = 0; i < nb_items; i++) + if (items[i].type == type) + return &items[i]; return NULL; } -static int -sfc_flow_parse_pattern(const struct rte_flow_item pattern[], - struct rte_flow *flow, +int +sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items, + unsigned int nb_flow_items, + const struct rte_flow_item pattern[], + struct sfc_flow_parse_ctx *parse_ctx, struct rte_flow_error *error) { int rc; unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; boolean_t is_ifrm = B_FALSE; const struct sfc_flow_item *item; - struct sfc_flow_spec *spec = &flow->spec; - struct sfc_flow_spec_filter *spec_filter = &spec->filter; if (pattern == NULL) { rte_flow_error_set(error, EINVAL, @@ -1173,7 +1211,8 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[], } for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { - item = sfc_flow_get_item(pattern->type); + item = sfc_flow_get_item(flow_items, nb_flow_items, + pattern->type); if (item == NULL) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, pattern, @@ -1228,7 +1267,14 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[], break; } - rc = item->parse(pattern, &spec_filter->template, error); + if (parse_ctx->type != item->ctx_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, pattern, + "Parse context type mismatch"); + return -rte_errno; + } + + rc = item->parse(pattern, parse_ctx, error); if (rc != 0) return rc; @@ -1247,6 +1293,7 @@ sfc_flow_parse_queue(struct sfc_adapter *sa, struct sfc_flow_spec *spec = &flow->spec; struct sfc_flow_spec_filter *spec_filter = &spec->filter; struct sfc_rxq *rxq; + struct sfc_rxq_info *rxq_info; if (queue->index >= sfc_sa2shared(sa)->rxq_count) return -EINVAL; @@ -1254,6 +1301,10 @@ sfc_flow_parse_queue(struct sfc_adapter *sa, 
rxq = &sa->rxq_ctrl[queue->index]; spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index; + rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index]; + spec_filter->rss_hash_required = !!(rxq_info->rxq_flags & + SFC_RXQ_FLAG_RSS_HASH); + return 0; } @@ -1419,13 +1470,34 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter; struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf; uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + boolean_t create_context; unsigned int i; int rc = 0; - if (spec_filter->rss) { - unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max - - flow_rss->rxq_hw_index_min + 1, - EFX_MAXRSS); + create_context = spec_filter->rss || (spec_filter->rss_hash_required && + rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT); + + if (create_context) { + unsigned int rss_spread; + unsigned int rss_hash_types; + uint8_t *rss_key; + + if (spec_filter->rss) { + rss_spread = MIN(flow_rss->rxq_hw_index_max - + flow_rss->rxq_hw_index_min + 1, + EFX_MAXRSS); + rss_hash_types = flow_rss->rss_hash_types; + rss_key = flow_rss->rss_key; + } else { + /* + * Initialize dummy RSS context parameters to have + * valid RSS hash. Use default RSS hash function and + * key. + */ + rss_spread = 1; + rss_hash_types = rss->hash_types; + rss_key = rss->key; + } rc = efx_rx_scale_context_alloc(sa->nic, EFX_RX_SCALE_EXCLUSIVE, @@ -1436,16 +1508,19 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, rss->hash_alg, - flow_rss->rss_hash_types, B_TRUE); + rss_hash_types, B_TRUE); if (rc != 0) goto fail_scale_mode_set; rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, - flow_rss->rss_key, - sizeof(rss->key)); + rss_key, sizeof(rss->key)); if (rc != 0) goto fail_scale_key_set; + } else { + efs_rss_context = rss->dummy_rss_context; + } + if (spec_filter->rss || spec_filter->rss_hash_required) { /* * At this point, fully elaborated filter specifications * have been produced from the template. To make sure that @@ -1456,8 +1531,9 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, efx_filter_spec_t *spec = &spec_filter->filters[i]; spec->efs_rss_context = efs_rss_context; - spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; + if (spec_filter->rss) + spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; } } @@ -1465,7 +1541,12 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, if (rc != 0) goto fail_filter_insert; - if (spec_filter->rss) { + if (create_context) { + unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0}; + unsigned int *tbl; + + tbl = spec_filter->rss ? 
flow_rss->rss_tbl : dummy_tbl; + /* * Scale table is set after filter insertion because * the table entries are relative to the base RxQ ID @@ -1475,10 +1556,13 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, * the table entries, and the operation will succeed */ rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, - flow_rss->rss_tbl, - RTE_DIM(flow_rss->rss_tbl)); + tbl, RTE_DIM(flow_rss->rss_tbl)); if (rc != 0) goto fail_scale_tbl_set; + + /* Remember created dummy RSS context */ + if (!spec_filter->rss) + rss->dummy_rss_context = efs_rss_context; } return 0; @@ -1489,7 +1573,7 @@ fail_scale_tbl_set: fail_filter_insert: fail_scale_key_set: fail_scale_mode_set: - if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) + if (create_context) efx_rx_scale_context_free(sa->nic, efs_rss_context); fail_scale_context_alloc: @@ -2277,21 +2361,23 @@ sfc_flow_validate_match_flags(struct sfc_adapter *sa, } static int -sfc_flow_parse(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow *flow, - struct rte_flow_error *error) +sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) { struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); + struct sfc_flow_spec *spec = &flow->spec; + struct sfc_flow_spec_filter *spec_filter = &spec->filter; + struct sfc_flow_parse_ctx ctx; int rc; - rc = sfc_flow_parse_attr(attr, flow, error); - if (rc != 0) - goto fail_bad_value; + ctx.type = SFC_FLOW_PARSE_CTX_FILTER; + ctx.filter = &spec_filter->template; - rc = sfc_flow_parse_pattern(pattern, flow, error); + rc = sfc_flow_parse_pattern(sfc_flow_items, RTE_DIM(sfc_flow_items), + pattern, &ctx, error); if (rc != 0) goto fail_bad_value; @@ -2309,6 +2395,32 @@ fail_bad_value: return rc; } +static int +sfc_flow_parse(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct sfc_flow_ops_by_spec *ops; + int rc; + + rc = sfc_flow_parse_attr(attr, flow, error); + if (rc != 0) + return rc; + + ops = sfc_flow_get_ops_by_spec(flow); + if (ops == NULL || ops->parse == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "No backend to handle this flow"); + return -rte_errno; + } + + return ops->parse(dev, pattern, actions, flow, error); +} + static struct rte_flow * sfc_flow_zmalloc(struct rte_flow_error *error) { @@ -2330,6 +2442,54 @@ sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow) rte_free(flow); } +static int +sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct sfc_flow_ops_by_spec *ops; + int rc; + + ops = sfc_flow_get_ops_by_spec(flow); + if (ops == NULL || ops->insert == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "No backend to handle this flow"); + return rte_errno; + } + + rc = ops->insert(sa, flow); + if (rc != 0) { + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to insert the flow rule"); + } + + return rc; +} + +static int +sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct sfc_flow_ops_by_spec *ops; + int rc; + + ops = sfc_flow_get_ops_by_spec(flow); + if (ops == NULL 
|| ops->remove == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "No backend to handle this flow"); + return rte_errno; + } + + rc = ops->remove(sa, flow); + if (rc != 0) { + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to remove the flow rule"); + } + + return rc; +} + static int sfc_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -2376,20 +2536,16 @@ sfc_flow_create(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries); if (sa->state == SFC_ADAPTER_STARTED) { - rc = sfc_flow_filter_insert(sa, flow); - if (rc != 0) { - rte_flow_error_set(error, rc, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Failed to insert filter"); - goto fail_filter_insert; - } + rc = sfc_flow_insert(sa, flow, error); + if (rc != 0) + goto fail_flow_insert; } sfc_adapter_unlock(sa); return flow; -fail_filter_insert: +fail_flow_insert: TAILQ_REMOVE(&sa->flow_list, flow, entries); fail_bad_value: @@ -2400,29 +2556,6 @@ fail_no_mem: return NULL; } -static int -sfc_flow_remove(struct sfc_adapter *sa, - struct rte_flow *flow, - struct rte_flow_error *error) -{ - int rc = 0; - - SFC_ASSERT(sfc_adapter_is_locked(sa)); - - if (sa->state == SFC_ADAPTER_STARTED) { - rc = sfc_flow_filter_remove(sa, flow); - if (rc != 0) - rte_flow_error_set(error, rc, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Failed to destroy flow rule"); - } - - TAILQ_REMOVE(&sa->flow_list, flow, entries); - sfc_flow_free(sa, flow); - - return rc; -} - static int sfc_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, @@ -2445,7 +2578,11 @@ sfc_flow_destroy(struct rte_eth_dev *dev, goto fail_bad_value; } - rc = sfc_flow_remove(sa, flow, error); + if (sa->state == SFC_ADAPTER_STARTED) + rc = sfc_flow_remove(sa, flow, error); + + TAILQ_REMOVE(&sa->flow_list, flow, entries); + sfc_flow_free(sa, flow); fail_bad_value: sfc_adapter_unlock(sa); @@ -2459,15 +2596,21 @@ sfc_flow_flush(struct rte_eth_dev *dev, { struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev); struct rte_flow *flow; - int rc = 0; int ret = 0; sfc_adapter_lock(sa); while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) { - rc = sfc_flow_remove(sa, flow, error); - if (rc != 0) - ret = rc; + if (sa->state == SFC_ADAPTER_STARTED) { + int rc; + + rc = sfc_flow_remove(sa, flow, error); + if (rc != 0) + ret = rc; + } + + TAILQ_REMOVE(&sa->flow_list, flow, entries); + sfc_flow_free(sa, flow); } sfc_adapter_unlock(sa); @@ -2529,12 +2672,19 @@ sfc_flow_fini(struct sfc_adapter *sa) void sfc_flow_stop(struct sfc_adapter *sa) { + struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); + struct sfc_rss *rss = &sas->rss; struct rte_flow *flow; SFC_ASSERT(sfc_adapter_is_locked(sa)); TAILQ_FOREACH(flow, &sa->flow_list, entries) - sfc_flow_filter_remove(sa, flow); + sfc_flow_remove(sa, flow, NULL); + + if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) { + efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context); + rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT; + } } int @@ -2548,7 +2698,7 @@ sfc_flow_start(struct sfc_adapter *sa) SFC_ASSERT(sfc_adapter_is_locked(sa)); TAILQ_FOREACH(flow, &sa->flow_list, entries) { - rc = sfc_flow_filter_insert(sa, flow); + rc = sfc_flow_insert(sa, flow, NULL); if (rc != 0) goto fail_bad_flow; }
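
The hunks above funnel every generic rte_flow entry point through a per-spec-type ops
table (sfc_flow_ops_by_spec), so a spec type other than SFC_FLOW_SPEC_FILTER can later
be dispatched without touching sfc_flow_parse(), sfc_flow_insert() or sfc_flow_remove().
The standalone sketch below mirrors only the shape of that lookup; every demo_*
identifier is a hypothetical stand-in, not driver API.

/*
 * Minimal sketch of the ops-by-spec dispatch introduced above.
 * Only the lookup shape mirrors sfc_flow_get_ops_by_spec(); the
 * demo_* identifiers are illustrative, not part of the driver.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum demo_spec_type { DEMO_SPEC_FILTER };

struct demo_flow {
	enum demo_spec_type type;
};

struct demo_ops_by_spec {
	int (*insert)(struct demo_flow *flow);
	int (*remove)(struct demo_flow *flow);
};

static int
demo_filter_insert(struct demo_flow *flow)
{
	(void)flow;
	printf("insert via the filter backend\n");
	return 0;
}

static int
demo_filter_remove(struct demo_flow *flow)
{
	(void)flow;
	printf("remove via the filter backend\n");
	return 0;
}

static const struct demo_ops_by_spec demo_filter_ops = {
	.insert = demo_filter_insert,
	.remove = demo_filter_remove,
};

/* Map a flow's spec type to its backend ops; NULL means "no backend". */
static const struct demo_ops_by_spec *
demo_get_ops_by_spec(const struct demo_flow *flow)
{
	switch (flow->type) {
	case DEMO_SPEC_FILTER:
		return &demo_filter_ops;
	default:
		assert(0);
		return NULL;
	}
}

int
main(void)
{
	struct demo_flow flow = { .type = DEMO_SPEC_FILTER };
	const struct demo_ops_by_spec *ops = demo_get_ops_by_spec(&flow);

	/* Generic entry points only check for a backend and delegate. */
	if (ops == NULL || ops->insert == NULL || ops->remove == NULL)
		return 1;
	ops->insert(&flow);
	ops->remove(&flow);
	return 0;
}

In the driver the missing-backend case is reported as ENOTSUP through
rte_flow_error_set(), as the sfc_flow_insert()/sfc_flow_remove() hunks show.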
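sfc_flow_parse_pattern() is generalized along the same lines: the items table is now a
parameter, and each sfc_flow_item names the parse-context type it expects, so an item
is rejected when the caller's sfc_flow_parse_ctx is of a different type. A hedged
sketch of that table-driven loop, again with hypothetical demo_* names:

/*
 * Sketch of table-driven item parsing with a context-type check,
 * modeled on sfc_flow_parse_pattern(). demo_* names are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

enum demo_ctx_type { DEMO_CTX_FILTER };

struct demo_parse_ctx {
	enum demo_ctx_type type;
	/* backend-specific parse state would live here */
};

enum demo_item_type { DEMO_ITEM_END, DEMO_ITEM_ETH };

struct demo_flow_item {
	enum demo_item_type type;	/* pattern item handled */
	enum demo_ctx_type ctx_type;	/* context the parser expects */
	int (*parse)(struct demo_parse_ctx *ctx);
};

static int
demo_parse_eth(struct demo_parse_ctx *ctx)
{
	(void)ctx;
	printf("eth item parsed\n");
	return 0;
}

static const struct demo_flow_item demo_flow_items[] = {
	{ DEMO_ITEM_ETH, DEMO_CTX_FILTER, demo_parse_eth },
};

static int
demo_parse_pattern(const struct demo_flow_item *items, unsigned int nb_items,
		   const enum demo_item_type *pattern,
		   struct demo_parse_ctx *ctx)
{
	for (; *pattern != DEMO_ITEM_END; pattern++) {
		const struct demo_flow_item *item = NULL;
		unsigned int i;

		for (i = 0; i < nb_items; i++) {
			if (items[i].type == *pattern) {
				item = &items[i];
				break;
			}
		}
		if (item == NULL)
			return -1;	/* unsupported item */
		if (item->ctx_type != ctx->type)
			return -1;	/* parse context type mismatch */
		if (item->parse(ctx) != 0)
			return -1;
	}
	return 0;
}

int
main(void)
{
	const enum demo_item_type pattern[] = { DEMO_ITEM_ETH, DEMO_ITEM_END };
	struct demo_parse_ctx ctx = { .type = DEMO_CTX_FILTER };
	unsigned int nb = sizeof(demo_flow_items) / sizeof(demo_flow_items[0]);

	return demo_parse_pattern(demo_flow_items, nb, pattern, &ctx);
}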
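Finally, sfc_flow_filter_insert() now allocates an RSS context even for rules with no
RSS action when the matched RxQ needs a valid RSS hash (rss_hash_required): a
single-queue "dummy" context with the default hash types and key is created on first
use, cached in rss->dummy_rss_context for reuse by later rules, given an all-zero
indirection table (dummy_tbl, consistent with its spread of one), and freed in
sfc_flow_stop(). Below is a sketch of just that create-once/reuse decision, assuming
a hypothetical demo_alloc_rss_context() in place of efx_rx_scale_context_alloc() and
its hash mode/key setup.

/*
 * Sketch of the dummy-RSS-context caching added above. demo_* names
 * are hypothetical; DEMO_RSS_CONTEXT_DEFAULT plays the role of
 * EFX_RSS_CONTEXT_DEFAULT ("no context allocated yet").
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RSS_CONTEXT_DEFAULT UINT32_MAX

static uint32_t demo_next_context = 1;

static int
demo_alloc_rss_context(uint32_t *ctxp)
{
	/* Stands in for context allocation plus hash mode/key setup. */
	*ctxp = demo_next_context++;
	return 0;
}

/*
 * Choose the RSS context for a rule: an exclusive one when the rule
 * carries an RSS action, otherwise the shared dummy context, which is
 * created on first use and cached for subsequent rules.
 */
static int
demo_pick_rss_context(bool flow_has_rss, bool rss_hash_required,
		      uint32_t *dummy_ctxp, uint32_t *ctxp)
{
	bool create = flow_has_rss ||
		      (rss_hash_required &&
		       *dummy_ctxp == DEMO_RSS_CONTEXT_DEFAULT);
	int rc;

	if (!create) {
		*ctxp = *dummy_ctxp;
		return 0;
	}

	rc = demo_alloc_rss_context(ctxp);
	if (rc != 0)
		return rc;

	if (!flow_has_rss)
		*dummy_ctxp = *ctxp;	/* remember the dummy context */
	return 0;
}

int
main(void)
{
	uint32_t dummy = DEMO_RSS_CONTEXT_DEFAULT;
	uint32_t ctx;

	demo_pick_rss_context(false, true, &dummy, &ctx);
	printf("first rule:  ctx=%" PRIu32 "\n", ctx);	/* allocates */
	demo_pick_rss_context(false, true, &dummy, &ctx);
	printf("second rule: ctx=%" PRIu32 "\n", ctx);	/* reuses */
	return 0;
}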