X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_flow.c;h=4321045d1ada1384a2a896465305a0fedb124407;hb=dadff137931c;hp=ec0ca3cd6b65ba15f9c7abf0f1b37933cc4ec80b;hpb=2e2e5bdf908ef7ce6ba7a33be5bec6f42f4a39fe;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index ec0ca3cd6b..4321045d1a 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2017-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2017-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
@@ -18,6 +18,7 @@
 #include "efx.h"
 
 #include "sfc.h"
+#include "sfc_debug.h"
 #include "sfc_rx.h"
 #include "sfc_filter.h"
 #include "sfc_flow.h"
@@ -26,20 +27,33 @@
 
 struct sfc_flow_ops_by_spec {
 	sfc_flow_parse_cb_t	*parse;
+	sfc_flow_verify_cb_t	*verify;
+	sfc_flow_cleanup_cb_t	*cleanup;
 	sfc_flow_insert_cb_t	*insert;
 	sfc_flow_remove_cb_t	*remove;
 };
 
 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
+static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
 static sfc_flow_insert_cb_t sfc_flow_filter_insert;
 static sfc_flow_remove_cb_t sfc_flow_filter_remove;
 
 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
 	.parse = sfc_flow_parse_rte_to_filter,
+	.verify = NULL,
+	.cleanup = NULL,
 	.insert = sfc_flow_filter_insert,
 	.remove = sfc_flow_filter_remove,
 };
 
+static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
+	.parse = sfc_flow_parse_rte_to_mae,
+	.verify = sfc_mae_flow_verify,
+	.cleanup = sfc_mae_flow_cleanup,
+	.insert = sfc_mae_flow_insert,
+	.remove = sfc_mae_flow_remove,
+};
+
 static const struct sfc_flow_ops_by_spec *
 sfc_flow_get_ops_by_spec(struct rte_flow *flow)
 {
@@ -50,6 +64,9 @@ sfc_flow_get_ops_by_spec(struct rte_flow *flow)
 	case SFC_FLOW_SPEC_FILTER:
 		ops = &sfc_flow_ops_filter;
 		break;
+	case SFC_FLOW_SPEC_MAE:
+		ops = &sfc_flow_ops_mae;
+		break;
 	default:
 		SFC_ASSERT(false);
 		break;
@@ -1123,12 +1140,15 @@ static const struct sfc_flow_item sfc_flow_items[] = {
  * Protocol-independent flow API support
  */
 static int
-sfc_flow_parse_attr(const struct rte_flow_attr *attr,
+sfc_flow_parse_attr(struct sfc_adapter *sa,
+		    const struct rte_flow_attr *attr,
 		    struct rte_flow *flow,
 		    struct rte_flow_error *error)
 {
 	struct sfc_flow_spec *spec = &flow->spec;
 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
+	struct sfc_mae *mae = &sa->mae;
 
 	if (attr == NULL) {
 		rte_flow_error_set(error, EINVAL,
@@ -1164,11 +1184,25 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
 		spec->type = SFC_FLOW_SPEC_FILTER;
 		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
 		spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+		spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
 	} else {
-		rte_flow_error_set(error, ENOTSUP,
-				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
-				   "Transfer is not supported");
-		return -rte_errno;
+		if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+					   attr, "Transfer is not supported");
+			return -rte_errno;
+		}
+		if (attr->priority > mae->nb_action_rule_prios_max) {
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+					   attr, "Unsupported priority level");
+			return -rte_errno;
+		}
+		spec->type = SFC_FLOW_SPEC_MAE;
+		spec_mae->priority = attr->priority;
+		spec_mae->match_spec = NULL;
+		spec_mae->action_set = NULL;
+		spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
 	}
 
 	return 0;
@@ -1254,7 +1288,8 @@ sfc_flow_parse_pattern(const struct sfc_flow_item *flow_items,
 			break;
 
 		default:
-			if (is_ifrm) {
+			if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
+			    is_ifrm) {
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ITEM,
 					pattern,
@@ -1291,6 +1326,7 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
 	struct sfc_flow_spec *spec = &flow->spec;
 	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
 	struct sfc_rxq *rxq;
+	struct sfc_rxq_info *rxq_info;
 
 	if (queue->index >= sfc_sa2shared(sa)->rxq_count)
 		return -EINVAL;
@@ -1298,6 +1334,10 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
 	rxq = &sa->rxq_ctrl[queue->index];
 	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
 
+	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
+	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
+					    SFC_RXQ_FLAG_RSS_HASH);
+
 	return 0;
 }
 
@@ -1463,13 +1503,34 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
 	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
 	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+	boolean_t create_context;
 	unsigned int i;
 	int rc = 0;
 
-	if (spec_filter->rss) {
-		unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
-					      flow_rss->rxq_hw_index_min + 1,
-					      EFX_MAXRSS);
+	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
+			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
+
+	if (create_context) {
+		unsigned int rss_spread;
+		unsigned int rss_hash_types;
+		uint8_t *rss_key;
+
+		if (spec_filter->rss) {
+			rss_spread = MIN(flow_rss->rxq_hw_index_max -
+					 flow_rss->rxq_hw_index_min + 1,
+					 EFX_MAXRSS);
+			rss_hash_types = flow_rss->rss_hash_types;
+			rss_key = flow_rss->rss_key;
+		} else {
+			/*
+			 * Initialize dummy RSS context parameters to have
+			 * valid RSS hash. Use default RSS hash function and
+			 * key.
+			 */
+			rss_spread = 1;
+			rss_hash_types = rss->hash_types;
+			rss_key = rss->key;
+		}
 
 		rc = efx_rx_scale_context_alloc(sa->nic,
 						EFX_RX_SCALE_EXCLUSIVE,
@@ -1480,16 +1541,19 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
 
 		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
 					   rss->hash_alg,
-					   flow_rss->rss_hash_types, B_TRUE);
+					   rss_hash_types, B_TRUE);
 		if (rc != 0)
 			goto fail_scale_mode_set;
 
 		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
-					  flow_rss->rss_key,
-					  sizeof(rss->key));
+					  rss_key, sizeof(rss->key));
 		if (rc != 0)
 			goto fail_scale_key_set;
+	} else {
+		efs_rss_context = rss->dummy_rss_context;
+	}
 
+	if (spec_filter->rss || spec_filter->rss_hash_required) {
 		/*
 		 * At this point, fully elaborated filter specifications
 		 * have been produced from the template. To make sure that
@@ -1500,8 +1564,9 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
 			efx_filter_spec_t *spec = &spec_filter->filters[i];
 
 			spec->efs_rss_context = efs_rss_context;
-			spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
 			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+			if (spec_filter->rss)
+				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
 		}
 	}
 
@@ -1509,7 +1574,12 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
 	if (rc != 0)
 		goto fail_filter_insert;
 
-	if (spec_filter->rss) {
+	if (create_context) {
+		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
+		unsigned int *tbl;
+
+		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
+
 		/*
 		 * Scale table is set after filter insertion because
 		 * the table entries are relative to the base RxQ ID
@@ -1519,10 +1589,13 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
 		 * the table entries, and the operation will succeed
 		 */
 		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
-					  flow_rss->rss_tbl,
-					  RTE_DIM(flow_rss->rss_tbl));
+					  tbl, RTE_DIM(flow_rss->rss_tbl));
 		if (rc != 0)
 			goto fail_scale_tbl_set;
+
+		/* Remember created dummy RSS context */
+		if (!spec_filter->rss)
+			rss->dummy_rss_context = efs_rss_context;
 	}
 
 	return 0;
@@ -1533,7 +1606,7 @@ fail_scale_tbl_set:
 
 fail_filter_insert:
 fail_scale_key_set:
 fail_scale_mode_set:
-	if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
+	if (create_context)
 		efx_rx_scale_context_free(sa->nic, efs_rss_context);
 
 fail_scale_context_alloc:
@@ -1607,9 +1680,6 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
 		return -rte_errno;
 	}
 
-#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
-	RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
-
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
 		case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1705,7 +1775,6 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
 
 		actions_set |= (1UL << actions->type);
 	}
-#undef SFC_BUILD_SET_OVERFLOW
 
 	/* When fate is unknown, drop traffic. */
 	if ((actions_set & fate_actions_mask) == 0) {
@@ -2355,6 +2424,30 @@ fail_bad_value:
 	return rc;
 }
 
+static int
+sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
+			  const struct rte_flow_item pattern[],
+			  const struct rte_flow_action actions[],
+			  struct rte_flow *flow,
+			  struct rte_flow_error *error)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_flow_spec *spec = &flow->spec;
+	struct sfc_flow_spec_mae *spec_mae = &spec->mae;
+	int rc;
+
+	rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
+	if (rc != 0)
+		return rc;
+
+	rc = sfc_mae_rule_parse_actions(sa, actions, &spec_mae->action_set,
+					error);
+	if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
 static int
 sfc_flow_parse(struct rte_eth_dev *dev,
 	       const struct rte_flow_attr *attr,
@@ -2363,10 +2456,11 @@ sfc_flow_parse(struct rte_eth_dev *dev,
 	       struct rte_flow *flow,
 	       struct rte_flow_error *error)
 {
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	const struct sfc_flow_ops_by_spec *ops;
 	int rc;
 
-	rc = sfc_flow_parse_attr(attr, flow, error);
+	rc = sfc_flow_parse_attr(sa, attr, flow, error);
 	if (rc != 0)
 		return rc;
 
@@ -2397,8 +2491,14 @@ sfc_flow_zmalloc(struct rte_flow_error *error)
 }
 
 static void
-sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
+sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
 {
+	const struct sfc_flow_ops_by_spec *ops;
+
+	ops = sfc_flow_get_ops_by_spec(flow);
+	if (ops != NULL && ops->cleanup != NULL)
+		ops->cleanup(sa, flow);
+
 	rte_free(flow);
 }
 
@@ -2450,6 +2550,36 @@ sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
 	return rc;
 }
 
+static int
+sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	const struct sfc_flow_ops_by_spec *ops;
+	int rc = 0;
+
+	ops = sfc_flow_get_ops_by_spec(flow);
+	if (ops == NULL) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "No backend to handle this flow");
+		return -rte_errno;
+	}
+
+	if (ops->verify != NULL) {
+		SFC_ASSERT(sfc_adapter_is_locked(sa));
+		rc = ops->verify(sa, flow);
+	}
+
+	if (rc != 0) {
+		rte_flow_error_set(error, rc,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			"Failed to verify flow validity with FW");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
 static int
 sfc_flow_validate(struct rte_eth_dev *dev,
 		  const struct rte_flow_attr *attr,
@@ -2465,10 +2595,16 @@ sfc_flow_validate(struct rte_eth_dev *dev,
 	if (flow == NULL)
 		return -rte_errno;
 
+	sfc_adapter_lock(sa);
+
 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+	if (rc == 0)
+		rc = sfc_flow_verify(sa, flow, error);
 
 	sfc_flow_free(sa, flow);
 
+	sfc_adapter_unlock(sa);
+
 	return rc;
 }
 
@@ -2487,12 +2623,12 @@ sfc_flow_create(struct rte_eth_dev *dev,
 	if (flow == NULL)
 		goto fail_no_mem;
 
+	sfc_adapter_lock(sa);
+
 	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
 	if (rc != 0)
 		goto fail_bad_value;
 
-	sfc_adapter_lock(sa);
-
 	TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
 
 	if (sa->state == SFC_ADAPTER_STARTED) {
@@ -2632,12 +2768,19 @@ sfc_flow_fini(struct sfc_adapter *sa)
 void
 sfc_flow_stop(struct sfc_adapter *sa)
 {
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	struct sfc_rss *rss = &sas->rss;
 	struct rte_flow *flow;
 
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
 
 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
 		sfc_flow_remove(sa, flow, NULL);
+
+	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
+		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
+		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+	}
 }
 
 int