'sfc_mae.c',
'sfc_mae_counter.c',
'sfc_flow.c',
+ 'sfc_flow_rss.c',
'sfc_flow_tunnel.c',
'sfc_dp.c',
'sfc_ef10_rx.c',
efx_intr_fini(sa->nic);
rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
- rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ memset(&rss->dummy_ctx, 0, sizeof(rss->dummy_ctx));
+ rss->dummy_ctx.conf.qid_span = 1;
+ rss->dummy_ctx.dummy = true;
return 0;
if (rc != 0)
goto fail_rss_attach;
+ rc = sfc_flow_rss_attach(sa);
+ if (rc != 0)
+ goto fail_flow_rss_attach;
+
rc = sfc_filter_attach(sa);
if (rc != 0)
goto fail_filter_attach;
sfc_filter_detach(sa);
fail_filter_attach:
+ sfc_flow_rss_detach(sa);
+
+fail_flow_rss_attach:
sfc_rss_detach(sa);
fail_rss_attach:
sfc_mae_detach(sa);
sfc_mae_counter_rxq_detach(sa);
sfc_filter_detach(sa);
+ sfc_flow_rss_detach(sa);
sfc_rss_detach(sa);
sfc_port_detach(sa);
sfc_ev_detach(sa);
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_filter.h"
+#include "sfc_flow_rss.h"
#include "sfc_flow_tunnel.h"
#include "sfc_sriov.h"
#include "sfc_mae.h"
unsigned int tbl[EFX_RSS_TBL_SIZE];
uint8_t key[EFX_RSS_KEY_SIZE];
- uint32_t dummy_rss_context;
+ struct sfc_flow_rss_ctx dummy_ctx;
};
/* Adapter private data shared by primary and secondary processes */
struct sfc_intr intr;
struct sfc_port port;
struct sfc_sw_stats sw_stats;
+ struct sfc_flow_rss flow_rss;
/* Registry of tunnel offload contexts */
struct sfc_flow_tunnel flow_tunnels[SFC_FT_MAX_NTUNNELS];
struct sfc_filter filter;
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
unsigned int efx_hash_types;
- uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
unsigned int n_contexts;
unsigned int mode_i = 0;
unsigned int key_i = 0;
+ uint32_t contexts[2];
unsigned int i = 0;
int rc = 0;
- n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;
-
if (sfc_sa2shared(sa)->isolated)
return -ENOTSUP;
if (rc != 0)
goto fail_rx_hf_rte_to_efx;
+ contexts[0] = EFX_RSS_CONTEXT_DEFAULT;
+ contexts[1] = rss->dummy_ctx.nic_handle;
+ n_contexts = (rss->dummy_ctx.nic_handle_refcnt == 0) ? 1 : 2;
+
for (mode_i = 0; mode_i < n_contexts; mode_i++) {
rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
rss->hash_alg, efx_hash_types,
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
+#include "sfc_flow_rss.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;
+static sfc_flow_cleanup_cb_t sfc_flow_cleanup;
static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
.parse = sfc_flow_parse_rte_to_filter,
.verify = NULL,
- .cleanup = NULL,
+ .cleanup = sfc_flow_cleanup,
.insert = sfc_flow_filter_insert,
.remove = sfc_flow_filter_remove,
.query = NULL,
spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
- spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
- SFC_RXQ_FLAG_RSS_HASH);
+
+ if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ struct sfc_rss *ethdev_rss = &sas->rss;
+
+ spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ spec_filter->rss_ctx = ðdev_rss->dummy_ctx;
+ }
return 0;
}
const struct rte_flow_action_rss *action_rss,
struct rte_flow *flow)
{
- struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct sfc_rss *rss = &sas->rss;
- sfc_ethdev_qid_t ethdev_qid;
+ struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
+ struct sfc_flow_rss_conf conf;
+ uint16_t sw_qid_min;
struct sfc_rxq *rxq;
- unsigned int rxq_hw_index_min;
- unsigned int rxq_hw_index_max;
- efx_rx_hash_type_t efx_hash_types;
- const uint8_t *rss_key;
- struct sfc_flow_spec *spec = &flow->spec;
- struct sfc_flow_spec_filter *spec_filter = &spec->filter;
- struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
- unsigned int i;
-
- if (action_rss->queue_num == 0)
- return -EINVAL;
-
- ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
- rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
- rxq_hw_index_min = rxq->hw_index;
- rxq_hw_index_max = 0;
-
- for (i = 0; i < action_rss->queue_num; ++i) {
- ethdev_qid = action_rss->queue[i];
-
- if ((unsigned int)ethdev_qid >=
- sfc_sa2shared(sa)->ethdev_rxq_count)
- return -EINVAL;
-
- rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
-
- if (rxq->hw_index < rxq_hw_index_min)
- rxq_hw_index_min = rxq->hw_index;
-
- if (rxq->hw_index > rxq_hw_index_max)
- rxq_hw_index_max = rxq->hw_index;
- }
+ int rc;
- if (rxq_hw_index_max - rxq_hw_index_min + 1 > EFX_MAXRSS)
- return -EINVAL;
+ spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
- switch (action_rss->func) {
- case RTE_ETH_HASH_FUNCTION_DEFAULT:
- case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
- break;
- default:
- return -EINVAL;
- }
+ rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
+ if (rc != 0)
+ return -rc;
- if (action_rss->level)
- return -EINVAL;
+ rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
+ spec_filter->template.efs_dmaq_id = rxq->hw_index;
- /*
- * Dummy RSS action with only one queue and no specific settings
- * for hash types and key does not require dedicated RSS context
- * and may be simplified to single queue action.
- */
- if (action_rss->queue_num == 1 && action_rss->types == 0 &&
- action_rss->key_len == 0) {
- spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
+ spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
+ action_rss->queue);
+ if (spec_filter->rss_ctx != NULL)
return 0;
- }
-
- if (action_rss->types) {
- int rc;
-
- rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
- &efx_hash_types);
- if (rc != 0)
- return -rc;
- } else {
- unsigned int i;
-
- efx_hash_types = 0;
- for (i = 0; i < rss->hf_map_nb_entries; ++i)
- efx_hash_types |= rss->hf_map[i].efx;
- }
-
- if (action_rss->key_len) {
- if (action_rss->key_len != sizeof(rss->key))
- return -EINVAL;
-
- rss_key = action_rss->key;
- } else {
- rss_key = rss->key;
- }
-
- spec_filter->rss = B_TRUE;
-
- sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
- sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
- sfc_rss_conf->rss_hash_types = efx_hash_types;
- rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
- for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
- unsigned int nb_queues = action_rss->queue_num;
- struct sfc_rxq *rxq;
-
- ethdev_qid = action_rss->queue[i % nb_queues];
- rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
- sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
- }
+ rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
+ &spec_filter->rss_ctx);
+ if (rc != 0)
+ return -rc;
return 0;
}
sfc_flow_filter_insert(struct sfc_adapter *sa,
struct rte_flow *flow)
{
- struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct sfc_rss *rss = &sas->rss;
struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
- struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
- uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
- boolean_t create_context;
- unsigned int i;
+ struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
int rc = 0;
- create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
- rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
-
- if (create_context) {
- unsigned int rss_spread;
- unsigned int rss_hash_types;
- uint8_t *rss_key;
-
- if (spec_filter->rss) {
- rss_spread = flow_rss->rxq_hw_index_max -
- flow_rss->rxq_hw_index_min + 1;
- rss_hash_types = flow_rss->rss_hash_types;
- rss_key = flow_rss->rss_key;
- } else {
- /*
- * Initialize dummy RSS context parameters to have
- * valid RSS hash. Use default RSS hash function and
- * key.
- */
- rss_spread = 1;
- rss_hash_types = rss->hash_types;
- rss_key = rss->key;
- }
-
- rc = efx_rx_scale_context_alloc(sa->nic,
- EFX_RX_SCALE_EXCLUSIVE,
- rss_spread,
- &efs_rss_context);
- if (rc != 0)
- goto fail_scale_context_alloc;
-
- rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
- rss->hash_alg,
- rss_hash_types, B_TRUE);
- if (rc != 0)
- goto fail_scale_mode_set;
+ rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
+ if (rc != 0)
+ goto fail_rss_ctx_program;
- rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
- rss_key, sizeof(rss->key));
- if (rc != 0)
- goto fail_scale_key_set;
- } else {
- efs_rss_context = rss->dummy_rss_context;
- }
+ if (rss_ctx != NULL) {
+ unsigned int i;
- if (spec_filter->rss || spec_filter->rss_hash_required) {
/*
* At this point, fully elaborated filter specifications
* have been produced from the template. To make sure that
for (i = 0; i < spec_filter->count; i++) {
efx_filter_spec_t *spec = &spec_filter->filters[i];
- spec->efs_rss_context = efs_rss_context;
- spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
- if (spec_filter->rss)
- spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
+ spec->efs_rss_context = rss_ctx->nic_handle;
}
}
if (rc != 0)
goto fail_filter_insert;
- if (create_context) {
- unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
- unsigned int *tbl;
-
- tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
-
- /*
- * Scale table is set after filter insertion because
- * the table entries are relative to the base RxQ ID
- * and the latter is submitted to the HW by means of
- * inserting a filter, so by the time of the request
- * the HW knows all the information needed to verify
- * the table entries, and the operation will succeed
- */
- rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
- tbl, RTE_DIM(flow_rss->rss_tbl));
- if (rc != 0)
- goto fail_scale_tbl_set;
-
- /* Remember created dummy RSS context */
- if (!spec_filter->rss)
- rss->dummy_rss_context = efs_rss_context;
- }
-
return 0;
-fail_scale_tbl_set:
- sfc_flow_spec_remove(sa, &flow->spec);
-
fail_filter_insert:
-fail_scale_key_set:
-fail_scale_mode_set:
- if (create_context)
- efx_rx_scale_context_free(sa->nic, efs_rss_context);
+ sfc_flow_rss_ctx_terminate(sa, rss_ctx);
-fail_scale_context_alloc:
+fail_rss_ctx_program:
return rc;
}
if (rc != 0)
return rc;
- if (spec_filter->rss) {
- /*
- * All specifications for a given flow rule have the same RSS
- * context, so that RSS context value is taken from the first
- * filter specification
- */
- efx_filter_spec_t *spec = &spec_filter->filters[0];
-
- rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
- }
+ sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
- return rc;
+ return 0;
}
static int
void
sfc_flow_stop(struct sfc_adapter *sa)
{
- struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct sfc_rss *rss = &sas->rss;
struct rte_flow *flow;
SFC_ASSERT(sfc_adapter_is_locked(sa));
TAILQ_FOREACH(flow, &sa->flow_list, entries)
sfc_flow_remove(sa, flow, NULL);
- if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
- efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
- rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
- }
-
/*
* MAE counter service is not stopped on flow rule remove to avoid
* extra work. Make sure that it is stopped here.
fail_bad_flow:
return rc;
}
+
+static void
+sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
+{
+ if (flow == NULL)
+ return;
+
+ sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);
+}
#include "efx.h"
+#include "sfc_flow_rss.h"
+
#ifdef __cplusplus
extern "C" {
#endif
#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
RTE_BUILD_BUG_ON((_action) >= sizeof(_set) * CHAR_BIT)
-/* RSS configuration storage */
-struct sfc_flow_rss {
- unsigned int rxq_hw_index_min;
- unsigned int rxq_hw_index_max;
- unsigned int rss_hash_types;
- uint8_t rss_key[EFX_RSS_KEY_SIZE];
- unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
-};
-
/* Flow engines supported by the implementation */
enum sfc_flow_spec_type {
SFC_FLOW_SPEC_FILTER = 0,
efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX];
/* number of complete specifications */
unsigned int count;
- /* RSS toggle */
- boolean_t rss;
- /* RSS hash toggle */
- boolean_t rss_hash_required;
- /* RSS configuration */
- struct sfc_flow_rss rss_conf;
+ /* RSS context (or NULL) */
+ struct sfc_flow_rss_ctx *rss_ctx;
};
/* Indicates the role of a given flow in tunnel offload */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2022 Xilinx, Inc.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_flow_rss.h"
+#include "sfc_log.h"
+#include "sfc_rx.h"
+
+int
+sfc_flow_rss_attach(struct sfc_adapter *sa)
+{
+ struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+
+ sfc_log_init(sa, "entry");
+
+ TAILQ_INIT(&flow_rss->ctx_list);
+
+ sfc_log_init(sa, "done");
+
+ return 0;
+}
+
+void
+sfc_flow_rss_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
+ const struct rte_flow_action_rss *in,
+ struct sfc_flow_rss_conf *out, uint16_t *sw_qid_minp)
+{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ const struct sfc_rss *ethdev_rss = &sas->rss;
+ uint16_t sw_qid_min;
+ uint16_t sw_qid_max;
+ const uint8_t *key;
+ unsigned int i;
+ int rc;
+
+ if (in->level) {
+ /*
+ * The caller demands that RSS hash be computed
+ * within the given encapsulation frame / level.
+ * Per-flow control for that is not implemented.
+ */
+ sfc_err(sa, "flow-rss: parse: 'level' must be 0");
+ return EINVAL;
+ }
+
+ if (in->types != 0) {
+ rc = sfc_rx_hf_rte_to_efx(sa, in->types,
+ &out->efx_hash_types);
+ if (rc != 0) {
+ sfc_err(sa, "flow-rss: parse: failed to process 'types'");
+ return rc;
+ }
+ } else {
+ sfc_dbg(sa, "flow-rss: parse: 'types' is 0; proceeding with ethdev setting");
+ out->efx_hash_types = ethdev_rss->hash_types;
+ }
+
+ if (in->key_len != 0) {
+ if (in->key_len != sizeof(out->key)) {
+ sfc_err(sa, "flow-rss: parse: 'key_len' must be either %zu or 0",
+ sizeof(out->key));
+ return EINVAL;
+ }
+
+ if (in->key == NULL) {
+ sfc_err(sa, "flow-rss: parse: 'key' is NULL");
+ return EINVAL;
+ }
+
+ key = in->key;
+ } else {
+ sfc_dbg(sa, "flow-rss: parse: 'key_len' is 0; proceeding with ethdev key");
+ key = ethdev_rss->key;
+ }
+
+ rte_memcpy(out->key, key, sizeof(out->key));
+
+ switch (in->func) {
+ case RTE_ETH_HASH_FUNCTION_DEFAULT:
+ /*
+ * DEFAULT means that conformance to a specific
+ * hash algorithm is a don't care to the caller.
+ * The driver can pick the one it deems optimal.
+ */
+ break;
+ case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+ if (ethdev_rss->hash_alg != EFX_RX_HASHALG_TOEPLITZ) {
+ sfc_err(sa, "flow-rss: parse: 'func' TOEPLITZ is unavailable; use DEFAULT");
+ return EINVAL;
+ }
+ break;
+ default:
+ sfc_err(sa, "flow-rss: parse: 'func' #%d is unsupported", in->func);
+ return EINVAL;
+ }
+
+ if (in->queue_num == 0) {
+ sfc_err(sa, "flow-rss: parse: 'queue_num' is 0; MIN=1");
+ return EINVAL;
+ }
+
+ if (in->queue_num > EFX_RSS_TBL_SIZE) {
+ sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
+ EFX_RSS_TBL_SIZE);
+ return EINVAL;
+ }
+
+ if (in->queue == NULL) {
+ sfc_err(sa, "flow-rss: parse: 'queue' is NULL");
+ return EINVAL;
+ }
+
+ sw_qid_min = sas->ethdev_rxq_count - 1;
+ sw_qid_max = 0;
+
+ out->nb_qid_offsets = 0;
+
+ for (i = 0; i < in->queue_num; ++i) {
+ uint16_t sw_qid = in->queue[i];
+
+ if (sw_qid >= sas->ethdev_rxq_count) {
+ sfc_err(sa, "flow-rss: parse: queue=%u does not exist",
+ sw_qid);
+ return EINVAL;
+ }
+
+ if (sw_qid < sw_qid_min)
+ sw_qid_min = sw_qid;
+
+ if (sw_qid > sw_qid_max)
+ sw_qid_max = sw_qid;
+
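+ /*
+ * A queue list that is not a contiguous ascending run starting
+ * at queue[0] cannot be described by the queue ID span alone;
+ * record that explicit per-entry offsets are required.
+ */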
+ if (sw_qid != in->queue[0] + i)
+ out->nb_qid_offsets = in->queue_num;
+ }
+
+ out->qid_span = sw_qid_max - sw_qid_min + 1;
+
+ if (out->qid_span > EFX_MAXRSS) {
+ sfc_err(sa, "flow-rss: parse: queue ID span %u is too large; MAX=%u",
+ out->qid_span, EFX_MAXRSS);
+ return EINVAL;
+ }
+
+ if (sw_qid_minp != NULL)
+ *sw_qid_minp = sw_qid_min;
+
+ return 0;
+}
+
+struct sfc_flow_rss_ctx *
+sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
+ const struct sfc_flow_rss_conf *conf,
+ uint16_t sw_qid_min, const uint16_t *sw_qids)
+{
+ struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+ struct sfc_flow_rss_ctx *ctx;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
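+ /*
+ * An existing context can be shared only when both the RSS
+ * configuration and the explicit queue offsets (if any)
+ * match the new request.
+ */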
+ TAILQ_FOREACH(ctx, &flow_rss->ctx_list, entries) {
+ if (memcmp(&ctx->conf, conf, sizeof(*conf)) != 0)
+ continue;
+
+ if (conf->nb_qid_offsets != 0) {
+ bool match_confirmed = true;
+ unsigned int i;
+
+ for (i = 0; i < conf->nb_qid_offsets; ++i) {
+ uint16_t qid_offset = sw_qids[i] - sw_qid_min;
+
+ if (ctx->qid_offsets[i] != qid_offset) {
+ match_confirmed = false;
+ break;
+ }
+ }
+
+ if (!match_confirmed)
+ continue;
+ }
+
+ sfc_dbg(sa, "flow-rss: reusing ctx=%p", ctx);
+ ++(ctx->refcnt);
+ return ctx;
+ }
+
+ return NULL;
+}
+
+int
+sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
+ const struct sfc_flow_rss_conf *conf, uint16_t sw_qid_min,
+ const uint16_t *sw_qids, struct sfc_flow_rss_ctx **ctxp)
+{
+ struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+ struct sfc_flow_rss_ctx *ctx;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ ctx = rte_zmalloc("sfc_flow_rss_ctx", sizeof(*ctx), 0);
+ if (ctx == NULL)
+ return ENOMEM;
+
+ if (conf->nb_qid_offsets != 0) {
+ unsigned int i;
+
+ ctx->qid_offsets = rte_calloc("sfc_flow_rss_ctx_qid_offsets",
+ conf->nb_qid_offsets,
+ sizeof(*ctx->qid_offsets), 0);
+ if (ctx->qid_offsets == NULL) {
+ rte_free(ctx);
+ return ENOMEM;
+ }
+
+ for (i = 0; i < conf->nb_qid_offsets; ++i)
+ ctx->qid_offsets[i] = sw_qids[i] - sw_qid_min;
+ }
+
+ ctx->conf = *conf;
+ ctx->refcnt = 1;
+
+ TAILQ_INSERT_TAIL(&flow_rss->ctx_list, ctx, entries);
+
+ *ctxp = ctx;
+
+ sfc_dbg(sa, "flow-rss: added ctx=%p", ctx);
+
+ return 0;
+}
+
+void
+sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
+{
+ struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+
+ if (ctx == NULL)
+ return;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
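+ /*
+ * The dummy context is embedded in the ethdev RSS state rather
+ * than allocated on this list, so it is neither reference-counted
+ * nor freed here.
+ */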
+ if (ctx->dummy)
+ return;
+
+ SFC_ASSERT(ctx->refcnt != 0);
+
+ --(ctx->refcnt);
+
+ if (ctx->refcnt != 0)
+ return;
+
+ if (ctx->nic_handle_refcnt != 0) {
+ sfc_err(sa, "flow-rss: deleting ctx=%p abandons its NIC resource: handle=0x%08x, refcnt=%u",
+ ctx, ctx->nic_handle, ctx->nic_handle_refcnt);
+ }
+
+ TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries);
+ rte_free(ctx->qid_offsets);
+ rte_free(ctx);
+
+ sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx);
+}
+
+static int
+sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
+ const struct sfc_flow_rss_ctx *ctx)
+{
+ const struct sfc_flow_rss_conf *conf = &ctx->conf;
+ unsigned int *tbl = sa->flow_rss.bounce_tbl;
+ unsigned int i;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
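+ /*
+ * Either cycle through the explicit per-queue offsets or, for a
+ * contiguous queue run, spread table entries evenly across the
+ * queue ID span.
+ */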
+ if (conf->nb_qid_offsets != 0) {
+ SFC_ASSERT(ctx->qid_offsets != NULL);
+
+ for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+ tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
+ } else {
+ for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+ tbl[i] = i % conf->qid_span;
+ }
+
+ return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
+ tbl, EFX_RSS_TBL_SIZE);
+}
+
+int
+sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
+{
+ efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ struct sfc_rss *ethdev_rss = &sas->rss;
+ struct sfc_flow_rss_conf *conf;
+ bool allocation_done = false;
+ int rc;
+
+ if (ctx == NULL)
+ return 0;
+
+ conf = &ctx->conf;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
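+ /*
+ * The NIC RSS context is allocated lazily on the first programming
+ * request; subsequent requests only take an extra reference on the
+ * existing handle.
+ */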
+ if (ctx->nic_handle_refcnt == 0) {
+ rc = efx_rx_scale_context_alloc(sa->nic, ctx_type,
+ conf->qid_span,
+ &ctx->nic_handle);
+ if (rc != 0) {
+ sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, rc=%d",
+ ctx, ctx_type, conf->qid_span, rc);
+ goto fail;
+ }
+
+ sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u; handle=0x%08x",
+ ctx, ctx_type, conf->qid_span,
+ ctx->nic_handle);
+
+ ++(ctx->nic_handle_refcnt);
+ allocation_done = true;
+ } else {
+ ++(ctx->nic_handle_refcnt);
+ return 0;
+ }
+
+ rc = efx_rx_scale_mode_set(sa->nic, ctx->nic_handle,
+ ethdev_rss->hash_alg,
+ (ctx->dummy) ? ethdev_rss->hash_types :
+ conf->efx_hash_types,
+ B_TRUE);
+ if (rc != 0) {
+ sfc_err(sa, "flow-rss: failed to configure hash for ctx=%p: efx_hash_alg=%d, efx_hash_types=0x%08x; rc=%d",
+ ctx, ethdev_rss->hash_alg,
+ (ctx->dummy) ? ethdev_rss->hash_types :
+ conf->efx_hash_types,
+ rc);
+ goto fail;
+ }
+
+ rc = efx_rx_scale_key_set(sa->nic, ctx->nic_handle,
+ (ctx->dummy) ? ethdev_rss->key : conf->key,
+ RTE_DIM(conf->key));
+ if (rc != 0) {
+ sfc_err(sa, "flow-rss: failed to set key for ctx=%p; rc=%d",
+ ctx, rc);
+ goto fail;
+ }
+
+ rc = sfc_flow_rss_ctx_program_tbl(sa, ctx);
+ if (rc != 0) {
+ sfc_err(sa, "flow-rss: failed to program table for ctx=%p; rc=%d",
+ ctx, rc);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ if (allocation_done)
+ sfc_flow_rss_ctx_terminate(sa, ctx);
+
+ return rc;
+}
+
+void
+sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
+{
+ if (ctx == NULL)
+ return;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(ctx->nic_handle_refcnt != 0);
+ --(ctx->nic_handle_refcnt);
+
+ if (ctx->nic_handle_refcnt == 0) {
+ int rc;
+
+ rc = efx_rx_scale_context_free(sa->nic, ctx->nic_handle);
+ if (rc != 0) {
+ sfc_err(sa, "flow-rss: failed to release NIC resource for ctx=%p: handle=0x%08x; rc=%d",
+ ctx, ctx->nic_handle, rc);
+
+ sfc_warn(sa, "flow-rss: proceeding despite the prior error");
+ }
+
+ sfc_dbg(sa, "flow-rss: released NIC resource for ctx=%p; rc=%d",
+ ctx, rc);
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2022 Xilinx, Inc.
+ */
+
+#ifndef _SFC_FLOW_RSS_H
+#define _SFC_FLOW_RSS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_flow.h>
+#include <rte_tailq.h>
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_flow_rss_conf {
+ uint8_t key[EFX_RSS_KEY_SIZE];
+ efx_rx_hash_type_t efx_hash_types;
+ unsigned int nb_qid_offsets;
+ unsigned int qid_span;
+};
+
+struct sfc_flow_rss_ctx {
+ TAILQ_ENTRY(sfc_flow_rss_ctx) entries;
+
+ unsigned int refcnt;
+ bool dummy;
+
+ unsigned int nic_handle_refcnt;
+ uint32_t nic_handle;
+
+ struct sfc_flow_rss_conf conf;
+
+ uint16_t *qid_offsets;
+};
+
+TAILQ_HEAD(sfc_flow_rss_ctx_list, sfc_flow_rss_ctx);
+
+struct sfc_flow_rss {
+ unsigned int bounce_tbl[EFX_RSS_TBL_SIZE];
+
+ struct sfc_flow_rss_ctx_list ctx_list;
+};
+
+struct sfc_adapter;
+
+int sfc_flow_rss_attach(struct sfc_adapter *sa);
+
+void sfc_flow_rss_detach(struct sfc_adapter *sa);
+
+int sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
+ const struct rte_flow_action_rss *in,
+ struct sfc_flow_rss_conf *out,
+ uint16_t *sw_qid_minp);
+
+struct sfc_flow_rss_ctx *sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
+ const struct sfc_flow_rss_conf *conf,
+ uint16_t sw_qid_min, const uint16_t *sw_qids);
+
+int sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
+ const struct sfc_flow_rss_conf *conf,
+ uint16_t sw_qid_min, const uint16_t *sw_qids,
+ struct sfc_flow_rss_ctx **ctxp);
+
+void sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx);
+
+int sfc_flow_rss_ctx_program(struct sfc_adapter *sa,
+ struct sfc_flow_rss_ctx *ctx);
+
+void sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa,
+ struct sfc_flow_rss_ctx *ctx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_FLOW_RSS_H */