1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2022 Xilinx, Inc.
9 #include <rte_common.h>
11 #include <rte_tailq.h>
16 #include "sfc_debug.h"
17 #include "sfc_flow_rss.h"
/*
 * Attach-time initialisation of the adapter's flow RSS bookkeeping:
 * cache the NIC's maximum indirection-table queue span and set up the
 * (initially empty) list of RSS contexts.
 */
22 sfc_flow_rss_attach(struct sfc_adapter *sa)
24 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
25 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
27 sfc_log_init(sa, "entry");
/* Largest queue ID span one indirection table can address on this NIC. */
29 flow_rss->qid_span_max = encp->enc_rx_scale_indirection_max_nqueues;
31 TAILQ_INIT(&flow_rss->ctx_list);
33 sfc_log_init(sa, "done");
/*
 * Detach-time counterpart of sfc_flow_rss_attach(). Only logs entry/exit
 * in the visible lines; attach-time state needs no explicit teardown here.
 */
39 sfc_flow_rss_detach(struct sfc_adapter *sa)
41 sfc_log_init(sa, "entry");
43 sfc_log_init(sa, "done");
/*
 * Validate an rte_flow RSS action ('in') and convert it into the driver's
 * internal configuration ('out'). On success, the smallest ethdev Rx queue
 * ID referenced by the action is optionally returned via 'sw_qid_minp'.
 * Every rejected setting is explained with an sfc_err() message.
 */
47 sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
48 const struct rte_flow_action_rss *in,
49 struct sfc_flow_rss_conf *out, uint16_t *sw_qid_minp)
51 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
52 const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
53 const struct sfc_rss *ethdev_rss = &sas->rss;
62 * The caller demands that RSS hash be computed
63 * within the given encapsulation frame / level.
64 * Per flow control for that is not implemented.
66 sfc_err(sa, "flow-rss: parse: 'level' must be 0");
/* Translate the RTE hash type flags into their EFX representation. */
71 rc = sfc_rx_hf_rte_to_efx(sa, in->types,
72 &out->efx_hash_types);
74 sfc_err(sa, "flow-rss: parse: failed to process 'types'");
/* types == 0 means "no preference": inherit the ethdev-level hash types. */
78 sfc_dbg(sa, "flow-rss: parse: 'types' is 0; proceeding with ethdev setting");
79 out->efx_hash_types = ethdev_rss->hash_types;
/* A caller-supplied key must be exactly the driver's fixed key size. */
82 if (in->key_len != 0) {
83 if (in->key_len != sizeof(out->key)) {
84 sfc_err(sa, "flow-rss: parse: 'key_len' must be either %zu or 0",
89 if (in->key == NULL) {
90 sfc_err(sa, "flow-rss: parse: 'key' is NULL");
/* key_len == 0: fall back to the key configured on the ethdev. */
96 sfc_dbg(sa, "flow-rss: parse: 'key_len' is 0; proceeding with ethdev key");
97 key = ethdev_rss->key;
100 rte_memcpy(out->key, key, sizeof(out->key));
/* Hash function selection (switch over in->func; head is off-view). */
103 case RTE_ETH_HASH_FUNCTION_DEFAULT:
105 * DEFAULT means that conformance to a specific
106 * hash algorithm is a don't care to the caller.
107 * The driver can pick the one it deems optimal.
110 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
/* Toeplitz is honoured only when the ethdev is already using it. */
111 if (ethdev_rss->hash_alg != EFX_RX_HASHALG_TOEPLITZ) {
112 sfc_err(sa, "flow-rss: parse: 'func' TOEPLITZ is unavailable; use DEFAULT");
117 sfc_err(sa, "flow-rss: parse: 'func' #%d is unsupported", in->func);
/* The queue list must be non-empty, bounded by the table size, non-NULL. */
121 if (in->queue_num == 0) {
122 sfc_err(sa, "flow-rss: parse: 'queue_num' is 0; MIN=1");
126 if (in->queue_num > EFX_RSS_TBL_SIZE) {
127 sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
132 if (in->queue == NULL) {
133 sfc_err(sa, "flow-rss: parse: 'queue' is NULL");
/* Seed the minimum search with the largest valid ethdev queue ID. */
137 sw_qid_min = sas->ethdev_rxq_count - 1;
140 out->nb_qid_offsets = 0;
/* Scan the queue list: validate each ID, track min/max, detect gaps. */
142 for (i = 0; i < in->queue_num; ++i) {
143 uint16_t sw_qid = in->queue[i];
145 if (sw_qid >= sas->ethdev_rxq_count) {
146 sfc_err(sa, "flow-rss: parse: queue=%u does not exist",
151 if (sw_qid < sw_qid_min)
154 if (sw_qid > sw_qid_max)
/*
 * Not a contiguous ascending run starting at queue[0]:
 * explicit per-entry queue offsets will be required.
 */
157 if (sw_qid != in->queue[0] + i)
158 out->nb_qid_offsets = in->queue_num;
161 out->qid_span = sw_qid_max - sw_qid_min + 1;
/* The span must fit the NIC's indirection-table addressing limit. */
163 if (out->qid_span > flow_rss->qid_span_max) {
164 sfc_err(sa, "flow-rss: parse: queue ID span %u is too large; MAX=%u",
165 out->qid_span, flow_rss->qid_span_max);
169 if (sw_qid_minp != NULL)
170 *sw_qid_minp = sw_qid_min;
175 struct sfc_flow_rss_ctx *
176 sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
177 const struct sfc_flow_rss_conf *conf,
178 uint16_t sw_qid_min, const uint16_t *sw_qids)
180 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
181 static struct sfc_flow_rss_ctx *ctx;
183 SFC_ASSERT(sfc_adapter_is_locked(sa));
185 TAILQ_FOREACH(ctx, &flow_rss->ctx_list, entries) {
186 if (memcmp(&ctx->conf, conf, sizeof(*conf)) != 0)
189 if (conf->nb_qid_offsets != 0) {
190 bool match_confirmed = true;
193 for (i = 0; i < conf->nb_qid_offsets; ++i) {
194 uint16_t qid_offset = sw_qids[i] - sw_qid_min;
196 if (ctx->qid_offsets[i] != qid_offset) {
197 match_confirmed = false;
202 if (!match_confirmed)
206 sfc_dbg(sa, "flow-rss: reusing ctx=%p", ctx);
/*
 * Allocate a fresh RSS context for the given parsed configuration and
 * link it into the adapter's context list. For non-contiguous queue
 * sets, each queue's offset from 'sw_qid_min' is recorded. The new
 * context is presumably handed back via 'ctxp' (tail is off-view —
 * TODO confirm).
 */
215 sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
216 const struct sfc_flow_rss_conf *conf, uint16_t sw_qid_min,
217 const uint16_t *sw_qids, struct sfc_flow_rss_ctx **ctxp)
219 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
220 struct sfc_flow_rss_ctx *ctx;
/* The context list is protected solely by the adapter lock. */
222 SFC_ASSERT(sfc_adapter_is_locked(sa));
224 ctx = rte_zmalloc("sfc_flow_rss_ctx", sizeof(*ctx), 0);
/* Non-contiguous queue set: store explicit per-entry offsets. */
228 if (conf->nb_qid_offsets != 0) {
231 ctx->qid_offsets = rte_calloc("sfc_flow_rss_ctx_qid_offsets",
232 conf->nb_qid_offsets,
233 sizeof(*ctx->qid_offsets), 0);
234 if (ctx->qid_offsets == NULL) {
239 for (i = 0; i < conf->nb_qid_offsets; ++i)
240 ctx->qid_offsets[i] = sw_qids[i] - sw_qid_min;
246 TAILQ_INSERT_TAIL(&flow_rss->ctx_list, ctx, entries);
250 sfc_dbg(sa, "flow-rss: added ctx=%p", ctx);
/*
 * Drop one reference to the context; on the last release, unlink it
 * from the adapter's list and free it. Complains loudly if the NIC
 * resource is still referenced at that point, since it would be
 * abandoned.
 */
256 sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
258 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
263 SFC_ASSERT(sfc_adapter_is_locked(sa));
268 SFC_ASSERT(ctx->refcnt != 0);
/*
 * Other users remain; nothing to tear down yet.
 * NOTE(review): refcnt is presumably decremented on an off-view line
 * just above this check — confirm against the full source.
 */
272 if (ctx->refcnt != 0)
275 if (ctx->nic_handle_refcnt != 0) {
276 sfc_err(sa, "flow-rss: deleting ctx=%p abandons its NIC resource: handle=0x%08x, refcnt=%u",
277 ctx, ctx->nic_handle, ctx->nic_handle_refcnt);
280 TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries);
281 rte_free(ctx->qid_offsets);
284 sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx);
/*
 * Fill the adapter's bounce indirection table for the context and
 * program it into the NIC. Table entries are queue offsets relative
 * to the context's base queue.
 */
288 sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
289 const struct sfc_flow_rss_ctx *ctx)
291 const struct sfc_flow_rss_conf *conf = &ctx->conf;
292 unsigned int *tbl = sa->flow_rss.bounce_tbl;
/* The bounce table is shared; the adapter lock serialises its use. */
295 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Explicit queue list: repeat the recorded offsets round-robin. */
297 if (conf->nb_qid_offsets != 0) {
298 SFC_ASSERT(ctx->qid_offsets != NULL);
300 for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
301 tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
/* Contiguous queue span: spread table entries evenly across it. */
303 for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
304 tbl[i] = i % conf->qid_span;
307 return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
308 tbl, EFX_RSS_TBL_SIZE);
/*
 * Ensure the context is backed by a NIC-side RSS context and program
 * its hash mode, key and indirection table. The NIC resource is
 * allocated on first use and reference-counted thereafter; a failure
 * after a fresh allocation rolls the allocation back via
 * sfc_flow_rss_ctx_terminate().
 */
312 sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
314 efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
315 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
316 struct sfc_rss *ethdev_rss = &sas->rss;
317 struct sfc_flow_rss_conf *conf;
318 bool allocation_done = B_FALSE;
326 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* First user of this context: allocate the NIC-side RSS context. */
328 if (ctx->nic_handle_refcnt == 0) {
329 rc = efx_rx_scale_context_alloc(sa->nic, ctx_type,
333 sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, rc=%d",
334 ctx, ctx_type, conf->qid_span, rc);
338 sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u; handle=0x%08x",
339 ctx, ctx_type, conf->qid_span,
342 ++(ctx->nic_handle_refcnt);
/* Remember the fresh allocation so failures below can undo it. */
343 allocation_done = B_TRUE;
345 ++(ctx->nic_handle_refcnt);
/* A dummy context mirrors the ethdev-level RSS settings instead of its own. */
349 rc = efx_rx_scale_mode_set(sa->nic, ctx->nic_handle,
350 ethdev_rss->hash_alg,
351 (ctx->dummy) ? ethdev_rss->hash_types :
352 conf->efx_hash_types,
355 sfc_err(sa, "flow-rss: failed to configure hash for ctx=%p: efx_hash_alg=%d, efx_hash_types=0x%08x; rc=%d",
356 ctx, ethdev_rss->hash_alg,
357 (ctx->dummy) ? ethdev_rss->hash_types :
358 conf->efx_hash_types,
363 rc = efx_rx_scale_key_set(sa->nic, ctx->nic_handle,
364 (ctx->dummy) ? ethdev_rss->key : conf->key,
367 sfc_err(sa, "flow-rss: failed to set key for ctx=%p; rc=%d",
372 rc = sfc_flow_rss_ctx_program_tbl(sa, ctx);
374 sfc_err(sa, "flow-rss: failed to program table for ctx=%p; rc=%d",
/* Error path: release the NIC resource that was just allocated above. */
383 sfc_flow_rss_ctx_terminate(sa, ctx);
/*
 * Release one reference to the context's NIC-side RSS resource and
 * free the hardware context on the last release. A failure to free is
 * logged and then deliberately ignored (best effort) — there is no
 * meaningful recovery at this point.
 */
389 sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
394 SFC_ASSERT(sfc_adapter_is_locked(sa));
396 SFC_ASSERT(ctx->nic_handle_refcnt != 0);
397 --(ctx->nic_handle_refcnt);
/* Last reference gone: hand the hardware context back to the NIC. */
399 if (ctx->nic_handle_refcnt == 0) {
402 rc = efx_rx_scale_context_free(sa->nic, ctx->nic_handle);
404 sfc_err(sa, "flow-rss: failed to release NIC resource for ctx=%p: handle=0x%08x; rc=%d",
405 ctx, ctx->nic_handle, rc);
407 sfc_warn(sa, "flow-rss: proceeding despite the prior error");
410 sfc_dbg(sa, "flow-rss: released NIC resource for ctx=%p; rc=%d",