1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2022 Xilinx, Inc.
9 #include <rte_common.h>
11 #include <rte_tailq.h>
16 #include "sfc_debug.h"
17 #include "sfc_flow_rss.h"
/*
 * Attach-time initialisation of the adapter's flow RSS facility:
 * cache the NIC's indirection table limits and allocate the bounce
 * buffer used to stage indirection entries before they are
 * programmed to the hardware.
 *
 * NOTE(review): this listing is line-sampled; the success return,
 * the failure label(s) and the final return are not visible here —
 * confirm the unwind path against the full source.
 */
22 sfc_flow_rss_attach(struct sfc_adapter *sa)
24 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
25 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
28 sfc_log_init(sa, "entry");
/* Limits come straight from the EFX NIC configuration. */
30 flow_rss->qid_span_max = encp->enc_rx_scale_indirection_max_nqueues;
31 flow_rss->nb_tbl_entries_min = encp->enc_rx_scale_tbl_min_nentries;
32 flow_rss->nb_tbl_entries_max = encp->enc_rx_scale_tbl_max_nentries;
34 sfc_log_init(sa, "allocate the bounce buffer for indirection entries");
/* Sized for the largest table the NIC can accept, so one buffer
 * serves every context (see sfc_flow_rss_ctx_program_tbl()). */
35 flow_rss->bounce_tbl = rte_calloc("sfc_flow_rss_bounce_tbl",
36 flow_rss->nb_tbl_entries_max,
37 sizeof(*flow_rss->bounce_tbl), 0);
38 if (flow_rss->bounce_tbl == NULL) {
/* No contexts yet; the list is populated by sfc_flow_rss_ctx_add(). */
43 TAILQ_INIT(&flow_rss->ctx_list);
45 sfc_log_init(sa, "done");
50 sfc_log_init(sa, "failed %d", rc);
/*
 * Detach-time teardown: release the bounce buffer allocated in
 * sfc_flow_rss_attach(). rte_free(NULL) is a no-op, so this is
 * safe even if attach failed before the allocation succeeded.
 */
56 sfc_flow_rss_detach(struct sfc_adapter *sa)
58 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
60 sfc_log_init(sa, "entry");
62 sfc_log_init(sa, "free the bounce buffer for indirection entries");
63 rte_free(flow_rss->bounce_tbl);
65 sfc_log_init(sa, "done");
/*
 * Validate an RTE flow RSS action ('in') and translate it into the
 * driver's internal representation ('out'). On success, optionally
 * report the smallest ethdev Rx queue ID referenced by the action
 * via 'sw_qid_minp'.
 *
 * Validated here: 'level' (must be 0), 'types', 'key_len'/'key',
 * 'func', 'queue_num'/'queue', per-queue existence, and the overall
 * queue ID span against the NIC limit.
 *
 * NOTE(review): this listing is line-sampled; 'rc' error codes,
 * 'goto fail' unwinds, closing braces and the success return are
 * not visible in this view.
 */
69 sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
70 const struct rte_flow_action_rss *in,
71 struct sfc_flow_rss_conf *out, uint16_t *sw_qid_minp)
73 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
74 const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
75 const struct sfc_rss *ethdev_rss = &sas->rss;
84 * The caller demands that RSS hash be computed
85 * within the given encapsulation frame / level.
86 * Per flow control for that is not implemented.
88 sfc_err(sa, "flow-rss: parse: 'level' must be 0");
/* Translate the RTE hash type flags into EFX hash types. */
93 rc = sfc_rx_hf_rte_to_efx(sa, in->types,
94 &out->efx_hash_types);
96 sfc_err(sa, "flow-rss: parse: failed to process 'types'");
/* 'types' == 0 means "driver default": inherit the ethdev-level
 * RSS hash type configuration. */
100 sfc_dbg(sa, "flow-rss: parse: 'types' is 0; proceeding with ethdev setting");
101 out->efx_hash_types = ethdev_rss->hash_types;
/* The key is all-or-nothing: either a full-size caller key or the
 * ethdev key; any other 'key_len' is rejected. */
104 if (in->key_len != 0) {
105 if (in->key_len != sizeof(out->key)) {
106 sfc_err(sa, "flow-rss: parse: 'key_len' must be either %zu or 0",
111 if (in->key == NULL) {
112 sfc_err(sa, "flow-rss: parse: 'key' is NULL");
118 sfc_dbg(sa, "flow-rss: parse: 'key_len' is 0; proceeding with ethdev key");
119 key = ethdev_rss->key;
122 rte_memcpy(out->key, key, sizeof(out->key));
125 case RTE_ETH_HASH_FUNCTION_DEFAULT:
127 * DEFAULT means that conformance to a specific
128 * hash algorithm is a don't care to the caller.
129 * The driver can pick the one it deems optimal.
132 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
/* Toeplitz can only be honoured if the ethdev-level RSS is
 * already using it. */
133 if (ethdev_rss->hash_alg != EFX_RX_HASHALG_TOEPLITZ) {
134 sfc_err(sa, "flow-rss: parse: 'func' TOEPLITZ is unavailable; use DEFAULT");
139 sfc_err(sa, "flow-rss: parse: 'func' #%d is unsupported", in->func);
143 out->rte_hash_function = in->func;
/* The queue array must be non-empty, non-NULL and no larger than
 * the NIC's maximum indirection table size. */
145 if (in->queue_num == 0) {
146 sfc_err(sa, "flow-rss: parse: 'queue_num' is 0; MIN=1");
150 if (in->queue_num > flow_rss->nb_tbl_entries_max) {
151 sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
152 flow_rss->nb_tbl_entries_max);
156 if (in->queue == NULL) {
157 sfc_err(sa, "flow-rss: parse: 'queue' is NULL");
/* Start min at the largest valid queue ID so the scan below can
 * only lower it. */
161 sw_qid_min = sas->ethdev_rxq_count - 1;
164 out->nb_qid_offsets = 0;
/* Scan the queue array: reject unknown queues, track min/max, and
 * detect whether the IDs form a contiguous ascending run. */
166 for (i = 0; i < in->queue_num; ++i) {
167 uint16_t sw_qid = in->queue[i];
169 if (sw_qid >= sas->ethdev_rxq_count) {
170 sfc_err(sa, "flow-rss: parse: queue=%u does not exist",
175 if (sw_qid < sw_qid_min)
178 if (sw_qid > sw_qid_max)
/* Any deviation from queue[0], queue[0]+1, ... means explicit
 * per-entry offsets are required when programming the table. */
181 if (sw_qid != in->queue[0] + i)
182 out->nb_qid_offsets = in->queue_num;
185 out->qid_span = sw_qid_max - sw_qid_min + 1;
187 if (out->qid_span > flow_rss->qid_span_max) {
188 sfc_err(sa, "flow-rss: parse: queue ID span %u is too large; MAX=%u",
189 out->qid_span, flow_rss->qid_span_max);
193 if (sw_qid_minp != NULL)
194 *sw_qid_minp = sw_qid_min;
199 struct sfc_flow_rss_ctx *
/*
 * Look up an existing RSS context whose configuration and queue ID
 * offsets match the given ones, so the caller can reuse it instead
 * of allocating a new NIC resource.
 *
 * Fix: 'ctx' was declared 'static'. A static iterator retains its
 * value across invocations and is shared by all callers, which is
 * wrong for a function serialised only by the per-adapter lock and
 * can make a later call observe a stale pointer; it must be an
 * ordinary automatic local (each TAILQ_FOREACH fully initialises
 * it anyway, so no behavior on the success path changes).
 *
 * NOTE(review): this listing is line-sampled; the 'continue'
 * statements, the refcount bump on the reuse path and the return
 * statements are not visible in this view.
 */
200 sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
201 const struct sfc_flow_rss_conf *conf,
202 uint16_t sw_qid_min, const uint16_t *sw_qids)
204 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
205 struct sfc_flow_rss_ctx *ctx;
/* Must run under the adapter lock: the context list is shared. */
207 SFC_ASSERT(sfc_adapter_is_locked(sa));
209 TAILQ_FOREACH(ctx, &flow_rss->ctx_list, entries) {
/* Cheap whole-struct comparison first; mismatch skips the entry. */
210 if (memcmp(&ctx->conf, conf, sizeof(*conf)) != 0)
213 if (conf->nb_qid_offsets != 0) {
214 bool match_confirmed = true;
/* Non-contiguous queue arrays also require the per-entry
 * offsets (relative to sw_qid_min) to match exactly. */
217 for (i = 0; i < conf->nb_qid_offsets; ++i) {
218 uint16_t qid_offset = sw_qids[i] - sw_qid_min;
220 if (ctx->qid_offsets[i] != qid_offset) {
221 match_confirmed = false;
226 if (!match_confirmed)
230 sfc_dbg(sa, "flow-rss: reusing ctx=%p", ctx);
/*
 * Allocate a new RSS context for the given (already parsed)
 * configuration, record per-entry queue ID offsets when the queue
 * array is non-contiguous, and link the context into the adapter's
 * context list. The new context is handed back via 'ctxp'.
 *
 * NOTE(review): this listing is line-sampled; allocation-failure
 * unwinds, the '*ctxp' assignment and return statements are not
 * visible in this view.
 */
239 sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
240 const struct sfc_flow_rss_conf *conf, uint16_t sw_qid_min,
241 const uint16_t *sw_qids, struct sfc_flow_rss_ctx **ctxp)
243 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
244 struct sfc_flow_rss_ctx *ctx;
/* Must run under the adapter lock: the context list is shared. */
246 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Zeroed allocation: refcounts and flags start at 0. */
248 ctx = rte_zmalloc("sfc_flow_rss_ctx", sizeof(*ctx), 0);
252 if (conf->nb_qid_offsets != 0) {
255 ctx->qid_offsets = rte_calloc("sfc_flow_rss_ctx_qid_offsets",
256 conf->nb_qid_offsets,
257 sizeof(*ctx->qid_offsets), 0);
258 if (ctx->qid_offsets == NULL) {
/* Store queue IDs relative to the minimum, matching the layout
 * checked by sfc_flow_rss_ctx_reuse(). */
263 for (i = 0; i < conf->nb_qid_offsets; ++i)
264 ctx->qid_offsets[i] = sw_qids[i] - sw_qid_min;
270 TAILQ_INSERT_TAIL(&flow_rss->ctx_list, ctx, entries);
274 sfc_dbg(sa, "flow-rss: added ctx=%p", ctx);
/*
 * Drop a reference to an RSS context and, when the last reference
 * goes away, unlink it from the adapter's context list and free it.
 * A context still holding a NIC resource at that point is reported
 * as abandoned (the handle is not released here).
 *
 * NOTE(review): this listing is line-sampled; the refcount
 * decrement, dummy-context handling (if any) and early returns are
 * not visible in this view.
 */
280 sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
282 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
/* Must run under the adapter lock: the context list is shared. */
287 SFC_ASSERT(sfc_adapter_is_locked(sa));
292 SFC_ASSERT(ctx->refcnt != 0);
/* Still referenced elsewhere: nothing more to do. */
296 if (ctx->refcnt != 0)
/* Deleting while the NIC handle is live leaks the HW resource;
 * log loudly so the leak is diagnosable. */
299 if (ctx->nic_handle_refcnt != 0) {
300 sfc_err(sa, "flow-rss: deleting ctx=%p abandons its NIC resource: handle=0x%08x, refcnt=%u",
301 ctx, ctx->nic_handle, ctx->nic_handle_refcnt);
304 TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries);
/* rte_free(NULL) is a no-op, so contiguous-queue contexts (which
 * never allocated qid_offsets) are fine here. */
305 rte_free(ctx->qid_offsets);
308 sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx);
/*
 * Fill the shared bounce buffer with 'nb_tbl_entries' indirection
 * entries for the given context and program them to the NIC.
 * Entries are queue ID offsets: either the context's explicit
 * per-queue offsets repeated round-robin, or a simple modulo spread
 * over the contiguous queue ID span.
 *
 * Caller holds the adapter lock (asserted), which also serialises
 * use of the single bounce buffer.
 */
312 sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
313 unsigned int nb_tbl_entries,
314 const struct sfc_flow_rss_ctx *ctx)
316 const struct sfc_flow_rss_conf *conf = &ctx->conf;
317 unsigned int *tbl = sa->flow_rss.bounce_tbl;
320 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Nothing to program (e.g. an even-spread context needs no table). */
322 if (nb_tbl_entries == 0)
325 if (conf->nb_qid_offsets != 0) {
326 SFC_ASSERT(ctx->qid_offsets != NULL);
/* Non-contiguous queues: cycle through the recorded offsets. */
328 for (i = 0; i < nb_tbl_entries; ++i)
329 tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
/* Contiguous queues: spread entries evenly across the span. */
331 for (i = 0; i < nb_tbl_entries; ++i)
332 tbl[i] = i % conf->qid_span;
335 return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
336 tbl, nb_tbl_entries);
/*
 * Program an RSS context to the NIC: allocate the HW scale context
 * on first use (taking a reference on subsequent uses), then set the
 * hash mode, the hash key and the indirection table. On a failure
 * after a fresh allocation, the just-allocated NIC resource is
 * released via sfc_flow_rss_ctx_terminate().
 *
 * Dummy contexts fall back to the ethdev-level hash types and key.
 *
 * NOTE(review): this listing is line-sampled; the 'conf'
 * initialisation from ctx, several 'goto fail' unwinds, closing
 * braces and return statements are not visible in this view.
 */
340 sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
342 efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
343 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
344 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
345 const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
346 struct sfc_rss *ethdev_rss = &sas->rss;
347 struct sfc_flow_rss_conf *conf;
348 bool allocation_done = B_FALSE;
349 unsigned int nb_qid_offsets;
350 unsigned int nb_tbl_entries;
/* Must run under the adapter lock: shared NIC state is touched. */
358 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Number of distinct table inputs: explicit offsets if present,
 * otherwise the contiguous queue ID span. */
360 if (conf->nb_qid_offsets != 0)
361 nb_qid_offsets = conf->nb_qid_offsets;
363 nb_qid_offsets = conf->qid_span;
365 if (!RTE_IS_POWER_OF_2(nb_qid_offsets)) {
367 * Most likely, it pays to enlarge the indirection
368 * table to facilitate better distribution quality.
370 nb_qid_offsets = flow_rss->nb_tbl_entries_max;
/* Respect the NIC's minimum table size. */
373 nb_tbl_entries = RTE_MAX(flow_rss->nb_tbl_entries_min, nb_qid_offsets);
375 if (conf->rte_hash_function == RTE_ETH_HASH_FUNCTION_DEFAULT &&
376 conf->nb_qid_offsets == 0 &&
377 conf->qid_span <= encp->enc_rx_scale_even_spread_max_nqueues) {
379 * Conformance to a specific hash algorithm is a don't care to
380 * the user. The queue array is contiguous and ascending. That
381 * means that the even spread context may be requested here in
382 * order to avoid wasting precious indirection table resources.
384 ctx_type = EFX_RX_SCALE_EVEN_SPREAD;
/* First user of this context: allocate the NIC resource; later
 * users just take another reference on the existing handle. */
388 if (ctx->nic_handle_refcnt == 0) {
389 rc = efx_rx_scale_context_alloc_v2(sa->nic, ctx_type,
394 sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; rc=%d",
395 ctx, ctx_type, conf->qid_span, nb_tbl_entries, rc);
399 sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; handle=0x%08x",
400 ctx, ctx_type, conf->qid_span, nb_tbl_entries,
403 ++(ctx->nic_handle_refcnt);
/* Remember to undo the allocation if programming fails below. */
404 allocation_done = B_TRUE;
406 ++(ctx->nic_handle_refcnt);
/* Dummy contexts mirror the ethdev-level hash configuration. */
410 rc = efx_rx_scale_mode_set(sa->nic, ctx->nic_handle,
411 ethdev_rss->hash_alg,
412 (ctx->dummy) ? ethdev_rss->hash_types :
413 conf->efx_hash_types,
416 sfc_err(sa, "flow-rss: failed to configure hash for ctx=%p: efx_hash_alg=%d, efx_hash_types=0x%08x; rc=%d",
417 ctx, ethdev_rss->hash_alg,
418 (ctx->dummy) ? ethdev_rss->hash_types :
419 conf->efx_hash_types,
424 rc = efx_rx_scale_key_set(sa->nic, ctx->nic_handle,
425 (ctx->dummy) ? ethdev_rss->key : conf->key,
428 sfc_err(sa, "flow-rss: failed to set key for ctx=%p; rc=%d",
433 rc = sfc_flow_rss_ctx_program_tbl(sa, nb_tbl_entries, ctx);
435 sfc_err(sa, "flow-rss: failed to program table for ctx=%p: nb_tbl_entries=%u; rc=%d",
436 ctx, nb_tbl_entries, rc);
/* Unwind: drop the reference (and the NIC resource, if this call
 * was the one that allocated it). */
444 sfc_flow_rss_ctx_terminate(sa, ctx);
450 sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
455 SFC_ASSERT(sfc_adapter_is_locked(sa));
457 SFC_ASSERT(ctx->nic_handle_refcnt != 0);
458 --(ctx->nic_handle_refcnt);
460 if (ctx->nic_handle_refcnt == 0) {
463 rc = efx_rx_scale_context_free(sa->nic, ctx->nic_handle);
465 sfc_err(sa, "flow-rss: failed to release NIC resource for ctx=%p: handle=0x%08x; rc=%d",
466 ctx, ctx->nic_handle, rc);
468 sfc_warn(sa, "flow-rss: proceeding despite the prior error");
471 sfc_dbg(sa, "flow-rss: released NIC resource for ctx=%p; rc=%d",