1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2022 Xilinx, Inc.
9 #include <rte_common.h>
11 #include <rte_tailq.h>
16 #include "sfc_debug.h"
17 #include "sfc_flow_rss.h"
/*
 * One-time attach of the per-adapter flow RSS facility: cache the NIC's
 * indirection table limits from the NIC configuration and allocate a
 * bounce buffer large enough for the biggest supported indirection table.
 *
 * NOTE(review): excerpted listing — the opening brace, 'rc' handling and
 * return statements are not visible here; error path logs "failed %d".
 */
22 sfc_flow_rss_attach(struct sfc_adapter *sa)
24 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
25 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
28 sfc_log_init(sa, "entry");
/* Cache NIC limits: maximum queue ID span and min/max table sizes. */
30 flow_rss->qid_span_max = encp->enc_rx_scale_indirection_max_nqueues;
31 flow_rss->nb_tbl_entries_min = encp->enc_rx_scale_tbl_min_nentries;
32 flow_rss->nb_tbl_entries_max = encp->enc_rx_scale_tbl_max_nentries;
34 sfc_log_init(sa, "allocate the bounce buffer for indirection entries");
/* Sized for the largest table so one buffer can serve any context. */
35 flow_rss->bounce_tbl = rte_calloc("sfc_flow_rss_bounce_tbl",
36 flow_rss->nb_tbl_entries_max,
37 sizeof(*flow_rss->bounce_tbl), 0);
38 if (flow_rss->bounce_tbl == NULL) {
/* Start with no RSS contexts tracked. */
43 TAILQ_INIT(&flow_rss->ctx_list);
45 sfc_log_init(sa, "done");
50 sfc_log_init(sa, "failed %d", rc);
/*
 * Counterpart of sfc_flow_rss_attach(): release the bounce buffer used
 * for indirection table entries. rte_free(NULL) is a no-op, so this is
 * safe even if attach failed part-way.
 */
56 sfc_flow_rss_detach(struct sfc_adapter *sa)
58 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
60 sfc_log_init(sa, "entry");
62 sfc_log_init(sa, "free the bounce buffer for indirection entries");
63 rte_free(flow_rss->bounce_tbl);
65 sfc_log_init(sa, "done");
/*
 * Validate a flow API RSS action ('in') and translate it into the
 * driver-internal representation ('out'). Falls back to the ethdev-level
 * RSS settings (hash types, key) when the action leaves them unspecified.
 * Optionally reports the smallest referenced ethdev Rx queue ID via
 * 'sw_qid_minp'.
 *
 * NOTE(review): excerpted listing — braces, error 'return' statements and
 * some declarations ('rc', 'key', 'sw_qid_max', 'i') are not visible here.
 */
69 sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
70 const struct rte_flow_action_rss *in,
71 struct sfc_flow_rss_conf *out, uint16_t *sw_qid_minp)
73 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
74 const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
75 const struct sfc_rss *ethdev_rss = &sas->rss;
84 * The caller demands that RSS hash be computed
85 * within the given encapsulation frame / level.
86 * Per flow control for that is not implemented.
88 sfc_err(sa, "flow-rss: parse: 'level' must be 0");
/* Non-zero 'types': translate rte_flow hash types to EFX ones. */
93 rc = sfc_rx_hf_rte_to_efx(sa, in->types,
94 &out->efx_hash_types);
96 sfc_err(sa, "flow-rss: parse: failed to process 'types'");
/* Zero 'types': inherit the ethdev-level hash type configuration. */
100 sfc_dbg(sa, "flow-rss: parse: 'types' is 0; proceeding with ethdev setting");
101 out->efx_hash_types = ethdev_rss->hash_types;
/* The key must be either absent (0 length) or exactly full-sized. */
104 if (in->key_len != 0) {
105 if (in->key_len != sizeof(out->key)) {
106 sfc_err(sa, "flow-rss: parse: 'key_len' must be either %zu or 0",
111 if (in->key == NULL) {
112 sfc_err(sa, "flow-rss: parse: 'key' is NULL");
/* No key given: inherit the ethdev-level RSS key. */
118 sfc_dbg(sa, "flow-rss: parse: 'key_len' is 0; proceeding with ethdev key");
119 key = ethdev_rss->key;
122 rte_memcpy(out->key, key, sizeof(out->key));
125 case RTE_ETH_HASH_FUNCTION_DEFAULT:
127 * DEFAULT means that conformance to a specific
128 * hash algorithm is a don't care to the caller.
129 * The driver can pick the one it deems optimal.
132 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
/* Toeplitz is only honoured if the ethdev-level algorithm matches. */
133 if (ethdev_rss->hash_alg != EFX_RX_HASHALG_TOEPLITZ) {
134 sfc_err(sa, "flow-rss: parse: 'func' TOEPLITZ is unavailable; use DEFAULT");
139 sfc_err(sa, "flow-rss: parse: 'func' #%d is unsupported", in->func);
/* Queue list sanity: non-empty, bounded by the max table size, non-NULL. */
143 if (in->queue_num == 0) {
144 sfc_err(sa, "flow-rss: parse: 'queue_num' is 0; MIN=1");
148 if (in->queue_num > flow_rss->nb_tbl_entries_max) {
149 sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
150 flow_rss->nb_tbl_entries_max);
154 if (in->queue == NULL) {
155 sfc_err(sa, "flow-rss: parse: 'queue' is NULL");
/*
 * Seed the min with the largest valid queue ID so the loop below
 * narrows it down; every listed queue must exist on the ethdev.
 */
159 sw_qid_min = sas->ethdev_rxq_count - 1;
162 out->nb_qid_offsets = 0;
164 for (i = 0; i < in->queue_num; ++i) {
165 uint16_t sw_qid = in->queue[i];
167 if (sw_qid >= sas->ethdev_rxq_count) {
168 sfc_err(sa, "flow-rss: parse: queue=%u does not exist",
173 if (sw_qid < sw_qid_min)
176 if (sw_qid > sw_qid_max)
/*
 * Any break in the contiguous ascending pattern means explicit
 * per-entry offsets are needed rather than a simple span.
 */
179 if (sw_qid != in->queue[0] + i)
180 out->nb_qid_offsets = in->queue_num;
183 out->qid_span = sw_qid_max - sw_qid_min + 1;
185 if (out->qid_span > flow_rss->qid_span_max) {
186 sfc_err(sa, "flow-rss: parse: queue ID span %u is too large; MAX=%u",
187 out->qid_span, flow_rss->qid_span_max);
191 if (sw_qid_minp != NULL)
192 *sw_qid_minp = sw_qid_min;
/*
 * Search the adapter's list of RSS contexts for one whose configuration
 * matches 'conf' exactly and, when explicit per-queue offsets are in use,
 * whose stored offsets match 'sw_qids' relative to 'sw_qid_min'.
 * Must be called with the adapter lock held.
 *
 * NOTE(review): excerpted listing — refcount bump and 'return' statements
 * are not visible here.
 */
197 struct sfc_flow_rss_ctx *
198 sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
199 const struct sfc_flow_rss_conf *conf,
200 uint16_t sw_qid_min, const uint16_t *sw_qids)
202 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
/*
 * Fix: the list cursor was erroneously declared 'static', which made a
 * purely local iteration variable shared across all adapters and calls
 * (non-reentrant). It must be an ordinary automatic variable.
 */
203 struct sfc_flow_rss_ctx *ctx;
205 SFC_ASSERT(sfc_adapter_is_locked(sa));
207 TAILQ_FOREACH(ctx, &flow_rss->ctx_list, entries) {
/* Cheap whole-struct comparison rules out most candidates first. */
208 if (memcmp(&ctx->conf, conf, sizeof(*conf)) != 0)
/* Configs match; verify explicit queue offsets if they are in use. */
211 if (conf->nb_qid_offsets != 0) {
212 bool match_confirmed = true;
215 for (i = 0; i < conf->nb_qid_offsets; ++i) {
216 uint16_t qid_offset = sw_qids[i] - sw_qid_min;
218 if (ctx->qid_offsets[i] != qid_offset) {
219 match_confirmed = false;
224 if (!match_confirmed)
228 sfc_dbg(sa, "flow-rss: reusing ctx=%p", ctx);
/*
 * Allocate a new RSS context for 'conf', store the per-queue offsets
 * (relative to 'sw_qid_min') when explicit offsets are in use, link the
 * context into the adapter's list and hand it back via 'ctxp'.
 * Must be called with the adapter lock held.
 *
 * NOTE(review): excerpted listing — error unwind and 'return' statements
 * are not visible here.
 */
237 sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
238 const struct sfc_flow_rss_conf *conf, uint16_t sw_qid_min,
239 const uint16_t *sw_qids, struct sfc_flow_rss_ctx **ctxp)
241 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
242 struct sfc_flow_rss_ctx *ctx;
244 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Zero-initialised so refcounts and pointers start clean. */
246 ctx = rte_zmalloc("sfc_flow_rss_ctx", sizeof(*ctx), 0);
250 if (conf->nb_qid_offsets != 0) {
253 ctx->qid_offsets = rte_calloc("sfc_flow_rss_ctx_qid_offsets",
254 conf->nb_qid_offsets,
255 sizeof(*ctx->qid_offsets), 0);
256 if (ctx->qid_offsets == NULL) {
/* Store queue IDs as offsets from the minimum queue ID. */
261 for (i = 0; i < conf->nb_qid_offsets; ++i)
262 ctx->qid_offsets[i] = sw_qids[i] - sw_qid_min;
268 TAILQ_INSERT_TAIL(&flow_rss->ctx_list, ctx, entries);
272 sfc_dbg(sa, "flow-rss: added ctx=%p", ctx);
/*
 * Drop a reference on an RSS context; when the last reference is gone,
 * unlink the context from the adapter's list and free it. Warns if the
 * context still holds a NIC resource handle at that point (it would be
 * abandoned). Must be called with the adapter lock held.
 *
 * NOTE(review): excerpted listing — the refcount decrement and early
 * 'return' are not visible here.
 */
278 sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
280 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
285 SFC_ASSERT(sfc_adapter_is_locked(sa));
290 SFC_ASSERT(ctx->refcnt != 0);
/* Other users remain: nothing more to do. */
294 if (ctx->refcnt != 0)
/* Last reference gone but the NIC resource was never released. */
297 if (ctx->nic_handle_refcnt != 0) {
298 sfc_err(sa, "flow-rss: deleting ctx=%p abandons its NIC resource: handle=0x%08x, refcnt=%u",
299 ctx, ctx->nic_handle, ctx->nic_handle_refcnt);
302 TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries);
303 rte_free(ctx->qid_offsets);
306 sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx);
/*
 * Fill the adapter's bounce buffer with 'nb_tbl_entries' indirection
 * entries for 'ctx' and program them into the NIC's RSS scale table.
 * Entries are either the context's explicit per-queue offsets repeated
 * cyclically, or a simple round-robin over the queue ID span.
 * Must be called with the adapter lock held.
 */
310 sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
311 unsigned int nb_tbl_entries,
312 const struct sfc_flow_rss_ctx *ctx)
314 const struct sfc_flow_rss_conf *conf = &ctx->conf;
315 unsigned int *tbl = sa->flow_rss.bounce_tbl;
318 SFC_ASSERT(sfc_adapter_is_locked(sa));
320 if (conf->nb_qid_offsets != 0) {
321 SFC_ASSERT(ctx->qid_offsets != NULL);
/* Repeat the explicit offsets cyclically to fill the table. */
323 for (i = 0; i < nb_tbl_entries; ++i)
324 tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
/* No explicit offsets: round-robin across the contiguous span. */
326 for (i = 0; i < nb_tbl_entries; ++i)
327 tbl[i] = i % conf->qid_span;
330 return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
331 tbl, nb_tbl_entries);
/*
 * Program an RSS context into the NIC: allocate (or re-reference) an
 * exclusive NIC scale context, then configure its hash mode, key and
 * indirection table. Dummy contexts inherit the ethdev-level hash types
 * and key instead of the context's own configuration.
 * Must be called with the adapter lock held.
 *
 * NOTE(review): excerpted listing — 'conf' initialisation, several
 * declarations, 'goto fail' unwind paths and 'return' statements are not
 * visible here; 'allocation_done' gates the terminate-on-failure path.
 */
335 sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
337 efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
338 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
339 const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
340 struct sfc_rss *ethdev_rss = &sas->rss;
341 struct sfc_flow_rss_conf *conf;
342 bool allocation_done = B_FALSE;
343 unsigned int nb_qid_offsets;
344 unsigned int nb_tbl_entries;
352 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Number of distinct entries the indirection pattern needs. */
354 if (conf->nb_qid_offsets != 0)
355 nb_qid_offsets = conf->nb_qid_offsets;
357 nb_qid_offsets = conf->qid_span;
359 if (!RTE_IS_POWER_OF_2(nb_qid_offsets)) {
361 * Most likely, it pays to enlarge the indirection
362 * table to facilitate better distribution quality.
364 nb_qid_offsets = flow_rss->nb_tbl_entries_max;
/* Respect the NIC's minimum table size. */
367 nb_tbl_entries = RTE_MAX(flow_rss->nb_tbl_entries_min, nb_qid_offsets);
/* First user of this context: allocate the NIC scale context. */
369 if (ctx->nic_handle_refcnt == 0) {
370 rc = efx_rx_scale_context_alloc_v2(sa->nic, ctx_type,
375 sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; rc=%d",
376 ctx, ctx_type, conf->qid_span, nb_tbl_entries, rc);
380 sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; handle=0x%08x",
381 ctx, ctx_type, conf->qid_span, nb_tbl_entries,
384 ++(ctx->nic_handle_refcnt);
385 allocation_done = B_TRUE;
/* Context already allocated: just take another reference. */
387 ++(ctx->nic_handle_refcnt);
/* Dummy contexts fall back to the ethdev-level hash types. */
391 rc = efx_rx_scale_mode_set(sa->nic, ctx->nic_handle,
392 ethdev_rss->hash_alg,
393 (ctx->dummy) ? ethdev_rss->hash_types :
394 conf->efx_hash_types,
397 sfc_err(sa, "flow-rss: failed to configure hash for ctx=%p: efx_hash_alg=%d, efx_hash_types=0x%08x; rc=%d",
398 ctx, ethdev_rss->hash_alg,
399 (ctx->dummy) ? ethdev_rss->hash_types :
400 conf->efx_hash_types,
/* Dummy contexts likewise use the ethdev-level key. */
405 rc = efx_rx_scale_key_set(sa->nic, ctx->nic_handle,
406 (ctx->dummy) ? ethdev_rss->key : conf->key,
409 sfc_err(sa, "flow-rss: failed to set key for ctx=%p; rc=%d",
414 rc = sfc_flow_rss_ctx_program_tbl(sa, nb_tbl_entries, ctx);
416 sfc_err(sa, "flow-rss: failed to program table for ctx=%p: nb_tbl_entries=%u; rc=%d",
417 ctx, nb_tbl_entries, rc);
/* Failure unwind: drop the reference (and free) taken above. */
425 sfc_flow_rss_ctx_terminate(sa, ctx);
/*
 * Drop one NIC-resource reference on the context; when it reaches zero,
 * free the NIC scale context. A failure to free is logged and tolerated.
 * Must be called with the adapter lock held.
 *
 * NOTE(review): excerpted listing — the function's tail extends beyond
 * the visible lines.
 */
431 sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
436 SFC_ASSERT(sfc_adapter_is_locked(sa));
438 SFC_ASSERT(ctx->nic_handle_refcnt != 0);
439 --(ctx->nic_handle_refcnt);
441 if (ctx->nic_handle_refcnt == 0) {
444 rc = efx_rx_scale_context_free(sa->nic, ctx->nic_handle);
446 sfc_err(sa, "flow-rss: failed to release NIC resource for ctx=%p: handle=0x%08x; rc=%d",
447 ctx, ctx->nic_handle, rc);
/* Best effort: carry on even if the NIC refused the free. */
449 sfc_warn(sa, "flow-rss: proceeding despite the prior error");
452 sfc_dbg(sa, "flow-rss: released NIC resource for ctx=%p; rc=%d",