1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include "tf_shadow_tbl.h"
14 * The implementation includes 3 tables per table type.
16 * - sized so that a minimum of 4 slots per shadow entry are available to
17 * minimize the likelihood of collisions.
19 * - sized to the number of entries requested and is directly indexed
20 * - the index is zero based and is the table index - the base address
21 * - the data associated with the entry is stored in the key table.
22 * - The stored key is actually the data associated with the entry.
23 * - shadow result table
24 * - the result table is stored separately since it only needs to be accessed
25 * when the key matches.
26 * - the result has a back pointer to the hash table via the hb handle. The
27 * hb handle is a 32 bit representation of the hash with a valid bit, bucket
28 * element index, and the hash index. It is necessary to store the hb handle
29 * with the result since subsequent removes only provide the table index.
31 * - Max entries is limited in the current implementation since bit 15 is the
32 * valid bit in the hash table.
33 * - A 16bit hash is calculated and masked based on the number of entries
34 * - 64b wide bucket is used and broken into 4x16bit elements.
35 * This decision is based on quicker bucket scanning to determine if any
36 * elements are in use.
37 * bit 15 of each bucket element is the valid bit; this is done to prevent having
38 * to read the larger key/result data for determining VALID. It also aids
39 * in the more efficient scanning of the bucket for slot usage.
43 * The maximum number of shadow entries supported. The value also doubles as
44 * the maximum number of hash buckets. There are only 15 bits of data per
45 * bucket to point to the shadow tables.
47 #define TF_SHADOW_ENTRIES_MAX (1 << 15)
49 /* The number of elements(BE) per hash bucket (HB) */
50 #define TF_SHADOW_HB_NUM_ELEM (4)
/* Bit 15 of a 16b bucket element marks the element as in-use */
51 #define TF_SHADOW_BE_VALID (1 << 15)
/* True when the 16b bucket element has its valid bit set */
52 #define TF_SHADOW_BE_IS_VALID(be) (((be) & TF_SHADOW_BE_VALID) != 0)
/*
 * The hash bucket handle is 32b
 * - bit 31, the Valid bit
 * - bits 29-30, the bucket element index (0..TF_SHADOW_HB_NUM_ELEM - 1)
 * - bits 0-15, the hash idx (is masked based on the allocated size)
 */
#define TF_SHADOW_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
/*
 * FIX: the CREATE macro's continuation line carrying the payload was lost in
 * this listing (its trailing '\' merged it into the BE_GET macro below), so
 * the handle never encoded the element or the hash index.  Restore the
 * valid-bit | element | index encoding.
 */
#define TF_SHADOW_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
	((be) << 29) | (idx))

/* Extract the bucket element index (bits 29-30) from a handle */
#define TF_SHADOW_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
	(TF_SHADOW_HB_NUM_ELEM - 1))

/* Extract the hash index, masked to the context's allocated hash size */
#define TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
	(ctxt)->hash_ctxt.hid_mask)
71 * The idx provided by the caller is within a region, so currently the base is
72 * either added or subtracted from the idx to ensure it can be used as a
/* NOTE(review): sentence truncated in this listing — presumably "...as a zero-based shadow index"; confirm against the original source. */
76 /* Convert the table index to a shadow index */
77 #define TF_SHADOW_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
78 (ctxt)->shadow_ctxt.base_addr)
80 /* Convert the shadow index to a tbl index */
81 #define TF_SHADOW_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
82 (ctxt)->shadow_ctxt.base_addr)
84 /* Simple helper masks for clearing an element from the bucket */
/* BEn_MASK_CLEAR zeroes the nth 16b element of the 64b bucket, keeping the others */
85 #define TF_SHADOW_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
86 #define TF_SHADOW_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
87 #define TF_SHADOW_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
88 #define TF_SHADOW_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
91 * This should be coming from external, but for now it is assumed that no key
92 * is greater than 512 bits (64B). This makes allocation of the key table
93 * easier without having to allocate on the fly.
95 #define TF_SHADOW_MAX_KEY_SZ 64
98 * Local only defines for the internal data.
102 * tf_shadow_tbl_shadow_key_entry is the key entry of the key table.
103 * The key stored in the table is the result data of the index table.
105 struct tf_shadow_tbl_shadow_key_entry {
/* Fixed-size copy of the entry's data (used as the key); TF_SHADOW_MAX_KEY_SZ (64B) avoids on-the-fly allocation */
106 uint8_t key[TF_SHADOW_MAX_KEY_SZ];
110 * tf_shadow_tbl_shadow_result_entry is the result table entry.
111 * The result table writes are broken into two phases:
112 * - The search phase, which stores the hb_handle and key size and
113 * - The set phase, which writes the refcnt
115 struct tf_shadow_tbl_shadow_result_entry {
/*
 * NOTE(review): the members are missing from this listing.  Uses elsewhere
 * in this file reference key_size, hb_handle and refcnt — confirm the exact
 * member types against the original source.
 */
122 * tf_shadow_tbl_shadow_ctxt holds all information for accessing the key and
125 struct tf_shadow_tbl_shadow_ctxt {
/* Key table, directly indexed by the zero-based shadow index */
126 struct tf_shadow_tbl_shadow_key_entry *sh_key_tbl;
/* Result table, parallel to sh_key_tbl (same shadow index) */
127 struct tf_shadow_tbl_shadow_result_entry *sh_res_tbl;
/* Number of entries in both shadow tables */
129 uint16_t num_entries;
/* NOTE(review): a base_addr member is referenced by the IDX conversion macros but is missing from this listing — confirm. */
134 * tf_shadow_tbl_hash_ctxt holds all information related to accessing the hash
137 struct tf_shadow_tbl_hash_ctxt {
/* Number of 64b buckets allocated (power of two) */
140 uint16_t hash_entries;
/* NOTE(review): hashtbl (uint64_t bucket array) and hid_mask members are referenced elsewhere in this file but missing from this listing — confirm. */
144 * tf_shadow_tbl_ctxt holds the hash and shadow tables for the current shadow
145 * table db. This structure is per table type as each table type has
146 * its own shadow and hash table.
148 struct tf_shadow_tbl_ctxt {
149 struct tf_shadow_tbl_shadow_ctxt shadow_ctxt;
150 struct tf_shadow_tbl_hash_ctxt hash_ctxt;
154 * tf_shadow_tbl_db is the allocated db structure returned as an opaque
155 * void * pointer to the caller during create db. It holds the pointers for
156 * each table associated with the db.
158 struct tf_shadow_tbl_db {
159 /* Each context holds the shadow and hash table information */
/* NULL when the type is not searchable or no allocation was requested */
160 struct tf_shadow_tbl_ctxt *ctxt[TF_TBL_TYPE_MAX];
164 * Simple routine that decides what table types can be searchable.
167 static int tf_shadow_tbl_is_searchable(enum tf_tbl_type type)
172 case TF_TBL_TYPE_ACT_ENCAP_8B:
173 case TF_TBL_TYPE_ACT_ENCAP_16B:
174 case TF_TBL_TYPE_ACT_ENCAP_32B:
175 case TF_TBL_TYPE_ACT_ENCAP_64B:
176 case TF_TBL_TYPE_ACT_SP_SMAC:
177 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
178 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
179 case TF_TBL_TYPE_ACT_MODIFY_IPV4:
180 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
181 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
193 * Returns the number of entries in the contexts shadow table.
195 static inline uint16_t
196 tf_shadow_tbl_sh_num_entries_get(struct tf_shadow_tbl_ctxt *ctxt)
198 return ctxt->shadow_ctxt.num_entries;
202 * Compare the give key with the key in the shadow table.
204 * Returns 0 if the keys match
207 tf_shadow_tbl_key_cmp(struct tf_shadow_tbl_ctxt *ctxt,
212 if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
213 sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) || !key)
216 return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
220 * Free the memory associated with the context.
223 tf_shadow_tbl_ctxt_delete(struct tf_shadow_tbl_ctxt *ctxt)
228 tfp_free(ctxt->hash_ctxt.hashtbl);
229 tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
230 tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
234 * The TF Shadow TBL context is per TBL and holds all information relating to
235 * managing the shadow and search capability. This routine allocates data that
236 * needs to be deallocated by the tf_shadow_tbl_ctxt_delete prior when deleting
240 tf_shadow_tbl_ctxt_create(struct tf_shadow_tbl_ctxt *ctxt,
241 uint16_t num_entries,
244 struct tfp_calloc_parms cparms;
245 uint16_t hash_size = 1;
249 /* Hash table is a power of two that holds the number of entries */
250 if (num_entries > TF_SHADOW_ENTRIES_MAX) {
251 TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
253 TF_SHADOW_ENTRIES_MAX);
/* Round the hash size up to the next power of two >= num_entries */
257 while (hash_size < num_entries)
258 hash_size = hash_size << 1;
/* Mask used to reduce a 16b hash to a valid bucket index */
260 hash_mask = hash_size - 1;
262 /* Allocate the hash table */
263 cparms.nitems = hash_size;
264 cparms.size = sizeof(uint64_t);
265 cparms.alignment = 0;
266 rc = tfp_calloc(&cparms);
/* NOTE(review): the "if (rc) goto error"-style checks after each tfp_calloc are missing from this listing — confirm against the original source. */
269 ctxt->hash_ctxt.hashtbl = cparms.mem_va;
270 ctxt->hash_ctxt.hid_mask = hash_mask;
271 ctxt->hash_ctxt.hash_entries = hash_size;
273 /* allocate the shadow tables */
274 /* allocate the shadow key table */
275 cparms.nitems = num_entries;
276 cparms.size = sizeof(struct tf_shadow_tbl_shadow_key_entry);
277 cparms.alignment = 0;
278 rc = tfp_calloc(&cparms);
281 ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
283 /* allocate the shadow result table */
284 cparms.nitems = num_entries;
285 cparms.size = sizeof(struct tf_shadow_tbl_shadow_result_entry);
286 cparms.alignment = 0;
287 rc = tfp_calloc(&cparms);
290 ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
292 ctxt->shadow_ctxt.num_entries = num_entries;
293 ctxt->shadow_ctxt.base_addr = base_addr;
/* Error path: release any partial allocations (label line missing from this listing) */
297 tf_shadow_tbl_ctxt_delete(ctxt);
303 * Get a shadow table context given the db and the table type
305 static struct tf_shadow_tbl_ctxt *
306 tf_shadow_tbl_ctxt_get(struct tf_shadow_tbl_db *shadow_db,
307 enum tf_tbl_type type)
309 if (type >= TF_TBL_TYPE_MAX ||
311 !shadow_db->ctxt[type])
314 return shadow_db->ctxt[type];
318 * Sets the hash entry into the table given the table context, hash bucket
319 * handle, and shadow index.
322 tf_shadow_tbl_set_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
326 uint16_t hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
327 uint16_t be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
328 uint64_t entry = sh_idx | TF_SHADOW_BE_VALID;
330 if (hid >= ctxt->hash_ctxt.hash_entries)
333 ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
338 * Clears the hash entry given the TBL context and hash bucket handle.
341 tf_shadow_tbl_clear_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
347 if (!TF_SHADOW_HB_HANDLE_IS_VALID(hb_handle))
350 hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
351 be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
352 bucket = &ctxt->hash_ctxt.hashtbl[hid];
356 *bucket = TF_SHADOW_BE0_MASK_CLEAR(*bucket);
359 *bucket = TF_SHADOW_BE1_MASK_CLEAR(*bucket);
362 *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
365 *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
369 * Since the BE_GET masks non-inclusive bits, this will not
377 * Clears the shadow key and result entries given the table context and
381 tf_shadow_tbl_clear_sh_entry(struct tf_shadow_tbl_ctxt *ctxt,
384 struct tf_shadow_tbl_shadow_key_entry *sk_entry;
385 struct tf_shadow_tbl_shadow_result_entry *sr_entry;
387 if (sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt))
390 sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
391 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
394 * memset key/result to zero for now, possibly leave the data alone
395 * in the future and rely on the valid bit in the hash table.
397 memset(sk_entry, 0, sizeof(struct tf_shadow_tbl_shadow_key_entry));
398 memset(sr_entry, 0, sizeof(struct tf_shadow_tbl_shadow_result_entry));
402 * Binds the allocated tbl index with the hash and shadow tables.
403 * The entry will be incomplete until the set has happened with the result
/* NOTE(review): several lines (return type, braces, early returns after each error log, rc declaration) are missing from this listing — confirm against the original source. */
407 tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms)
411 struct tf_shadow_tbl_ctxt *ctxt;
412 struct tf_shadow_tbl_db *shadow_db;
413 struct tf_shadow_tbl_shadow_key_entry *sk_entry;
414 struct tf_shadow_tbl_shadow_result_entry *sr_entry;
/* Reject NULL parms or a handle that never came from a successful search */
416 if (!parms || !TF_SHADOW_HB_HANDLE_IS_VALID(parms->hb_handle) ||
418 TFP_DRV_LOG(ERR, "Invalid parms\n");
422 shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
423 ctxt = tf_shadow_tbl_ctxt_get(shadow_db, parms->type);
425 TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
426 tf_tbl_type_2_str(parms->type));
/* Convert the caller's table index into the zero-based shadow index */
430 idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, parms->idx);
431 len = parms->data_sz_in_bytes;
432 if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) ||
433 len > TF_SHADOW_MAX_KEY_SZ) {
434 TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
435 tf_dir_2_str(parms->dir),
436 tf_tbl_type_2_str(parms->type),
438 TF_SHADOW_MAX_KEY_SZ, idx);
/* Mark the element valid in the hash bucket selected by the handle */
443 rc = tf_shadow_tbl_set_hash_entry(ctxt, parms->hb_handle, idx);
447 sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
448 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
450 /* For tables, the data is the key */
451 memcpy(sk_entry->key, parms->data, len);
453 /* Write the result table */
454 sr_entry->key_size = len;
/* Back pointer to the hash bucket so a later remove (which only has the table index) can clear it */
455 sr_entry->hb_handle = parms->hb_handle;
456 sr_entry->refcnt = 1;
462 * Deletes hash/shadow information if no more references.
464 * Returns 0 - The caller should delete the table entry in hardware.
465 * Returns non-zero - The number of references to the entry
/* NOTE(review): return type, braces and the early-return statements after the error logs are missing from this listing — confirm against the original source. */
468 tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms)
472 struct tf_shadow_tbl_ctxt *ctxt;
473 struct tf_shadow_tbl_db *shadow_db;
474 struct tf_tbl_free_parms *fparms;
475 struct tf_shadow_tbl_shadow_result_entry *sr_entry;
477 if (!parms || !parms->fparms) {
478 TFP_DRV_LOG(ERR, "Invalid parms\n");
482 fparms = parms->fparms;
/* Untracked types are never shadowed; nothing to do */
483 if (!tf_shadow_tbl_is_searchable(fparms->type))
486 * Initialize the ref count to zero. The default would be to remove
491 shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
492 ctxt = tf_shadow_tbl_ctxt_get(shadow_db, fparms->type);
494 TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
495 tf_tbl_type_2_str(fparms->type));
/* Convert the caller's table index into the zero-based shadow index */
499 idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, fparms->idx);
500 if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
501 TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
502 tf_tbl_type_2_str(fparms->type),
504 tf_shadow_tbl_sh_num_entries_get(ctxt));
508 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
/* Last reference: clear both the hash bucket element and the shadow entry */
509 if (sr_entry->refcnt <= 1) {
510 hb_handle = sr_entry->hb_handle;
511 tf_shadow_tbl_clear_hash_entry(ctxt, hb_handle);
512 tf_shadow_tbl_clear_sh_entry(ctxt, idx);
/* Report the remaining reference count back to the caller */
515 fparms->ref_cnt = sr_entry->refcnt;
/*
 * Searches the shadow table for an entry whose stored key matches the
 * caller's result data.  Sets sparms->search_status to HIT, MISS or REJECT
 * and, on HIT/MISS, returns the hash bucket handle used by a later bind.
 *
 * NOTE(review): many lines (return type, braces, declarations of rc, len,
 * hid32, bucket and i, the decrement of be_avail, and several return
 * statements) are missing from this listing — confirm against the original.
 */
522 tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms)
527 struct tf_shadow_tbl_ctxt *ctxt;
528 struct tf_shadow_tbl_db *shadow_db;
529 uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
530 struct tf_tbl_alloc_search_parms *sparms;
531 uint32_t be_avail = TF_SHADOW_HB_NUM_ELEM;
533 if (!parms || !parms->sparms) {
534 TFP_DRV_LOG(ERR, "tbl search with invalid parms\n");
538 sparms = parms->sparms;
539 /* Check that caller was supposed to call search */
540 if (!tf_shadow_tbl_is_searchable(sparms->type))
543 /* Initialize return values to invalid */
545 sparms->search_status = REJECT;
546 parms->hb_handle = 0;
549 shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
550 ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
552 TFP_DRV_LOG(ERR, "%s Unable to get tbl mgr context\n",
553 tf_tbl_type_2_str(sparms->type));
557 len = sparms->result_sz_in_bytes;
558 if (len > TF_SHADOW_MAX_KEY_SZ || !sparms->result || !len) {
559 TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p\n",
560 tf_dir_2_str(sparms->dir),
561 tf_tbl_type_2_str(sparms->type),
568 * Calculate the crc32
569 * Fold it to create a 16b value
570 * Reduce it to fit the table
572 hid32 = tf_hash_calc_crc32(sparms->result, len);
573 hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
574 hid_mask = ctxt->hash_ctxt.hid_mask;
575 hb_idx = hid16 & hid_mask;
577 bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
579 /* empty bucket means a miss and available entry */
580 sparms->search_status = MISS;
581 parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, 0);
586 /* Set the avail to max so we can detect when there is an avail entry */
587 be_avail = TF_SHADOW_HB_NUM_ELEM;
/* Scan the 4x16b elements of the bucket for a key match or a free slot */
588 for (i = 0; i < TF_SHADOW_HB_NUM_ELEM; i++) {
589 shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
590 be_valid = TF_SHADOW_BE_IS_VALID(shtbl_idx);
592 /* The element is avail, keep going */
596 /* There is a valid entry, compare it */
/* Strip the valid bit to recover the zero-based shadow index */
597 shtbl_key = shtbl_idx & ~TF_SHADOW_BE_VALID;
598 if (!tf_shadow_tbl_key_cmp(ctxt,
603 * It matches, increment the ref count if the caller
604 * requested allocation and return the info
607 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
608 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
611 sparms->search_status = HIT;
613 TF_SHADOW_HB_HANDLE_CREATE(hb_idx, i);
614 sparms->idx = TF_SHADOW_SHIDX_TO_IDX(ctxt, shtbl_key);
616 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
622 /* No hits, return avail entry if exists */
623 if (be_avail < TF_SHADOW_HB_NUM_ELEM) {
625 * There is an available hash entry, so return MISS and the
626 * hash handle for the subsequent bind.
628 parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, be_avail);
629 sparms->search_status = MISS;
633 /* No room for the entry in the hash table, must REJECT */
634 sparms->search_status = REJECT;
/*
 * Completes a previously-bound entry after the hardware set.  Looks up the
 * shadow result entry for the caller's table index and verifies the bind
 * occurred (valid hb_handle).
 *
 * NOTE(review): return type, braces, early returns after the error logs and
 * the trailing statements are missing from this listing — confirm against
 * the original source.
 */
641 tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
644 struct tf_shadow_tbl_ctxt *ctxt;
645 struct tf_tbl_set_parms *sparms;
646 struct tf_shadow_tbl_db *shadow_db;
647 struct tf_shadow_tbl_shadow_result_entry *sr_entry;
649 if (!parms || !parms->sparms) {
650 TFP_DRV_LOG(ERR, "Null parms\n");
654 sparms = parms->sparms;
655 if (!sparms->data || !sparms->data_sz_in_bytes) {
656 TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
657 tf_dir_2_str(sparms->dir),
658 tf_tbl_type_2_str(sparms->type));
662 shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
663 ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
665 /* We aren't tracking this table, so return success */
666 TFP_DRV_LOG(DEBUG, "%s Unable to get tbl mgr context\n",
667 tf_tbl_type_2_str(sparms->type));
/* Convert the caller's table index into the zero-based shadow index */
671 idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, sparms->idx);
672 if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
673 TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
674 tf_dir_2_str(sparms->dir),
675 tf_tbl_type_2_str(sparms->type),
680 /* Write the result table, the key/hash has been written already */
681 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
684 * If the handle is not valid, the bind was never called. We aren't
685 * tracking this entry.
687 if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
/*
 * Frees all resources associated with the shadow db: each per-type context
 * (and its tables) and then the db itself.
 *
 * NOTE(review): return type, braces, loop counter declaration and the final
 * free of shadow_db/return are missing from this listing — confirm against
 * the original source.
 */
694 tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms)
696 struct tf_shadow_tbl_db *shadow_db;
699 TF_CHECK_PARMS1(parms);
701 shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
703 TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
/* Delete every allocated per-type context before freeing the db */
707 for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
708 if (shadow_db->ctxt[i]) {
709 tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
710 tfp_free(shadow_db->ctxt[i]);
720 * Allocate the table resources for search and allocate
723 int tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms)
728 struct tfp_calloc_parms cparms;
729 struct tf_shadow_tbl_db *shadow_db = NULL;
731 TF_CHECK_PARMS1(parms);
733 /* Build the shadow DB per the request */
735 cparms.size = sizeof(struct tf_shadow_tbl_db);
736 cparms.alignment = 0;
737 rc = tfp_calloc(&cparms);
740 shadow_db = (void *)cparms.mem_va;
742 for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
743 /* If the element didn't request an allocation no need
744 * to create a pool nor verify if we got a reservation.
746 if (!parms->cfg->alloc_cnt[i] ||
747 !tf_shadow_tbl_is_searchable(i)) {
748 shadow_db->ctxt[i] = NULL;
753 cparms.size = sizeof(struct tf_shadow_tbl_ctxt);
754 cparms.alignment = 0;
755 rc = tfp_calloc(&cparms);
759 shadow_db->ctxt[i] = cparms.mem_va;
760 base = parms->cfg->base_addr[i];
761 rc = tf_shadow_tbl_ctxt_create(shadow_db->ctxt[i],
762 parms->cfg->alloc_cnt[i],
768 *parms->shadow_db = (void *)shadow_db;
771 "TF SHADOW TABLE - initialized\n");
775 for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
776 if (shadow_db->ctxt[i]) {
777 tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
778 tfp_free(shadow_db->ctxt[i]);