1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
10 #include "tf_shadow_tcam.h"
14 * The implementation includes 3 tables per tcam table type.
16 * - sized so that a minimum of 4 slots per shadow entry are available to
17 * minimize the likelihood of collisions.
19 * - sized to the number of entries requested and is directly indexed
20 * - the index is zero based and is the tcam index - the base address
21 * - the key and mask are stored in the key table.
22 * - The stored key is the AND of the key/mask in order to eliminate the need
23 * to compare both the key and mask.
24 * - shadow result table
25 * - the result table is stored separately since it only needs to be accessed
26 * when the key matches.
27 * - the result has a back pointer to the hash table via the hb handle. The
 * hb handle is a 32 bit representation of the hash with a valid bit, bucket
29 * element index, and the hash index. It is necessary to store the hb handle
30 * with the result since subsequent removes only provide the tcam index.
32 * - Max entries is limited in the current implementation since bit 15 is the
33 * valid bit in the hash table.
34 * - A 16bit hash is calculated and masked based on the number of entries
35 * - 64b wide bucket is used and broken into 4x16bit elements.
36 * This decision is based on quicker bucket scanning to determine if any
37 * elements are in use.
38 * - bit 15 of each bucket element is the valid, this is done to prevent having
39 * to read the larger key/result data for determining VALID. It also aids
40 * in the more efficient scanning of the bucket for slot usage.
44 * The maximum number of shadow entries supported. The value also doubles as
45 * the maximum number of hash buckets. There are only 15 bits of data per
46 * bucket to point to the shadow tables.
/*
 * Maximum number of shadow entries supported; doubles as the maximum
 * number of hash buckets since each 16b bucket element has only 15 bits
 * of index payload (bit 15 is the valid flag).
 */
#define TF_SHADOW_TCAM_ENTRIES_MAX (1 << 15)

/* The number of elements(BE) per hash bucket (HB) */
#define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
/* Bit 15 of a bucket element marks the element as in-use */
#define TF_SHADOW_TCAM_BE_VALID (1 << 15)
#define TF_SHADOW_TCAM_BE_IS_VALID(be) (((be) & TF_SHADOW_TCAM_BE_VALID) != 0)

/*
 * The hash bucket handle is 32b
 * - bit 31, the Valid bit
 * - bit 29-30, the element
 * - bits 0-15, the hash idx (is masked based on the allocated size)
 *
 * NOTE(review): the remainder of HB_HANDLE_CREATE's expansion (the idx/be
 * fields OR'd in) is on a continuation line not visible in this extract --
 * confirm against the original source before relying on it.
 */
#define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
#define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
#define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
	(TF_SHADOW_TCAM_HB_NUM_ELEM - 1))
#define TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
	(ctxt)->hash_ctxt.hid_mask)
/*
 * The idx provided by the caller is within a region, so currently the base is
 * either added or subtracted from the idx to ensure it can be used as a
 * zero-based offset into the shadow tables (presumably -- the tail of this
 * comment is not visible in this extract).
 */

/* Convert the tcam index to a shadow index */
#define TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
	(ctxt)->shadow_ctxt.base_addr)

/* Convert the shadow index to a tcam index */
#define TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
	(ctxt)->shadow_ctxt.base_addr)

/* Simple helper masks for clearing an element from the bucket.
 * Element N occupies bits [16N, 16N+15] of the 64b bucket; the mask keeps
 * every other element and zeroes element N (including its valid bit).
 */
#define TF_SHADOW_TCAM_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
#define TF_SHADOW_TCAM_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
#define TF_SHADOW_TCAM_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
#define TF_SHADOW_TCAM_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)

/*
 * This should be coming from external, but for now it is assumed that no key
 * is greater than 1K bits and no result is bigger than 128 bits. This makes
 * allocation of the hash table easier without having to allocate on the fly.
 * (128 bytes == 1024 key bits; 16 bytes == 128 result bits.)
 */
#define TF_SHADOW_TCAM_MAX_KEY_SZ 128
#define TF_SHADOW_TCAM_MAX_RESULT_SZ 16
/*
 * Local only defines for the internal data.
 */

/*
 * tf_shadow_tcam_shadow_key_entry is the key/mask entry of the key table.
 * The key stored in the table is the masked version of the key. This is done
 * to eliminate the need of comparing both the key and mask.
 */
struct tf_shadow_tcam_shadow_key_entry {
	uint8_t key[TF_SHADOW_TCAM_MAX_KEY_SZ];
	uint8_t mask[TF_SHADOW_TCAM_MAX_KEY_SZ];

/*
 * tf_shadow_tcam_shadow_result_entry is the result table entry.
 * The result table writes are broken into two phases:
 * - The search phase, which stores the hb_handle and key size and
 * - The set phase, which writes the result, refcnt, and result size
 *
 * NOTE(review): additional fields (key_size, hb_handle, refcnt per the
 * comment above and later code) are on lines missing from this extract.
 */
struct tf_shadow_tcam_shadow_result_entry {
	uint8_t result[TF_SHADOW_TCAM_MAX_RESULT_SZ];
	uint16_t result_size;
/*
 * tf_shadow_tcam_shadow_ctxt holds all information for accessing the key and
 * result tables (presumably -- the tail of this comment is not visible).
 */
struct tf_shadow_tcam_shadow_ctxt {
	struct tf_shadow_tcam_shadow_key_entry *sh_key_tbl;
	struct tf_shadow_tcam_shadow_result_entry *sh_res_tbl;
	uint16_t num_entries;

/*
 * tf_shadow_tcam_hash_ctxt holds all information related to accessing the hash
 * table (presumably -- the tail of this comment is not visible).
 */
struct tf_shadow_tcam_hash_ctxt {
	uint16_t hash_entries;

/*
 * tf_shadow_tcam_ctxt holds the hash and shadow tables for the current shadow
 * tcam db. This structure is per tcam table type as each tcam table has its
 * own shadow and hash table.
 */
struct tf_shadow_tcam_ctxt {
	struct tf_shadow_tcam_shadow_ctxt shadow_ctxt;
	struct tf_shadow_tcam_hash_ctxt hash_ctxt;

/*
 * tf_shadow_tcam_db is the allocated db structure returned as an opaque
 * void * pointer to the caller during create db. It holds the pointers for
 * each tcam associated with the db.
 */
struct tf_shadow_tcam_db {
	/* Each context holds the shadow and hash table information */
	struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
/*
 * Returns the number of entries in the context's shadow table.
 */
static inline uint16_t
tf_shadow_tcam_sh_num_entries_get(struct tf_shadow_tcam_ctxt *ctxt)
	return ctxt->shadow_ctxt.num_entries;
/*
 * Compare the given key with the key in the shadow table.
 *
 * Returns 0 if the keys match.  The caller is expected to pass the
 * pre-masked key (key & mask), since only the masked key is stored.
 */
tf_shadow_tcam_key_cmp(struct tf_shadow_tcam_ctxt *ctxt,
	/* Reject on size mismatch, out-of-range index, or NULL key/mask */
	if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
	    sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !key || !mask)

	/* Only the stored masked key needs comparing, not the mask */
	return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
/*
 * Copies the shadow result to the result.
 *
 * Returns 0 on failure
 */
tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
	/* Guard against out-of-range index or NULL destination */
	if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !result)

	/* The caller's buffer size must match the stored result size exactly */
	if (ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result_size != size)

	return memcpy(result,
		      ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result,
/*
 * Using a software based CRC function for now, but will look into using hw
 * assisted in the future.
 */
tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
	return tf_hash_calc_crc32(key, len);
/*
 * Free the memory associated with the context.
 * Releases the hash table and both shadow tables allocated by
 * tf_shadow_tcam_ctxt_create.
 */
tf_shadow_tcam_ctxt_delete(struct tf_shadow_tcam_ctxt *ctxt)
	tfp_free(ctxt->hash_ctxt.hashtbl);
	tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
	tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
/*
 * The TF Shadow TCAM context is per TCAM and holds all information relating to
 * managing the shadow and search capability. This routine allocates data that
 * needs to be deallocated by tf_shadow_tcam_ctxt_delete prior to deleting
 * the context.
 */
tf_shadow_tcam_ctxt_create(struct tf_shadow_tcam_ctxt *ctxt,
			   uint16_t num_entries,
	struct tfp_calloc_parms cparms;
	uint16_t hash_size = 1;

	/* Hash table is a power of two that holds the number of entries */
	if (num_entries > TF_SHADOW_TCAM_ENTRIES_MAX) {
		TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
			    TF_SHADOW_TCAM_ENTRIES_MAX);

	/* Round hash_size up to the next power of two >= num_entries */
	while (hash_size < num_entries)
		hash_size = hash_size << 1;

	/* Power-of-two size makes the modulo a simple AND with this mask */
	hash_mask = hash_size - 1;

	/* Allocate the hash table (one 64b bucket per hash index) */
	cparms.nitems = hash_size;
	cparms.size = sizeof(uint64_t);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	ctxt->hash_ctxt.hashtbl = cparms.mem_va;
	ctxt->hash_ctxt.hid_mask = hash_mask;
	ctxt->hash_ctxt.hash_entries = hash_size;

	/* allocate the shadow tables */
	/* allocate the shadow key table */
	cparms.nitems = num_entries;
	cparms.size = sizeof(struct tf_shadow_tcam_shadow_key_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;

	/* allocate the shadow result table */
	cparms.nitems = num_entries;
	cparms.size = sizeof(struct tf_shadow_tcam_shadow_result_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;

	ctxt->shadow_ctxt.num_entries = num_entries;
	ctxt->shadow_ctxt.base_addr = base_addr;

	/* Error path: undo any allocations made above */
	tf_shadow_tcam_ctxt_delete(ctxt);
/*
 * Get a shadow TCAM context given the db and the TCAM type.
 * Returns NULL (presumably -- the return on the missing line is not
 * visible here) when the type is out of range or untracked.
 */
static struct tf_shadow_tcam_ctxt *
tf_shadow_tcam_ctxt_get(struct tf_shadow_tcam_db *shadow_db,
			enum tf_tcam_tbl_type type)
	/* A NULL ctxt slot means this table type is not being tracked */
	if (type >= TF_TCAM_TBL_TYPE_MAX ||
	    !shadow_db->ctxt[type])

	return shadow_db->ctxt[type];
/*
 * Sets the hash entry into the table given the TCAM context, hash bucket
 * handle, and shadow index.
 */
tf_shadow_tcam_set_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
	uint16_t hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
	uint16_t be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
	/* Store the shadow index with bit 15 set to mark the element valid */
	uint64_t entry = sh_idx | TF_SHADOW_TCAM_BE_VALID;

	if (hid >= ctxt->hash_ctxt.hash_entries)

	/* OR the 16b entry into its element slot within the 64b bucket */
	ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
348 * Clears the hash entry given the TCAM context and hash bucket handle.
351 tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
357 if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hb_handle))
360 hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
361 be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
362 bucket = &ctxt->hash_ctxt.hashtbl[hid];
366 *bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket);
369 *bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket);
372 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
375 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
379 * Since the BE_GET masks non-inclusive bits, this will not
/*
 * Clears the shadow key and result entries given the TCAM context and
 * shadow index.
 */
tf_shadow_tcam_clear_sh_entry(struct tf_shadow_tcam_ctxt *ctxt,
	struct tf_shadow_tcam_shadow_key_entry *sk_entry;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;

	/* Out-of-range shadow index, nothing to clear */
	if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt))

	sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];

	/*
	 * memset key/result to zero for now, possibly leave the data alone
	 * in the future and rely on the valid bit in the hash table.
	 */
	memset(sk_entry, 0, sizeof(struct tf_shadow_tcam_shadow_key_entry));
	memset(sr_entry, 0, sizeof(struct tf_shadow_tcam_shadow_result_entry));
/*
 * Binds the allocated tcam index with the hash and shadow tables.
 * The entry will be incomplete until the set has happened with the result
 * data (see tf_shadow_tcam_insert).
 */
tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_shadow_tcam_db *shadow_db;
	struct tf_shadow_tcam_shadow_key_entry *sk_entry;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;
	uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];

	/* The hb_handle must come from a prior successful search MISS */
	if (!parms || !TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(parms->hb_handle) ||
	    !parms->key || !parms->mask) {
		TFP_DRV_LOG(ERR, "Invalid parms\n");

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, parms->type);
		TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
			    tf_tcam_tbl_2_str(parms->type));

	memset(tkey, 0, sizeof(tkey));
	/* Convert the caller's tcam index to a zero-based shadow index */
	idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, parms->idx);
	klen = parms->key_size;
	if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) ||
	    klen > TF_SHADOW_TCAM_MAX_KEY_SZ) {
		TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
			    tf_dir_2_str(parms->dir),
			    tf_tcam_tbl_2_str(parms->type),
			    TF_SHADOW_TCAM_MAX_KEY_SZ, idx);

	rc = tf_shadow_tcam_set_hash_entry(ctxt, parms->hb_handle, idx);

	sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];

	/*
	 * Write the masked key to the table for more efficient comparisons
	 * later (only key & mask is ever compared).
	 */
	for (i = 0; i < klen; i++)
		tkey[i] = parms->key[i] & parms->mask[i];

	memcpy(sk_entry->key, tkey, klen);
	memcpy(sk_entry->mask, parms->mask, klen);

	/* Write the result table */
	sr_entry->key_size = parms->key_size;
	sr_entry->hb_handle = parms->hb_handle;
	sr_entry->refcnt = 1;
/*
 * Deletes hash/shadow information if no more references.
 *
 * Returns 0 - The caller should delete the tcam entry in hardware.
 * Returns non-zero - The number of references to the entry
 */
tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms)
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_shadow_tcam_db *shadow_db;
	struct tf_tcam_free_parms *fparms;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;

	if (!parms || !parms->fparms) {
		TFP_DRV_LOG(ERR, "Invalid parms\n");

	fparms = parms->fparms;

	/*
	 * Initialize the reference count to zero.  It will only be changed if
	 * the entry remains referenced (presumably -- the tail of this
	 * comment is not visible in this extract).
	 */
	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, fparms->type);
		TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
			    tf_tcam_tbl_2_str(fparms->type));

	idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, fparms->idx);
	if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
		TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
			    tf_tcam_tbl_2_str(fparms->type),
			    tf_shadow_tcam_sh_num_entries_get(ctxt));

	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
	/* Last reference: clear both the hash element and the shadow entry */
	if (sr_entry->refcnt <= 1) {
		hb_handle = sr_entry->hb_handle;
		tf_shadow_tcam_clear_hash_entry(ctxt, hb_handle);
		tf_shadow_tcam_clear_sh_entry(ctxt, idx);

	/* Report the remaining reference count back to the caller */
	fparms->ref_cnt = sr_entry->refcnt;
/*
 * Searches the shadow table for the masked key.  Sets search_status to
 * HIT (key found; idx/result returned), MISS (not found; an available
 * hash bucket handle is returned for a later bind), or REJECT (bucket
 * full or invalid input).
 */
tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms)
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_shadow_tcam_db *shadow_db;
	uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
	struct tf_tcam_alloc_search_parms *sparms;
	uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
	uint32_t be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;

	if (!parms || !parms->sparms) {
		TFP_DRV_LOG(ERR, "tcam search with invalid parms\n");

	memset(tkey, 0, sizeof(tkey));
	sparms = parms->sparms;

	/* Initialize return values to invalid */
	sparms->search_status = REJECT;
	parms->hb_handle = 0;

	/* see if caller wanted the result */
	rcopy = sparms->result && sparms->result_size;

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
		TFP_DRV_LOG(ERR, "%s Unable to get tcam mgr context\n",
			    tf_tcam_tbl_2_str(sparms->type));

	hid_mask = ctxt->hash_ctxt.hid_mask;

	len = sparms->key_size;

	if (len > TF_SHADOW_TCAM_MAX_KEY_SZ ||
	    !sparms->key || !sparms->mask || !len) {
		TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p : %p\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type),

	/* Combine the key and mask (only the masked key is stored/compared) */
	for (i = 0; i < len; i++)
		tkey[i] = sparms->key[i] & sparms->mask[i];

	/*
	 * Calculate the crc32
	 * Fold it to create a 16b value
	 * Reduce it to fit the table
	 */
	hid32 = tf_shadow_tcam_crc32_calc(tkey, len);
	hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
	hb_idx = hid16 & hid_mask;

	bucket = ctxt->hash_ctxt.hashtbl[hb_idx];

		/* empty bucket means a miss and available entry */
		sparms->search_status = MISS;
		parms->hb_handle = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, 0);

	/* Set the avail to max so we can detect when there is an avail entry */
	be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
	/* Scan each 16b element of the 64b bucket */
	for (i = 0; i < TF_SHADOW_TCAM_HB_NUM_ELEM; i++) {
		shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
		be_valid = TF_SHADOW_TCAM_BE_IS_VALID(shtbl_idx);

		/* The element is avail, keep going */

		/* There is a valid entry, compare it */
		shtbl_key = shtbl_idx & ~TF_SHADOW_TCAM_BE_VALID;
		if (!tf_shadow_tcam_key_cmp(ctxt,
			/*
			 * It matches, increment the ref count if the caller
			 * requested allocation and return the info
			 */
				ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt++;

			sparms->search_status = HIT;
				TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, i);
			sparms->idx = TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt,
				ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;

			/* copy the result, if caller wanted it. */
				!tf_shadow_tcam_res_cpy(ctxt,
						sparms->result_size)) {
				/*
				 * Should never get here, possible memory
				 * corruption or something unexpected.
				 */
				TFP_DRV_LOG(ERR, "Error copying result\n");

	/* No hits, return avail entry if exists */
	if (be_avail < TF_SHADOW_TCAM_HB_NUM_ELEM) {
			TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be_avail);
		sparms->search_status = MISS;

	/* Bucket full and no match: caller must not track this entry */
	sparms->search_status = REJECT;
/*
 * Writes the result data for a previously bound entry (second phase after
 * tf_shadow_tcam_bind_index).  Silently succeeds for untracked tables or
 * entries that were never bound.
 */
tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
	struct tf_shadow_tcam_ctxt *ctxt;
	struct tf_tcam_set_parms *sparms;
	struct tf_shadow_tcam_db *shadow_db;
	struct tf_shadow_tcam_shadow_result_entry *sr_entry;

	if (!parms || !parms->sparms) {
		TFP_DRV_LOG(ERR, "Null parms\n");

	sparms = parms->sparms;
	if (!sparms->result || !sparms->result_size) {
		TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type));

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
	ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
		/* We aren't tracking this table, so return success */
		TFP_DRV_LOG(DEBUG, "%s Unable to get tcam mgr context\n",
			    tf_tcam_tbl_2_str(sparms->type));

	idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, sparms->idx);
	if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
		TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type),

	/* Write the result table, the key/hash has been written already */
	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];

	/*
	 * If the handle is not valid, the bind was never called.  We aren't
	 * tracking this entry.
	 */
	if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(sr_entry->hb_handle))

	if (sparms->result_size > TF_SHADOW_TCAM_MAX_RESULT_SZ) {
		TFP_DRV_LOG(ERR, "%s:%s Result length %d > %d\n",
			    tf_dir_2_str(sparms->dir),
			    tf_tcam_tbl_2_str(sparms->type),
			    TF_SHADOW_TCAM_MAX_RESULT_SZ);

	memcpy(sr_entry->result, sparms->result, sparms->result_size);
	sr_entry->result_size = sparms->result_size;
/*
 * Frees the shadow db: deletes and releases every per-type context it
 * owns, then (on a line not visible here) presumably frees the db itself.
 */
tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms)
	struct tf_shadow_tcam_db *shadow_db;

	TF_CHECK_PARMS1(parms);

	shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
		TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");

	for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
		if (shadow_db->ctxt[i]) {
			/* ctxt_delete frees the tables; tfp_free the ctxt */
			tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
			tfp_free(shadow_db->ctxt[i]);
/*
 * Allocate the TCAM resources for search and allocate
 * the shadow db with one context per requested tcam table type.
 * (This function continues past the end of this extract.)
 */
int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms)
	struct tfp_calloc_parms cparms;
	struct tf_shadow_tcam_db *shadow_db = NULL;

	TF_CHECK_PARMS1(parms);

	/* Build the shadow DB per the request */
	cparms.size = sizeof(struct tf_shadow_tcam_db);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	shadow_db = (void *)cparms.mem_va;

	for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
		/* If the element didn't request an allocation no need
		 * to create a pool nor verify if we got a reservation.
		 */
		if (!parms->cfg->alloc_cnt[i]) {
			shadow_db->ctxt[i] = NULL;

		cparms.size = sizeof(struct tf_shadow_tcam_ctxt);
		cparms.alignment = 0;
		rc = tfp_calloc(&cparms);
		shadow_db->ctxt[i] = cparms.mem_va;
		base = parms->cfg->base_addr[i];
		rc = tf_shadow_tcam_ctxt_create(shadow_db->ctxt[i],
						parms->cfg->alloc_cnt[i],

	/* Hand the opaque db pointer back to the caller */
	*parms->shadow_db = (void *)shadow_db;
		"TF SHADOW TCAM - initialized\n");

	/* Error path: tear down any contexts created so far */
	for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
		if (shadow_db->ctxt[i]) {
			tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
			tfp_free(shadow_db->ctxt[i]);