1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
9 #include "tf_shadow_tcam.h"
13 * The implementation includes 3 tables per tcam table type.
15 * - sized so that a minimum of 4 slots per shadow entry are available to
16 * minimize the likelihood of collisions.
18 * - sized to the number of entries requested and is directly indexed
19 * - the index is zero based and is the tcam index - the base address
20 * - the key and mask are stored in the key table.
21 * - The stored key is the AND of the key/mask in order to eliminate the need
22 * to compare both the key and mask.
23 * - shadow result table
24 * - the result table is stored separately since it only needs to be accessed
25 * when the key matches.
26 * - the result has a back pointer to the hash table via the hb handle. The
27 * hb handle is a 32 bit representation of the hash with a valid bit, bucket
28 * element index, and the hash index. It is necessary to store the hb handle
29 * with the result since subsequent removes only provide the tcam index.
31 * - Max entries is limited in the current implementation since bit 15 is the
32 * valid bit in the hash table.
33 * - A 16bit hash is calculated and masked based on the number of entries
34 * - 64b wide bucket is used and broken into 4x16bit elements.
35 * This decision is based on quicker bucket scanning to determine if any
36 * elements are in use.
37 * - bit 15 of each bucket element is the valid, this is done to prevent having
38 * to read the larger key/result data for determining VALID. It also aids
39 * in the more efficient scanning of the bucket for slot usage.
43 * The maximum number of shadow entries supported. The value also doubles as
44 * the maximum number of hash buckets. There are only 15 bits of data per
45 * bucket to point to the shadow tables.
47 #define TF_SHADOW_TCAM_ENTRIES_MAX (1 << 15)
49 /* The number of elements(BE) per hash bucket (HB) */
50 #define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
51 #define TF_SHADOW_TCAM_BE_VALID (1 << 15)
52 #define TF_SHADOW_TCAM_BE_IS_VALID(be) (((be) & TF_SHADOW_TCAM_BE_VALID) != 0)
55 * The hash bucket handle is 32b
56 * - bit 31, the Valid bit
57 * - bit 29-30, the element
58 * - bits 0-15, the hash idx (is masked based on the allocated size)
60 #define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
61 #define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
64 #define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
65 (TF_SHADOW_TCAM_HB_NUM_ELEM - 1))
67 #define TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
68 (ctxt)->hash_ctxt.hid_mask)
71 * The idx provided by the caller is within a region, so currently the base is
72 * either added or subtracted from the idx to ensure it can be used as a
76 /* Convert the tcam index to a shadow index */
77 #define TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
78 (ctxt)->shadow_ctxt.base_addr)
80 /* Convert the shadow index to a tcam index */
81 #define TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
82 (ctxt)->shadow_ctxt.base_addr)
84 /* Simple helper masks for clearing an element from the bucket */
85 #define TF_SHADOW_TCAM_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
86 #define TF_SHADOW_TCAM_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
87 #define TF_SHADOW_TCAM_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
88 #define TF_SHADOW_TCAM_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
91 * This should be coming from external, but for now it is assumed that no key
92 * is greater than 1K bits and no result is bigger than 128 bits. This makes
93 * allocation of the hash table easier without having to allocate on the fly.
95 #define TF_SHADOW_TCAM_MAX_KEY_SZ 128
96 #define TF_SHADOW_TCAM_MAX_RESULT_SZ 16
99 * Local only defines for the internal data.
103 * tf_shadow_tcam_shadow_key_entry is the key/mask entry of the key table.
104 * The key stored in the table is the masked version of the key. This is done
105 * to eliminate the need of comparing both the key and mask.
107 struct tf_shadow_tcam_shadow_key_entry {
	/* Masked key: stored as (key & mask) at bind time so lookups need
	 * only a single memcmp against the masked search key.
	 */
108 uint8_t key[TF_SHADOW_TCAM_MAX_KEY_SZ];
	/* Original mask as provided by the caller at bind time */
109 uint8_t mask[TF_SHADOW_TCAM_MAX_KEY_SZ];
113 * tf_shadow_tcam_shadow_result_entry is the result table entry.
114 * The result table writes are broken into two phases:
115 * - The search phase, which stores the hb_handle and key size and
116 * - The set phase, which writes the result, refcnt, and result size
118 struct tf_shadow_tcam_shadow_result_entry {
	/* Result data written in the set phase (tf_shadow_tcam_insert) */
119 uint8_t result[TF_SHADOW_TCAM_MAX_RESULT_SZ];
	/* Valid length, in bytes, of the result data above */
120 uint16_t result_size;
127 * tf_shadow_tcam_shadow_ctxt holds all information for accessing the key and
130 struct tf_shadow_tcam_shadow_ctxt {
	/* Shadow key/mask table, allocated with num_entries elements */
131 struct tf_shadow_tcam_shadow_key_entry *sh_key_tbl;
	/* Shadow result table, parallel to sh_key_tbl (same index) */
132 struct tf_shadow_tcam_shadow_result_entry *sh_res_tbl;
	/* Number of entries in both shadow tables */
134 uint16_t num_entries;
139 * tf_shadow_tcam_hash_ctxt holds all information related to accessing the hash
142 struct tf_shadow_tcam_hash_ctxt {
	/* Number of 64b buckets in the hash table (power of two,
	 * >= shadow num_entries)
	 */
145 uint16_t hash_entries;
149 * tf_shadow_tcam_ctxt holds the hash and shadow tables for the current shadow
150 * tcam db. This structure is per tcam table type as each tcam table has its
151 * own shadow and hash table.
153 struct tf_shadow_tcam_ctxt {
	/* Key/result shadow tables and region base address for one tcam type */
154 struct tf_shadow_tcam_shadow_ctxt shadow_ctxt;
	/* Hash buckets used to search the shadow tables by masked key */
155 struct tf_shadow_tcam_hash_ctxt hash_ctxt;
159 * tf_shadow_tcam_db is the allocated db structure returned as an opaque
160 * void * pointer to the caller during create db. It holds the pointers for
161 * each tcam associated with the db.
163 struct tf_shadow_tcam_db {
164 /* Each context holds the shadow and hash table information */
	/* NULL entry means that tcam type is not tracked by the shadow db */
165 struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
169 * Returns the number of entries in the contexts shadow table.
/* Return the number of entries in this context's shadow tables. */
171 static inline uint16_t
172 tf_shadow_tcam_sh_num_entries_get(struct tf_shadow_tcam_ctxt *ctxt)
174 return ctxt->shadow_ctxt.num_entries;
178 * Compare the given key with the key in the shadow table.
180 * Returns 0 if the keys match
183 tf_shadow_tcam_key_cmp(struct tf_shadow_tcam_ctxt *ctxt,
189 if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
190 sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !key || !mask)
193 return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
197 * Copies the shadow result to the result.
199 * Returns 0 on failure
/*
 * Copy the stored shadow result at sh_idx into the caller's buffer.
 * Returns 0 (NULL) on any validation failure, the memcpy destination
 * otherwise.
 */
202 tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
/* Validate the shadow index and destination before touching the table */
207 if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !result)
/* The caller's buffer size must match the stored result size exactly */
210 if (ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result_size != size)
213 return memcpy(result,
214 ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result,
219 * Using a software based CRC function for now, but will look into using hw
220 * assisted in the future.
/* Compute a CRC32 over 'len' bytes of 'key' via the software hash helper. */
223 tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
225 return tf_hash_calc_crc32(key, len);
229 * Free the memory associated with the context.
232 tf_shadow_tcam_ctxt_delete(struct tf_shadow_tcam_ctxt *ctxt)
237 tfp_free(ctxt->hash_ctxt.hashtbl);
238 tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
239 tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
243 * The TF Shadow TCAM context is per TCAM and holds all information relating to
244 * managing the shadow and search capability. This routine allocates data that
245 * needs to be deallocated by the tf_shadow_tcam_ctxt_delete prior when deleting
/*
 * Allocate the hash table and the shadow key/result tables for one tcam
 * type.  On any allocation failure, unwinds via tf_shadow_tcam_ctxt_delete.
 * NOTE(review): the rc checks after each tfp_calloc() are elided in this
 * excerpt — confirm each allocation jumps to the error path on failure.
 */
249 tf_shadow_tcam_ctxt_create(struct tf_shadow_tcam_ctxt *ctxt,
250 uint16_t num_entries,
253 struct tfp_calloc_parms cparms;
254 uint16_t hash_size = 1;
258 /* Hash table is a power of two that holds the number of entries */
259 if (num_entries > TF_SHADOW_TCAM_ENTRIES_MAX) {
260 TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
262 TF_SHADOW_TCAM_ENTRIES_MAX);
/* Round up to the next power of two so hash_mask is a simple bit mask */
266 while (hash_size < num_entries)
267 hash_size = hash_size << 1;
269 hash_mask = hash_size - 1;
271 /* Allocate the hash table */
272 cparms.nitems = hash_size;
273 cparms.size = sizeof(uint64_t);
274 cparms.alignment = 0;
275 rc = tfp_calloc(&cparms);
278 ctxt->hash_ctxt.hashtbl = cparms.mem_va;
279 ctxt->hash_ctxt.hid_mask = hash_mask;
280 ctxt->hash_ctxt.hash_entries = hash_size;
282 /* allocate the shadow tables */
283 /* allocate the shadow key table */
284 cparms.nitems = num_entries;
285 cparms.size = sizeof(struct tf_shadow_tcam_shadow_key_entry);
286 cparms.alignment = 0;
287 rc = tfp_calloc(&cparms);
290 ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
292 /* allocate the shadow result table */
293 cparms.nitems = num_entries;
294 cparms.size = sizeof(struct tf_shadow_tcam_shadow_result_entry);
295 cparms.alignment = 0;
296 rc = tfp_calloc(&cparms);
299 ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
301 ctxt->shadow_ctxt.num_entries = num_entries;
302 ctxt->shadow_ctxt.base_addr = base_addr;
/* Error path: free whatever was allocated before the failure */
306 tf_shadow_tcam_ctxt_delete(ctxt);
312 * Get a shadow TCAM context given the db and the TCAM type
/*
 * Look up the per-type shadow context in the db.
 * Returns NULL for an out-of-range type or an untracked (NULL) context.
 */
314 static struct tf_shadow_tcam_ctxt *
315 tf_shadow_tcam_ctxt_get(struct tf_shadow_tcam_db *shadow_db,
316 enum tf_tcam_tbl_type type)
318 if (type >= TF_TCAM_TBL_TYPE_MAX ||
320 !shadow_db->ctxt[type])
323 return shadow_db->ctxt[type];
327 * Sets the hash entry into the table given the TCAM context, hash bucket
328 * handle, and shadow index.
/*
 * Mark bucket element 'be' of hash bucket 'hid' as owning shadow index
 * sh_idx.  The 16b element stores the shadow index with bit 15 set as
 * the valid flag.
 */
331 tf_shadow_tcam_set_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
335 uint16_t hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
336 uint16_t be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
337 uint64_t entry = sh_idx | TF_SHADOW_TCAM_BE_VALID;
/* Guard against a handle whose hash index exceeds the table */
339 if (hid >= ctxt->hash_ctxt.hash_entries)
/* OR the 16b element into its slot within the 64b bucket */
342 ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
347 * Clears the hash entry given the TCAM context and hash bucket handle.
350 tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
356 if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hb_handle))
359 hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
360 be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
361 bucket = &ctxt->hash_ctxt.hashtbl[hid];
365 *bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket);
368 *bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket);
371 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
374 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
380 * Clears the shadow key and result entries given the TCAM context and
/*
 * Zero the shadow key and result entries for sh_idx.  Silently ignores
 * an out-of-range index.
 */
384 tf_shadow_tcam_clear_sh_entry(struct tf_shadow_tcam_ctxt *ctxt,
387 struct tf_shadow_tcam_shadow_key_entry *sk_entry;
388 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
390 if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt))
393 sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
394 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
397 * memset key/result to zero for now, possibly leave the data alone
398 * in the future and rely on the valid bit in the hash table.
400 memset(sk_entry, 0, sizeof(struct tf_shadow_tcam_shadow_key_entry));
401 memset(sr_entry, 0, sizeof(struct tf_shadow_tcam_shadow_result_entry));
405 * Binds the allocated tcam index with the hash and shadow tables.
406 * The entry will be incomplete until the set has happened with the result
/*
 * Bind an allocated tcam index to the hash and shadow tables: writes
 * the hash bucket element, the masked key/mask, and the search-phase
 * part of the result entry (key_size, hb_handle, refcnt=1).  The result
 * data itself is written later by tf_shadow_tcam_insert.
 */
410 tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
415 struct tf_shadow_tcam_ctxt *ctxt;
416 struct tf_shadow_tcam_db *shadow_db;
417 struct tf_shadow_tcam_shadow_key_entry *sk_entry;
418 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
419 uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
/* The hb_handle must come from a prior successful search (MISS) */
421 if (!parms || !TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(parms->hb_handle) ||
422 !parms->key || !parms->mask) {
423 TFP_DRV_LOG(ERR, "Invalid parms\n");
427 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
428 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, parms->type);
430 TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
431 tf_tcam_tbl_2_str(parms->type));
435 memset(tkey, 0, sizeof(tkey));
/* Convert the caller's tcam index to a zero-based shadow index */
436 idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, parms->idx);
437 klen = parms->key_size;
438 if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) ||
439 klen > TF_SHADOW_TCAM_MAX_KEY_SZ) {
440 TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
441 tf_dir_2_str(parms->dir),
442 tf_tcam_tbl_2_str(parms->type),
444 TF_SHADOW_TCAM_MAX_KEY_SZ, idx);
449 rc = tf_shadow_tcam_set_hash_entry(ctxt, parms->hb_handle, idx);
453 sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
454 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
457 * Write the masked key to the table for more efficient comparisons
/* Stored key = key & mask so lookups compare only the masked key */
460 for (i = 0; i < klen; i++)
461 tkey[i] = parms->key[i] & parms->mask[i];
463 memcpy(sk_entry->key, tkey, klen);
464 memcpy(sk_entry->mask, parms->mask, klen);
466 /* Write the result table */
467 sr_entry->key_size = parms->key_size;
/* Back pointer to the hash bucket element; needed by remove() */
468 sr_entry->hb_handle = parms->hb_handle;
469 sr_entry->refcnt = 1;
475 * Deletes hash/shadow information if no more references.
477 * Returns 0 - The caller should delete the tcam entry in hardware.
478 * Returns non-zero - The number of references to the entry
/*
 * Drop a reference on the shadow entry for fparms->idx.  When the
 * refcount falls to zero the hash bucket element and shadow entries are
 * cleared.  fparms->ref_cnt reports the remaining references: 0 means
 * the caller should free the hardware tcam entry.
 */
481 tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms)
485 struct tf_shadow_tcam_ctxt *ctxt;
486 struct tf_shadow_tcam_db *shadow_db;
487 struct tf_tcam_free_parms *fparms;
488 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
490 if (!parms || !parms->fparms) {
491 TFP_DRV_LOG(ERR, "Invalid parms\n");
495 fparms = parms->fparms;
498 * Initialize the reference count to zero. It will only be changed if
503 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
504 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, fparms->type);
/* Untracked table type: nothing to remove from the shadow db */
506 TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
507 tf_tcam_tbl_2_str(fparms->type));
511 idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, fparms->idx);
512 if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
513 TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
514 tf_tcam_tbl_2_str(fparms->type),
516 tf_shadow_tcam_sh_num_entries_get(ctxt));
520 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
/* Last reference: clear the hash element and zero the shadow entry */
521 if (sr_entry->refcnt <= 1) {
522 hb_handle = sr_entry->hb_handle;
523 tf_shadow_tcam_clear_hash_entry(ctxt, hb_handle);
524 tf_shadow_tcam_clear_sh_entry(ctxt, idx);
/* After a clear this reads back 0 (entry was memset) */
527 fparms->ref_cnt = sr_entry->refcnt;
/*
 * Search the shadow db for a masked key.
 * Outcomes (sparms->search_status):
 *   HIT    - key found; refcnt bumped, idx and (optionally) result copied
 *   MISS   - not found but a bucket element is free; hb_handle returned
 *            for a subsequent bind
 *   REJECT - not found and the target bucket is full, or bad parms
 */
534 tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms)
540 struct tf_shadow_tcam_ctxt *ctxt;
541 struct tf_shadow_tcam_db *shadow_db;
542 uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
543 struct tf_tcam_alloc_search_parms *sparms;
544 uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
545 uint32_t be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
547 if (!parms || !parms->sparms) {
548 TFP_DRV_LOG(ERR, "tcam search with invalid parms\n");
552 memset(tkey, 0, sizeof(tkey));
553 sparms = parms->sparms;
555 /* Initialize return values to invalid */
557 sparms->search_status = REJECT;
558 parms->hb_handle = 0;
560 /* see if caller wanted the result */
561 rcopy = sparms->result && sparms->result_size;
563 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
564 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
566 TFP_DRV_LOG(ERR, "%s Unable to get tcam mgr context\n",
567 tf_tcam_tbl_2_str(sparms->type));
571 hid_mask = ctxt->hash_ctxt.hid_mask;
573 len = sparms->key_size;
575 if (len > TF_SHADOW_TCAM_MAX_KEY_SZ ||
576 !sparms->key || !sparms->mask || !len) {
577 TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p : %p\n",
578 tf_dir_2_str(sparms->dir),
579 tf_tcam_tbl_2_str(sparms->type),
586 /* Combine the key and mask */
587 for (i = 0; i < len; i++)
588 tkey[i] = sparms->key[i] & sparms->mask[i];
591 * Calculate the crc32
592 * Fold it to create a 16b value
593 * Reduce it to fit the table
595 hid32 = tf_shadow_tcam_crc32_calc(tkey, len);
/* Fold 32b CRC into 16b by XORing the halves, then mask to table size */
596 hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
597 hb_idx = hid16 & hid_mask;
599 bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
602 /* empty bucket means a miss and available entry */
603 sparms->search_status = MISS;
604 parms->hb_handle = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, 0);
609 /* Set the avail to max so we can detect when there is an avail entry */
610 be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
/* Scan the 4 bucket elements, remembering the first free slot */
611 for (i = 0; i < TF_SHADOW_TCAM_HB_NUM_ELEM; i++) {
612 shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
613 be_valid = TF_SHADOW_TCAM_BE_IS_VALID(shtbl_idx);
615 /* The element is avail, keep going */
619 /* There is a valid entry, compare it */
/* Strip the valid bit to recover the shadow table index */
620 shtbl_key = shtbl_idx & ~TF_SHADOW_TCAM_BE_VALID;
621 if (!tf_shadow_tcam_key_cmp(ctxt,
627 * It matches, increment the ref count if the caller
628 * requested allocation and return the info
631 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
632 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
635 sparms->search_status = HIT;
637 TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, i);
638 sparms->idx = TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt,
641 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
643 /* copy the result, if caller wanted it. */
645 !tf_shadow_tcam_res_cpy(ctxt,
648 sparms->result_size)) {
650 * Should never get here, possible memory
651 * corruption or something unexpected.
653 TFP_DRV_LOG(ERR, "Error copying result\n");
661 /* No hits, return avail entry if exists */
662 if (be_avail < TF_SHADOW_TCAM_HB_NUM_ELEM) {
664 TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be_avail);
665 sparms->search_status = MISS;
/* Bucket full and no match: caller must allocate without shadow */
669 sparms->search_status = REJECT;
/*
 * Set phase: write the result data into the shadow result entry for a
 * previously-bound tcam index.  The key/hash were written by bind_index;
 * an entry whose hb_handle is invalid was never bound and is ignored.
 */
676 tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
679 struct tf_shadow_tcam_ctxt *ctxt;
680 struct tf_tcam_set_parms *sparms;
681 struct tf_shadow_tcam_db *shadow_db;
682 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
684 if (!parms || !parms->sparms) {
685 TFP_DRV_LOG(ERR, "Null parms\n");
689 sparms = parms->sparms;
690 if (!sparms->result || !sparms->result_size) {
691 TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
692 tf_dir_2_str(sparms->dir),
693 tf_tcam_tbl_2_str(sparms->type));
697 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
698 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
700 /* We aren't tracking this table, so return success */
701 TFP_DRV_LOG(DEBUG, "%s Unable to get tcam mgr context\n",
702 tf_tcam_tbl_2_str(sparms->type));
706 idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, sparms->idx);
707 if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
708 TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
709 tf_dir_2_str(sparms->dir),
710 tf_tcam_tbl_2_str(sparms->type),
715 /* Write the result table, the key/hash has been written already */
716 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
719 * If the handle is not valid, the bind was never called. We aren't
720 * tracking this entry.
722 if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
/* Bound the copy to the fixed-size result storage */
725 if (sparms->result_size > TF_SHADOW_TCAM_MAX_RESULT_SZ) {
726 TFP_DRV_LOG(ERR, "%s:%s Result length %d > %d\n",
727 tf_dir_2_str(sparms->dir),
728 tf_tcam_tbl_2_str(sparms->type),
730 TF_SHADOW_TCAM_MAX_RESULT_SZ);
734 memcpy(sr_entry->result, sparms->result, sparms->result_size);
735 sr_entry->result_size = sparms->result_size;
/*
 * Free the shadow db: delete and free each per-type context, then the
 * db itself.  Safe to call with a NULL shadow_db (logged, no-op).
 */
741 tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms)
743 struct tf_shadow_tcam_db *shadow_db;
746 TF_CHECK_PARMS1(parms);
748 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
750 TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
754 for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
755 if (shadow_db->ctxt[i]) {
/* Delete frees the tables; tfp_free releases the context itself */
756 tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
757 tfp_free(shadow_db->ctxt[i]);
767 * Allocate the TCAM resources for search and allocate
770 int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms)
775 struct tfp_calloc_parms cparms;
776 struct tf_shadow_tcam_db *shadow_db = NULL;
778 TF_CHECK_PARMS1(parms);
780 /* Build the shadow DB per the request */
782 cparms.size = sizeof(struct tf_shadow_tcam_db);
783 cparms.alignment = 0;
784 rc = tfp_calloc(&cparms);
787 shadow_db = (void *)cparms.mem_va;
789 for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
790 /* If the element didn't request an allocation no need
791 * to create a pool nor verify if we got a reservation.
793 if (!parms->cfg->alloc_cnt[i]) {
794 shadow_db->ctxt[i] = NULL;
799 cparms.size = sizeof(struct tf_shadow_tcam_ctxt);
800 cparms.alignment = 0;
801 rc = tfp_calloc(&cparms);
805 shadow_db->ctxt[i] = cparms.mem_va;
806 base = parms->cfg->base_addr[i];
807 rc = tf_shadow_tcam_ctxt_create(shadow_db->ctxt[i],
808 parms->cfg->alloc_cnt[i],
814 *parms->shadow_db = (void *)shadow_db;
817 "TF SHADOW TCAM - initialized\n");
821 for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
822 if (shadow_db->ctxt[i]) {
823 tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
824 tfp_free(shadow_db->ctxt[i]);