1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
9 #include "tf_shadow_tcam.h"
13 * The implementation includes 3 tables per tcam table type.
15 * - sized so that a minimum of 4 slots per shadow entry are available to
16 * minimize the likelihood of collisions.
18 * - sized to the number of entries requested and is directly indexed
19 * - the index is zero based and is the tcam index - the base address
20 * - the key and mask are stored in the key table.
21 * - The stored key is the AND of the key/mask in order to eliminate the need
22 * to compare both the key and mask.
23 * - shadow result table
24 * - the result table is stored separately since it only needs to be accessed
25 * when the key matches.
26 * - the result has a back pointer to the hash table via the hb handle. The
27 * hb handle is a 32 bit representation of the hash with a valid bit, bucket
28 * element index, and the hash index. It is necessary to store the hb handle
29 * with the result since subsequent removes only provide the tcam index.
31 * - Max entries is limited in the current implementation since bit 15 is the
32 * valid bit in the hash table.
33 * - A 16bit hash is calculated and masked based on the number of entries
34 * - 64b wide bucket is used and broken into 4x16bit elements.
35 * This decision is based on quicker bucket scanning to determine if any
36 * elements are in use.
37 * - bit 15 of each bucket element is the valid, this is done to prevent having
38 * to read the larger key/result data for determining VALID. It also aids
39 * in the more efficient scanning of the bucket for slot usage.
43 * The maximum number of shadow entries supported. The value also doubles as
44 * the maximum number of hash buckets. There are only 15 bits of data per
45 * bucket to point to the shadow tables.
47 #define TF_SHADOW_TCAM_ENTRIES_MAX (1 << 15)
49 /* The number of elements(BE) per hash bucket (HB) */
50 #define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
/* Bit 15 of a 16b bucket element marks the element as in use */
51 #define TF_SHADOW_TCAM_BE_VALID (1 << 15)
52 #define TF_SHADOW_TCAM_BE_IS_VALID(be) (((be) & TF_SHADOW_TCAM_BE_VALID) != 0)
55 * The hash bucket handle is 32b
56 * - bit 31, the Valid bit
57 * - bit 29-30, the element
58 * - bits 0-15, the hash idx (is masked based on the allocated size)
/*
 * NOTE(review): (1 << 31) left-shifts into the sign bit of int, which is
 * implementation-defined/UB in C; (1u << 31) would be strictly portable.
 * Left as-is to match the existing convention in this file.
 */
60 #define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
61 #define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
/* Extract the 2-bit bucket element (0-3) from a hash bucket handle */
64 #define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
65 (TF_SHADOW_TCAM_HB_NUM_ELEM - 1))
/* Extract the hash index from a handle, reduced by the table's mask */
67 #define TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
68 (ctxt)->hash_ctxt.hid_mask)
71 * The idx provided by the caller is within a region, so currently the base is
72 * either added or subtracted from the idx to ensure it can be used as a
76 /* Convert the tcam index to a shadow index */
77 #define TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
78 (ctxt)->shadow_ctxt.base_addr)
80 /* Convert the shadow index to a tcam index */
81 #define TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
82 (ctxt)->shadow_ctxt.base_addr)
84 /* Simple helper masks for clearing an element from the bucket */
85 #define TF_SHADOW_TCAM_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
86 #define TF_SHADOW_TCAM_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
87 #define TF_SHADOW_TCAM_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
88 #define TF_SHADOW_TCAM_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
91 * This should be coming from external, but for now it is assumed that no key
92 * is greater than 1K bits and no result is bigger than 128 bits. This makes
93 * allocation of the hash table easier without having to allocate on the fly.
/* 128 bytes == 1K bits; 16 bytes == 128 bits, consistent with the note above */
95 #define TF_SHADOW_TCAM_MAX_KEY_SZ 128
96 #define TF_SHADOW_TCAM_MAX_RESULT_SZ 16
99 * Local only defines for the internal data.
103 * tf_shadow_tcam_shadow_key_entry is the key/mask entry of the key table.
104 * The key stored in the table is the masked version of the key. This is done
105 * to eliminate the need of comparing both the key and mask.
107 struct tf_shadow_tcam_shadow_key_entry {
/* Masked key: each byte is already ANDed with the corresponding mask byte */
108 uint8_t key[TF_SHADOW_TCAM_MAX_KEY_SZ];
/* Mask as supplied at bind time */
109 uint8_t mask[TF_SHADOW_TCAM_MAX_KEY_SZ];
113 * tf_shadow_tcam_shadow_result_entry is the result table entry.
114 * The result table writes are broken into two phases:
115 * - The search phase, which stores the hb_handle and key size and
116 * - The set phase, which writes the result, refcnt, and result size
118 struct tf_shadow_tcam_shadow_result_entry {
/* Result data, written by tf_shadow_tcam_insert */
119 uint8_t result[TF_SHADOW_TCAM_MAX_RESULT_SZ];
/* Number of valid bytes in result */
120 uint16_t result_size;
127 * tf_shadow_tcam_shadow_ctxt holds all information for accessing the key and
130 struct tf_shadow_tcam_shadow_ctxt {
/* Directly indexed key/mask table, one entry per tcam entry */
131 struct tf_shadow_tcam_shadow_key_entry *sh_key_tbl;
/* Directly indexed result table, parallel to sh_key_tbl */
132 struct tf_shadow_tcam_shadow_result_entry *sh_res_tbl;
/* Number of entries in both shadow tables */
134 uint16_t num_entries;
139 * tf_shadow_tcam_hash_ctxt holds all information related to accessing the hash
142 struct tf_shadow_tcam_hash_ctxt {
/* Number of 64b buckets in hashtbl; always a power of two */
145 uint16_t hash_entries;
149 * tf_shadow_tcam_ctxt holds the hash and shadow tables for the current shadow
150 * tcam db. This structure is per tcam table type as each tcam table has its
151 * own shadow and hash table.
153 struct tf_shadow_tcam_ctxt {
154 struct tf_shadow_tcam_shadow_ctxt shadow_ctxt;
155 struct tf_shadow_tcam_hash_ctxt hash_ctxt;
159 * tf_shadow_tcam_db is the allocated db structure returned as an opaque
160 * void * pointer to the caller during create db. It holds the pointers for
161 * each tcam associated with the db.
163 struct tf_shadow_tcam_db {
164 /* Each context holds the shadow and hash table information */
/* NULL slots mean that tcam type is not being tracked by the shadow db */
165 struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
169 * Returns the number of entries in the contexts shadow table.
/* Simple accessor; ctxt is assumed non-NULL by all internal callers */
171 static inline uint16_t
172 tf_shadow_tcam_sh_num_entries_get(struct tf_shadow_tcam_ctxt *ctxt)
174 return ctxt->shadow_ctxt.num_entries;
178 * Compare the given key with the key in the shadow table.
180 * Returns 0 if the keys match
/*
 * The caller is expected to pass an already-masked key, since the stored
 * key is key & mask (see tf_shadow_tcam_bind_index).
 */
183 tf_shadow_tcam_key_cmp(struct tf_shadow_tcam_ctxt *ctxt,
/*
 * NOTE(review): the size comparison indexes sh_res_tbl[sh_idx] before the
 * sh_idx bounds check in the second || operand — confirm sh_idx is always
 * pre-validated by callers, otherwise reorder the conditions.
 */
189 if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
190 sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !key || !mask)
193 return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
197 * Copies the shadow result to the result.
199 * Returns 0 on failure
202 tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
/* Reject out-of-range indices and NULL destination */
207 if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !result)
/* The stored size must match exactly what the caller asked for */
210 if (ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result_size != size)
213 return memcpy(result,
214 ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result,
219 * Using a software based CRC function for now, but will look into using hw
220 * assisted in the future.
/* Thin wrapper so the hash algorithm can be swapped in one place */
223 tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
225 return tf_hash_calc_crc32(key, len);
229 * Free the memory associated with the context.
/*
 * Frees the hash table and both shadow tables. tfp_free is assumed to
 * tolerate pointers that were never allocated (calloc'd ctxt => NULL).
 */
232 tf_shadow_tcam_ctxt_delete(struct tf_shadow_tcam_ctxt *ctxt)
237 tfp_free(ctxt->hash_ctxt.hashtbl);
238 tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
239 tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
243 * The TF Shadow TCAM context is per TCAM and holds all information relating to
244 * managing the shadow and search capability. This routine allocated data that
245 * needs to be deallocated by the tf_shadow_tcam_ctxt_delete prior when deleting
249 tf_shadow_tcam_ctxt_create(struct tf_shadow_tcam_ctxt *ctxt,
250 uint16_t num_entries,
253 struct tfp_calloc_parms cparms;
254 uint16_t hash_size = 1;
258 /* Hash table is a power of two that holds the number of entries */
259 if (num_entries > TF_SHADOW_TCAM_ENTRIES_MAX) {
260 TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
262 TF_SHADOW_TCAM_ENTRIES_MAX);
/* Round hash_size up to the next power of two >= num_entries */
266 while (hash_size < num_entries)
267 hash_size = hash_size << 1;
269 hash_mask = hash_size - 1;
271 /* Allocate the hash table */
272 cparms.nitems = hash_size;
273 cparms.size = sizeof(uint64_t);
274 cparms.alignment = 0;
275 rc = tfp_calloc(&cparms);
278 ctxt->hash_ctxt.hashtbl = cparms.mem_va;
279 ctxt->hash_ctxt.hid_mask = hash_mask;
280 ctxt->hash_ctxt.hash_entries = hash_size;
282 /* allocate the shadow tables */
283 /* allocate the shadow key table */
284 cparms.nitems = num_entries;
285 cparms.size = sizeof(struct tf_shadow_tcam_shadow_key_entry);
286 cparms.alignment = 0;
287 rc = tfp_calloc(&cparms);
290 ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
292 /* allocate the shadow result table */
293 cparms.nitems = num_entries;
294 cparms.size = sizeof(struct tf_shadow_tcam_shadow_result_entry);
295 cparms.alignment = 0;
296 rc = tfp_calloc(&cparms);
299 ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
301 ctxt->shadow_ctxt.num_entries = num_entries;
302 ctxt->shadow_ctxt.base_addr = base_addr;
/* Error path: release anything already allocated and propagate rc */
306 tf_shadow_tcam_ctxt_delete(ctxt);
312 * Get a shadow TCAM context given the db and the TCAM type
/* Returns the per-type context, or NULL if the type is invalid/untracked */
314 static struct tf_shadow_tcam_ctxt *
315 tf_shadow_tcam_ctxt_get(struct tf_shadow_tcam_db *shadow_db,
316 enum tf_tcam_tbl_type type)
318 if (type >= TF_TCAM_TBL_TYPE_MAX ||
320 !shadow_db->ctxt[type])
323 return shadow_db->ctxt[type];
327 * Sets the hash entry into the table given the TCAM context, hash bucket
328 * handle, and shadow index.
331 tf_shadow_tcam_set_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
335 uint16_t hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
336 uint16_t be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
/* Element value = shadow index with the per-element valid bit set */
337 uint64_t entry = sh_idx | TF_SHADOW_TCAM_BE_VALID;
339 if (hid >= ctxt->hash_ctxt.hash_entries)
/* OR the 16b element into its slot of the 64b bucket */
342 ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
347 * Clears the hash entry given the TCAM context and hash bucket handle.
350 tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
356 if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hb_handle))
359 hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
360 be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
361 bucket = &ctxt->hash_ctxt.hashtbl[hid];
365 *bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket);
368 *bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket);
371 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
374 *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
378 * Since the BE_GET masks non-inclusive bits, this will not
386 * Clears the shadow key and result entries given the TCAM context and
390 tf_shadow_tcam_clear_sh_entry(struct tf_shadow_tcam_ctxt *ctxt,
393 struct tf_shadow_tcam_shadow_key_entry *sk_entry;
394 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
/* Silently ignore out-of-range indices */
396 if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt))
399 sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
400 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
403 * memset key/result to zero for now, possibly leave the data alone
404 * in the future and rely on the valid bit in the hash table.
406 memset(sk_entry, 0, sizeof(struct tf_shadow_tcam_shadow_key_entry));
407 memset(sr_entry, 0, sizeof(struct tf_shadow_tcam_shadow_result_entry));
411 * Binds the allocated tcam index with the hash and shadow tables.
412 * The entry will be incomplete until the set has happened with the result
416 tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
421 struct tf_shadow_tcam_ctxt *ctxt;
422 struct tf_shadow_tcam_db *shadow_db;
423 struct tf_shadow_tcam_shadow_key_entry *sk_entry;
424 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
425 uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
/* The hb_handle must come from a prior tf_shadow_tcam_search MISS */
427 if (!parms || !TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(parms->hb_handle) ||
428 !parms->key || !parms->mask) {
429 TFP_DRV_LOG(ERR, "Invalid parms\n");
433 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
434 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, parms->type);
436 TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
437 tf_tcam_tbl_2_str(parms->type));
441 memset(tkey, 0, sizeof(tkey));
/* Translate the caller's tcam index into a zero-based shadow index */
442 idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, parms->idx);
443 klen = parms->key_size;
444 if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) ||
445 klen > TF_SHADOW_TCAM_MAX_KEY_SZ) {
446 TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
447 tf_dir_2_str(parms->dir),
448 tf_tcam_tbl_2_str(parms->type),
450 TF_SHADOW_TCAM_MAX_KEY_SZ, idx);
/* Mark the hash bucket element as in use, pointing at this shadow idx */
455 rc = tf_shadow_tcam_set_hash_entry(ctxt, parms->hb_handle, idx);
459 sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
460 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
463 * Write the masked key to the table for more efficient comparisons
466 for (i = 0; i < klen; i++)
467 tkey[i] = parms->key[i] & parms->mask[i];
469 memcpy(sk_entry->key, tkey, klen);
470 memcpy(sk_entry->mask, parms->mask, klen);
472 /* Write the result table */
473 sr_entry->key_size = parms->key_size;
474 sr_entry->hb_handle = parms->hb_handle;
/* New binding starts with a single reference */
475 sr_entry->refcnt = 1;
481 * Deletes hash/shadow information if no more references.
483 * Returns 0 - The caller should delete the tcam entry in hardware.
484 * Returns non-zero - The number of references to the entry
487 tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms)
491 struct tf_shadow_tcam_ctxt *ctxt;
492 struct tf_shadow_tcam_db *shadow_db;
493 struct tf_tcam_free_parms *fparms;
494 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
496 if (!parms || !parms->fparms) {
497 TFP_DRV_LOG(ERR, "Invalid parms\n");
501 fparms = parms->fparms;
504 * Initialize the reference count to zero. It will only be changed if
509 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
510 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, fparms->type);
512 TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
513 tf_tcam_tbl_2_str(fparms->type));
517 idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, fparms->idx);
518 if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
519 TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
520 tf_tcam_tbl_2_str(fparms->type),
522 tf_shadow_tcam_sh_num_entries_get(ctxt));
526 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
/* Last reference: drop the hash element and wipe the shadow entry */
527 if (sr_entry->refcnt <= 1) {
528 hb_handle = sr_entry->hb_handle;
529 tf_shadow_tcam_clear_hash_entry(ctxt, hb_handle);
530 tf_shadow_tcam_clear_sh_entry(ctxt, idx);
/*
 * NOTE(review): after clear_sh_entry the memset leaves refcnt == 0,
 * so the caller sees ref_cnt 0 on the last remove — confirm this is
 * the intended "delete from hardware" signal.
 */
533 fparms->ref_cnt = sr_entry->refcnt;
/*
 * Searches the shadow tcam for the masked key.
 * Outcomes: HIT (existing entry, refcnt bumped and info returned),
 * MISS (a free bucket element is returned in hb_handle for a later bind),
 * or REJECT (invalid input or the 4-element bucket is full).
 */
540 tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms)
546 struct tf_shadow_tcam_ctxt *ctxt;
547 struct tf_shadow_tcam_db *shadow_db;
548 uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
549 struct tf_tcam_alloc_search_parms *sparms;
550 uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
551 uint32_t be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
553 if (!parms || !parms->sparms) {
554 TFP_DRV_LOG(ERR, "tcam search with invalid parms\n");
558 memset(tkey, 0, sizeof(tkey));
559 sparms = parms->sparms;
561 /* Initialize return values to invalid */
563 sparms->search_status = REJECT;
564 parms->hb_handle = 0;
566 /* see if caller wanted the result */
567 rcopy = sparms->result && sparms->result_size;
569 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
570 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
572 TFP_DRV_LOG(ERR, "%s Unable to get tcam mgr context\n",
573 tf_tcam_tbl_2_str(sparms->type));
577 hid_mask = ctxt->hash_ctxt.hid_mask;
579 len = sparms->key_size;
581 if (len > TF_SHADOW_TCAM_MAX_KEY_SZ ||
582 !sparms->key || !sparms->mask || !len) {
583 TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p : %p\n",
584 tf_dir_2_str(sparms->dir),
585 tf_tcam_tbl_2_str(sparms->type),
592 /* Combine the key and mask */
593 for (i = 0; i < len; i++)
594 tkey[i] = sparms->key[i] & sparms->mask[i];
597 * Calculate the crc32
598 * Fold it to create a 16b value
599 * Reduce it to fit the table
601 hid32 = tf_shadow_tcam_crc32_calc(tkey, len);
602 hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
603 hb_idx = hid16 & hid_mask;
605 bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
608 /* empty bucket means a miss and available entry */
609 sparms->search_status = MISS;
610 parms->hb_handle = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, 0);
615 /* Set the avail to max so we can detect when there is an avail entry */
616 be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
/* Scan all 4 bucket elements, remembering the first free one */
617 for (i = 0; i < TF_SHADOW_TCAM_HB_NUM_ELEM; i++) {
618 shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
619 be_valid = TF_SHADOW_TCAM_BE_IS_VALID(shtbl_idx);
621 /* The element is avail, keep going */
625 /* There is a valid entry, compare it */
/* Strip the valid bit to recover the shadow table index */
626 shtbl_key = shtbl_idx & ~TF_SHADOW_TCAM_BE_VALID;
627 if (!tf_shadow_tcam_key_cmp(ctxt,
633 * It matches, increment the ref count if the caller
634 * requested allocation and return the info
637 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
638 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
641 sparms->search_status = HIT;
643 TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, i);
644 sparms->idx = TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt,
647 ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
649 /* copy the result, if caller wanted it. */
651 !tf_shadow_tcam_res_cpy(ctxt,
654 sparms->result_size)) {
656 * Should never get here, possible memory
657 * corruption or something unexpected.
659 TFP_DRV_LOG(ERR, "Error copying result\n");
667 /* No hits, return avail entry if exists */
668 if (be_avail < TF_SHADOW_TCAM_HB_NUM_ELEM) {
670 TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be_avail);
671 sparms->search_status = MISS;
/* Bucket full and no match: caller must REJECT the search */
675 sparms->search_status = REJECT;
/*
 * Completes a previously bound entry by writing the result data.
 * The key/hash were already written by tf_shadow_tcam_bind_index.
 */
682 tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
685 struct tf_shadow_tcam_ctxt *ctxt;
686 struct tf_tcam_set_parms *sparms;
687 struct tf_shadow_tcam_db *shadow_db;
688 struct tf_shadow_tcam_shadow_result_entry *sr_entry;
690 if (!parms || !parms->sparms) {
691 TFP_DRV_LOG(ERR, "Null parms\n");
695 sparms = parms->sparms;
696 if (!sparms->result || !sparms->result_size) {
697 TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
698 tf_dir_2_str(sparms->dir),
699 tf_tcam_tbl_2_str(sparms->type));
703 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
704 ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
706 /* We aren't tracking this table, so return success */
707 TFP_DRV_LOG(DEBUG, "%s Unable to get tcam mgr context\n",
708 tf_tcam_tbl_2_str(sparms->type));
712 idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, sparms->idx);
713 if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
714 TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
715 tf_dir_2_str(sparms->dir),
716 tf_tcam_tbl_2_str(sparms->type),
721 /* Write the result table, the key/hash has been written already */
722 sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
725 * If the handle is not valid, the bind was never called. We aren't
726 * tracking this entry.
728 if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
731 if (sparms->result_size > TF_SHADOW_TCAM_MAX_RESULT_SZ) {
732 TFP_DRV_LOG(ERR, "%s:%s Result length %d > %d\n",
733 tf_dir_2_str(sparms->dir),
734 tf_tcam_tbl_2_str(sparms->type),
736 TF_SHADOW_TCAM_MAX_RESULT_SZ);
740 memcpy(sr_entry->result, sparms->result, sparms->result_size);
741 sr_entry->result_size = sparms->result_size;
/*
 * Frees every per-type context in the shadow db, then the db itself.
 * Safe to call with a NULL shadow_db (logged at DEBUG and skipped).
 */
747 tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms)
749 struct tf_shadow_tcam_db *shadow_db;
752 TF_CHECK_PARMS1(parms);
754 shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
756 TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
760 for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
761 if (shadow_db->ctxt[i]) {
/* ctxt_delete frees the tables; tfp_free releases the ctxt itself */
762 tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
763 tfp_free(shadow_db->ctxt[i]);
773 * Allocate the TCAM resources for search and allocate
776 int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms)
781 struct tfp_calloc_parms cparms;
782 struct tf_shadow_tcam_db *shadow_db = NULL;
784 TF_CHECK_PARMS1(parms);
786 /* Build the shadow DB per the request */
788 cparms.size = sizeof(struct tf_shadow_tcam_db);
789 cparms.alignment = 0;
790 rc = tfp_calloc(&cparms);
793 shadow_db = (void *)cparms.mem_va;
795 for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
796 /* If the element didn't request an allocation no need
797 * to create a pool nor verify if we got a reservation.
799 if (!parms->cfg->alloc_cnt[i]) {
800 shadow_db->ctxt[i] = NULL;
805 cparms.size = sizeof(struct tf_shadow_tcam_ctxt);
806 cparms.alignment = 0;
807 rc = tfp_calloc(&cparms);
811 shadow_db->ctxt[i] = cparms.mem_va;
812 base = parms->cfg->base_addr[i];
813 rc = tf_shadow_tcam_ctxt_create(shadow_db->ctxt[i],
814 parms->cfg->alloc_cnt[i],
820 *parms->shadow_db = (void *)shadow_db;
823 "TF SHADOW TCAM - initialized\n");
827 for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
828 if (shadow_db->ctxt[i]) {
829 tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
830 tfp_free(shadow_db->ctxt[i]);