+/**
+ * Simple routine that decides what table types can be searchable.
+ *
+ */
+static int tf_shadow_tbl_is_searchable(enum tf_tbl_type type)
+{
+ int rc = 0;
+
+ switch (type) {
+ case TF_TBL_TYPE_ACT_ENCAP_8B:
+ case TF_TBL_TYPE_ACT_ENCAP_16B:
+ case TF_TBL_TYPE_ACT_ENCAP_32B:
+ case TF_TBL_TYPE_ACT_ENCAP_64B:
+ case TF_TBL_TYPE_ACT_SP_SMAC:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+ case TF_TBL_TYPE_ACT_MODIFY_IPV4:
+ case TF_TBL_TYPE_ACT_MODIFY_SPORT:
+ case TF_TBL_TYPE_ACT_MODIFY_DPORT:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ };
+
+ return rc;
+}
+
/**
 * Returns the number of entries in the context's shadow table.
 *
 * [in] ctxt
 *   Shadow table context; must be non-NULL (not checked here — callers
 *   are expected to validate before use).
 */
static inline uint16_t
tf_shadow_tbl_sh_num_entries_get(struct tf_shadow_tbl_ctxt *ctxt)
{
	return ctxt->shadow_ctxt.num_entries;
}
+
+/**
+ * Compare the give key with the key in the shadow table.
+ *
+ * Returns 0 if the keys match
+ */
+static int
+tf_shadow_tbl_key_cmp(struct tf_shadow_tbl_ctxt *ctxt,
+ uint8_t *key,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
+ sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) || !key)
+ return -1;
+
+ return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
+}
+
+/**
+ * Free the memory associated with the context.
+ */
+static void
+tf_shadow_tbl_ctxt_delete(struct tf_shadow_tbl_ctxt *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ tfp_free(ctxt->hash_ctxt.hashtbl);
+ tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
+ tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
+}
+
/**
 * The TF Shadow TBL context is per TBL and holds all information relating to
 * managing the shadow and search capability. This routine allocates data that
 * must be deallocated via tf_shadow_tbl_ctxt_delete when deleting the
 * shadow db.
 *
 * [in/out] ctxt
 *   Context to populate.  NOTE(review): the error path calls
 *   tf_shadow_tbl_ctxt_delete(), which frees all three pointer fields —
 *   including ones not yet assigned here — so this relies on the caller
 *   passing a zero-initialized ctxt; confirm at the call sites.
 * [in] num_entries
 *   Number of shadow entries to provision (capped at TF_SHADOW_ENTRIES_MAX)
 * [in] base_addr
 *   Base address of the managed table region, stored for index translation
 *
 * Returns 0 on success, -ENOMEM on bad size or allocation failure.
 */
static int
tf_shadow_tbl_ctxt_create(struct tf_shadow_tbl_ctxt *ctxt,
			  uint16_t num_entries,
			  uint16_t base_addr)
{
	struct tfp_calloc_parms cparms;
	uint16_t hash_size = 1;
	uint16_t hash_mask;
	int rc;

	/* Hash table is a power of two that holds the number of entries */
	if (num_entries > TF_SHADOW_ENTRIES_MAX) {
		TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
			    num_entries,
			    TF_SHADOW_ENTRIES_MAX);
		return -ENOMEM;
	}

	/*
	 * Round up to the next power of two.  hash_size is uint16_t, so
	 * this would wrap to 0 (infinite loop) if num_entries exceeded
	 * 0x8000 — presumably TF_SHADOW_ENTRIES_MAX guards that; verify.
	 */
	while (hash_size < num_entries)
		hash_size = hash_size << 1;

	hash_mask = hash_size - 1;

	/* Allocate the hash table: one 64-bit bucket per hash slot */
	cparms.nitems = hash_size;
	cparms.size = sizeof(uint64_t);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		goto error;
	ctxt->hash_ctxt.hashtbl = cparms.mem_va;
	ctxt->hash_ctxt.hid_mask = hash_mask;
	ctxt->hash_ctxt.hash_entries = hash_size;

	/* allocate the shadow tables */
	/* allocate the shadow key table */
	cparms.nitems = num_entries;
	cparms.size = sizeof(struct tf_shadow_tbl_shadow_key_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		goto error;
	ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;

	/* allocate the shadow result table */
	cparms.nitems = num_entries;
	cparms.size = sizeof(struct tf_shadow_tbl_shadow_result_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		goto error;
	ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;

	ctxt->shadow_ctxt.num_entries = num_entries;
	ctxt->shadow_ctxt.base_addr = base_addr;

	return 0;
error:
	/* Frees whatever was allocated so far (see NOTE above re: zeroing) */
	tf_shadow_tbl_ctxt_delete(ctxt);

	return -ENOMEM;
}
+
+/**
+ * Get a shadow table context given the db and the table type
+ */
+static struct tf_shadow_tbl_ctxt *
+tf_shadow_tbl_ctxt_get(struct tf_shadow_tbl_db *shadow_db,
+ enum tf_tbl_type type)
+{
+ if (type >= TF_TBL_TYPE_MAX ||
+ !shadow_db ||
+ !shadow_db->ctxt[type])
+ return NULL;
+
+ return shadow_db->ctxt[type];
+}
+
+/**
+ * Sets the hash entry into the table given the table context, hash bucket
+ * handle, and shadow index.
+ */
+static inline int
+tf_shadow_tbl_set_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle,
+ uint16_t sh_idx)
+{
+ uint16_t hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ uint16_t be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ uint64_t entry = sh_idx | TF_SHADOW_BE_VALID;
+
+ if (hid >= ctxt->hash_ctxt.hash_entries)
+ return -EINVAL;
+
+ ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
+ return 0;
+}
+
+/**
+ * Clears the hash entry given the TBL context and hash bucket handle.
+ */
+static inline void
+tf_shadow_tbl_clear_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle)
+{
+ uint16_t hid, be;
+ uint64_t *bucket;
+
+ if (!TF_SHADOW_HB_HANDLE_IS_VALID(hb_handle))
+ return;
+
+ hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ bucket = &ctxt->hash_ctxt.hashtbl[hid];
+
+ switch (be) {
+ case 0:
+ *bucket = TF_SHADOW_BE0_MASK_CLEAR(*bucket);
+ break;
+ case 1:
+ *bucket = TF_SHADOW_BE1_MASK_CLEAR(*bucket);
+ break;
+ case 2:
+ *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
+ break;
+ case 3:
+ *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
+ break;
+ default:
+ /*
+ * Since the BE_GET masks non-inclusive bits, this will not
+ * happen.
+ */
+ break;
+ }
+}
+
+/**
+ * Clears the shadow key and result entries given the table context and
+ * shadow index.
+ */
+static void
+tf_shadow_tbl_clear_sh_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint16_t sh_idx)
+{
+ struct tf_shadow_tbl_shadow_key_entry *sk_entry;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt))
+ return;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
+
+ /*
+ * memset key/result to zero for now, possibly leave the data alone
+ * in the future and rely on the valid bit in the hash table.
+ */
+ memset(sk_entry, 0, sizeof(struct tf_shadow_tbl_shadow_key_entry));
+ memset(sr_entry, 0, sizeof(struct tf_shadow_tbl_shadow_result_entry));
+}
+
+/**
+ * Binds the allocated tbl index with the hash and shadow tables.
+ * The entry will be incomplete until the set has happened with the result
+ * data.
+ */