*num_slices_per_row = CFA_P4_WC_TCAM_SLICES_PER_ROW;
if (key_sz > *num_slices_per_row * CFA_P4_WC_TCAM_SLICE_SIZE)
return -ENOTSUP;
+
+ *num_slices_per_row = 1;
} else { /* all other TCAM types */
*num_slices_per_row = 1;
}
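/* Worked example (illustration only; the P4 values are assumed to be
 * CFA_P4_WC_TCAM_SLICES_PER_ROW == 2 and CFA_P4_WC_TCAM_SLICE_SIZE == 12):
 * a WC TCAM key of up to 2 * 12 = 24 bytes passes the size check, while
 * key_sz == 25 fails with -ENOTSUP. After the check the reported value is
 * reset to 1; row pairing is handled at allocation time instead (see the
 * WC TCAM changes further below).
 */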
#include "tf_rm.h"
struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = {
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_PROF_FUNC },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_PROF_ID },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID },
/* CFA_RESOURCE_TYPE_P4_L2_FUNC */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
};
struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_PROF_TCAM },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_WC_TCAM },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_TCAM },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM },
/* CFA_RESOURCE_TYPE_P4_CT_RULE_TCAM */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
/* CFA_RESOURCE_TYPE_P4_VEB_TCAM */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
};
struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_FULL_ACTION },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_MCG },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_8B },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_16B },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B },
/* CFA_RESOURCE_TYPE_P4_ENCAP_32B */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_64B },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_COUNTER_64B },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_SPORT },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_DPORT },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_S_IPV4 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_D_IPV4 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_S_IPV6 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_D_IPV6 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_METER_PROF },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_METER },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_MIRROR },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4 },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6 },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_SPORT },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_DPORT },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_S_IPV4 },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_D_IPV4 },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_S_IPV6 },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_D_IPV6 },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR },
/* CFA_RESOURCE_TYPE_P4_UPAR */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
/* CFA_RESOURCE_TYPE_P4_EPOC */
struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = {
/* CFA_RESOURCE_TYPE_P4_EM_REC */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_TBL_SCOPE },
+ { TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE },
};
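/* Illustration (not part of the patch): these cfg tables are consumed by
 * tf_rm_create_db(). TF_RM_ELEM_CFG_HCAPI_BA entries get a bit allocator
 * pool inside the RM, while TF_RM_ELEM_CFG_HCAPI entries are reserved
 * with the FW but tracked by the owning module, as the internal EM code
 * does with its stack pool. A minimal, hypothetical bind-time use:
 *
 *   struct tf_rm_create_db_parms db_cfg = { 0 };
 *   db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
 *   db_cfg.num_elements = TF_EM_TBL_TYPE_MAX;
 *   db_cfg.cfg = tf_em_ext_p4;
 *   db_cfg.rm_db = &em_db[TF_DIR_RX];
 *   rc = tf_rm_create_db(tfp, &db_cfg);   // alloc_cnt omitted for brevity
 */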
struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = {
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC },
/* CFA_RESOURCE_TYPE_P4_TBL_SCOPE */
{ TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
};
/**
 * EM DB, one per direction
 */
static void *em_db[TF_DIR_MAX];
+#define TF_EM_DB_EM_REC 0
+
/**
* Init flag, set on bind and cleared on unbind
*/
static uint8_t init;
+
+/**
+ * EM Pool
+ */
+static struct stack em_pool[TF_DIR_MAX];
+
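/* Usage sketch (hypothetical sizes): the pools are now file scoped per
 * direction instead of living in the session, so the EM code pops and
 * pushes record offsets directly:
 *
 *   uint32_t idx;
 *   tf_create_em_pool(TF_DIR_RX, 512, 0);
 *   stack_pop(&em_pool[TF_DIR_RX], &idx);  // lowest free block, idx == 0
 *   stack_push(&em_pool[TF_DIR_RX], idx);  // return it to the pool
 */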
/**
* Create EM Tbl pool of memory indexes.
*
- * [in] session
- * Pointer to session
* [in] dir
* direction
* [in] num_entries
* number of entries to write
+ * [in] start
+ * starting offset
*
* Return:
* 0 - Success, pool created and filled with indexes
* - Failure, pool not created, out of resources
*/
static int
-tf_create_em_pool(struct tf_session *session,
- enum tf_dir dir,
- uint32_t num_entries)
+tf_create_em_pool(enum tf_dir dir,
+ uint32_t num_entries,
+ uint32_t start)
{
struct tfp_calloc_parms parms;
uint32_t i, j;
int rc = 0;
- struct stack *pool = &session->em_pool[dir];
+ struct stack *pool = &em_pool[dir];
- parms.nitems = num_entries;
+ /* Assumes that num_entries has been checked before we get here */
+ parms.nitems = num_entries / TF_SESSION_EM_ENTRY_SIZE;
parms.size = sizeof(uint32_t);
parms.alignment = 0;
rc = tfp_calloc(&parms);
if (rc) {
- TFP_DRV_LOG(ERR, "EM pool allocation failure %s\n",
+ TFP_DRV_LOG(ERR,
+ "%s, EM pool allocation failure %s\n",
+ tf_dir_2_str(dir),
strerror(-rc));
return rc;
}
/* Create empty stack
*/
- rc = stack_init(num_entries, (uint32_t *)parms.mem_va, pool);
+ rc = stack_init(num_entries / TF_SESSION_EM_ENTRY_SIZE,
+ (uint32_t *)parms.mem_va,
+ pool);
if (rc) {
- TFP_DRV_LOG(ERR, "EM pool stack init failure %s\n",
+ TFP_DRV_LOG(ERR,
+ "%s, EM pool stack init failure %s\n",
+ tf_dir_2_str(dir),
strerror(-rc));
goto cleanup;
}
/* Fill pool with indexes
*/
- j = num_entries - 1;
+ j = start + num_entries - TF_SESSION_EM_ENTRY_SIZE;
- for (i = 0; i < num_entries; i++) {
+ for (i = 0; i < (num_entries / TF_SESSION_EM_ENTRY_SIZE); i++) {
rc = stack_push(pool, j);
if (rc) {
- TFP_DRV_LOG(ERR, "EM pool stack push failure %s\n",
+ TFP_DRV_LOG(ERR,
+ "%s, EM pool stack push failure %s\n",
+ tf_dir_2_str(dir),
strerror(-rc));
goto cleanup;
}
- j--;
+
+ j -= TF_SESSION_EM_ENTRY_SIZE;
}
if (!stack_is_full(pool)) {
rc = -EINVAL;
- TFP_DRV_LOG(ERR, "EM pool stack failure %s\n",
+ TFP_DRV_LOG(ERR,
+ "%s, EM pool stack failure %s\n",
+ tf_dir_2_str(dir),
strerror(-rc));
goto cleanup;
}
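/* Worked example (illustrative values): with start == 0, num_entries == 8
 * and TF_SESSION_EM_ENTRY_SIZE assumed to be 2, the loop above pushes
 * j = 6, 4, 2, 0, i.e. four block-aligned record offsets. The stack is
 * LIFO, so the first stack_pop() hands out offset 0, the lowest free
 * block.
 */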
/**
* Free EM Tbl pool of memory indexes.
*
- * [in] session
- * Pointer to session
* [in] dir
* direction
*
* Return:
*/
static void
-tf_free_em_pool(struct tf_session *session,
- enum tf_dir dir)
+tf_free_em_pool(enum tf_dir dir)
{
- struct stack *pool = &session->em_pool[dir];
+ struct stack *pool = &em_pool[dir];
uint32_t *ptr;
ptr = stack_items(pool);
uint16_t rptr_index = 0;
uint8_t rptr_entry = 0;
uint8_t num_of_entries = 0;
- struct tf_session *session =
- (struct tf_session *)(tfp->session->core_data);
- struct stack *pool = &session->em_pool[parms->dir];
+ struct stack *pool = &em_pool[parms->dir];
uint32_t index;
rc = stack_pop(pool, &index);
if (rc) {
- PMD_DRV_LOG
- (ERR,
- "dir:%d, EM entry index allocation failed\n",
- parms->dir);
+ PMD_DRV_LOG(ERR,
+ "%s, EM entry index allocation failed\n",
+ tf_dir_2_str(parms->dir));
return rc;
}
- rptr_index = index * TF_SESSION_EM_ENTRY_SIZE;
+ rptr_index = index;
rc = tf_msg_insert_em_internal_entry(tfp,
parms,
&rptr_index,
PMD_DRV_LOG
(ERR,
- "Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
- index * TF_SESSION_EM_ENTRY_SIZE,
+ "%s, Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
+ tf_dir_2_str(parms->dir),
+ index,
rptr_index,
rptr_entry,
num_of_entries);
struct tf_delete_em_entry_parms *parms)
{
int rc = 0;
- struct tf_session *session =
- (struct tf_session *)(tfp->session->core_data);
- struct stack *pool = &session->em_pool[parms->dir];
+ struct stack *pool = &em_pool[parms->dir];
rc = tf_msg_delete_em_entry(tfp, parms);
/* Return resource to pool */
if (rc == 0)
- stack_push(pool, parms->index / TF_SESSION_EM_ENTRY_SIZE);
+ stack_push(pool, parms->index);
return rc;
}
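/* Note the symmetry with the insert path above: the pool now stores raw
 * record offsets (derived from the RM allocation's start/stride), so the
 * index popped on insert and pushed back on delete is used as-is; the
 * old TF_SESSION_EM_ENTRY_SIZE scaling on both sides is gone.
 */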
int rc;
int i;
struct tf_rm_create_db_parms db_cfg = { 0 };
- struct tf_session *session;
uint8_t db_exists = 0;
+ struct tf_rm_get_alloc_info_parms iparms;
+ struct tf_rm_alloc_info info;
TF_CHECK_PARMS2(tfp, parms);
return -EINVAL;
}
- session = (struct tf_session *)tfp->session->core_data;
-
- for (i = 0; i < TF_DIR_MAX; i++) {
- tf_create_em_pool(session,
- i,
- TF_SESSION_EM_POOL_SIZE);
- }
-
db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_EM_RECORD] == 0)
continue;
+ if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_EM_RECORD] %
+ TF_SESSION_EM_ENTRY_SIZE != 0) {
+ rc = -ENOMEM;
+ TFP_DRV_LOG(ERR,
+ "%s, EM Allocation must be in blocks of %d, failure %s\n",
+ tf_dir_2_str(i),
+ TF_SESSION_EM_ENTRY_SIZE,
+ strerror(-rc));
+
+ return rc;
+ }
+
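/* Example of the block-size check (TF_SESSION_EM_ENTRY_SIZE is assumed
 * to be 4 here): alloc_cnt == 1024 is accepted, while alloc_cnt == 1022
 * leaves a remainder of 2 and is rejected with -ENOMEM.
 */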
db_cfg.rm_db = &em_db[i];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
if (db_exists)
init = 1;
+ for (i = 0; i < TF_DIR_MAX; i++) {
+ iparms.rm_db = em_db[i];
+ iparms.db_index = TF_EM_DB_EM_REC;
+ iparms.info = &info;
+
+ rc = tf_rm_get_info(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: EM DB get info failed\n",
+ tf_dir_2_str(i));
+ return rc;
+ }
+
+ rc = tf_create_em_pool(i,
+ iparms.info->entry.stride,
+ iparms.info->entry.start);
+ /* Logging handled in tf_create_em_pool */
+ if (rc)
+ return rc;
+ }
+
return 0;
}
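/* The pool geometry now comes from the RM reservation instead of a fixed
 * TF_SESSION_EM_POOL_SIZE: entry.start is the first EM record owned by
 * this session and entry.stride the number of records, so each pool ends
 * up holding stride / TF_SESSION_EM_ENTRY_SIZE block handles.
 */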
int rc;
int i;
struct tf_rm_free_db_parms fparms = { 0 };
- struct tf_session *session;
TF_CHECK_PARMS1(tfp);
return 0;
}
- session = (struct tf_session *)tfp->session->core_data;
-
for (i = 0; i < TF_DIR_MAX; i++)
- tf_free_em_pool(session, i);
+ tf_free_em_pool(i);
for (i = 0; i < TF_DIR_MAX; i++) {
fparms.dir = i;
return rc;
/* Verify that the response holds exactly the requested amount of data */
- if (resp.size < size)
+ if (tfp_le_to_cpu_32(resp.size) != size)
return -EINVAL;
tfp_memcpy(data,
&resp.data,
- resp.size);
+ size);
return tfp_le_to_cpu_32(parms.tf_resp_code);
}
return rc;
/* Verify that the response holds exactly the requested amount of data */
- if (resp.size < data_size)
+ if (tfp_le_to_cpu_32(resp.size) != data_size)
return -EINVAL;
return tfp_le_to_cpu_32(parms.tf_resp_code);
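/* Both size checks above are now strict: a reply larger than requested is
 * rejected just like a short one, and resp.size is byte-order converted
 * first since the firmware replies in little endian; e.g. on a big-endian
 * host the wire value must pass through tfp_le_to_cpu_32() before the
 * compare is meaningful.
 */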
uint16_t cnt = 0;
for (i = 0; i < count; i++) {
- if (cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI &&
+ if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
+ cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
reservations[i] > 0)
cnt++;
/* Build the request */
for (i = 0, j = 0; i < parms->num_elements; i++) {
/* Skip any non HCAPI cfg elements */
- if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI) {
+ if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
+ parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
/* Only perform reservation for entries that
* has been requested
*/
/* Skip any non HCAPI types as we didn't include them
* in the reservation request.
*/
- if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI)
+ if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+ parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
continue;
/* If the element didn't request an allocation no need
resv[j].start,
resv[j].stride);
- /* Create pool */
- pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
- sizeof(struct bitalloc));
- /* Alloc request, alignment already set */
- cparms.nitems = pool_size;
- cparms.size = sizeof(struct bitalloc);
- rc = tfp_calloc(&cparms);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Pool alloc failed, type:%d\n",
- tf_dir_2_str(parms->dir),
- db[i].cfg_type);
- goto fail;
- }
- db[i].pool = (struct bitalloc *)cparms.mem_va;
-
- rc = ba_init(db[i].pool, resv[j].stride);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Pool init failed, type:%d\n",
- tf_dir_2_str(parms->dir),
- db[i].cfg_type);
- goto fail;
+ /* Only allocate BA pool if so requested */
+ if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
+ /* Create pool */
+ pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
+ sizeof(struct bitalloc));
+ /* Alloc request, alignment already set */
+ cparms.nitems = pool_size;
+ cparms.size = sizeof(struct bitalloc);
+ rc = tfp_calloc(&cparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Pool alloc failed, type:%d\n",
+ tf_dir_2_str(parms->dir),
+ db[i].cfg_type);
+ goto fail;
+ }
+ db[i].pool = (struct bitalloc *)cparms.mem_va;
+
+ rc = ba_init(db[i].pool, resv[j].stride);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Pool init failed, type:%d\n",
+ tf_dir_2_str(parms->dir),
+ db[i].cfg_type);
+ goto fail;
+ }
}
j++;
} else {
tf_device_module_type_2_str(rm_db->type));
}
+ /* No need to check the configuration type; entries without a
+ * BA pool just free a NULL pointer, which is harmless.
+ */
for (i = 0; i < rm_db->num_entries; i++)
tfp_free((void *)rm_db->db[i].pool);
cfg_type = rm_db->db[parms->db_index].cfg_type;
/* Bail out if not controlled by RM */
- if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
- cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;
/* Bail out if the pool is not valid, should never happen */
cfg_type = rm_db->db[parms->db_index].cfg_type;
/* Bail out if not controlled by RM */
- if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
- cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;
/* Bail out if the pool is not valid, should never happen */
cfg_type = rm_db->db[parms->db_index].cfg_type;
/* Bail out if not controlled by RM */
- if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
- cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;
/* Bail out if the pool is not valid, should never happen */
rm_db = (struct tf_rm_new_db *)parms->rm_db;
cfg_type = rm_db->db[parms->db_index].cfg_type;
- /* Bail out if not controlled by RM */
+ /* Bail out if not controlled by HCAPI */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
- cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;
memcpy(parms->info,
rm_db = (struct tf_rm_new_db *)parms->rm_db;
cfg_type = rm_db->db[parms->db_index].cfg_type;
- /* Bail out if not controlled by RM */
+ /* Bail out if not controlled by HCAPI */
if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
- cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;
*parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
cfg_type = rm_db->db[parms->db_index].cfg_type;
/* Bail out if not controlled by RM */
- if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
- cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
return -ENOTSUP;
/* Bail silently (no logging), if the pool is not valid there
* ULP layer that is not controlled by HCAPI within the Firmware.
*/
enum tf_rm_elem_cfg_type {
- /** No configuration */
+ /**
+ * No configuration
+ */
TF_RM_ELEM_CFG_NULL,
- /** HCAPI 'controlled', uses a Pool for internal storage */
+ /** HCAPI 'controlled', no RM storage; the Device Module
+ * using the RM can choose to handle storage locally.
+ */
TF_RM_ELEM_CFG_HCAPI,
- /** Private thus not HCAPI 'controlled', creates a Pool for storage */
- TF_RM_ELEM_CFG_PRIVATE,
+ /** HCAPI 'controlled', uses a Bit Allocator Pool for internal
+ * storage in the RM.
+ */
+ TF_RM_ELEM_CFG_HCAPI_BA,
/**
* Shared element; it belongs to a shared FW Session and is
* not controlled by the Host.
/** Table scope array */
struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
-
- /**
- * EM Pools
- */
- struct stack em_pool[TF_DIR_MAX];
};
/**
{
int rc;
int i;
+ struct tf_tcam_resources *tcam_cnt;
struct tf_rm_create_db_parms db_cfg = { 0 };
TF_CHECK_PARMS2(tfp, parms);
return -EINVAL;
}
+ tcam_cnt = parms->resources->tcam_cnt;
+ if ((tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] % 2) ||
+ (tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] % 2)) {
+ TFP_DRV_LOG(ERR,
+ "Number of WC TCAM entries cannot be odd num\n");
+ return -EINVAL;
+ }
+
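/* Each WC TCAM row consists of two slices and the allocate path consumes
 * WC entries in even/odd pairs (see the allocation change below), hence
 * the even-count requirement enforced above.
 */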
db_cfg.type = TF_DEVICE_MODULE_TYPE_TCAM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
return rc;
}
+ if (parms->type == TF_TCAM_TBL_TYPE_WC_TCAM &&
+ (parms->idx % 2) != 0) {
+ rc = tf_rm_allocate(&aparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed tcam, type:%d\n",
+ tf_dir_2_str(parms->dir),
+ parms->type);
+ return rc;
+ }
+ }
+
parms->idx *= num_slice_per_row;
return 0;
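/* Allocation sketch for WC TCAM after this change: if the first
 * tf_rm_allocate() returns an odd index, a second allocation is made so
 * that WC entries are always drawn from the pool in pairs, matching the
 * even-count check at bind time. The multiply then scales the pool index
 * by the slice count reported for the TCAM type (1 for WC per the
 * get_tcam_slice_info change above, so WC indexes pass through
 * unchanged).
 */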