#include "bnxt.h"
#include "tf_resources.h"
#include "tf_rm.h"
+#include "stack.h"
+#include "tf_common.h"
#define PTU_PTE_VALID 0x1UL
#define PTU_PTE_LAST 0x2UL
* Pointer to the page table to free
*/
static void
-tf_em_free_pg_tbl(struct tf_em_page_tbl *tp)
+tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
{
uint32_t i;
for (i = 0; i < tp->pg_count; i++) {
if (!tp->pg_va_tbl[i]) {
- PMD_DRV_LOG(WARNING,
- "No map for page %d table %016" PRIu64 "\n",
+ TFP_DRV_LOG(WARNING,
+ "No mapping for page: %d table: %016" PRIu64 "\n",
i,
(uint64_t)(uintptr_t)tp);
continue;
* Pointer to the EM table to free
*/
static void
-tf_em_free_page_table(struct tf_em_table *tbl)
+tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
{
- struct tf_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp;
int i;
for (i = 0; i < tbl->num_lvl; i++) {
tp = &tbl->pg_tbl[i];
-
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"EEM: Freeing page table: size %u lvl %d cnt %u\n",
TF_EM_PAGE_SIZE,
i,
* -ENOMEM - Out of memory
*/
static int
-tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp,
+tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
uint32_t pg_count,
uint32_t pg_size)
{
* -ENOMEM - Out of memory
*/
static int
-tf_em_alloc_page_table(struct tf_em_table *tbl)
+tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
{
- struct tf_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp;
int rc = 0;
int i;
uint32_t j;
tbl->page_cnt[i],
TF_EM_PAGE_SIZE);
if (rc) {
- PMD_DRV_LOG(WARNING,
- "Failed to allocate page table: lvl: %d\n",
- i);
+ TFP_DRV_LOG(WARNING,
+ "Failed to allocate page table: lvl: %d, rc:%s\n",
+ i,
+ strerror(-rc));
goto cleanup;
}
for (j = 0; j < tp->pg_count; j++) {
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"EEM: Allocated page table: size %u lvl %d cnt"
" %u VA:%p PA:%p\n",
TF_EM_PAGE_SIZE,
* Flag controlling if the page table is last
*/
static void
-tf_em_link_page_table(struct tf_em_page_tbl *tp,
- struct tf_em_page_tbl *tp_next,
+tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
+ struct hcapi_cfa_em_page_tbl *tp_next,
bool set_pte_last)
{
uint64_t *pg_pa = tp_next->pg_pa_tbl;
* Pointer to EM page table
*/
static void
-tf_em_setup_page_table(struct tf_em_table *tbl)
+tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
{
- struct tf_em_page_tbl *tp_next;
- struct tf_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp_next;
+ struct hcapi_cfa_em_page_tbl *tp;
bool set_pte_last = 0;
int i;
tf_em_link_page_table(tp, tp_next, set_pte_last);
}
- tbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0];
- tbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0];
+ tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
+ tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
}
/**
uint64_t *num_data_pages)
{
uint64_t lvl_data_size = page_size;
- int lvl = PT_LVL_0;
+ int lvl = TF_PT_LVL_0;
uint64_t data_size;
*num_data_pages = 0;
while (lvl_data_size < data_size) {
lvl++;
- if (lvl == PT_LVL_1)
+ if (lvl == TF_PT_LVL_1)
lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
page_size;
- else if (lvl == PT_LVL_2)
+ else if (lvl == TF_PT_LVL_2)
lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
MAX_PAGE_PTRS(page_size) * page_size;
else
uint32_t page_size,
uint32_t *page_cnt)
{
- if (max_lvl == PT_LVL_0) {
- page_cnt[PT_LVL_0] = num_data_pages;
- } else if (max_lvl == PT_LVL_1) {
- page_cnt[PT_LVL_1] = num_data_pages;
- page_cnt[PT_LVL_0] =
- tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
- } else if (max_lvl == PT_LVL_2) {
- page_cnt[PT_LVL_2] = num_data_pages;
- page_cnt[PT_LVL_1] =
- tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size);
- page_cnt[PT_LVL_0] =
- tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
+ if (max_lvl == TF_PT_LVL_0) {
+ page_cnt[TF_PT_LVL_0] = num_data_pages;
+ } else if (max_lvl == TF_PT_LVL_1) {
+ page_cnt[TF_PT_LVL_1] = num_data_pages;
+ page_cnt[TF_PT_LVL_0] =
+ tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
+ } else if (max_lvl == TF_PT_LVL_2) {
+ page_cnt[TF_PT_LVL_2] = num_data_pages;
+ page_cnt[TF_PT_LVL_1] =
+ tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
+ page_cnt[TF_PT_LVL_0] =
+ tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
} else {
return;
}
* - ENOMEM - Out of memory
*/
static int
-tf_em_size_table(struct tf_em_table *tbl)
+tf_em_size_table(struct hcapi_cfa_em_table *tbl)
{
uint64_t num_data_pages;
uint32_t *page_cnt;
/* Determine number of page table levels and the number
* of data pages needed to process the given eem table.
*/
- if (tbl->type == RECORD_TABLE) {
+ if (tbl->type == TF_RECORD_TABLE) {
/*
* For action records just a memory size is provided. Work
* backwards to resolve to number of entries
tbl->num_entries,
&num_data_pages);
if (max_lvl < 0) {
- PMD_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
- PMD_DRV_LOG(WARNING,
+ TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
+ TFP_DRV_LOG(WARNING,
"table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
- tbl->type,
- (uint64_t)num_entries * tbl->entry_size,
+ tbl->type, (uint64_t)num_entries * tbl->entry_size,
TF_EM_PAGE_SIZE);
return -ENOMEM;
}
tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
page_cnt);
- PMD_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
+ TFP_DRV_LOG(INFO,
"EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
max_lvl + 1,
(uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
num_data_pages,
- page_cnt[PT_LVL_0],
- page_cnt[PT_LVL_1],
- page_cnt[PT_LVL_2]);
+ page_cnt[TF_PT_LVL_0],
+ page_cnt[TF_PT_LVL_1],
+ page_cnt[TF_PT_LVL_2]);
return 0;
}
struct tf_tbl_scope_cb *tbl_scope_cb,
int dir)
{
- struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
- struct tf_em_table *tbl;
+ struct hcapi_cfa_em_ctx_mem_info *ctxp =
+ &tbl_scope_cb->em_ctx_info[dir];
+ struct hcapi_cfa_em_table *tbl;
int i;
- for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
+ for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
tbl = &ctxp->em_tables[i];
if (tbl->num_entries != 0 && tbl->entry_size != 0) {
struct tf_tbl_scope_cb *tbl_scope_cb,
int dir)
{
- struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
- struct tf_em_table *tbl;
+ struct hcapi_cfa_em_ctx_mem_info *ctxp =
+ &tbl_scope_cb->em_ctx_info[dir];
+ struct hcapi_cfa_em_table *tbl;
int rc = 0;
int i;
- for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
+ for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
tbl = &ctxp->em_tables[i];
if (tbl->num_entries && tbl->entry_size) {
TF_MEGABYTE) / (key_b + action_b);
if (num_entries < TF_EM_MIN_ENTRIES) {
- PMD_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
+ TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
"%uMB\n",
parms->rx_mem_size_in_mb);
return -EINVAL;
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
+ TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
"%u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
TF_EM_MIN_ENTRIES ||
(parms->rx_num_flows_in_k * TF_KILOBYTE) >
tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Rx flows "
"requested:%u max:%u\n",
parms->rx_num_flows_in_k * TF_KILOBYTE,
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Rx requested: %u\n",
(parms->rx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
(key_b + action_b);
if (num_entries < TF_EM_MIN_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Insufficient memory requested:%uMB\n",
parms->rx_mem_size_in_mb);
return -EINVAL;
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Tx requested: %u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
TF_EM_MIN_ENTRIES ||
(parms->tx_num_flows_in_k * TF_KILOBYTE) >
tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Tx flows "
"requested:%u max:%u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE),
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Tx requested: %u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
if (parms->rx_num_flows_in_k != 0 &&
(parms->rx_max_key_sz_in_bits / 8 == 0)) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Rx key size required: %u\n",
(parms->rx_max_key_sz_in_bits));
return -EINVAL;
if (parms->tx_num_flows_in_k != 0 &&
(parms->tx_max_key_sz_in_bits / 8 == 0)) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Tx key size required: %u\n",
(parms->tx_max_key_sz_in_bits));
return -EINVAL;
}
/* Rx */
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size =
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
parms->rx_max_key_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size =
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
parms->rx_max_key_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size =
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
parms->rx_max_action_entry_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
0;
/* Tx */
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
parms->tx_num_flows_in_k * TF_KILOBYTE;
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size =
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
parms->tx_max_key_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
parms->tx_num_flows_in_k * TF_KILOBYTE;
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size =
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
parms->tx_max_key_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
parms->tx_num_flows_in_k * TF_KILOBYTE;
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size =
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
parms->tx_max_action_entry_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries =
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
0;
return 0;
if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
parms->type != TF_TBL_TYPE_ACT_STATS_64) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Type not supported, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Type not supported, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return -EOPNOTSUPP;
}
/* Verify that the entry has been previously allocated */
id = ba_inuse(session_pool, index);
if (id != 1) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Invalid or not allocated index, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
return -EINVAL;
parms->data,
parms->idx);
if (rc) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Set failed, type:%d, rc:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Set failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
parms->type,
- rc);
+ strerror(-rc));
}
return rc;
/* Verify that the entry has been previously allocated */
id = ba_inuse(session_pool, index);
if (id != 1) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Invalid or not allocated index, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
return -EINVAL;
parms->data,
parms->idx);
if (rc) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Get failed, type:%d, rc:%d\n",
+ TFP_DRV_LOG(ERR,
+ "%s, Get failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ strerror(-rc));
+ }
+
+ return rc;
+}
+
+/**
+ * Internal function to get a Table Entry. Supports all Table Types
+ * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
+ *
+ * [in] tfp
+ * Pointer to TruFlow handle
+ *
+ * [in] parms
+ * Pointer to input parameters
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Parameter error
+ */
+static int
+tf_bulk_get_tbl_entry_internal(struct tf *tfp,
+ struct tf_bulk_get_tbl_entry_parms *parms)
+{
+ int rc;
+ int id;
+ uint32_t index;
+ struct bitalloc *session_pool;
+ struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+ /* Lookup the pool using the table type of the element */
+ rc = tf_rm_lookup_tbl_type_pool(tfs,
+ parms->dir,
+ parms->type,
+ &session_pool);
+ /* Error logging handled by tf_rm_lookup_tbl_type_pool */
+ if (rc)
+ return rc;
+
+ index = parms->starting_idx;
+
+ /*
+ * Adjust the returned index/offset as there is no guarantee
+ * that the start is 0 at time of RM allocation
+ */
+ tf_rm_convert_index(tfs,
parms->dir,
parms->type,
- rc);
+ TF_RM_CONVERT_RM_BASE,
+ parms->starting_idx,
+ &index);
+
+ /* Verify that the entry has been previously allocated */
+ id = ba_inuse(session_pool, index);
+ if (id != 1) {
+ TFP_DRV_LOG(ERR,
+ "%s, Invalid or not allocated index, type:%d, starting_idx:%d\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ index);
+ return -EINVAL;
+ }
+
+ /* Get the entry */
+ rc = tf_msg_bulk_get_tbl_entry(tfp, parms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Bulk get failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ strerror(-rc));
}
return rc;
tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
struct tf_alloc_tbl_entry_parms *parms __rte_unused)
{
- PMD_DRV_LOG(ERR,
- "dir:%d, Entry Alloc with search not supported\n",
- parms->dir);
-
+ TFP_DRV_LOG(ERR,
+ "%s, Entry Alloc with search not supported\n",
+ tf_dir_2_str(parms->dir));
return -EOPNOTSUPP;
}
tf_free_tbl_entry_shadow(struct tf_session *tfs,
struct tf_free_tbl_entry_parms *parms)
{
- PMD_DRV_LOG(ERR,
- "dir:%d, Entry Free with search not supported\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, Entry Free with search not supported\n",
+ tf_dir_2_str(parms->dir));
return -EOPNOTSUPP;
}
parms.alignment = 0;
if (tfp_calloc(&parms) != 0) {
- PMD_DRV_LOG(ERR, "%d: TBL: external pool failure %s\n",
- dir, strerror(-ENOMEM));
+ TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
+ tf_dir_2_str(dir), strerror(ENOMEM));
return -ENOMEM;
}
rc = stack_init(num_entries, parms.mem_va, pool);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "%d: TBL: stack init failure %s\n",
- dir, strerror(-rc));
+ TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
+ tf_dir_2_str(dir), strerror(-rc));
goto cleanup;
}
for (i = 0; i < num_entries; i++) {
rc = stack_push(pool, j);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
+ TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
tf_dir_2_str(dir), strerror(-rc));
goto cleanup;
}
if (j < 0) {
-		PMD_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
-			    dir, j);
+		TFP_DRV_LOG(ERR, "%s TBL: invalid offset (%d)\n",
+			    tf_dir_2_str(dir), j);
goto cleanup;
}
if (!stack_is_full(pool)) {
rc = -EINVAL;
- PMD_DRV_LOG(ERR, "%d TBL: stack failure %s\n",
- dir, strerror(-rc));
+ TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
+ tf_dir_2_str(dir), strerror(-rc));
goto cleanup;
}
return 0;
struct tf_tbl_scope_cb *tbl_scope_cb;
struct stack *pool;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
- PMD_DRV_LOG(ERR,
- "%s, table scope not allocated\n",
- tf_dir_2_str(parms->dir));
+ TFP_DRV_LOG(ERR,
+ "%s, table scope not allocated\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
pool = &tbl_scope_cb->ext_act_pool[parms->dir];
rc = stack_pop(pool, &index);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Allocation failed, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Allocation failed, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return rc;
}
struct bitalloc *session_pool;
struct tf_session *tfs;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
parms->type != TF_TBL_TYPE_ACT_STATS_64) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Type not supported, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Type not supported, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return -EOPNOTSUPP;
}
if (id == -1) {
free_cnt = ba_free_count(session_pool);
- PMD_DRV_LOG(ERR,
- "dir:%d, Allocation failed, type:%d, free:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Allocation failed, type:%d, free:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
free_cnt);
return -ENOMEM;
struct tf_tbl_scope_cb *tbl_scope_cb;
struct stack *pool;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, table scope error\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
pool = &tbl_scope_cb->ext_act_pool[parms->dir];
rc = stack_push(pool, index);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "dir:%d, consistency error, stack full, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, consistency error, stack full, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
}
struct tf_session *tfs;
uint32_t index;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
parms->type != TF_TBL_TYPE_ACT_STATS_64) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Type not supported, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Type not supported, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return -EOPNOTSUPP;
}
/* Check if element was indeed allocated */
id = ba_inuse_free(session_pool, index);
if (id == -1) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Element not previously alloc'ed, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Element not previously alloc'ed, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
return -ENOMEM;
tbl_scope_cb = tbl_scope_cb_find(session,
parms->tbl_scope_id);
- if (tbl_scope_cb == NULL)
+ if (tbl_scope_cb == NULL) {
+ TFP_DRV_LOG(ERR, "Table scope error\n");
return -EINVAL;
+ }
/* Free Table control block */
ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
int rc;
enum tf_dir dir;
struct tf_tbl_scope_cb *tbl_scope_cb;
- struct tf_em_table *em_tables;
+ struct hcapi_cfa_em_table *em_tables;
int index;
struct tf_session *session;
struct tf_free_tbl_scope_parms free_parms;
- /* check parameters */
- if (parms == NULL || tfp->session == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
-
session = (struct tf_session *)tfp->session->core_data;
/* Get Table Scope control block from the session pool */
index = ba_alloc(session->tbl_scope_pool_rx);
if (index == -1) {
- PMD_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
+ TFP_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
"Control Block\n");
return -ENOMEM;
}
dir,
&tbl_scope_cb->em_caps[dir]);
if (rc) {
- PMD_DRV_LOG(ERR,
- "EEM: Unable to query for EEM capability\n");
+ TFP_DRV_LOG(ERR,
+ "EEM: Unable to query for EEM capability,"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup;
}
}
*/
rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
if (rc) {
- PMD_DRV_LOG(ERR,
- "EEM: Unable to register for EEM ctx\n");
+ TFP_DRV_LOG(ERR,
+ "EEM: Unable to register for EEM ctx,"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup;
}
em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
rc = tf_msg_em_cfg(tfp,
- em_tables[KEY0_TABLE].num_entries,
- em_tables[KEY0_TABLE].ctx_id,
- em_tables[KEY1_TABLE].ctx_id,
- em_tables[RECORD_TABLE].ctx_id,
- em_tables[EFC_TABLE].ctx_id,
+ em_tables[TF_KEY0_TABLE].num_entries,
+ em_tables[TF_KEY0_TABLE].ctx_id,
+ em_tables[TF_KEY1_TABLE].ctx_id,
+ em_tables[TF_RECORD_TABLE].ctx_id,
+ em_tables[TF_EFC_TABLE].ctx_id,
parms->hw_flow_cache_flush_timer,
dir);
if (rc) {
- PMD_DRV_LOG(ERR,
- "TBL: Unable to configure EEM in firmware\n");
+ TFP_DRV_LOG(ERR,
+ "TBL: Unable to configure EEM in firmware"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup_full;
}
HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
if (rc) {
- PMD_DRV_LOG(ERR,
- "EEM: Unable to enable EEM in firmware\n");
+ TFP_DRV_LOG(ERR,
+ "EEM: Unable to enable EEM in firmware"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup_full;
}
* actions related to a single table scope.
*/
rc = tf_create_tbl_pool_external(dir,
- tbl_scope_cb,
- em_tables[RECORD_TABLE].num_entries,
- em_tables[RECORD_TABLE].entry_size);
+ tbl_scope_cb,
+ em_tables[TF_RECORD_TABLE].num_entries,
+ em_tables[TF_RECORD_TABLE].entry_size);
if (rc) {
- PMD_DRV_LOG(ERR,
- "%d TBL: Unable to allocate idx pools %s\n",
- dir,
+ TFP_DRV_LOG(ERR,
+ "%s TBL: Unable to allocate idx pools %s\n",
+ tf_dir_2_str(dir),
strerror(-rc));
goto cleanup_full;
}
struct tf_tbl_scope_cb *tbl_scope_cb;
struct tf_session *session;
- if (tfp == NULL || parms == NULL || parms->data == NULL)
- return -EINVAL;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
+ if (parms->data == NULL) {
+ TFP_DRV_LOG(ERR,
+ "%s, invalid parms->data\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
tbl_scope_id = parms->tbl_scope_id;
if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Table scope not allocated\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, Table scope not allocated\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
*/
tbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id);
- if (tbl_scope_cb == NULL)
- return -EINVAL;
+ if (tbl_scope_cb == NULL) {
+ TFP_DRV_LOG(ERR,
+ "%s, table scope error\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
/* External table, implicitly the Action table */
- base_addr = tf_em_get_table_page(tbl_scope_cb,
- parms->dir,
- offset,
- RECORD_TABLE);
+ base_addr = (void *)(uintptr_t)
+ hcapi_get_table_page(&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE], offset);
+
if (base_addr == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Base address lookup failed\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, Base address lookup failed\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
/* Internal table type processing */
rc = tf_set_tbl_entry_internal(tfp, parms);
if (rc) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Set failed, type:%d, rc:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Set failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
parms->type,
- rc);
+ strerror(-rc));
}
}
{
int rc = 0;
- if (tfp == NULL || parms == NULL)
- return -EINVAL;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
+ if (parms->type == TF_TBL_TYPE_EXT) {
+ /* Not supported, yet */
+ TFP_DRV_LOG(ERR,
+ "%s, External table type not supported\n",
+ tf_dir_2_str(parms->dir));
+
+ rc = -EOPNOTSUPP;
+ } else {
+ /* Internal table type processing */
+ rc = tf_get_tbl_entry_internal(tfp, parms);
+ if (rc)
+ TFP_DRV_LOG(ERR,
+ "%s, Get failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ strerror(-rc));
}
+ return rc;
+}
+
+/* API defined in tf_core.h */
+int
+tf_bulk_get_tbl_entry(struct tf *tfp,
+ struct tf_bulk_get_tbl_entry_parms *parms)
+{
+ int rc = 0;
+
+ TF_CHECK_PARMS_SESSION(tfp, parms);
+
if (parms->type == TF_TBL_TYPE_EXT) {
- PMD_DRV_LOG(ERR,
- "dir:%d, External table type not supported\n",
- parms->dir);
+ /* Not supported, yet */
+ TFP_DRV_LOG(ERR,
+ "%s, External table type not supported\n",
+ tf_dir_2_str(parms->dir));
rc = -EOPNOTSUPP;
} else {
/* Internal table type processing */
- rc = tf_get_tbl_entry_internal(tfp, parms);
+ rc = tf_bulk_get_tbl_entry_internal(tfp, parms);
if (rc)
- PMD_DRV_LOG(ERR,
- "dir:%d, Get failed, type:%d, rc:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Bulk get failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
parms->type,
- rc);
+ strerror(-rc));
}
return rc;
{
int rc;
- /* check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
rc = tf_alloc_eem_tbl_scope(tfp, parms);
{
int rc;
- /* check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
/* free table scope and all associated resources */
rc = tf_free_eem_tbl_scope_cb(tfp, parms);
struct tf_session *tfs;
#endif /* TF_SHADOW */
- /* Check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
/*
* No shadow copy support for external tables, allocate and return
*/
}
#if (TF_SHADOW == 1)
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
-
tfs = (struct tf_session *)(tfp->session->core_data);
/* Search the Shadow DB for requested element. If not found go
rc = tf_alloc_tbl_entry_pool_internal(tfp, parms);
if (rc)
- PMD_DRV_LOG(ERR, "dir%d, Alloc failed, rc:%d\n",
- parms->dir,
- rc);
+ TFP_DRV_LOG(ERR, "%s, Alloc failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
return rc;
}
struct tf_session *tfs;
#endif /* TF_SHADOW */
- /* Check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
+
/*
* No shadow of external tables so just free the entry
*/
}
#if (TF_SHADOW == 1)
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
-
tfs = (struct tf_session *)(tfp->session->core_data);
/* Search the Shadow DB for requested element. If not found go
rc = tf_free_tbl_entry_pool_internal(tfp, parms);
if (rc)
- PMD_DRV_LOG(ERR, "dir:%d, Alloc failed, rc:%d\n",
- parms->dir,
- rc);
+ TFP_DRV_LOG(ERR, "%s, Alloc failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
return rc;
}
static void
-tf_dump_link_page_table(struct tf_em_page_tbl *tp,
- struct tf_em_page_tbl *tp_next)
+tf_dump_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
+ struct hcapi_cfa_em_page_tbl *tp_next)
{
uint64_t *pg_va;
uint32_t i;
{
struct tf_session *session;
struct tf_tbl_scope_cb *tbl_scope_cb;
- struct tf_em_page_tbl *tp;
- struct tf_em_page_tbl *tp_next;
- struct tf_em_table *tbl;
+ struct hcapi_cfa_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp_next;
+ struct hcapi_cfa_em_table *tbl;
int i;
int j;
int dir;
for (dir = 0; dir < TF_DIR_MAX; dir++) {
printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
- for (j = KEY0_TABLE; j < MAX_TABLE; j++) {
+ for (j = TF_KEY0_TABLE; j < TF_MAX_TABLE; j++) {
tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
printf
("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",