*/
uint64_t (*hcapi_cfa_key_hash)(uint64_t *key_data, uint16_t bitlen);
-int hcapi_cfa_action_hw_op(struct hcapi_cfa_hwop *op,
- uint8_t *act_tbl,
- struct hcapi_cfa_data *act_obj);
-int hcapi_cfa_dev_hw_op(struct hcapi_cfa_hwop *op, uint16_t tbl_id,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_rm_register_client(hcapi_cfa_rm_data_t *data,
- const char *client_name,
- int *client_id);
-int hcapi_cfa_rm_unregister_client(hcapi_cfa_rm_data_t *data,
- int client_id);
-int hcapi_cfa_rm_query_resources(hcapi_cfa_rm_data_t *data,
- int client_id,
- uint16_t chnl_id,
- struct hcapi_cfa_resc_req_db *req_db);
-int hcapi_cfa_rm_query_resources_one(hcapi_cfa_rm_data_t *data,
- int clien_id,
- struct hcapi_cfa_resc_db *resc_db);
-int hcapi_cfa_rm_reserve_resources(hcapi_cfa_rm_data_t *data,
- int client_id,
- struct hcapi_cfa_resc_req_db *resc_req,
- struct hcapi_cfa_resc_db *resc_db);
-int hcapi_cfa_rm_release_resources(hcapi_cfa_rm_data_t *data,
- int client_id,
- struct hcapi_cfa_resc_req_db *resc_req,
- struct hcapi_cfa_resc_db *resc_db);
-int hcapi_cfa_rm_initialize(hcapi_cfa_rm_data_t *data);
-
-#if SUPPORT_CFA_HW_P4
-
-int hcapi_cfa_p4_dev_hw_op(struct hcapi_cfa_hwop *op, uint16_t tbl_id,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_l2ctxt_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_l2ctxtrmp_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_tcam_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_tcamrmp_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_wc_tcam_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_wc_tcam_rec_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_mirror_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *mirror);
-int hcapi_cfa_p4_global_cfg_hwop(struct hcapi_cfa_hwop *op,
- uint32_t type,
- struct hcapi_cfa_data *config);
-/* SUPPORT_CFA_HW_P4 */
-#elif SUPPORT_CFA_HW_P45
-int hcapi_cfa_p45_mirror_hwop(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_data *mirror);
-int hcapi_cfa_p45_global_cfg_hwop(struct hcapi_cfa_hwop *op,
- uint32_t type,
- struct hcapi_cfa_data *config);
-/* SUPPORT_CFA_HW_P45 */
-#endif
-
-/**
- * HCAPI CFA device HW operation function callback definition
- * This is standardized function callback hook to install different
- * CFA HW table programming function callback.
- */
-
-struct hcapi_cfa_tbl_cb {
- /**
- * This function callback provides the functionality to read/write
- * HW table entry from a HW table.
+ /** Hardware operation on the CFA EM key
+ *
+ * This API provides the functionality to program the exact match key
+ * data into exact match record memory.
*
* @param[in] op
* A pointer to the Hardware operation parameter
* All rights reserved.
*/
-#include "hcapi_cfa_defs.h"
-#include <errno.h>
-#include "assert.h"
+#include "hcapi_cfa.h"
const uint32_t crc32tbl[] = { /* CRC polynomial 0xedb88320 */
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
-
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
-#include <stdint.h>
#include <stddef.h>
+#include <stdint.h>
+
+#if !defined(__GNUC__)
+#pragma anon_unions
+#endif
#define CFA_BITS_PER_BYTE (8)
+#define CFA_BITS_PER_WORD (sizeof(uint32_t) * CFA_BITS_PER_BYTE)
#define __CFA_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
-#define CFA_ALIGN(x, a) __CFA_ALIGN_MASK(x, (a) - 1)
+#define CFA_ALIGN(x, a) __CFA_ALIGN_MASK((x), (a) - 1)
+#define CFA_ALIGN_256(x) CFA_ALIGN(x, 256)
#define CFA_ALIGN_128(x) CFA_ALIGN(x, 128)
#define CFA_ALIGN_32(x) CFA_ALIGN(x, 32)
-#define NUM_WORDS_ALIGN_32BIT(x) \
- (CFA_ALIGN_32(x) / (sizeof(uint32_t) * CFA_BITS_PER_BYTE))
-#define NUM_WORDS_ALIGN_128BIT(x) \
- (CFA_ALIGN_128(x) / (sizeof(uint32_t) * CFA_BITS_PER_BYTE))
+#define NUM_WORDS_ALIGN_32BIT(x) (CFA_ALIGN_32(x) / CFA_BITS_PER_WORD)
+#define NUM_WORDS_ALIGN_128BIT(x) (CFA_ALIGN_128(x) / CFA_BITS_PER_WORD)
+#define NUM_WORDS_ALIGN_256BIT(x) (CFA_ALIGN_256(x) / CFA_BITS_PER_WORD)
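+
+/*
+ * Editorial worked example (not part of the original macros): for a
+ * 100-bit field the helpers above round up as follows:
+ *   CFA_ALIGN_32(100)  = 128 bits -> NUM_WORDS_ALIGN_32BIT(100)  = 4 words
+ *   CFA_ALIGN_128(100) = 128 bits -> NUM_WORDS_ALIGN_128BIT(100) = 4 words
+ *   CFA_ALIGN_256(100) = 256 bits -> NUM_WORDS_ALIGN_256BIT(100) = 8 words
+ */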
+/* TODO: redefine according to chip variant */
#define CFA_GLOBAL_CFG_DATA_SZ (100)
+#ifndef SUPPORT_CFA_HW_P4
+#define SUPPORT_CFA_HW_P4 (0)
+#endif
+
+#ifndef SUPPORT_CFA_HW_P45
+#define SUPPORT_CFA_HW_P45 (0)
+#endif
+
+#ifndef SUPPORT_CFA_HW_P58
+#define SUPPORT_CFA_HW_P58 (0)
+#endif
+
#if SUPPORT_CFA_HW_ALL
#include "hcapi_cfa_p4.h"
#include "hcapi_cfa_p58.h"
#endif /* SUPPORT_CFA_HW_ALL */
+/*
+ * Hashing defines
+ */
+#define HCAPI_CFA_LKUP_SEED_MEM_SIZE 512
+
+/* CRC32i support for Key0 hash */
+#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8))
+#define crc32(x, y) crc32i(~0, x, y)
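+
+/*
+ * Editorial sketch, not part of the patch: the crc32() wrapper above
+ * assumes a crc32i(crc, buf, len) style helper that folds the buffer one
+ * byte at a time through the crc32tbl lookup table via ucrc32(). A typical
+ * table-driven form would look like the illustration below (the name
+ * crc32i_sketch is hypothetical).
+ */
+static inline uint32_t crc32i_sketch(uint32_t init, const uint8_t *buf,
+				     uint32_t len)
+{
+	uint32_t crc = init;
+	uint32_t i;
+
+	/* ucrc32() indexes crc32tbl[] with the low byte of (crc ^ ch) */
+	for (i = 0; i < len; i++)
+		crc = ucrc32(buf[i], crc);
+
+	return crc;
+}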
+
/**
* CFA HW version definition
*/
HCAPI_CFA_DIR_MAX = 2
};
-/*
- * Hashing defines
- */
-#define HCAPI_CFA_LKUP_SEED_MEM_SIZE 512
-
-/* CRC32i support for Key0 hash */
-#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8))
-#define crc32(x, y) crc32i(~0, x, y)
-
-
/**
* CFA HW OPCODE definition
*/
enum hcapi_cfa_hwops {
- HCAPI_CFA_HWOPS_PUT, /**< Write to HW operation */
- HCAPI_CFA_HWOPS_GET, /**< Read from HW operation */
- HCAPI_CFA_HWOPS_ADD, /**< For operations which require more than simple
- * writes to HW, this operation is used. The
- * distinction with this operation when compared
- * to the PUT ops is that this operation is used
- * in conjunction with the HCAPI_CFA_HWOPS_DEL
- * op to remove the operations issued by the
- * ADD OP.
- */
- HCAPI_CFA_HWOPS_DEL, /**< This issues operations to clear the hardware.
- * This operation is used in conjunction
- * with the HCAPI_CFA_HWOPS_ADD op and is the
- * way to undo/clear the ADD op.
- */
+ HCAPI_CFA_HWOPS_PUT, /**< Write to HW operation */
+ HCAPI_CFA_HWOPS_GET, /**< Read from HW operation */
+ HCAPI_CFA_HWOPS_ADD, /**<
+ * For operations which require more than
+ * simple writes to HW, this operation is
+ * used. The distinction with this operation
+ * when compared to the PUT ops is that this
+ * operation is used in conjunction with
+ * the HCAPI_CFA_HWOPS_DEL op to remove
+ * the operations issued by the ADD OP.
+ */
+ HCAPI_CFA_HWOPS_DEL, /**<
+ * Besides deleting from the hardware, this
+ * operation also undoes the add operation
+ * performed by the HCAPI_CFA_HWOPS_ADD op.
+ */
+ HCAPI_CFA_HWOPS_EVICT, /**< This operation is used to evict entries from
+ * CFA cache memories. This operation is only
+ * applicable to tables that use CFA caches.
+ */
HCAPI_CFA_HWOPS_MAX
};
*/
enum hcapi_cfa_key_ctrlops {
HCAPI_CFA_KEY_CTRLOPS_INSERT, /**< insert control bits */
- HCAPI_CFA_KEY_CTRLOPS_STRIP, /**< strip control bits */
+ HCAPI_CFA_KEY_CTRLOPS_STRIP, /**< strip control bits */
HCAPI_CFA_KEY_CTRLOPS_MAX
};
-
/**
* CFA HW definition
*/
/** [in] physical offset to the HW table for the data to be
* written to. If this is an array of registers, this is the
* index into the array of registers. For writing keys, this
- * is the byte offset into the memory where the key should be
+ * is the byte pointer into the memory where the key should be
* written.
*/
union {
uint32_t index;
uint32_t byte_offset;
- } u;
+ };
/** [in] HW data buffer pointer */
uint8_t *data;
- /** [in] HW data mask buffer pointer */
+ /** [in] HW data mask buffer pointer.
+ * When the CFA data is a FKB and the data_mask pointer
+ * is NULL, then the default mask enabling all bits will
+ * be used.
+ */
uint8_t *data_mask;
- /** [in] size of the HW data buffer in bytes */
+ /** [in/out] size of the HW data buffer in bytes
+ */
uint16_t data_sz;
};
TF_KEY1_TABLE,
TF_RECORD_TABLE,
TF_EFC_TABLE,
+ TF_ACTION_TABLE,
+ TF_EM_LKUP_TABLE,
TF_MAX_TABLE
};
struct hcapi_cfa_em_page_tbl {
- uint32_t pg_count;
- uint32_t pg_size;
- void **pg_va_tbl;
- uint64_t *pg_pa_tbl;
+ uint32_t pg_count;
+ uint32_t pg_size;
+ void **pg_va_tbl;
+ uint64_t *pg_pa_tbl;
};
struct hcapi_cfa_em_table {
- int type;
- uint32_t num_entries;
- uint16_t ctx_id;
- uint32_t entry_size;
- int num_lvl;
- uint32_t page_cnt[TF_PT_LVL_MAX];
- uint64_t num_data_pages;
- void *l0_addr;
- uint64_t l0_dma_addr;
- struct hcapi_cfa_em_page_tbl pg_tbl[TF_PT_LVL_MAX];
+ int type;
+ uint32_t num_entries;
+ uint16_t ctx_id;
+ uint32_t entry_size;
+ int num_lvl;
+ uint32_t page_cnt[TF_PT_LVL_MAX];
+ uint64_t num_data_pages;
+ void *l0_addr;
+ uint64_t l0_dma_addr;
+ struct hcapi_cfa_em_page_tbl pg_tbl[TF_PT_LVL_MAX];
};
struct hcapi_cfa_em_ctx_mem_info {
- struct hcapi_cfa_em_table em_tables[TF_MAX_TABLE];
+ struct hcapi_cfa_em_table em_tables[TF_MAX_TABLE];
};
/*********************** Truflow end ****************************/
-
/**
* CFA HW key table definition
*
* applicable for newer chip
*/
uint8_t *base1;
+ /** [in] Optional - If the table is managed by a Backing Store
+ * database, then this object can be used to configure the EM Key.
+ */
+ struct hcapi_cfa_bs_db *bs_db;
/** [in] Page size for EEM tables */
uint32_t page_size;
};
struct hcapi_cfa_key_obj {
/** [in] pointer to the key data buffer */
uint32_t *data;
- /** [in] buffer len in bits */
+ /** [in] buffer len in bytes */
uint32_t len;
/** [in] Pointer to the key layout */
struct hcapi_cfa_key_layout *layout;
uint8_t *data;
/** [in] size of the key in bytes */
uint16_t size;
+ /** [in] optional table scope ID */
+ uint8_t tbl_scope;
+ /** [in] the fid owner of the key. This is stored with the
+ * bucket and can be retrieved by the caller later via the
+ * GET HW OP.
+ */
+ uint64_t metadata;
};
/**
struct hcapi_cfa_key_loc {
/** [out] on-chip EM bucket offset or off-chip EM bucket mem pointer */
uint64_t bucket_mem_ptr;
+ /** [out] off-chip EM key offset mem pointer */
+ uint64_t mem_ptr;
+ /** [out] index within the array of the EM buckets */
+ uint32_t bucket_mem_idx;
/** [out] index within the EM bucket */
uint8_t bucket_idx;
+ /** [out] index within the EM records */
+ uint32_t mem_idx;
+};
+
+/**
+ * Action record info
+ */
+struct hcapi_cfa_action_addr {
+ /** [in] action SRAM block ID for on-chip action records or table
+ * scope of the action backing store
+ */
+ uint16_t blk_id;
+ /** [in] ar_id or cache line aligned address offset for the action
+ * record
+ */
+ uint32_t offset;
+};
+
+/**
+ * Action data definition
+ */
+struct hcapi_cfa_action_data {
+ /** [in] action record addr info for on-chip action records */
+ struct hcapi_cfa_action_addr addr;
+ /** [in/out] pointer to the action data buffer */
+ uint32_t *data;
+ /** [in] action data buffer len in bytes */
+ uint32_t len;
+};
+
+/**
+ * Action object definition
+ */
+struct hcapi_cfa_action_obj {
+ /** [in] pointer to the action data buffer */
+ uint32_t *data;
+ /** [in] buffer len in bytes */
+ uint32_t len;
+ /** [in] pointer to the action layout */
+ struct hcapi_cfa_action_layout *layout;
};
/**
* Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
-#include <inttypes.h>
+
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include "lookup3.h"
#include "rand.h"
+#include "hcapi_cfa.h"
#include "hcapi_cfa_defs.h"
static uint32_t hcapi_cfa_lkup_lkup3_init_cfg;
static inline uint32_t SWAP_WORDS32(uint32_t val32)
{
- return (((val32 & 0x0000ffff) << 16) |
- ((val32 & 0xffff0000) >> 16));
+ return (((val32 & 0x0000ffff) << 16) | ((val32 & 0xffff0000) >> 16));
}
static void hcapi_cfa_seeds_init(void)
if (!(val2 & 0x1))
val1 = hcapi_cfa_crc32i(~val1, temp, 4);
- val1 = hcapi_cfa_crc32i(~val1,
- (key - (CFA_P4_EEM_KEY_MAX_SIZE - 1)),
- CFA_P4_EEM_KEY_MAX_SIZE);
+ val1 = hcapi_cfa_crc32i(~val1, (key - (CFA_P4_EEM_KEY_MAX_SIZE - 1)),
+ CFA_P4_EEM_KEY_MAX_SIZE);
/* End with seed */
if (val2 & 0x1)
{
uint32_t val1;
- val1 = hashword(((const uint32_t *)(uintptr_t *)in_key) + 1,
- CFA_P4_EEM_KEY_MAX_SIZE / (sizeof(uint32_t)),
- hcapi_cfa_lkup_lkup3_init_cfg);
+ val1 = hashword(((uint32_t *)in_key) + 1,
+ CFA_P4_EEM_KEY_MAX_SIZE / (sizeof(uint32_t)),
+ hcapi_cfa_lkup_lkup3_init_cfg);
return val1;
}
-
-uint64_t hcapi_get_table_page(struct hcapi_cfa_em_table *mem,
- uint32_t page)
+uint64_t hcapi_get_table_page(struct hcapi_cfa_em_table *mem, uint32_t page)
{
int level = 0;
uint64_t addr;
*/
level = mem->num_lvl - 1;
- addr = (uintptr_t)mem->pg_tbl[level].pg_va_tbl[page];
+ addr = (uint64_t)mem->pg_tbl[level].pg_va_tbl[page];
return addr;
}
if (!hcapi_cfa_lkup_init)
hcapi_cfa_seeds_init();
- key0_hash = hcapi_cfa_crc32_hash(((uint8_t *)key_data) +
- (bitlen / 8) - 1);
+ key0_hash =
+ hcapi_cfa_crc32_hash(((uint8_t *)key_data) + (bitlen / 8) - 1);
key1_hash = hcapi_cfa_lookup3_hash((uint8_t *)key_data);
return ((uint64_t)key0_hash) << 32 | (uint64_t)key1_hash;
}
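
/*
 * Editorial note (illustration only, not part of the patch): the combined
 * value above packs the CRC32-based key0 hash in the upper 32 bits and the
 * lookup3-based key1 hash in the lower 32 bits, so a caller can recover
 * them as:
 *
 *	key0 = (uint32_t)(hash >> 32);
 *	key1 = (uint32_t)(hash & 0xffffffff);
 */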
-static int hcapi_cfa_key_hw_op_put(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_key_data *key_obj)
+static int hcapi_cfa_p4_key_hw_op_put(struct hcapi_cfa_hwop *op,
+ struct hcapi_cfa_key_data *key_obj)
{
int rc = 0;
- memcpy((uint8_t *)(uintptr_t)op->hw.base_addr +
- key_obj->offset,
- key_obj->data,
- key_obj->size);
+ memcpy((uint8_t *)(uintptr_t)op->hw.base_addr + key_obj->offset,
+ key_obj->data, key_obj->size);
return rc;
}
-static int hcapi_cfa_key_hw_op_get(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_key_data *key_obj)
+static int hcapi_cfa_p4_key_hw_op_get(struct hcapi_cfa_hwop *op,
+ struct hcapi_cfa_key_data *key_obj)
{
int rc = 0;
memcpy(key_obj->data,
- (uint8_t *)(uintptr_t)op->hw.base_addr +
- key_obj->offset,
+ (uint8_t *)(uintptr_t)op->hw.base_addr + key_obj->offset,
key_obj->size);
return rc;
}
-static int hcapi_cfa_key_hw_op_add(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_key_data *key_obj)
+static int hcapi_cfa_p4_key_hw_op_add(struct hcapi_cfa_hwop *op,
+ struct hcapi_cfa_key_data *key_obj)
{
int rc = 0;
struct cfa_p4_eem_64b_entry table_entry;
* Is entry free?
*/
memcpy(&table_entry,
- (uint8_t *)(uintptr_t)op->hw.base_addr +
- key_obj->offset,
+ (uint8_t *)(uintptr_t)op->hw.base_addr + key_obj->offset,
key_obj->size);
/*
if (table_entry.hdr.word1 & (1 << CFA_P4_EEM_ENTRY_VALID_SHIFT))
return -1;
- memcpy((uint8_t *)(uintptr_t)op->hw.base_addr +
- key_obj->offset,
- key_obj->data,
- key_obj->size);
+ memcpy((uint8_t *)(uintptr_t)op->hw.base_addr + key_obj->offset,
+ key_obj->data, key_obj->size);
return rc;
}
-static int hcapi_cfa_key_hw_op_del(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_key_data *key_obj)
+static int hcapi_cfa_p4_key_hw_op_del(struct hcapi_cfa_hwop *op,
+ struct hcapi_cfa_key_data *key_obj)
{
int rc = 0;
struct cfa_p4_eem_64b_entry table_entry;
* Read entry
*/
memcpy(&table_entry,
- (uint8_t *)(uintptr_t)op->hw.base_addr +
- key_obj->offset,
+ (uint8_t *)(uintptr_t)op->hw.base_addr + key_obj->offset,
key_obj->size);
/*
* before deleting the entry.
*/
if (key_obj->data != NULL) {
- if (memcmp(&table_entry,
- key_obj->data,
+ if (memcmp(&table_entry, key_obj->data,
key_obj->size) != 0)
return -1;
}
return -1;
}
-
/*
* Delete entry
*/
- memset((uint8_t *)(uintptr_t)op->hw.base_addr +
- key_obj->offset,
- 0,
- key_obj->size);
+ memset((uint8_t *)(uintptr_t)op->hw.base_addr + key_obj->offset, 0,
+        key_obj->size);
return rc;
}
-
/** Apporiximation of hcapi_cfa_key_hw_op()
*
*
*/
-int hcapi_cfa_key_hw_op(struct hcapi_cfa_hwop *op,
- struct hcapi_cfa_key_tbl *key_tbl,
- struct hcapi_cfa_key_data *key_obj,
- struct hcapi_cfa_key_loc *key_loc)
+static int hcapi_cfa_p4_key_hw_op(struct hcapi_cfa_hwop *op,
+ struct hcapi_cfa_key_tbl *key_tbl,
+ struct hcapi_cfa_key_data *key_obj,
+ struct hcapi_cfa_key_loc *key_loc)
{
int rc = 0;
+ struct hcapi_cfa_em_table *em_tbl;
+ uint32_t page;
- if (op == NULL ||
- key_tbl == NULL ||
- key_obj == NULL ||
- key_loc == NULL)
+ if (op == NULL || key_tbl == NULL || key_obj == NULL || key_loc == NULL)
return -1;
- op->hw.base_addr =
- hcapi_get_table_page((struct hcapi_cfa_em_table *)
- key_tbl->base0,
- key_obj->offset / key_tbl->page_size);
+ page = key_obj->offset / key_tbl->page_size;
+ em_tbl = (struct hcapi_cfa_em_table *)key_tbl->base0;
+ op->hw.base_addr = hcapi_get_table_page(em_tbl, page);
/* Offset is adjusted to be the offset into the page */
key_obj->offset = key_obj->offset % key_tbl->page_size;
switch (op->opcode) {
case HCAPI_CFA_HWOPS_PUT: /**< Write to HW operation */
- rc = hcapi_cfa_key_hw_op_put(op, key_obj);
+ rc = hcapi_cfa_p4_key_hw_op_put(op, key_obj);
break;
case HCAPI_CFA_HWOPS_GET: /**< Read from HW operation */
- rc = hcapi_cfa_key_hw_op_get(op, key_obj);
+ rc = hcapi_cfa_p4_key_hw_op_get(op, key_obj);
break;
case HCAPI_CFA_HWOPS_ADD:
- /**< For operations which require more than
- * simple writes to HW, this operation is used. The
+ /**< For operations which require more than simple
+ * writes to HW, this operation is used. The
* distinction with this operation when compared
* to the PUT ops is that this operation is used
* in conjunction with the HCAPI_CFA_HWOPS_DEL
* ADD OP.
*/
- rc = hcapi_cfa_key_hw_op_add(op, key_obj);
+ rc = hcapi_cfa_p4_key_hw_op_add(op, key_obj);
break;
case HCAPI_CFA_HWOPS_DEL:
- rc = hcapi_cfa_key_hw_op_del(op, key_obj);
+ rc = hcapi_cfa_p4_key_hw_op_del(op, key_obj);
break;
default:
rc = -1;
return rc;
}
+
+const struct hcapi_cfa_devops cfa_p4_devops = {
+ .hcapi_cfa_key_hash = hcapi_cfa_p4_key_hash,
+ .hcapi_cfa_key_hw_op = hcapi_cfa_p4_key_hw_op,
+};
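+
+/*
+ * Editorial usage sketch, not part of the patch: a caller that has picked
+ * up a per-chip devops table (such as cfa_p4_devops above) would drive an
+ * EM insert and its matching removal roughly as below. The function name,
+ * the em_tbl/key buffers and the offset derivation are placeholders.
+ */
+static int example_em_add_then_del(const struct hcapi_cfa_devops *devops,
+				   struct hcapi_cfa_em_table *em_tbl,
+				   uint64_t *key, uint16_t key_bitlen,
+				   uint32_t page_size, uint32_t key_offset)
+{
+	struct hcapi_cfa_hwop op = { 0 };
+	struct hcapi_cfa_key_tbl key_tbl = { 0 };
+	struct hcapi_cfa_key_data key_obj = { 0 };
+	struct hcapi_cfa_key_loc key_loc = { 0 };
+	uint64_t hash;
+	int rc;
+
+	/* 64b hash: CRC32-based key0 in the high word, lookup3 key1 low */
+	hash = devops->hcapi_cfa_key_hash(key, key_bitlen);
+	(void)hash; /* would normally feed the bucket/offset derivation */
+
+	key_tbl.base0 = (uint8_t *)em_tbl;
+	key_tbl.page_size = page_size;
+	key_obj.data = (uint8_t *)key;
+	key_obj.size = key_bitlen / 8;
+	key_obj.offset = key_offset;
+
+	/* ADD writes the record only if the target slot is not valid yet */
+	op.opcode = HCAPI_CFA_HWOPS_ADD;
+	rc = devops->hcapi_cfa_key_hw_op(&op, &key_tbl, &key_obj, &key_loc);
+	if (rc)
+		return rc;
+
+	/* DEL undoes the ADD; offset is re-set because the op rebases it
+	 * to be page relative on the way in.
+	 */
+	key_obj.offset = key_offset;
+	op.opcode = HCAPI_CFA_HWOPS_DEL;
+	return devops->hcapi_cfa_key_hw_op(&op, &key_tbl, &key_obj, &key_loc);
+}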
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2021 Broadcom
+ * Copyright(c) 2019-2021 Broadcom
* All rights reserved.
*/
enum cfa_p4_mac_sel_mode mode;
};
-/**
- * CFA action layout definition
- */
-
-#define CFA_P4_ACTION_MAX_LAYOUT_SIZE 184
-
-/**
- * Action object template structure
- *
- * Template structure presents data fields that are necessary to know
- * at the beginning of Action Builder (AB) processing. Like before the
- * AB compilation. One such example could be a template that is
- * flexible in size (Encap Record) and the presence of these fields
- * allows for determining the template size as well as where the
- * fields are located in the record.
- *
- * The template may also present fields that are not made visible to
- * the caller by way of the action fields.
- *
- * Template fields also allow for additional checking on user visible
- * fields. One such example could be the encap pointer behavior on a
- * CFA_P4_ACT_OBJ_TYPE_ACT or CFA_P4_ACT_OBJ_TYPE_ACT_SRAM.
- */
-struct cfa_p4_action_template {
- /** Action Object type
- *
- * Controls the type of the Action Template
- */
- enum {
- /** Select this type to build an Action Record Object
- */
- CFA_P4_ACT_OBJ_TYPE_ACT,
- /** Select this type to build an Action Statistics
- * Object
- */
- CFA_P4_ACT_OBJ_TYPE_STAT,
- /** Select this type to build a SRAM Action Record
- * Object.
- */
- CFA_P4_ACT_OBJ_TYPE_ACT_SRAM,
- /** Select this type to build a SRAM Action
- * Encapsulation Object.
- */
- CFA_P4_ACT_OBJ_TYPE_ENCAP_SRAM,
- /** Select this type to build a SRAM Action Modify
- * Object, with IPv4 capability.
- */
- /* In case of Stingray the term Modify is used for the 'NAT
- * action'. Action builder is leveraged to fill in the NAT
- * object which then can be referenced by the action
- * record.
- */
- CFA_P4_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM,
- /** Select this type to build a SRAM Action Source
- * Property Object.
- */
- /* In case of Stingray this is not a 'pure' action record.
- * Action builder is leveraged to full in the Source Property
- * object which can then be referenced by the action
- * record.
- */
- CFA_P4_ACT_OBJ_TYPE_SRC_PROP_SRAM,
- /** Select this type to build a SRAM Action Statistics
- * Object
- */
- CFA_P4_ACT_OBJ_TYPE_STAT_SRAM,
- } obj_type;
-
- /** Action Control
- *
- * Controls the internals of the Action Template
- *
- * act is valid when:
- * (obj_type == CFA_P4_ACT_OBJ_TYPE_ACT)
- */
- /*
- * Stat and encap are always inline for EEM as table scope
- * allocation does not allow for separate Stats allocation,
- * but has the xx_inline flags as to be forward compatible
- * with Stingray 2, always treated as TRUE.
- */
- struct {
- /** Set to CFA_HCAPI_TRUE to enable statistics
- */
- uint8_t stat_enable;
- /** Set to CFA_HCAPI_TRUE to enable statistics to be inlined
- */
- uint8_t stat_inline;
-
- /** Set to CFA_HCAPI_TRUE to enable encapsulation
- */
- uint8_t encap_enable;
- /** Set to CFA_HCAPI_TRUE to enable encapsulation to be inlined
- */
- uint8_t encap_inline;
- } act;
-
- /** Modify Setting
- *
- * Controls the type of the Modify Action the template is
- * describing
- *
- * modify is valid when:
- * (obj_type == CFA_P4_ACT_OBJ_TYPE_MODIFY_SRAM)
- */
- enum {
- /** Set to enable Modify of Source IPv4 Address
- */
- CFA_P4_MR_REPLACE_SOURCE_IPV4 = 0,
- /** Set to enable Modify of Destination IPv4 Address
- */
- CFA_P4_MR_REPLACE_DEST_IPV4
- } modify;
-
- /** Encap Control
- * Controls the type of encapsulation the template is
- * describing
- *
- * encap is valid when:
- * ((obj_type == CFA_P4_ACT_OBJ_TYPE_ACT) &&
- * act.encap_enable) ||
- * ((obj_type == CFA_P4_ACT_OBJ_TYPE_SRC_PROP_SRAM)
- */
- struct {
- /* Direction is required as Stingray Encap on RX is
- * limited to l2 and VTAG only.
- */
- /** Receive or Transmit direction
- */
- uint8_t direction;
- /** Set to CFA_HCAPI_TRUE to enable L2 capability in the
- * template
- */
- uint8_t l2_enable;
- /** vtag controls the Encap Vector - VTAG Encoding, 4 bits
- *
- * <ul>
- * <li> CFA_P4_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN
- * Tags applied
- * <li> CFA_P4_ACT_ENCAP_VTAGS_PUSH_1, adds capability to
- * set 1 VLAN Tag. Action Template compile adds
- * the following field to the action object
- * ::TF_ER_VLAN1
- * <li> CFA_P4_ACT_ENCAP_VTAGS_PUSH_2, adds capability to
- * set 2 VLAN Tags. Action Template compile adds
- * the following fields to the action object
- * ::TF_ER_VLAN1 and ::TF_ER_VLAN2
- * </ul>
- */
- enum { CFA_P4_ACT_ENCAP_VTAGS_PUSH_0 = 0,
- CFA_P4_ACT_ENCAP_VTAGS_PUSH_1,
- CFA_P4_ACT_ENCAP_VTAGS_PUSH_2 } vtag;
-
- /*
- * The remaining fields are NOT supported when
- * direction is RX and ((obj_type ==
- * CFA_P4_ACT_OBJ_TYPE_ACT) && act.encap_enable).
- * ab_compile_layout will perform the checking and
- * skip remaining fields.
- */
- /** L3 Encap controls the Encap Vector - L3 Encoding,
- * 3 bits. Defines the type of L3 Encapsulation the
- * template is describing.
- * <ul>
- * <li> CFA_P4_ACT_ENCAP_L3_NONE, default, no L3
- * Encapsulation processing.
- * <li> CFA_P4_ACT_ENCAP_L3_IPV4, enables L3 IPv4
- * Encapsulation.
- * <li> CFA_P4_ACT_ENCAP_L3_IPV6, enables L3 IPv6
- * Encapsulation.
- * <li> CFA_P4_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS
- * 8847 Encapsulation.
- * <li> CFA_P4_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS
- * 8848 Encapsulation.
- * </ul>
- */
- enum {
- /** Set to disable any L3 encapsulation
- * processing, default
- */
- CFA_P4_ACT_ENCAP_L3_NONE = 0,
- /** Set to enable L3 IPv4 encapsulation
- */
- CFA_P4_ACT_ENCAP_L3_IPV4 = 4,
- /** Set to enable L3 IPv6 encapsulation
- */
- CFA_P4_ACT_ENCAP_L3_IPV6 = 5,
- /** Set to enable L3 MPLS 8847 encapsulation
- */
- CFA_P4_ACT_ENCAP_L3_MPLS_8847 = 6,
- /** Set to enable L3 MPLS 8848 encapsulation
- */
- CFA_P4_ACT_ENCAP_L3_MPLS_8848 = 7
- } l3;
-
-#define CFA_P4_ACT_ENCAP_MAX_MPLS_LABELS 8
- /** 1-8 labels, valid when
- * (l3 == CFA_P4_ACT_ENCAP_L3_MPLS_8847) ||
- * (l3 == CFA_P4_ACT_ENCAP_L3_MPLS_8848)
- *
- * MAX number of MPLS Labels 8.
- */
- uint8_t l3_num_mpls_labels;
-
- /** Set to CFA_HCAPI_TRUE to enable L4 capability in the
- * template.
- *
- * CFA_HCAPI_TRUE adds ::TF_EN_UDP_SRC_PORT and
- * ::TF_EN_UDP_DST_PORT to the template.
- */
- uint8_t l4_enable;
-
- /** Tunnel Encap controls the Encap Vector - Tunnel
- * Encap, 3 bits. Defines the type of Tunnel
- * encapsulation the template is describing
- * <ul>
- * <li> CFA_P4_ACT_ENCAP_TNL_NONE, default, no Tunnel
- * Encapsulation processing.
- * <li> CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL
- * <li> CFA_P4_ACT_ENCAP_TNL_VXLAN. NOTE: Expects
- * l4_enable set to CFA_P4_TRUE;
- * <li> CFA_P4_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable
- * set to CFA_P4_TRUE;
- * <li> CFA_P4_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if
- * l4_enable set to CFA_HCAPI_FALSE.
- * <li> CFA_P4_ACT_ENCAP_TNL_GRE.NOTE: only valid if
- * l4_enable set to CFA_HCAPI_FALSE.
- * <li> CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4
- * <li> CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
- * </ul>
- */
- enum {
- /** Set to disable Tunnel header encapsulation
- * processing, default
- */
- CFA_P4_ACT_ENCAP_TNL_NONE = 0,
- /** Set to enable Tunnel Generic Full header
- * encapsulation
- */
- CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL,
- /** Set to enable VXLAN header encapsulation
- */
- CFA_P4_ACT_ENCAP_TNL_VXLAN,
- /** Set to enable NGE (VXLAN2) header encapsulation
- */
- CFA_P4_ACT_ENCAP_TNL_NGE,
- /** Set to enable NVGRE header encapsulation
- */
- CFA_P4_ACT_ENCAP_TNL_NVGRE,
- /** Set to enable GRE header encapsulation
- */
- CFA_P4_ACT_ENCAP_TNL_GRE,
- /** Set to enable Generic header after Tunnel
- * L4 encapsulation
- */
- CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4,
- /** Set to enable Generic header after Tunnel
- * encapsulation
- */
- CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
- } tnl;
-
- /** Number of bytes of generic tunnel header,
- * valid when
- * (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL) ||
- * (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) ||
- * (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL)
- */
- uint8_t tnl_generic_size;
- /** Number of 32b words of nge options,
- * valid when
- * (tnl == CFA_P4_ACT_ENCAP_TNL_NGE)
- */
- uint8_t tnl_nge_op_len;
- /* Currently not planned */
- /* Custom Header */
- /* uint8_t custom_enable; */
- } encap;
-};
-
/**
* Enumeration of SRAM entry types, used for allocation of
* fixed SRAM entities. The memory model for CFA HCAPI
{
uint32_t val1;
- val1 = hashword(((uint32_t *)in_key) + 1,
+ val1 = hashword(((uint32_t *)in_key),
CFA_P58_EEM_KEY_MAX_SIZE / (sizeof(uint32_t)),
hcapi_cfa_lkup_lkup3_init_cfg);
#define CFA_P58_EEM_KEY_MAX_SIZE 80
#define CFA_P58_EEM_KEY_RECORD_SIZE 80
+#define CFA_P58_EM_FKB_NUM_WORDS 4
+#define CFA_P58_EM_FKB_NUM_ENTRIES 64
+#define CFA_P58_WC_TCAM_FKB_NUM_WORDS 4
+#define CFA_P58_WC_TCAM_FKB_NUM_ENTRIES 64
+
/** CFA phase 5.8 fix formatted table(layout) ID definition
*
*/
CFA_P58_TBL_PROF_PARIF_DFLT_ACT_REC_PTR,
/** Error Profile TCAM Miss Action Record Pointer Table */
CFA_P58_TBL_PROF_PARIF_ERR_ACT_REC_PTR,
- /** SR2 VNIC/SVIF Properties Table */
+ /** VNIC/SVIF Properties Table */
CFA_P58_TBL_VSPT,
CFA_P58_TBL_MAX
};
enum cfa_p58_mac_sel_mode mode;
};
-/**
- * CFA action layout definition
- */
-
-#define CFA_P58_ACTION_MAX_LAYOUT_SIZE 184
-
-/**
- * Action object template structure
- *
- * Template structure presents data fields that are necessary to know
- * at the beginning of Action Builder (AB) processing. Like before the
- * AB compilation. One such example could be a template that is
- * flexible in size (Encap Record) and the presence of these fields
- * allows for determining the template size as well as where the
- * fields are located in the record.
- *
- * The template may also present fields that are not made visible to
- * the caller by way of the action fields.
- *
- * Template fields also allow for additional checking on user visible
- * fields. One such example could be the encap pointer behavior on a
- * CFA_P58_ACT_OBJ_TYPE_ACT or CFA_P58_ACT_OBJ_TYPE_ACT_SRAM.
- */
-struct cfa_p58_action_template {
- /** Action Object type
- *
- * Controls the type of the Action Template
- */
- enum {
- /** Select this type to build an Action Record Object
- */
- CFA_P58_ACT_OBJ_TYPE_ACT,
- /** Select this type to build an Action Statistics
- * Object
- */
- CFA_P58_ACT_OBJ_TYPE_STAT,
- /** Select this type to build a SRAM Action Record
- * Object.
- */
- CFA_P58_ACT_OBJ_TYPE_ACT_SRAM,
- /** Select this type to build a SRAM Action
- * Encapsulation Object.
- */
- CFA_P58_ACT_OBJ_TYPE_ENCAP_SRAM,
- /** Select this type to build a SRAM Action Modify
- * Object, with IPv4 capability.
- */
- /* In case of Stingray the term Modify is used for the 'NAT
- * action'. Action builder is leveraged to fill in the NAT
- * object which then can be referenced by the action
- * record.
- */
- CFA_P58_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM,
- /** Select this type to build a SRAM Action Source
- * Property Object.
- */
- /* In case of Stingray this is not a 'pure' action record.
- * Action builder is leveraged to full in the Source Property
- * object which can then be referenced by the action
- * record.
- */
- CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM,
- /** Select this type to build a SRAM Action Statistics
- * Object
- */
- CFA_P58_ACT_OBJ_TYPE_STAT_SRAM,
- } obj_type;
-
- /** Action Control
- *
- * Controls the internals of the Action Template
- *
- * act is valid when:
- * (obj_type == CFA_P58_ACT_OBJ_TYPE_ACT)
- */
- /*
- * Stat and encap are always inline for EEM as table scope
- * allocation does not allow for separate Stats allocation,
- * but has the xx_inline flags as to be forward compatible
- * with Stingray 2, always treated as TRUE.
- */
- struct {
- /** Set to CFA_HCAPI_TRUE to enable statistics
- */
- uint8_t stat_enable;
- /** Set to CFA_HCAPI_TRUE to enable statistics to be inlined
- */
- uint8_t stat_inline;
-
- /** Set to CFA_HCAPI_TRUE to enable encapsulation
- */
- uint8_t encap_enable;
- /** Set to CFA_HCAPI_TRUE to enable encapsulation to be inlined
- */
- uint8_t encap_inline;
- } act;
-
- /** Modify Setting
- *
- * Controls the type of the Modify Action the template is
- * describing
- *
- * modify is valid when:
- * (obj_type == CFA_P58_ACT_OBJ_TYPE_MODIFY_SRAM)
- */
- enum {
- /** Set to enable Modify of Source IPv4 Address
- */
- CFA_P58_MR_REPLACE_SOURCE_IPV4 = 0,
- /** Set to enable Modify of Destination IPv4 Address
- */
- CFA_P58_MR_REPLACE_DEST_IPV4
- } modify;
-
- /** Encap Control
- * Controls the type of encapsulation the template is
- * describing
- *
- * encap is valid when:
- * ((obj_type == CFA_P58_ACT_OBJ_TYPE_ACT) &&
- * act.encap_enable) ||
- * ((obj_type == CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM)
- */
- struct {
- /* Direction is required as Stingray Encap on RX is
- * limited to l2 and VTAG only.
- */
- /** Receive or Transmit direction
- */
- uint8_t direction;
- /** Set to CFA_HCAPI_TRUE to enable L2 capability in the
- * template
- */
- uint8_t l2_enable;
- /** vtag controls the Encap Vector - VTAG Encoding, 4 bits
- *
- * <ul>
- * <li> CFA_P58_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN
- * Tags applied
- * <li> CFA_P58_ACT_ENCAP_VTAGS_PUSH_1, adds capability to
- * set 1 VLAN Tag. Action Template compile adds
- * the following field to the action object
- * ::TF_ER_VLAN1
- * <li> CFA_P58_ACT_ENCAP_VTAGS_PUSH_2, adds capability to
- * set 2 VLAN Tags. Action Template compile adds
- * the following fields to the action object
- * ::TF_ER_VLAN1 and ::TF_ER_VLAN2
- * </ul>
- */
- enum { CFA_P58_ACT_ENCAP_VTAGS_PUSH_0 = 0,
- CFA_P58_ACT_ENCAP_VTAGS_PUSH_1,
- CFA_P58_ACT_ENCAP_VTAGS_PUSH_2 } vtag;
-
- /*
- * The remaining fields are NOT supported when
- * direction is RX and ((obj_type ==
- * CFA_P58_ACT_OBJ_TYPE_ACT) && act.encap_enable).
- * ab_compile_layout will perform the checking and
- * skip remaining fields.
- */
- /** L3 Encap controls the Encap Vector - L3 Encoding,
- * 3 bits. Defines the type of L3 Encapsulation the
- * template is describing.
- * <ul>
- * <li> CFA_P58_ACT_ENCAP_L3_NONE, default, no L3
- * Encapsulation processing.
- * <li> CFA_P58_ACT_ENCAP_L3_IPV4, enables L3 IPv4
- * Encapsulation.
- * <li> CFA_P58_ACT_ENCAP_L3_IPV6, enables L3 IPv6
- * Encapsulation.
- * <li> CFA_P58_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS
- * 8847 Encapsulation.
- * <li> CFA_P58_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS
- * 8848 Encapsulation.
- * </ul>
- */
- enum {
- /** Set to disable any L3 encapsulation
- * processing, default
- */
- CFA_P58_ACT_ENCAP_L3_NONE = 0,
- /** Set to enable L3 IPv4 encapsulation
- */
- CFA_P58_ACT_ENCAP_L3_IPV4 = 4,
- /** Set to enable L3 IPv6 encapsulation
- */
- CFA_P58_ACT_ENCAP_L3_IPV6 = 5,
- /** Set to enable L3 MPLS 8847 encapsulation
- */
- CFA_P58_ACT_ENCAP_L3_MPLS_8847 = 6,
- /** Set to enable L3 MPLS 8848 encapsulation
- */
- CFA_P58_ACT_ENCAP_L3_MPLS_8848 = 7
- } l3;
-
-#define CFA_P58_ACT_ENCAP_MAX_MPLS_LABELS 8
- /** 1-8 labels, valid when
- * (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8847) ||
- * (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8848)
- *
- * MAX number of MPLS Labels 8.
- */
- uint8_t l3_num_mpls_labels;
-
- /** Set to CFA_HCAPI_TRUE to enable L4 capability in the
- * template.
- *
- * CFA_HCAPI_TRUE adds ::TF_EN_UDP_SRC_PORT and
- * ::TF_EN_UDP_DST_PORT to the template.
- */
- uint8_t l4_enable;
-
- /** Tunnel Encap controls the Encap Vector - Tunnel
- * Encap, 3 bits. Defines the type of Tunnel
- * encapsulation the template is describing
- * <ul>
- * <li> CFA_P58_ACT_ENCAP_TNL_NONE, default, no Tunnel
- * Encapsulation processing.
- * <li> CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL
- * <li> CFA_P58_ACT_ENCAP_TNL_VXLAN. NOTE: Expects
- * l4_enable set to CFA_P58_TRUE;
- * <li> CFA_P58_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable
- * set to CFA_P58_TRUE;
- * <li> CFA_P58_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if
- * l4_enable set to CFA_HCAPI_FALSE.
- * <li> CFA_P58_ACT_ENCAP_TNL_GRE.NOTE: only valid if
- * l4_enable set to CFA_HCAPI_FALSE.
- * <li> CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4
- * <li> CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
- * </ul>
- */
- enum {
- /** Set to disable Tunnel header encapsulation
- * processing, default
- */
- CFA_P58_ACT_ENCAP_TNL_NONE = 0,
- /** Set to enable Tunnel Generic Full header
- * encapsulation
- */
- CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL,
- /** Set to enable VXLAN header encapsulation
- */
- CFA_P58_ACT_ENCAP_TNL_VXLAN,
- /** Set to enable NGE (VXLAN2) header encapsulation
- */
- CFA_P58_ACT_ENCAP_TNL_NGE,
- /** Set to enable NVGRE header encapsulation
- */
- CFA_P58_ACT_ENCAP_TNL_NVGRE,
- /** Set to enable GRE header encapsulation
- */
- CFA_P58_ACT_ENCAP_TNL_GRE,
- /** Set to enable Generic header after Tunnel
- * L4 encapsulation
- */
- CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4,
- /** Set to enable Generic header after Tunnel
- * encapsulation
- */
- CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
- } tnl;
-
- /** Number of bytes of generic tunnel header,
- * valid when
- * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL) ||
- * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) ||
- * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL)
- */
- uint8_t tnl_generic_size;
- /** Number of 32b words of nge options,
- * valid when
- * (tnl == CFA_P58_ACT_ENCAP_TNL_NGE)
- */
- uint8_t tnl_nge_op_len;
- /* Currently not planned */
- /* Custom Header */
- /* uint8_t custom_enable; */
- } encap;
-};
-
/**
* Enumeration of SRAM entry types, used for allocation of
* fixed SRAM entities. The memory model for CFA HCAPI
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2021 Broadcom
- * All rights reserved.
- */
-#ifndef _HWRM_TF_H_
-#define _HWRM_TF_H_
-
-#include "tf_core.h"
-
-typedef enum tf_type {
- TF_TYPE_TRUFLOW,
- TF_TYPE_LAST = TF_TYPE_TRUFLOW,
-} tf_type_t;
-
-typedef enum tf_subtype {
- HWRM_TFT_GET_GLOBAL_CFG = 821,
- HWRM_TFT_SET_GLOBAL_CFG = 822,
- HWRM_TFT_TBL_TYPE_BULK_GET = 825,
- HWRM_TFT_IF_TBL_SET = 827,
- HWRM_TFT_IF_TBL_GET = 828,
- TF_SUBTYPE_LAST = HWRM_TFT_IF_TBL_GET,
-} tf_subtype_t;
-
-/* Request and Response compile time checking */
-/* u32_t tlv_req_value[26]; */
-#define TF_MAX_REQ_SIZE 104
-/* u32_t tlv_resp_value[170]; */
-#define TF_MAX_RESP_SIZE 680
-
-/* Use this to allocate/free any kind of
- * indexes over HWRM and fill the parms pointer
- */
-#define TF_BULK_RECV 128
-#define TF_BULK_SEND 16
-
-/* EM Key value */
-#define TF_DEV_DATA_TYPE_TF_EM_RULE_INSERT_KEY_DATA 0x2e30UL
-/* EM Key value */
-#define TF_DEV_DATA_TYPE_TF_EM_RULE_DELETE_KEY_DATA 0x2e40UL
-/* L2 Context DMA Address Type */
-#define TF_DEV_DATA_TYPE_TF_L2_CTX_DMA_ADDR 0x2fe0UL
-/* L2 Context Entry */
-#define TF_DEV_DATA_TYPE_TF_L2_CTX_ENTRY 0x2fe1UL
-/* Prof tcam DMA Address Type */
-#define TF_DEV_DATA_TYPE_TF_PROF_TCAM_DMA_ADDR 0x3030UL
-/* Prof tcam Entry */
-#define TF_DEV_DATA_TYPE_TF_PROF_TCAM_ENTRY 0x3031UL
-/* WC DMA Address Type */
-#define TF_DEV_DATA_TYPE_TF_WC_DMA_ADDR 0x30d0UL
-/* WC Entry */
-#define TF_DEV_DATA_TYPE_TF_WC_ENTRY 0x30d1UL
-/* SPIF DFLT L2 CTXT Entry */
-#define TF_DEV_DATA_TYPE_SPIF_DFLT_L2_CTXT 0x3131UL
-/* PARIF DFLT ACT REC PTR Entry */
-#define TF_DEV_DATA_TYPE_PARIF_DFLT_ACT_REC 0x3132UL
-/* PARIF ERR DFLT ACT REC PTR Entry */
-#define TF_DEV_DATA_TYPE_PARIF_ERR_DFLT_ACT_REC 0x3133UL
-/* ILT Entry */
-#define TF_DEV_DATA_TYPE_ILT 0x3134UL
-/* VNIC SVIF entry */
-#define TF_DEV_DATA_TYPE_VNIC_SVIF 0x3135UL
-/* Action Data */
-#define TF_DEV_DATA_TYPE_TF_ACTION_DATA 0x3170UL
-#define TF_DEV_DATA_TYPE_LAST TF_DEV_DATA_TYPE_TF_ACTION_DATA
-
-#define TF_BITS2BYTES(x) (((x) + 7) >> 3)
-#define TF_BITS2BYTES_WORD_ALIGN(x) ((((x) + 31) >> 5) * 4)
-#define TF_BITS2BYTES_64B_WORD_ALIGN(x) ((((x) + 63) >> 6) * 8)
-
-struct tf_set_global_cfg_input;
-struct tf_get_global_cfg_input;
-struct tf_get_global_cfg_output;
-struct tf_tbl_type_bulk_get_input;
-struct tf_tbl_type_bulk_get_output;
-struct tf_if_tbl_set_input;
-struct tf_if_tbl_get_input;
-struct tf_if_tbl_get_output;
-/* Input params for global config set */
-typedef struct tf_set_global_cfg_input {
- /* Session Id */
- uint32_t fw_session_id;
- /* flags */
- uint32_t flags;
- /* When set to 0, indicates the query apply to RX */
-#define TF_SET_GLOBAL_CFG_INPUT_FLAGS_DIR_RX (0x0)
- /* When set to 1, indicates the query apply to TX */
-#define TF_SET_GLOBAL_CFG_INPUT_FLAGS_DIR_TX (0x1)
- /* Config type */
- uint32_t type;
- /* Offset of the type */
- uint32_t offset;
- /* Size of the data to set in bytes */
- uint16_t size;
- /* Data to set */
- uint8_t data[TF_BULK_SEND];
-} tf_set_global_cfg_input_t, *ptf_set_global_cfg_input_t;
-
-/* Input params for global config to get */
-typedef struct tf_get_global_cfg_input {
- /* Session Id */
- uint32_t fw_session_id;
- /* flags */
- uint32_t flags;
- /* When set to 0, indicates the query apply to RX */
-#define TF_GET_GLOBAL_CFG_INPUT_FLAGS_DIR_RX (0x0)
- /* When set to 1, indicates the query apply to TX */
-#define TF_GET_GLOBAL_CFG_INPUT_FLAGS_DIR_TX (0x1)
- /* Config to retrieve */
- uint32_t type;
- /* Offset to retrieve */
- uint32_t offset;
- /* Size of the data to set in bytes */
- uint16_t size;
-} tf_get_global_cfg_input_t, *ptf_get_global_cfg_input_t;
-
-/* Output params for global config */
-typedef struct tf_get_global_cfg_output {
- /* Size of the total data read in bytes */
- uint16_t size;
- /* Data to get */
- uint8_t data[TF_BULK_SEND];
-} tf_get_global_cfg_output_t, *ptf_get_global_cfg_output_t;
-
-/* Input params for table type get */
-typedef struct tf_tbl_type_bulk_get_input {
- /* Session Id */
- uint32_t fw_session_id;
- /* flags */
- uint32_t flags;
- /* When set to 0, indicates the get apply to RX */
-#define TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_RX (0x0)
- /* When set to 1, indicates the get apply to TX */
-#define TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_TX (0x1)
- /* When set to 1, indicates the clear entry on read */
-#define TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_CLEAR_ON_READ (0x2)
- /* Type of the object to set */
- uint32_t type;
- /* Starting index to get from */
- uint32_t start_index;
- /* Number of entries to get */
- uint32_t num_entries;
- /* Host memory where data will be stored */
- uint64_t host_addr;
-} tf_tbl_type_bulk_get_input_t, *ptf_tbl_type_bulk_get_input_t;
-
-/* Output params for table type get */
-typedef struct tf_tbl_type_bulk_get_output {
- /* Size of the total data read in bytes */
- uint16_t size;
-} tf_tbl_type_bulk_get_output_t, *ptf_tbl_type_bulk_get_output_t;
-
-/* Input params for if tbl set */
-typedef struct tf_if_tbl_set_input {
- /* Session Id */
- uint32_t fw_session_id;
- /* flags */
- uint16_t flags;
- /* When set to 0, indicates the query apply to RX */
-#define TF_IF_TBL_SET_INPUT_FLAGS_DIR_RX (0x0)
- /* When set to 1, indicates the query apply to TX */
-#define TF_IF_TBL_SET_INPUT_FLAGS_DIR_TX (0x1)
- /* if table type */
- uint16_t tf_if_tbl_type;
- /* index of table entry */
- uint16_t idx;
- /* size of the data write to table entry */
- uint32_t data_sz_in_bytes;
- /* data to write into table entry */
- uint32_t data[2];
-} tf_if_tbl_set_input_t, *ptf_if_tbl_set_input_t;
-
-/* Input params for if tbl get */
-typedef struct tf_if_tbl_get_input {
- /* Session Id */
- uint32_t fw_session_id;
- /* flags */
- uint16_t flags;
- /* When set to 0, indicates the query apply to RX */
-#define TF_IF_TBL_GET_INPUT_FLAGS_DIR_RX (0x0)
- /* When set to 1, indicates the query apply to TX */
-#define TF_IF_TBL_GET_INPUT_FLAGS_DIR_TX (0x1)
- /* if table type */
- uint16_t tf_if_tbl_type;
- /* size of the data get from table entry */
- uint32_t data_sz_in_bytes;
- /* index of table entry */
- uint16_t idx;
-} tf_if_tbl_get_input_t, *ptf_if_tbl_get_input_t;
-
-/* output params for if tbl get */
-typedef struct tf_if_tbl_get_output {
- /* Value read from table entry */
- uint32_t data[2];
-} tf_if_tbl_get_output_t, *ptf_if_tbl_get_output_t;
-
-#endif /* _HWRM_TF_H_ */
size_t length,
uint32_t initval) {
uint32_t a, b, c;
- int index = 12;
+ int index = length - 1;
/* Set up the internal state */
a = 0xdeadbeef + (((uint32_t)length) << 2) + initval;
#include "bnxt.h"
#include "rand.h"
#include "tf_common.h"
-#include "hwrm_tf.h"
#include "tf_ext_flow_handle.h"
int
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
-#include "hcapi/cfa/hcapi_cfa_defs.h"
+#include "hcapi_cfa_defs.h"
#include "tf_project.h"
/**
TF_MEM_MAX
};
+/**
+ * External memory control channel type
+ */
+enum tf_ext_mem_chan_type {
+ /**
+ * Direct memory write(Wh+/SR)
+ */
+ TF_EXT_MEM_CHAN_TYPE_DIRECT = 0,
+ /**
+ * Ring interface MPC
+ */
+ TF_EXT_MEM_CHAN_TYPE_RING_IF,
+ /**
+ * Use HWRM message to firmware
+ */
+ TF_EXT_MEM_CHAN_TYPE_FW,
+ /**
+ * Use ring_if message to firmware
+ */
+ TF_EXT_MEM_CHAN_TYPE_RING_IF_FW,
+ TF_EXT_MEM_CHAN_TYPE_MAX
+};
+
/**
* EEM record AR helper
*
TF_DEVICE_TYPE_WH = 0, /**< Whitney+ */
TF_DEVICE_TYPE_SR, /**< Stingray */
TF_DEVICE_TYPE_THOR, /**< Thor */
- TF_DEVICE_TYPE_SR2, /**< Stingray2 */
TF_DEVICE_TYPE_MAX /**< Maximum */
};
*/
enum tf_identifier_type {
/**
- * WH/SR/TH/SR2
+ * WH/SR/TH
* The L2 Context is returned from the L2 Ctxt TCAM lookup
* and can be used in WC TCAM or EM keys to virtualize further
* lookups.
*/
TF_IDENT_TYPE_L2_CTXT_HIGH,
/**
- * WH/SR/TH/SR2
+ * WH/SR/TH
* The L2 Context is returned from the L2 Ctxt TCAM lookup
* and can be used in WC TCAM or EM keys to virtualize further
* lookups.
*/
TF_IDENT_TYPE_L2_CTXT_LOW,
/**
- * WH/SR/TH/SR2
+ * WH/SR/TH
* The WC profile func is returned from the L2 Ctxt TCAM lookup
* to enable virtualization of the profile TCAM.
*/
TF_IDENT_TYPE_PROF_FUNC,
/**
- * WH/SR/TH/SR2
+ * WH/SR/TH
* The WC profile ID is included in the WC lookup key
* to enable virtualization of the WC TCAM hardware.
*/
TF_IDENT_TYPE_WC_PROF,
/**
- * WH/SR/TH/SR2
+ * WH/SR/TH
* The EM profile ID is included in the EM lookup key
- * to enable virtualization of the EM hardware. (not required for SR2
- * as it has table scope)
+ * to enable virtualization of the EM hardware.
*/
TF_IDENT_TYPE_EM_PROF,
/**
- * TH/SR2
+ * TH
* The L2 func is included in the ILT result and from recycling to
* enable virtualization of further lookups.
*/
TF_TBL_TYPE_MIRROR_CONFIG,
/** (Future) UPAR */
TF_TBL_TYPE_UPAR,
- /** (Future) SR2 Epoch 0 table */
- TF_TBL_TYPE_EPOCH0,
- /** (Future) SR2 Epoch 1 table */
- TF_TBL_TYPE_EPOCH1,
- /** (Future) TH/SR2 Metadata */
+ /** (Future) TH Metadata */
TF_TBL_TYPE_METADATA,
- /** (Future) TH/SR2 CT State */
+ /** (Future) TH CT State */
TF_TBL_TYPE_CT_STATE,
- /** (Future) TH/SR2 Range Profile */
+ /** (Future) TH Range Profile */
TF_TBL_TYPE_RANGE_PROF,
- /** (Future) SR2 Range Entry */
- TF_TBL_TYPE_RANGE_ENTRY,
- /** (Future) SR2 LAG Entry */
- TF_TBL_TYPE_LAG,
- /** TH/SR2 EM Flexible Key builder */
+ /** TH EM Flexible Key builder */
TF_TBL_TYPE_EM_FKB,
- /** TH/SR2 WC Flexible Key builder */
+ /** TH WC Flexible Key builder */
TF_TBL_TYPE_WC_FKB,
/* External */
* a pool of 64B entries.
*/
TF_TBL_TYPE_EXT,
- /* (Future) SR2 32B External EM Action 32B Pool */
- TF_TBL_TYPE_EXT_32B,
- /* (Future) SR2 64B External EM Action 64B Pool */
- TF_TBL_TYPE_EXT_64B,
- /* (Future) SR2 96B External EM Action 96B Pool */
- TF_TBL_TYPE_EXT_96B,
- /* (Future) SR2 128B External EM Action 128B Pool */
- TF_TBL_TYPE_EXT_128B,
TF_TBL_TYPE_MAX
};
/**
* allocate a table scope
*
- * On SR2 Firmware will allocate a scope ID. On other devices, the scope
- * is a software construct to identify an EEM table. This function will
+ * The scope is a software construct to identify an EEM table. This function will
* divide the hash memory/buckets and records according to the device
 * constraints based upon calculations using either the number of flows
* requested or the size of memory indicated. Other parameters passed in
* determine the configuration (maximum key size, maximum external action record
* size).
*
- * This API will allocate the table region in DRAM, program the PTU page table
- * entries, and program the number of static buckets (if SR2) in the RX and TX
- * CFAs. Buckets are assumed to start at 0 in the EM memory for the scope.
- * Upon successful completion of this API, hash tables are fully initialized and
- * ready for entries to be inserted.
- *
* A single API is used to allocate a common table scope identifier in both
* receive and transmit CFA. The scope identifier is common due to nature of
* connection tracking sending notifications between RX and TX direction.
*
* Firmware checks that the table scope ID is owned by the TruFlow
* session, verifies that no references to this table scope remains
- * (SR2 ILT) or Profile TCAM entries for either CFA (RX/TX) direction,
+ * or Profile TCAM entries for either CFA (RX/TX) direction,
* then frees the table scope ID.
*
* Returns success or failure code.
* [in] Entry size
*/
uint16_t data_sz_in_bytes;
+ /**
+ * [in] External memory channel type to use
+ */
+ enum tf_ext_mem_chan_type chan_type;
/**
* [in] Entry index to write to
*/
* [in] Entry size
*/
uint16_t data_sz_in_bytes;
+ /**
+ * [in] External memory channel type to use
+ */
+ enum tf_ext_mem_chan_type chan_type;
/**
* [in] Entry index to read
*/
* structure for the physical address.
*/
uint64_t physical_mem_addr;
+ /**
+ * [in] External memory channel type to use
+ */
+ enum tf_ext_mem_chan_type chan_type;
};
/**
* [in] ID of table scope to use (external only)
*/
uint32_t tbl_scope_id;
- /**
- * [in] ID of table interface to use (SR2 only)
- */
- uint32_t tbl_if_id;
/**
* [in] ptr to structure containing key fields
*/
* [in] duplicate check flag
*/
uint8_t dup_check;
+ /**
+ * [in] External memory channel type to use
+ */
+ enum tf_ext_mem_chan_type chan_type;
/**
* [out] Flow handle value for the inserted entry. This is encoded
* as the entries[4]:bucket[2]:hashId[1]:hash[14]
* [in] ID of table scope to use (external only)
*/
uint32_t tbl_scope_id;
- /**
- * [in] ID of table interface to use (SR2 only)
- */
- uint32_t tbl_if_id;
- /**
- * [in] epoch group IDs of entry to delete
- * 2 element array with 2 ids. (SR2 only)
- */
- uint16_t *epochs;
/**
* [out] The index of the entry
*/
uint16_t index;
+ /**
+ * [in] External memory channel type to use
+ */
+ enum tf_ext_mem_chan_type chan_type;
/**
* [in] structure containing flow delete handle information
*/
* [in] ID of table scope to use (external only)
*/
uint32_t tbl_scope_id;
- /**
- * [in] ID of table interface to use (SR2 only)
- */
- uint32_t tbl_if_id;
/**
* [in] ptr to structure containing key fields
*/
*/
uint16_t em_record_sz_in_bits;
/**
- * [in] epoch group IDs of entry to lookup
- * 2 element array with 2 ids. (SR2 only)
+ * [in] External memory channel type to use
*/
- uint16_t *epochs;
+ enum tf_ext_mem_chan_type chan_type;
/**
* [in] ptr to structure containing flow delete handle
*/
* This API inserts an exact match entry into DRAM EM table memory of the
* specified direction and table scope.
*
- * When inserting an entry into an exact match table, the TruFlow library may
- * need to allocate a dynamic bucket for the entry (SR2 only).
- *
* The insertion of duplicate entries in an EM table is not permitted. If a
* TruFlow application can guarantee that it will never insert duplicates, it
* can disable duplicate checking by passing a zero value in the dup_check
TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR,
/** Default Error Profile TCAM Miss Action Record Pointer Table */
TF_IF_TBL_TYPE_LKUP_PARIF_DFLT_ACT_REC_PTR,
- /** SR2 Ingress lookup table */
+ /** Ingress lookup table */
TF_IF_TBL_TYPE_ILT,
- /** SR2 VNIC/SVIF Properties Table */
+ /** VNIC/SVIF Properties Table */
TF_IF_TBL_TYPE_VSPT,
TF_IF_TBL_TYPE_MAX
};
* - (-EINVAL) on failure.
*/
static int
- tf_dev_unbind_p58(struct tf *tfp)
+tf_dev_unbind_p58(struct tf *tfp)
{
int rc = 0;
bool fail = false;
struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = {
[TF_IDENT_TYPE_L2_CTXT_HIGH] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH,
+ 0, 0, 0
},
[TF_IDENT_TYPE_L2_CTXT_LOW] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW,
+ 0, 0, 0
},
[TF_IDENT_TYPE_PROF_FUNC] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC,
+ 0, 0, 0
},
[TF_IDENT_TYPE_WC_PROF] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID,
+ 0, 0, 0
},
[TF_IDENT_TYPE_EM_PROF] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID,
+ 0, 0, 0
},
};
struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_PROF_TCAM] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_WC_TCAM] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_SP_TCAM] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM,
+ 0, 0, 0
},
};
struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
[TF_TBL_TYPE_FULL_ACT_RECORD] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION,
+ 0, 0, 0
},
[TF_TBL_TYPE_MCAST_GROUPS] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_ENCAP_8B] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_ENCAP_16B] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_ENCAP_64B] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_SP_SMAC] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_STATS_64] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B,
+ 0, 0, 0
},
[TF_TBL_TYPE_ACT_MODIFY_IPV4] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_IPV4
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_IPV4,
+ 0, 0, 0
},
[TF_TBL_TYPE_METER_PROF] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF,
+ 0, 0, 0
},
[TF_TBL_TYPE_METER_INST] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER,
+ 0, 0, 0
},
[TF_TBL_TYPE_MIRROR_CONFIG] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR,
+ 0, 0, 0
},
};
struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = {
[TF_EM_TBL_TYPE_TBL_SCOPE] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE,
+ 0, 0, 0
},
};
struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = {
[TF_EM_TBL_TYPE_EM_RECORD] = {
- TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC
+ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC,
+ 0, 0, 0
},
};
struct tf_rm_element_cfg tf_ident_p58[TF_IDENT_TYPE_MAX] = {
[TF_IDENT_TYPE_L2_CTXT_HIGH] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH,
+ 0, 0, 0
},
[TF_IDENT_TYPE_L2_CTXT_LOW] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW,
+ 0, 0, 0
},
[TF_IDENT_TYPE_PROF_FUNC] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_FUNC
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_FUNC,
+ 0, 0, 0
},
[TF_IDENT_TYPE_WC_PROF] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID,
+ 0, 0, 0
},
[TF_IDENT_TYPE_EM_PROF] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_PROF_ID
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_PROF_ID,
+ 0, 0, 0
},
};
struct tf_rm_element_cfg tf_tcam_p58[TF_TCAM_TBL_TYPE_MAX] = {
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_PROF_TCAM] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_TCAM
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_TCAM,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_WC_TCAM] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM,
+ 0, 0, 0
},
[TF_TCAM_TBL_TYPE_VEB_TCAM] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_VEB_TCAM
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_VEB_TCAM,
+ 0, 0, 0
},
};
struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {
[TF_TBL_TYPE_EM_FKB] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_FKB
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_FKB,
+ 0, 0, 0
},
[TF_TBL_TYPE_WC_FKB] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_FKB
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_FKB,
+ 0, 0, 0
},
[TF_TBL_TYPE_METER_PROF] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_PROF
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_PROF,
+ 0, 0, 0
},
[TF_TBL_TYPE_METER_INST] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER,
+ 0, 0, 0
},
[TF_TBL_TYPE_MIRROR_CONFIG] = {
- TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR
+ TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR,
+ 0, 0, 0
},
/* Policy - ARs in bank 1 */
[TF_TBL_TYPE_FULL_ACT_RECORD] = {
struct tf_rm_element_cfg tf_em_int_p58[TF_EM_TBL_TYPE_MAX] = {
[TF_EM_TBL_TYPE_EM_RECORD] = {
- TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P58_EM_REC
+ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P58_EM_REC,
+ 0, 0, 0
},
};
#include "tf_core.h"
#include "tf_session.h"
-#include "hcapi/cfa/hcapi_cfa_defs.h"
+#include "tf_em_common.h"
+
+#include "hcapi_cfa_defs.h"
#define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
#define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
#include "tfp.h"
#include "tf_device.h"
#include "tf_ext_flow_handle.h"
-#include "cfa_resource_types.h"
+#include "hcapi_cfa.h"
#include "bnxt.h"
/* Number of pointers per page_size */
#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
-/**
- * EM DBs.
- */
-void *eem_db[TF_DIR_MAX];
-
/**
* Init flag, set on bind and cleared on unbind
*/
*/
static enum tf_mem_type mem_type;
-/** Table scope array */
-struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
-
/* API defined in tf_em.h */
-struct tf_tbl_scope_cb *
-tbl_scope_cb_find(uint32_t tbl_scope_id)
-{
- int i;
- struct tf_rm_is_allocated_parms parms = { 0 };
- int allocated;
-
- /* Check that id is valid */
- parms.rm_db = eem_db[TF_DIR_RX];
- parms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
- parms.index = tbl_scope_id;
- parms.allocated = &allocated;
-
- i = tf_rm_is_allocated(&parms);
-
- if (i < 0 || allocated != TF_RM_ALLOCATED_ENTRY_IN_USE)
- return NULL;
-
- for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
- if (tbl_scopes[i].tbl_scope_id == tbl_scope_id)
- return &tbl_scopes[i];
- }
-
- return NULL;
-}
-
int
tf_create_tbl_pool_external(enum tf_dir dir,
struct tf_tbl_scope_cb *tbl_scope_cb,
tfp_free(ext_act_pool_mem);
}
+/**
+ * Looks up table scope control block using tbl_scope_id from tf_session.
+ *
+ * [in] tfp
+ * Pointer to Truflow Handle
+ * [in] tbl_scope_id
+ * table scope id
+ *
+ * Return:
+ * - Pointer to the tf_tbl_scope_cb, if found.
+ * - (NULL) on failure, not found.
+ */
+struct tf_tbl_scope_cb *
+tf_em_ext_common_tbl_scope_find(struct tf *tfp,
+ uint32_t tbl_scope_id)
+{
+ int rc;
+ struct em_ext_db *ext_db;
+ void *ext_ptr = NULL;
+ struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
+ struct ll_entry *entry;
+
+ rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
+ if (rc)
+ return NULL;
+
+ ext_db = (struct em_ext_db *)ext_ptr;
+
+ for (entry = ext_db->tbl_scope_ll.head; entry != NULL;
+ entry = entry->next) {
+ tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
+ if (tbl_scope_cb->tbl_scope_id == tbl_scope_id)
+ return tbl_scope_cb;
+ }
+
+ return NULL;
+}
+
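A minimal usage sketch of the lookup above, assuming a valid struct tf *tfp and a tbl_scope_id obtained from a prior allocation:

    struct tf_tbl_scope_cb *cb;

    cb = tf_em_ext_common_tbl_scope_find(tfp, tbl_scope_id);
    if (cb == NULL) {
            /* Either the scope was never allocated for this session
             * or the session has no em_ext_db attached yet.
             */
            return -EINVAL;
    }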
/**
* Allocate External Tbl entry from the scope pool.
*
TF_CHECK_PARMS2(tfp, parms);
- /* Get the pool info from the table scope
- */
- tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
-
+ tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR,
"%s, table scope not allocated\n",
tf_dir_2_str(parms->dir));
return -EINVAL;
}
+
pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Allocate an element
TF_CHECK_PARMS2(tfp, parms);
- /* Get the pool info from the table scope
- */
- tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
-
+ tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR,
"%s, table scope error\n",
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
parms->rx_max_action_entry_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
+ 0;
+
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].num_entries =
+ parms->rx_num_flows_in_k * TF_KILOBYTE;
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].entry_size =
+ parms->rx_max_action_entry_sz_in_bits / 8;
+
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].num_entries =
+ parms->rx_num_flows_in_k * TF_KILOBYTE;
+ tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].entry_size =
+ parms->rx_max_key_sz_in_bits / 8;
/* Tx */
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
parms->tx_max_action_entry_sz_in_bits / 8;
- tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
+ 0;
+
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].num_entries =
+ parms->tx_num_flows_in_k * TF_KILOBYTE;
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].entry_size =
+ parms->tx_max_action_entry_sz_in_bits / 8;
+
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].num_entries =
+ parms->tx_num_flows_in_k * TF_KILOBYTE;
+ tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].entry_size =
+ parms->tx_max_key_sz_in_bits / 8;
return 0;
}
key_obj.data = (uint8_t *)&key_entry;
key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
- rc = hcapi_cfa_key_hw_op(&op,
- &key_tbl,
- &key_obj,
- &key_loc);
+ rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
+ &key_tbl,
+ &key_obj,
+ &key_loc);
if (rc == 0) {
table_type = TF_KEY0_TABLE;
(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
- rc = hcapi_cfa_key_hw_op(&op,
- &key_tbl,
- &key_obj,
- &key_loc);
+ rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
+ &key_tbl,
+ &key_obj,
+ &key_loc);
if (rc != 0)
return rc;
TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
0,
0,
- TF_FLAGS_FLOW_HANDLE_EXTERNAL,
+ 0,
index,
0,
table_type);
key_obj.data = NULL;
key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
- rc = hcapi_cfa_key_hw_op(&op,
- &key_tbl,
- &key_obj,
- &key_loc);
+ rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
+ &key_tbl,
+ &key_obj,
+ &key_loc);
if (!rc)
return rc;
* -EINVAL - Error
*/
int
-tf_em_insert_ext_entry(struct tf *tfp __rte_unused,
+tf_em_insert_ext_entry(struct tf *tfp,
struct tf_insert_em_entry_parms *parms)
{
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
+ tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
return -EINVAL;
* -EINVAL - Error
*/
int
-tf_em_delete_ext_entry(struct tf *tfp __rte_unused,
+tf_em_delete_ext_entry(struct tf *tfp,
struct tf_delete_em_entry_parms *parms)
{
struct tf_tbl_scope_cb *tbl_scope_cb;
- tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
+ tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
return -EINVAL;
int i;
struct tf_rm_create_db_parms db_cfg = { 0 };
uint8_t db_exists = 0;
+ struct em_ext_db *ext_db;
+ struct tfp_calloc_parms cparms;
TF_CHECK_PARMS2(tfp, parms);
return -EINVAL;
}
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct em_ext_db);
+ cparms.alignment = 0;
+ if (tfp_calloc(&cparms) != 0) {
+ TFP_DRV_LOG(ERR, "em_ext_db alloc error %s\n",
+ strerror(ENOMEM));
+ return -ENOMEM;
+ }
+
+ ext_db = cparms.mem_va;
+ ll_init(&ext_db->tbl_scope_ll);
+ for (i = 0; i < TF_DIR_MAX; i++)
+ ext_db->eem_db[i] = NULL;
+ tf_session_set_em_ext_db(tfp, ext_db);
+
db_cfg.module = TF_MODULE_TYPE_EM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
continue;
- db_cfg.rm_db = &eem_db[i];
+ db_cfg.rm_db = (void *)&ext_db->eem_db[i];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
TFP_DRV_LOG(ERR,
int rc;
int i;
struct tf_rm_free_db_parms fparms = { 0 };
+ struct em_ext_db *ext_db = NULL;
+ struct tf_session *tfs = NULL;
+ struct tf_dev_info *dev;
+ struct ll_entry *entry;
+ struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
+ void *ext_ptr = NULL;
+ struct tf_free_tbl_scope_parms tparms = { 0 };
TF_CHECK_PARMS1(tfp);
return 0;
}
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to lookup device, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get em_ext_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ ext_db = (struct em_ext_db *)ext_ptr;
+
+ entry = ext_db->tbl_scope_ll.head;
+ while (entry != NULL) {
+ tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
+ entry = entry->next;
+ tparms.tbl_scope_id = tbl_scope_cb->tbl_scope_id;
+
+ if (dev->ops->tf_dev_free_tbl_scope) {
+ dev->ops->tf_dev_free_tbl_scope(tfp, &tparms);
+ } else {
+ /* should not reach here */
+ ll_delete(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
+ tfp_free(tbl_scope_cb);
+ }
+ }
+
for (i = 0; i < TF_DIR_MAX; i++) {
+ if (ext_db->eem_db[i] == NULL)
+ continue;
+
fparms.dir = i;
- fparms.rm_db = eem_db[i];
+ fparms.rm_db = ext_db->eem_db[i];
rc = tf_rm_free_db(tfp, &fparms);
if (rc)
return rc;
- eem_db[i] = NULL;
+ ext_db->eem_db[i] = NULL;
}
+ tfp_free(ext_db);
+ tf_session_set_em_ext_db(tfp, NULL);
+
init = 0;
return 0;
return -EINVAL;
}
- /* Get the table scope control block associated with the
- * external pool
- */
- tbl_scope_cb = tbl_scope_cb_find(tbl_scope_id);
-
+ tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR,
"%s, table scope error\n",
key_obj.data = parms->data;
key_obj.size = parms->data_sz_in_bytes;
- rc = hcapi_cfa_key_hw_op(&op,
- &key_tbl,
- &key_obj,
- &key_loc);
+ rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
+ &key_tbl,
+ &key_obj,
+ &key_loc);
return rc;
}
uint32_t sz_in_bytes = 8;
struct tf_dev_info *dev;
- tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
-
- if (tbl_scope_cb == NULL) {
- TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
- parms->tbl_scope_id);
- return -EINVAL;
- }
-
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
if (rc)
if (rc)
return rc;
+ tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
+ if (tbl_scope_cb == NULL) {
+ TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
+ parms->tbl_scope_id);
+ return -EINVAL;
+ }
+
if (dev->ops->tf_dev_map_tbl_scope == NULL) {
rc = -EOPNOTSUPP;
TFP_DRV_LOG(ERR,
#include "tf_core.h"
#include "tf_session.h"
-
+#include "ll.h"
/**
* Function to search for table scope control block structure
*/
struct tf_tbl_scope_cb *tbl_scope_cb_find(uint32_t tbl_scope_id);
+/**
+ * Table scope control block content
+ */
+struct tf_em_caps {
+ uint32_t flags;
+ uint32_t supported;
+ uint32_t max_entries_supported;
+ uint16_t key_entry_size;
+ uint16_t record_entry_size;
+ uint16_t efc_entry_size;
+};
+
+/**
+ * EEM data
+ *
+ * Linked list of external EM data allocated and managed by the EEM
+ * module for a TruFlow session.
+ */
+struct em_ext_db {
+ struct ll tbl_scope_ll;
+ struct rm_db *eem_db[TF_DIR_MAX];
+};
+
+/**
+ * Table Scope Control Block
+ *
+ * Holds private data for a table scope.
+ */
+struct tf_tbl_scope_cb {
+ /**
+ * Linked list of tbl_scope
+ */
+ struct ll_entry ll_entry; /* For inserting in the linked list; must
+ * be the first field of the struct.
+ */
+
+ uint32_t tbl_scope_id;
+
+ /** The PF, or the parent PF of the VF, used for table scope
+ * creation
+ */
+ uint16_t pf;
+ struct hcapi_cfa_em_ctx_mem_info em_ctx_info[TF_DIR_MAX];
+ struct tf_em_caps em_caps[TF_DIR_MAX];
+ struct stack ext_act_pool[TF_DIR_MAX];
+ uint32_t *ext_act_pool_mem[TF_DIR_MAX];
+};
+
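The cast from a list entry back to the control block in tf_em_ext_common_tbl_scope_find() is only valid because ll_entry sits at offset zero. A compile-time guard could make that explicit; the following is only an illustration in the style of the static_asserts used for the HWRM message sizes, not part of the patch:

    #include <assert.h>  /* static_assert (C11) */
    #include <stddef.h>  /* offsetof */

    static_assert(offsetof(struct tf_tbl_scope_cb, ll_entry) == 0,
                  "ll_entry must be the first member of tf_tbl_scope_cb");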
/**
* Create and initialize a stack to use for action entries
*
int tf_em_size_table(struct hcapi_cfa_em_table *tbl,
uint32_t page_size);
+
+/**
+ * Look up table scope control block using tbl_scope_id from
+ * tf_session
+ *
+ * [in] tfp
+ * Pointer to Truflow Handle
+ *
+ * [in] tbl_scope_id
+ * table scope id
+ *
+ * Returns:
+ * - Pointer to the tf_tbl_scope_cb, if found.
+ * - (NULL) on failure, not found.
+ */
+struct tf_tbl_scope_cb *
+tf_em_ext_common_tbl_scope_find(struct tf *tfp,
+ uint32_t tbl_scope_id);
+
#endif /* _TF_EM_COMMON_H_ */
/* Number of pointers per page_size */
#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
-/**
- * EM DBs.
- */
-extern void *eem_db[TF_DIR_MAX];
-
-extern struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
-
/**
* Function to free a page table
*
}
int
-tf_em_ext_alloc(struct tf *tfp, struct tf_alloc_tbl_scope_parms *parms)
+tf_em_ext_alloc(struct tf *tfp,
+ struct tf_alloc_tbl_scope_parms *parms)
{
int rc;
enum tf_dir dir;
struct tf_free_tbl_scope_parms free_parms;
struct tf_rm_allocate_parms aparms = { 0 };
struct tf_rm_free_parms fparms = { 0 };
+ struct tfp_calloc_parms cparms;
+ struct tf_session *tfs = NULL;
+ struct em_ext_db *ext_db = NULL;
+ void *ext_ptr = NULL;
+ uint16_t pf;
+
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get em_ext_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ ext_db = (struct em_ext_db *)ext_ptr;
+
+ rc = tfp_get_pf(tfp, &pf);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "EEM: PF query error rc:%s\n",
+ strerror(-rc));
+ goto cleanup;
+ }
/* Get Table Scope control block from the session pool */
- aparms.rm_db = eem_db[TF_DIR_RX];
+ aparms.rm_db = ext_db->eem_db[TF_DIR_RX];
aparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
aparms.index = (uint32_t *)&parms->tbl_scope_id;
rc = tf_rm_allocate(&aparms);
if (rc) {
TFP_DRV_LOG(ERR,
"Failed to allocate table scope\n");
- return rc;
+ goto cleanup;
}
- tbl_scope_cb = &tbl_scopes[parms->tbl_scope_id];
- tbl_scope_cb->index = parms->tbl_scope_id;
- tbl_scope_cb->tbl_scope_id = parms->tbl_scope_id;
-
- rc = tfp_get_pf(tfp, &tbl_scope_cb->pf);
+ /* Create tbl_scope, initialize and attach to the session */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_tbl_scope_cb);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
if (rc) {
+ /* Log error */
TFP_DRV_LOG(ERR,
- "EEM: PF query error rc:%s\n",
- strerror(-rc));
+ "Failed to allocate session table scope, rc:%s\n",
+ strerror(-rc));
goto cleanup;
}
+ tbl_scope_cb = cparms.mem_va;
+ tbl_scope_cb->tbl_scope_id = parms->tbl_scope_id;
+ tbl_scope_cb->pf = pf;
+
for (dir = 0; dir < TF_DIR_MAX; dir++) {
rc = tf_msg_em_qcaps(tfp,
dir,
"EEM: Unable to query for EEM capability,"
" rc:%s\n",
strerror(-rc));
- goto cleanup;
+ goto cleanup_ts;
}
}
* Validate and setup table sizes
*/
if (tf_em_validate_num_entries(tbl_scope_cb, parms))
- goto cleanup;
+ goto cleanup_ts;
for (dir = 0; dir < TF_DIR_MAX; dir++) {
/*
"EEM: Unable to register for EEM ctx,"
" rc:%s\n",
strerror(-rc));
- goto cleanup;
+ goto cleanup_ts;
}
em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
}
}
+ /* Insert into session tbl_scope list */
+ ll_insert(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
return 0;
cleanup_full:
free_parms.tbl_scope_id = parms->tbl_scope_id;
+ /* Insert into session list prior to ext_free */
+ ll_insert(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
tf_em_ext_free(tfp, &free_parms);
return -EINVAL;
+cleanup_ts:
+ tfp_free(tbl_scope_cb);
+
cleanup:
/* Free Table control block */
- fparms.rm_db = eem_db[TF_DIR_RX];
+ fparms.rm_db = ext_db->eem_db[TF_DIR_RX];
fparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
fparms.index = parms->tbl_scope_id;
- tf_rm_free(&fparms);
+ rc = tf_rm_free(&fparms);
+ if (rc)
+ TFP_DRV_LOG(ERR, "Failed to free table scope\n");
+
return -EINVAL;
}
int rc = 0;
enum tf_dir dir;
struct tf_tbl_scope_cb *tbl_scope_cb;
+ struct tf_session *tfs;
+ struct em_ext_db *ext_db = NULL;
+ void *ext_ptr = NULL;
struct tf_rm_free_parms aparms = { 0 };
- tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get em_ext_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ ext_db = (struct em_ext_db *)ext_ptr;
+ tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR, "Table scope error\n");
return -EINVAL;
}
/* Free Table control block */
- aparms.rm_db = eem_db[TF_DIR_RX];
+ aparms.rm_db = ext_db->eem_db[TF_DIR_RX];
aparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
aparms.index = parms->tbl_scope_id;
rc = tf_rm_free(&aparms);
tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
}
- tbl_scopes[parms->tbl_scope_id].tbl_scope_id = TF_TBL_SCOPE_INVALID;
+ /* remove from session list and free tbl_scope */
+ ll_delete(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
+ tfp_free(tbl_scope_cb);
return rc;
}
#include <stdlib.h>
#include <string.h>
+#include "tf_em_common.h"
#include "tf_msg_common.h"
#include "tf_device.h"
#include "tf_msg.h"
#include "tf_common.h"
#include "tf_session.h"
#include "tfp.h"
-#include "hwrm_tf.h"
#include "tf_em.h"
/* Specific msg size defines as we cannot use defines in tf.yaml. This
* array size (define above) should be checked and compared.
*/
#define TF_MSG_SIZE_HWRM_TF_GLOBAL_CFG_SET 56
+static_assert(sizeof(struct hwrm_tf_global_cfg_set_input) ==
+ TF_MSG_SIZE_HWRM_TF_GLOBAL_CFG_SET,
+ "HWRM message size changed: hwrm_tf_global_cfg_set_input");
+
#define TF_MSG_SIZE_HWRM_TF_EM_INSERT 104
+static_assert(sizeof(struct hwrm_tf_em_insert_input) ==
+ TF_MSG_SIZE_HWRM_TF_EM_INSERT,
+ "HWRM message size changed: hwrm_tf_em_insert_input");
+
#define TF_MSG_SIZE_HWRM_TF_TBL_TYPE_SET 128
+static_assert(sizeof(struct hwrm_tf_tbl_type_set_input) ==
+ TF_MSG_SIZE_HWRM_TF_TBL_TYPE_SET,
+ "HWRM message size changed: hwrm_tf_tbl_type_set_input");
/**
* This is the MAX data we can transport across regular HWRM
return 0;
}
+int tf_msg_ext_em_ctxt_mem_alloc(struct tf *tfp,
+ struct hcapi_cfa_em_table *tbl,
+ uint64_t *dma_addr,
+ uint32_t *page_lvl,
+ uint32_t *page_size)
+{
+ struct tfp_send_msg_parms parms = { 0 };
+ struct hwrm_tf_ctxt_mem_alloc_input req = {0};
+ struct hwrm_tf_ctxt_mem_alloc_output resp = {0};
+ uint32_t mem_size_k;
+ int rc = 0;
+ struct tf_dev_info *dev;
+ struct tf_session *tfs;
+ uint32_t fw_se_id;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to lookup session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to lookup device, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ /* Retrieve the firmware session id */
+ fw_se_id = tfs->session_id.internal.fw_session_id;
+
+ if (tbl->num_entries && tbl->entry_size) {
+ /* unit: kbytes */
+ mem_size_k = (tbl->num_entries / TF_KILOBYTE) * tbl->entry_size;
+ req.mem_size = tfp_cpu_to_le_32(mem_size_k);
+ req.fw_session_id = tfp_cpu_to_le_32(fw_se_id);
+ parms.tf_type = HWRM_TF_CTXT_MEM_ALLOC;
+ parms.req_data = (uint32_t *)&req;
+ parms.req_size = sizeof(req);
+ parms.resp_data = (uint32_t *)&resp;
+ parms.resp_size = sizeof(resp);
+ parms.mailbox = dev->ops->tf_dev_get_mailbox();
+ rc = tfp_send_msg_direct(tfp, &parms);
+ if (rc) {
+ TFP_DRV_LOG(ERR, "Failed ext_em_alloc error rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ *dma_addr = tfp_le_to_cpu_64(resp.page_dir);
+ *page_lvl = resp.page_level;
+ *page_size = resp.page_size;
+ }
+
+ return rc;
+}
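Since req.mem_size is carried in kilobytes, the conversion above is easy to sanity-check with concrete numbers; a worked example assuming a hypothetical table of 2M entries with 16-byte records:

    /* num_entries = 2 * 1024 * 1024, entry_size = 16 bytes
     * mem_size_k  = (num_entries / TF_KILOBYTE) * entry_size
     *             = (2097152 / 1024) * 16
     *             = 32768 KB of context memory requested (32 MB)
     */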
+
+int tf_msg_ext_em_ctxt_mem_free(struct tf *tfp,
+ uint32_t mem_size_k,
+ uint64_t dma_addr,
+ uint8_t page_level,
+ uint8_t page_size)
+{
+ struct tfp_send_msg_parms parms = { 0 };
+ struct hwrm_tf_ctxt_mem_free_input req = {0};
+ struct hwrm_tf_ctxt_mem_free_output resp = {0};
+ int rc = 0;
+ struct tf_dev_info *dev;
+ struct tf_session *tfs;
+ uint32_t fw_se_id;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to lookup session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to lookup device, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ /* Retrieve the firmware session id */
+ fw_se_id = tfs->session_id.internal.fw_session_id;
+
+ req.fw_session_id = tfp_cpu_to_le_32(fw_se_id);
+ req.mem_size = tfp_cpu_to_le_32(mem_size_k);
+ req.page_dir = tfp_cpu_to_le_64(dma_addr);
+ req.page_level = page_level;
+ req.page_size = page_size;
+ parms.tf_type = HWRM_TF_CTXT_MEM_FREE;
+ parms.req_data = (uint32_t *)&req;
+ parms.req_size = sizeof(req);
+ parms.resp_data = (uint32_t *)&resp;
+ parms.resp_size = sizeof(resp);
+ parms.mailbox = dev->ops->tf_dev_get_mailbox();
+ rc = tfp_send_msg_direct(tfp, &parms);
+
+ return rc;
+}
+
int
tf_msg_em_mem_rgtr(struct tf *tfp,
int page_lvl,
struct tfp_send_msg_parms parms = { 0 };
struct tf_dev_info *dev;
struct tf_session *tfs;
+ uint32_t fw_se_id;
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
strerror(-rc));
return rc;
}
+ fw_se_id = tfs->session_id.internal.fw_session_id;
+ req.fw_session_id = tfp_cpu_to_le_32(fw_se_id);
req.page_level = page_lvl;
req.page_size = page_size;
req.page_dir = tfp_cpu_to_le_64(dma_addr);
struct tfp_send_msg_parms parms = { 0 };
struct tf_dev_info *dev;
struct tf_session *tfs;
+ uint32_t fw_se_id;
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
return rc;
}
+ fw_se_id = tfs->session_id.internal.fw_session_id;
+ req.fw_session_id = tfp_cpu_to_le_32(fw_se_id);
+
req.ctx_id = tfp_cpu_to_le_32(*ctx_id);
parms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;
struct tfp_send_msg_parms parms = { 0 };
struct tf_dev_info *dev;
struct tf_session *tfs;
+ uint32_t fw_se_id;
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
strerror(-rc));
return rc;
}
+ fw_se_id = tfs->session_id.internal.fw_session_id;
/* Retrieve the device information */
rc = tf_session_get_device(tfs, &dev);
req.flags = tfp_cpu_to_le_32(flags);
parms.tf_type = HWRM_TF_EXT_EM_QCAPS;
+ req.fw_session_id = tfp_cpu_to_le_32(fw_se_id);
parms.req_data = (uint32_t *)&req;
parms.req_size = sizeof(req);
parms.resp_data = (uint32_t *)&resp;
return rc;
}
+int
+tf_msg_ext_em_cfg(struct tf *tfp,
+ struct tf_tbl_scope_cb *tbl_scope_cb,
+ uint32_t st_buckets,
+ uint8_t flush_interval,
+ enum tf_dir dir)
+{
+ struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
+ struct hcapi_cfa_em_table *lkup_tbl, *act_tbl;
+ struct hwrm_tf_ext_em_cfg_input req = {0};
+ struct hwrm_tf_ext_em_cfg_output resp = {0};
+ struct tfp_send_msg_parms parms = { 0 };
+ uint32_t flags;
+ struct tf_dev_info *dev;
+ struct tf_session *tfs;
+ uint32_t fw_se_id;
+ int rc;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
+ return rc;
+ }
+ fw_se_id = tfs->session_id.internal.fw_session_id;
+
+ lkup_tbl = &ctxp->em_tables[TF_EM_LKUP_TABLE];
+ act_tbl = &ctxp->em_tables[TF_ACTION_TABLE];
+ flags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :
+ HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);
+ flags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;
+
+ req.flags = tfp_cpu_to_le_32(flags);
+ req.num_entries = tfp_cpu_to_le_32(act_tbl->num_entries);
+ req.lkup_static_buckets = tfp_cpu_to_le_32(st_buckets);
+ req.fw_session_id = tfp_cpu_to_le_32(fw_se_id);
+ req.flush_interval = flush_interval;
+ req.action_ctx_id = tfp_cpu_to_le_16(act_tbl->ctx_id);
+ req.action_tbl_scope = tfp_cpu_to_le_16(tbl_scope_cb->tbl_scope_id);
+ req.lkup_ctx_id = tfp_cpu_to_le_16(lkup_tbl->ctx_id);
+ req.lkup_tbl_scope = tfp_cpu_to_le_16(tbl_scope_cb->tbl_scope_id);
+
+ req.enables = (HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_ACTION_CTX_ID |
+ HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_ACTION_TBL_SCOPE |
+ HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_CTX_ID |
+ HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_TBL_SCOPE |
+ HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_STATIC_BUCKETS |
+ HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_NUM_ENTRIES);
+
+ parms.tf_type = HWRM_TF_EXT_EM_CFG;
+ parms.req_data = (uint32_t *)&req;
+ parms.req_size = sizeof(req);
+ parms.resp_data = (uint32_t *)&resp;
+ parms.resp_size = sizeof(resp);
+ parms.mailbox = dev->ops->tf_dev_get_mailbox();
+
+ rc = tfp_send_msg_direct(tfp,
+ &parms);
+ return rc;
+}
+
int
tf_msg_em_op(struct tf *tfp,
int dir,
if (rc != 0)
return rc;
- if (mparms.tf_resp_code != 0)
- return tfp_le_to_cpu_32(mparms.tf_resp_code);
-
if (parms->key_size < resp.key_size ||
parms->result_size < resp.result_size) {
rc = -EINVAL;
tfp_memcpy(parms->mask, &resp.dev_data[resp.key_size], resp.key_size);
tfp_memcpy(parms->result, &resp.dev_data[resp.result_offset], resp.result_size);
- return tfp_le_to_cpu_32(mparms.tf_resp_code);
+ return 0;
}
int
if (rc)
return rc;
- return tfp_le_to_cpu_32(parms.tf_resp_code);
+ return 0;
}
int
if (rc)
return rc;
- /* Verify that we got enough buffer to return the requested data */
- if (tfp_le_to_cpu_32(resp.size) != size)
+ /*
+ * The response is 64 bytes long and resp.size is reported in
+ * 32-bit words (16 words * 4 = 64 bytes). All we can check is
+ * that the response holds at least the requested number of
+ * bytes.
+ */
+ if ((tfp_le_to_cpu_32(resp.size) * 4) < size)
return -EINVAL;
+ /*
+ * Copy the requested number of bytes
+ */
tfp_memcpy(data,
&resp.data,
size);
- return tfp_le_to_cpu_32(parms.tf_resp_code);
+ return 0;
}
/* HWRM Tunneled messages */
else
return -EFAULT;
- return tfp_le_to_cpu_32(parms.tf_resp_code);
+ return 0;
}
int
struct tf_dev_info *dev;
struct tf_session *tfs;
- RTE_BUILD_BUG_ON(sizeof(struct hwrm_tf_global_cfg_set_input) !=
- TF_MSG_SIZE_HWRM_TF_GLOBAL_CFG_SET);
-
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
if (rc) {
if (rc != 0)
return rc;
- return tfp_le_to_cpu_32(parms.tf_resp_code);
+ return 0;
}
int
{
int rc;
struct tfp_send_msg_parms parms = { 0 };
- struct tf_tbl_type_bulk_get_input req = { 0 };
- struct tf_tbl_type_bulk_get_output resp = { 0 };
+ struct hwrm_tf_tbl_type_bulk_get_input req = { 0 };
+ struct hwrm_tf_tbl_type_bulk_get_output resp = { 0 };
int data_size = 0;
uint8_t fw_session_id;
struct tf_dev_info *dev;
req.host_addr = tfp_cpu_to_le_64(physical_mem_addr);
- MSG_PREP(parms,
- dev->ops->tf_dev_get_mailbox(),
- HWRM_TF,
- HWRM_TFT_TBL_TYPE_BULK_GET,
- req,
- resp);
+ parms.tf_type = HWRM_TF_TBL_TYPE_BULK_GET;
+ parms.req_data = (uint32_t *)&req;
+ parms.req_size = sizeof(req);
+ parms.resp_data = (uint32_t *)&resp;
+ parms.resp_size = sizeof(resp);
+ parms.mailbox = dev->ops->tf_dev_get_mailbox();
- rc = tfp_send_msg_tunneled(tfp, &parms);
+ rc = tfp_send_msg_direct(tfp,
+ &parms);
if (rc)
return rc;
if (tfp_le_to_cpu_32(resp.size) != data_size)
return -EINVAL;
- return tfp_le_to_cpu_32(parms.tf_resp_code);
+ return 0;
}
int
if (rc != 0)
return rc;
- if (parms.tf_resp_code != 0)
- return tfp_le_to_cpu_32(parms.tf_resp_code);
-
tfp_memcpy(params->data, resp.data, req.size);
- return tfp_le_to_cpu_32(parms.tf_resp_code);
+ return 0;
}
int
if (rc != 0)
return rc;
- return tfp_le_to_cpu_32(parms.tf_resp_code);
+ return 0;
}
#include <rte_common.h>
#include <hsi_struct_def_dpdk.h>
+#include "tf_em_common.h"
#include "tf_tbl.h"
#include "tf_rm.h"
#include "tf_tcam.h"
int tf_msg_delete_em_entry(struct tf *tfp,
struct tf_delete_em_entry_parms *em_parms);
+/**
+ * Sends Ext EM mem allocation request to Firmware
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] tbl
+ * EM table whose backing context memory is to be allocated
+ *
+ * [out] dma_addr
+ * DMA address of the page directory returned by firmware
+ *
+ * [out] page_lvl
+ * page level
+ *
+ * [out] page_size
+ * page size
+ *
+ * Returns:
+ * 0 on Success else internal Truflow error
+ */
+int tf_msg_ext_em_ctxt_mem_alloc(struct tf *tfp,
+ struct hcapi_cfa_em_table *tbl,
+ uint64_t *dma_addr,
+ uint32_t *page_lvl,
+ uint32_t *page_size);
+
+/**
+ * Sends Ext EM mem free request to Firmware
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] mem_size_k
+ * memory size in KB
+ *
+ * [in] dma_addr
+ * Pointer to the PBL or PDL depending on number of levels
+ *
+ * [in] page_level
+ * PBL indirect levels
+ *
+ * [in] page_size
+ * page size
+ *
+ * Returns:
+ * 0 on Success else internal Truflow error
+ */
+int tf_msg_ext_em_ctxt_mem_free(struct tf *tfp,
+ uint32_t mem_size_k,
+ uint64_t dma_addr,
+ uint8_t page_level,
+ uint8_t page_size);
+
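A minimal sketch pairing the two calls above, assuming a table already sized by tf_em_validate_num_entries() and assuming the free path expects the same size and paging geometry that the alloc returned:

    uint64_t dma_addr = 0;
    uint32_t page_lvl = 0, page_size = 0;
    uint32_t mem_size_k;
    int rc;

    rc = tf_msg_ext_em_ctxt_mem_alloc(tfp, tbl, &dma_addr,
                                      &page_lvl, &page_size);
    if (rc)
            return rc;

    /* ... configure the table scope and use the EM tables ... */

    mem_size_k = (tbl->num_entries / TF_KILOBYTE) * tbl->entry_size;
    rc = tf_msg_ext_em_ctxt_mem_free(tfp, mem_size_k, dma_addr,
                                     (uint8_t)page_lvl,
                                     (uint8_t)page_size);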
/**
* Sends EM mem register request to Firmware
*
uint8_t flush_interval,
int dir);
+/**
+ * Sends Ext EM config request to Firmware
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] tbl_scope_cb
+ * Table scope parameters
+ *
+ * [in] st_buckets
+ * Number of static buckets in the EM lookup table
+ *
+ * [in] flush_interval
+ * Flush pending HW cached flows every 1/10th of value set in
+ * seconds; both idle and active flows are flushed from the HW
+ * cache. If set to 0, this feature is disabled.
+ *
+ * [in] dir
+ * Receive or Transmit direction
+ *
+ * Returns:
+ * 0 on Success else internal Truflow error
+ */
+int tf_msg_ext_em_cfg(struct tf *tfp,
+ struct tf_tbl_scope_cb *tbl_scope_cb,
+ uint32_t st_buckets,
+ uint8_t flush_interval,
+ enum tf_dir dir);
+
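A minimal sketch of configuring both directions once the context memory is in place, assuming a populated tbl_scope_cb; the static bucket count and flush interval below are illustrative placeholders, not values required by the patch:

    enum tf_dir dir;
    uint32_t st_buckets = 128 * 1024; /* example static bucket count */
    uint8_t flush_interval = 0;       /* 0 disables periodic flushing */
    int rc;

    for (dir = 0; dir < TF_DIR_MAX; dir++) {
            rc = tf_msg_ext_em_cfg(tfp, tbl_scope_cb,
                                   st_buckets, flush_interval, dir);
            if (rc)
                    return rc;
    }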
/**
* Sends EM operation request to Firmware
*
parms.mailbox = mb; \
parms.tf_type = type; \
parms.tf_subtype = subtype; \
- parms.tf_resp_code = 0; \
parms.req_size = sizeof(req); \
parms.req_data = (uint32_t *)&(req); \
parms.resp_size = sizeof(resp); \
parms.mailbox = mb; \
parms.tf_type = type; \
parms.tf_subtype = subtype; \
- parms.tf_resp_code = 0; \
parms.req_size = 0; \
parms.req_data = NULL; \
parms.resp_size = sizeof(resp); \
parms.mailbox = mb; \
parms.tf_type = type; \
parms.tf_subtype = subtype; \
- parms.tf_resp_code = 0; \
parms.req_size = sizeof(req); \
parms.req_data = (uint32_t *)&(req); \
parms.resp_size = 0; \
ll_insert(&session->client_ll, &client->ll_entry);
session->ref_count++;
+ /* Init session em_ext_db */
+ session->em_ext_db_handle = NULL;
+
rc = tf_dev_bind(tfp,
parms->open_cfg->device_type,
session->shadow_copy,
return 0;
}
+
+int
+tf_session_get_em_ext_db(struct tf *tfp,
+ void **em_ext_db_handle)
+{
+ struct tf_session *tfs = NULL;
+ int rc = 0;
+
+ *em_ext_db_handle = NULL;
+
+ if (tfp == NULL)
+ return (-EINVAL);
+
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ *em_ext_db_handle = tfs->em_ext_db_handle;
+ return rc;
+}
+
+int
+tf_session_set_em_ext_db(struct tf *tfp,
+ void *em_ext_db_handle)
+{
+ struct tf_session *tfs = NULL;
+ int rc = 0;
+
+ if (tfp == NULL)
+ return (-EINVAL);
+
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ tfs->em_ext_db_handle = em_ext_db_handle;
+ return rc;
+}
+
+int
+tf_session_get_db(struct tf *tfp,
+ enum tf_module_type type,
+ void **db_handle)
+{
+ struct tf_session *tfs = NULL;
+ int rc = 0;
+
+ *db_handle = NULL;
+
+ if (tfp == NULL)
+ return (-EINVAL);
+
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ switch (type) {
+ case TF_MODULE_TYPE_IDENTIFIER:
+ *db_handle = tfs->id_db_handle;
+ break;
+ case TF_MODULE_TYPE_TABLE:
+ *db_handle = tfs->tbl_db_handle;
+ break;
+ case TF_MODULE_TYPE_TCAM:
+ *db_handle = tfs->tcam_db_handle;
+ break;
+ case TF_MODULE_TYPE_EM:
+ *db_handle = tfs->em_db_handle;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+int
+tf_session_set_db(struct tf *tfp,
+ enum tf_module_type type,
+ void *db_handle)
+{
+ struct tf_session *tfs = NULL;
+ int rc = 0;
+
+ if (tfp == NULL)
+ return (-EINVAL);
+
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ switch (type) {
+ case TF_MODULE_TYPE_IDENTIFIER:
+ tfs->id_db_handle = db_handle;
+ break;
+ case TF_MODULE_TYPE_TABLE:
+ tfs->tbl_db_handle = db_handle;
+ break;
+ case TF_MODULE_TYPE_TCAM:
+ tfs->tcam_db_handle = db_handle;
+ break;
+ case TF_MODULE_TYPE_EM:
+ tfs->em_db_handle = db_handle;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
* Linked list of clients registered for this session
*/
struct ll client_ll;
+
+ /**
+ * em ext db reference for the session
+ */
+ void *em_ext_db_handle;
+
+ /**
+ * tcam db reference for the session
+ */
+ void *tcam_db_handle;
+
+ /**
+ * table db reference for the session
+ */
+ void *tbl_db_handle;
+
+ /**
+ * identifier db reference for the session
+ */
+ void *id_db_handle;
+
+ /**
+ * em db reference for the session
+ */
+ void *em_db_handle;
};
/**
int tf_session_get_session_id(struct tf *tfp,
union tf_session_id *session_id);
+/**
+ * API to get the em_ext_db from tf_session.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] em_ext_db_handle, pointer to eem handle
+ *
+ * Returns:
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int
+tf_session_get_em_ext_db(struct tf *tfp,
+ void **em_ext_db_handle);
+
+/**
+ * API to set the em_ext_db in tf_session.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] em_ext_db_handle, pointer to eem handle
+ *
+ * Returns:
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int
+tf_session_set_em_ext_db(struct tf *tfp,
+ void *em_ext_db_handle);
+
+/**
+ * API to get the db from tf_session.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] db_handle, pointer to db handle
+ *
+ * Returns:
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int
+tf_session_get_db(struct tf *tfp,
+ enum tf_module_type type,
+ void **db_handle);
+
+/**
+ * API to set the db in tf_session.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] db_handle, pointer to db handle
+ *
+ * Returns:
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int
+tf_session_set_db(struct tf *tfp,
+ enum tf_module_type type,
+ void *db_handle);
#endif /* _TF_SESSION_H_ */
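The get/set pair gives each module a single opaque handle slot in the session. A minimal sketch of the intended bind-time pattern, where db stands in for whatever handle the module's DB creation returned:

    void *db_handle = NULL;
    int rc;

    /* Bind: stash the freshly created DB in the session */
    rc = tf_session_set_db(tfp, TF_MODULE_TYPE_TCAM, db);
    if (rc)
            return rc;

    /* Later operations: fetch it back by module type */
    rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TCAM, &db_handle);
    if (rc || db_handle == NULL)
            return -EINVAL;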
* The Table module provides processing of Internal TF table types.
*/
-/**
- * Table scope control block content
- */
-struct tf_em_caps {
- uint32_t flags;
- uint32_t supported;
- uint32_t max_entries_supported;
- uint16_t key_entry_size;
- uint16_t record_entry_size;
- uint16_t efc_entry_size;
-};
-
/** Invalid table scope id */
#define TF_TBL_SCOPE_INVALID 0xffffffff
-/**
- * Table Scope Control Block
- *
- * Holds private data for a table scope. Only one instance of a table
- * scope with Internal EM is supported.
- */
-struct tf_tbl_scope_cb {
- uint32_t tbl_scope_id;
- /** The pf or parent pf of the vf used for table scope creation
- */
- uint16_t pf;
- int index;
- struct hcapi_cfa_em_ctx_mem_info em_ctx_info[TF_DIR_MAX];
- struct tf_em_caps em_caps[TF_DIR_MAX];
- struct stack ext_act_pool[TF_DIR_MAX];
- uint32_t *ext_act_pool_mem[TF_DIR_MAX];
-};
-
/**
* Table configuration parameters
*/
return "Mirror";
case TF_TBL_TYPE_UPAR:
return "UPAR";
- case TF_TBL_TYPE_EPOCH0:
- return "EPOCH0";
- case TF_TBL_TYPE_EPOCH1:
- return "EPOCH1";
case TF_TBL_TYPE_METADATA:
return "Metadata";
- case TF_TBL_TYPE_CT_STATE:
- return "Connection State";
- case TF_TBL_TYPE_RANGE_PROF:
- return "Range Profile";
- case TF_TBL_TYPE_RANGE_ENTRY:
- return "Range";
- case TF_TBL_TYPE_LAG:
- return "Link Aggregation";
case TF_TBL_TYPE_EM_FKB:
return "EM Flexible Key Builder";
case TF_TBL_TYPE_WC_FKB:
#include "tf_core.h"
#include "tf_device.h"
+#define TF_BITS2BYTES(x) (((x) + 7) >> 3)
+#define TF_BITS2BYTES_WORD_ALIGN(x) ((((x) + 31) >> 5) * 4)
+#define TF_BITS2BYTES_64B_WORD_ALIGN(x) ((((x) + 63) >> 6) * 8)
+
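A few worked values for the rounding macros above (plain arithmetic, not part of the patch):

    /* TF_BITS2BYTES(13)                = (13 + 7) >> 3          = 2 bytes
     * TF_BITS2BYTES_WORD_ALIGN(13)     = ((13 + 31) >> 5) * 4   = 4 bytes
     * TF_BITS2BYTES_64B_WORD_ALIGN(65) = ((65 + 63) >> 6) * 8   = 16 bytes
     */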
/**
* Helper function converting direction to text string
*
return rc;
}
-/**
- * Sends preformatted TruFlow msg to the TruFlow Firmware using
- * the Truflow tunnel HWRM message type.
- *
- * Returns success or failure code.
- */
-int
-tfp_send_msg_tunneled(struct tf *tfp,
- struct tfp_send_msg_parms *parms)
-{
- int rc = 0;
- uint8_t use_kong_mb = 1;
-
- if (parms == NULL)
- return -EINVAL;
-
- if (parms->mailbox == TF_CHIMP_MB)
- use_kong_mb = 0;
-
- rc = bnxt_hwrm_tf_message_tunneled(container_of(tfp,
- struct bnxt,
- tfp),
- use_kong_mb,
- parms->tf_type,
- parms->tf_subtype,
- &parms->tf_resp_code,
- parms->req_data,
- parms->req_size,
- parms->resp_data,
- parms->resp_size);
-
- return rc;
-}
-
/**
* Allocates zero'ed memory from the heap.
*
* [in] tlv_subtype, specifies the tlv_subtype.
*/
uint16_t tf_subtype;
- /**
- * [out] tf_resp_code, response code from the internal tlv
- * message. Only supported on tunneled messages.
- */
- uint32_t tf_resp_code;
/**
* [out] size, number specifying the request size of the data in bytes
*/
* @page Portability
*
* @ref tfp_send_direct
- * @ref tfp_send_msg_tunneled
*
* @ref tfp_calloc
* @ref tfp_memcpy
int tfp_send_msg_direct(struct tf *tfp,
struct tfp_send_msg_parms *parms);
-/**
- * Provides communication capability from the TrueFlow API layer to
- * the TrueFlow firmware. The portability layer internally provides
- * the transport to the firmware.
- *
- * [in] session, pointer to session handle
- * [in] parms, parameter structure
- *
- * Returns:
- * 0 - Success
- * -1 - Global error like not supported
- * -EINVAL - Parameter Error
- */
-int tfp_send_msg_tunneled(struct tf *tfp,
- struct tfp_send_msg_parms *parms);
-
-/**
- * Sends OEM command message to Chimp
- *
- * [in] session, pointer to session handle
- * [in] max_flows, max number of flows requested
- *
- * Returns:
- * 0 - Success
- * -1 - Global error like not supported
- * -EINVAL - Parameter Error
- */
-int
-tfp_msg_hwrm_oem_cmd(struct tf *tfp,
- uint32_t max_flows);
-
/**
* Sends OEM command message to Chimp
*
#define tfp_bswap_32(val) rte_bswap32(val)
#define tfp_bswap_64(val) rte_bswap64(val)
-/**
- * Lookup of the FID in the platform specific structure.
- *
- * [in] session
- * Pointer to session handle
- *
- * [out] fw_fid
- * Pointer to the fw_fid
- *
- * Returns:
- * 0 - Success
- * -EINVAL - Parameter error
- */
-int tfp_get_fid(struct tf *tfp, uint16_t *fw_fid);
-
/**
* Get the PF associated with the fw communications channel.
*