{
ll->head = NULL;
ll->tail = NULL;
+ ll->cnt = 0;
}
/* insert entry in linked list */
entry->next->prev = entry;
ll->head = entry->next->prev;
}
+ ll->cnt++;
}
/* delete entry from linked list */
entry->prev->next = entry->next;
entry->next->prev = entry->prev;
}
+ ll->cnt--;
}
#ifndef _LL_H_
#define _LL_H_
+#include <stdint.h>
+
/* linked list entry */
struct ll_entry {
struct ll_entry *prev;
struct ll {
struct ll_entry *head;
struct ll_entry *tail;
+ uint32_t cnt;
};
/**
void ll_init(struct ll *ll);
/**
- * Linked list insert
+ * Linked list insert at head
*
* [in] ll, linked list where element is inserted
* [in] entry, entry to be added
*/
void ll_delete(struct ll *ll, struct ll_entry *entry);
+/**
+ * Return the next entry in the linked list without removing it
+ *
+ * Useful when performing a search
+ *
+ * [in] entry, current entry in the list
+ */
+static inline struct ll_entry *ll_next(struct ll_entry *entry)
+{
+ return entry->next;
+}
+
+/**
+ * Return the head of the linked list without removing it
+ *
+ * Useful when performing a search
+ *
+ * [in] ll, linked list
+ */
+static inline struct ll_entry *ll_head(struct ll *ll)
+{
+ return ll->head;
+}
+
+/**
+ * Return the tail of the linked list without removing it
+ *
+ * Useful when performing a search
+ *
+ * [in] ll, linked list
+ */
+static inline struct ll_entry *ll_tail(struct ll *ll)
+{
+ return ll->tail;
+}
+
+/**
+ * Return the number of entries in the linked list
+ *
+ * [in] ll, linked list
+ */
+static inline uint32_t ll_cnt(struct ll *ll)
+{
+ return ll->cnt;
+}
#endif /* _LL_H_ */
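/*
 * Editor's note, illustrative sketch only (not part of this patch): a
 * typical search using the new ll_head()/ll_next() accessors.  Assumes
 * the caller embeds struct ll_entry as the first member of its record,
 * and that the insert routine documented above is named ll_insert();
 * my_rec and my_rec_find are hypothetical names.
 */
struct my_rec {
	struct ll_entry entry;	/* must be first so the cast below is valid */
	uint32_t key;
};

static struct my_rec *my_rec_find(struct ll *ll, uint32_t key)
{
	struct ll_entry *e;

	for (e = ll_head(ll); e != NULL; e = ll_next(e)) {
		struct my_rec *r = (struct my_rec *)e;

		if (r->key == key)
			return r;
	}
	return NULL;
}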
'stack.c',
'tf_rm.c',
'tf_tbl.c',
+ 'tf_tbl_sram.c',
+ 'tf_sram_mgr.c',
'tf_em_common.c',
'tf_em_host.c',
'tf_em_internal.c',
strerror(-rc));
return rc;
}
-
- } else {
- if (dev->ops->tf_dev_alloc_tbl == NULL) {
- rc = -EOPNOTSUPP;
+ } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+ rc = dev->ops->tf_dev_alloc_sram_tbl(tfp, &aparms);
+ if (rc) {
TFP_DRV_LOG(ERR,
- "%s: Operation not supported, rc:%s\n",
+ "%s: SRAM table allocation failed, rc:%s\n",
tf_dir_2_str(parms->dir),
strerror(-rc));
- return -EOPNOTSUPP;
+ return rc;
}
-
+ } else {
rc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms);
if (rc) {
TFP_DRV_LOG(ERR,
strerror(-rc));
return rc;
}
- } else {
- if (dev->ops->tf_dev_free_tbl == NULL) {
- rc = -EOPNOTSUPP;
+ } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+ rc = dev->ops->tf_dev_free_sram_tbl(tfp, &fparms);
+ if (rc) {
TFP_DRV_LOG(ERR,
- "%s: Operation not supported, rc:%s\n",
+ "%s: SRAM table free failed, rc:%s\n",
tf_dir_2_str(parms->dir),
strerror(-rc));
- return -EOPNOTSUPP;
+ return rc;
}
+ } else {
rc = dev->ops->tf_dev_free_tbl(tfp, &fparms);
if (rc) {
return rc;
}
}
-
return 0;
}
strerror(-rc));
return rc;
}
+ } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+ rc = dev->ops->tf_dev_set_sram_tbl(tfp, &sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: SRAM table set failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
} else {
if (dev->ops->tf_dev_set_tbl == NULL) {
rc = -EOPNOTSUPP;
strerror(-rc));
return rc;
}
-
- if (dev->ops->tf_dev_get_tbl == NULL) {
- rc = -EOPNOTSUPP;
- TFP_DRV_LOG(ERR,
- "%s: Operation not supported, rc:%s\n",
- tf_dir_2_str(parms->dir),
- strerror(-rc));
- return -EOPNOTSUPP;
- }
-
gparms.dir = parms->dir;
gparms.type = parms->type;
gparms.data = parms->data;
gparms.data_sz_in_bytes = parms->data_sz_in_bytes;
gparms.idx = parms->idx;
- rc = dev->ops->tf_dev_get_tbl(tfp, &gparms);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Table get failed, rc:%s\n",
- tf_dir_2_str(parms->dir),
- strerror(-rc));
- return rc;
+
+ if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+ rc = dev->ops->tf_dev_get_sram_tbl(tfp, &gparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: SRAM table get failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+ } else {
+ if (dev->ops->tf_dev_get_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return -EOPNOTSUPP;
+ }
+
+ rc = dev->ops->tf_dev_get_tbl(tfp, &gparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Table get failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
}
return rc;
return rc;
}
+ bparms.dir = parms->dir;
+ bparms.type = parms->type;
+ bparms.starting_idx = parms->starting_idx;
+ bparms.num_entries = parms->num_entries;
+ bparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;
+ bparms.physical_mem_addr = parms->physical_mem_addr;
+
if (parms->type == TF_TBL_TYPE_EXT) {
/* Not supported, yet */
rc = -EOPNOTSUPP;
strerror(-rc));
return rc;
+ } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {
+ rc = dev->ops->tf_dev_get_bulk_sram_tbl(tfp, &bparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: SRAM table bulk get failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ }
+ return rc;
}
- /* Internal table type processing */
-
if (dev->ops->tf_dev_get_bulk_tbl == NULL) {
rc = -EOPNOTSUPP;
TFP_DRV_LOG(ERR,
return -EOPNOTSUPP;
}
- bparms.dir = parms->dir;
- bparms.type = parms->type;
- bparms.starting_idx = parms->starting_idx;
- bparms.num_entries = parms->num_entries;
- bparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;
- bparms.physical_mem_addr = parms->physical_mem_addr;
rc = dev->ops->tf_dev_get_bulk_tbl(tfp, &bparms);
if (rc) {
TFP_DRV_LOG(ERR,
strerror(-rc));
return rc;
}
-
return rc;
}
TF_EXT_MEM_CHAN_TYPE_MAX
};
+/**
+ * Number of WC TCAM slices per row that a device supports
+ */
+enum tf_wc_num_slice {
+ TF_WC_TCAM_1_SLICE_PER_ROW = 1,
+ TF_WC_TCAM_2_SLICE_PER_ROW = 2,
+ TF_WC_TCAM_4_SLICE_PER_ROW = 4,
+ TF_WC_TCAM_8_SLICE_PER_ROW = 8,
+};
+
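/*
 * Editor's note, illustrative sketch only (not part of this patch): the
 * slice count bounds the largest WC TCAM key a row can hold.  With the
 * per-device slice sizes used later in this patch (12B on P4, 24B on
 * P58/Thor), a 2-slice configuration on Thor accepts keys up to
 * 2 * 24 = 48 bytes; larger keys are rejected with -ENOTSUP.
 * wc_key_fits() is a hypothetical helper illustrating the check.
 */
static inline int wc_key_fits(enum tf_wc_num_slice slices,
			      uint16_t slice_sz_bytes,
			      uint16_t key_sz_bytes)
{
	return key_sz_bytes <= (uint16_t)slices * slice_sz_bytes;
}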
/**
* EEM record AR helper
*
*/
void *bp;
+ /**
+ * [in] wc_num_slices
+ *
+ * The number of slices per row for WC TCAM entries.
+ */
+ enum tf_wc_num_slice wc_num_slices;
+
/**
* [out] shared_session_creator
*
/**
* General internal resource info
*
- * TODO: remove tf_rm_new_entry structure and use this structure
- * internally.
*/
struct tf_resource_info {
uint16_t start;
* entry of the indicated type for this TruFlow session.
*
* Allocates an index table record. This function will attempt to
- * allocate an entry or search an index table for a matching entry if
- * search is enabled (only the shadow copy of the table is accessed).
- *
- * If search is not enabled, the first available free entry is
- * returned. If search is enabled and a matching entry to entry_data
- * is found hit is set to TRUE and success is returned.
+ * allocate an index table entry.
*
* External types:
*
* Allocates an external index table action record.
*
* NOTE:
- * Implementation of the internals of this function will be a stack with push
- * and pop.
+ * For external types, the allocator is implemented internally as a
+ * stack with push and pop operations.
*
* Returns success or failure code.
*/
*
* Internal types:
*
- * If session has shadow_copy enabled the shadow DB is searched and if
- * found the element ref_cnt is decremented. If ref_cnt goes to
- * zero then the element is returned to the session pool.
- *
- * If the session does not have a shadow DB the element is free'ed and
- * given back to the session pool.
+ * The element is freed and given back to the session pool.
*
* External types:
*
- * Free's an external index table action record.
+ * Frees an external index table action record.
*
* NOTE:
- * Implementation of the internals of this function will be a stack with push
- * and pop.
+ * For external types, the free is implemented internally as a stack
+ * with push and pop operations.
*
* Returns success or failure code.
*/
/**
* set index table entry
*
- * Used to insert an application programmed index table entry into a
- * previous allocated table location. A shadow copy of the table
- * is maintained (if enabled) (only for internal objects)
+ * Used to set an application programmed index table entry into a
+ * previously allocated table location.
*
* Returns success or failure code.
*/
#include "tf_rm.h"
#ifdef TF_TCAM_SHARED
#include "tf_tcam_shared.h"
+#include "tf_tbl_sram.h"
#endif /* TF_TCAM_SHARED */
struct tf;
+/* Number of slices per row for WC TCAM */
+uint16_t g_wc_num_slices_per_row = TF_WC_TCAM_1_SLICE_PER_ROW;
+
/* Forward declarations */
static int tf_dev_unbind_p4(struct tf *tfp);
static int tf_dev_unbind_p58(struct tf *tfp);
tf_dev_bind_p4(struct tf *tfp,
bool shadow_copy,
struct tf_session_resources *resources,
- struct tf_dev_info *dev_handle)
+ struct tf_dev_info *dev_handle,
+ enum tf_wc_num_slice wc_num_slices)
{
int rc;
int frc;
if (rsv_cnt) {
tbl_cfg.num_elements = TF_TBL_TYPE_MAX;
tbl_cfg.cfg = tf_tbl_p4;
- tbl_cfg.shadow_copy = shadow_copy;
tbl_cfg.resources = resources;
rc = tf_tbl_bind(tfp, &tbl_cfg);
if (rc) {
tcam_cfg.cfg = tf_tcam_p4;
tcam_cfg.shadow_copy = shadow_copy;
tcam_cfg.resources = resources;
+ tcam_cfg.wc_num_slices = wc_num_slices;
#ifdef TF_TCAM_SHARED
rc = tf_tcam_shared_bind(tfp, &tcam_cfg);
#else /* !TF_TCAM_SHARED */
tf_dev_bind_p58(struct tf *tfp,
bool shadow_copy,
struct tf_session_resources *resources,
- struct tf_dev_info *dev_handle)
+ struct tf_dev_info *dev_handle,
+ enum tf_wc_num_slice wc_num_slices)
{
int rc;
int frc;
if (rsv_cnt) {
tbl_cfg.num_elements = TF_TBL_TYPE_MAX;
tbl_cfg.cfg = tf_tbl_p58;
- tbl_cfg.shadow_copy = shadow_copy;
tbl_cfg.resources = resources;
rc = tf_tbl_bind(tfp, &tbl_cfg);
if (rc) {
goto fail;
}
no_rsv_flag = false;
+
+ rc = tf_tbl_sram_bind(tfp);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "SRAM table initialization failure\n");
+ goto fail;
+ }
}
rsv_cnt = tf_dev_reservation_check(TF_TCAM_TBL_TYPE_MAX,
tcam_cfg.cfg = tf_tcam_p58;
tcam_cfg.shadow_copy = shadow_copy;
tcam_cfg.resources = resources;
+ tcam_cfg.wc_num_slices = wc_num_slices;
#ifdef TF_TCAM_SHARED
rc = tf_tcam_shared_bind(tfp, &tcam_cfg);
#else /* !TF_TCAM_SHARED */
fail = true;
}
+ /* Unbind the SRAM table manager before the table manager: the table
+ * manager owns and frees the table rm_db, while the SRAM table manager
+ * owns and manages its own internal data structures and relies on the
+ * table rm_db still existing.
+ */
+ rc = tf_tbl_sram_unbind(tfp);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Device unbind failed, SRAM table\n");
+ fail = true;
+ }
+
rc = tf_tbl_unbind(tfp);
if (rc) {
TFP_DRV_LOG(INFO,
enum tf_device_type type,
bool shadow_copy,
struct tf_session_resources *resources,
+ uint16_t wc_num_slices,
struct tf_dev_info *dev_handle)
{
switch (type) {
return tf_dev_bind_p4(tfp,
shadow_copy,
resources,
- dev_handle);
+ dev_handle,
+ wc_num_slices);
case TF_DEVICE_TYPE_THOR:
dev_handle->type = type;
return tf_dev_bind_p58(tfp,
shadow_copy,
resources,
- dev_handle);
+ dev_handle,
+ wc_num_slices);
default:
TFP_DRV_LOG(ERR,
"No such device\n");
* [in] resources
* Pointer to resource allocation information
*
+ * [in] wc_num_slices
+ * Number of slices per row for WC
+ *
* [out] dev_handle
* Device handle
*
enum tf_device_type type,
bool shadow_copy,
struct tf_session_resources *resources,
+ uint16_t wc_num_slices,
struct tf_dev_info *dev_handle);
/**
uint16_t resource_id,
const char **resource_str);
+ /**
+ * Set the WC TCAM slice information that the device
+ * supports.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] num_slices_per_row
+ * Number of slices per row the device supports
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_set_tcam_slice_info)(struct tf *tfp,
+ enum tf_wc_num_slice num_slices_per_row);
+
/**
* Retrieves the WC TCAM slice information that the device
* supports.
int (*tf_dev_get_ident_resc_info)(struct tf *tfp,
struct tf_identifier_resource_info *parms);
+ /**
+ * Indicates whether the index table type is SRAM managed
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] type
+ * Truflow index table type, e.g. TF_TBL_TYPE_FULL_ACT_RECORD
+ *
+ * Returns
+ * - (0) if the table is not managed by the SRAM manager
+ * - (1) if the table is managed by the SRAM manager
+ */
+ bool (*tf_dev_is_sram_managed)(struct tf *tfp,
+ enum tf_tbl_type tbl_type);
+
/**
* Get SRAM table information.
*
int (*tf_dev_alloc_tbl)(struct tf *tfp,
struct tf_tbl_alloc_parms *parms);
+ /**
+ * Allocation of an SRAM index table type element.
+ *
+ * This API allocates the specified table type element from a
+ * device specific table type DB. The allocated element is
+ * returned.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to table allocation parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_alloc_sram_tbl)(struct tf *tfp,
+ struct tf_tbl_alloc_parms *parms);
/**
* Allocation of a external table type element.
*
*/
int (*tf_dev_free_tbl)(struct tf *tfp,
struct tf_tbl_free_parms *parms);
-
+ /**
+ * Free of an SRAM table type element.
+ *
+ * This API frees a previously allocated table type element from a
+ * device specific table type DB.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to table free parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_free_sram_tbl)(struct tf *tfp,
+ struct tf_tbl_free_parms *parms);
/**
* Free of a external table type element.
*
int (*tf_dev_set_ext_tbl)(struct tf *tfp,
struct tf_tbl_set_parms *parms);
+ /**
+ * Sets the specified SRAM table type element.
+ *
+ * This API sets the specified element data by invoking the
+ * firmware.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to table set parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_set_sram_tbl)(struct tf *tfp,
+ struct tf_tbl_set_parms *parms);
+
/**
* Retrieves the specified table type element.
*
int (*tf_dev_get_tbl)(struct tf *tfp,
struct tf_tbl_get_parms *parms);
+ /**
+ * Retrieves the specified SRAM table type element.
+ *
+ * This API retrieves the specified element data by invoking the
+ * firmware.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to table get parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_get_sram_tbl)(struct tf *tfp,
+ struct tf_tbl_get_parms *parms);
+
/**
* Retrieves the specified table type element using 'bulk'
* mechanism.
int (*tf_dev_get_bulk_tbl)(struct tf *tfp,
struct tf_tbl_get_bulk_parms *parms);
+ /**
+ * Retrieves the specified SRAM table type element using 'bulk'
+ * mechanism.
+ *
+ * This API retrieves the specified element data by invoking the
+ * firmware.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to table get bulk parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_get_bulk_sram_tbl)(struct tf *tfp,
+ struct tf_tbl_get_bulk_parms *parms);
+
/**
* Gets the increment value to add to the shared session resource
* start offset by for each count in the "stride"
}
/**
- * Device specific function that retrieves the WC TCAM slices the
+ * Device specific function that sets the WC TCAM slices the
* device supports.
*
* [in] tfp
* Pointer to TF handle
*
- * [out] slice_size
- * Pointer to the WC TCAM slice size
+ * [in] num_slices_per_row
+ * The WC TCAM row slice configuration
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+static int
+tf_dev_p4_set_tcam_slice_info(struct tf *tfp __rte_unused,
+ enum tf_wc_num_slice num_slices_per_row)
+{
+ switch (num_slices_per_row) {
+ case TF_WC_TCAM_1_SLICE_PER_ROW:
+ case TF_WC_TCAM_2_SLICE_PER_ROW:
+ case TF_WC_TCAM_4_SLICE_PER_ROW:
+ g_wc_num_slices_per_row = num_slices_per_row;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
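/*
 * Editor's note, illustrative sketch only (not part of this patch): how
 * a bound device might be switched to the 2-slice WC TCAM layout before
 * any WC entries are programmed; dev, tfp and rc are assumed to come
 * from the caller's session context.
 */
if (dev->ops->tf_dev_set_tcam_slice_info != NULL)
	rc = dev->ops->tf_dev_set_tcam_slice_info(tfp,
						  TF_WC_TCAM_2_SLICE_PER_ROW);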
+/**
+ * Device specific function that retrieves the TCAM slices the
+ * device supports.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] type
+ * TF TCAM type
+ *
+ * [in] key_sz
+ * The key size
*
* [out] num_slices_per_row
* Pointer to the WC TCAM row slice configuration
uint16_t *num_slices_per_row)
{
/* Single slice support */
-#define CFA_P4_WC_TCAM_SLICES_PER_ROW 1
#define CFA_P4_WC_TCAM_SLICE_SIZE 12
if (type == TF_TCAM_TBL_TYPE_WC_TCAM) {
- *num_slices_per_row = CFA_P4_WC_TCAM_SLICES_PER_ROW;
+ *num_slices_per_row = g_wc_num_slices_per_row;
if (key_sz > *num_slices_per_row * CFA_P4_WC_TCAM_SLICE_SIZE)
return -ENOTSUP;
} else { /* for other type of tcam */
return ((((size) + 31) >> 5) * 4);
}
+/**
+ * Indicates whether the index table type is SRAM managed
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] type
+ * Truflow index table type, e.g. TF_TBL_TYPE_FULL_ACT_RECORD
+ *
+ * Returns
+ * - (0) if the table is not managed by the SRAM manager
+ * - (1) if the table is managed by the SRAM manager
+ */
+static bool tf_dev_p4_is_sram_managed(struct tf *tfp __rte_unused,
+ enum tf_tbl_type type __rte_unused)
+{
+ return false;
+}
/**
* Truflow P4 device specific functions
*/
const struct tf_dev_ops tf_dev_ops_p4_init = {
.tf_dev_get_max_types = tf_dev_p4_get_max_types,
.tf_dev_get_resource_str = tf_dev_p4_get_resource_str,
+ .tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info,
.tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info,
.tf_dev_alloc_ident = NULL,
.tf_dev_free_ident = NULL,
.tf_dev_search_ident = NULL,
.tf_dev_get_ident_resc_info = NULL,
.tf_dev_get_tbl_info = NULL,
+ .tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed,
.tf_dev_alloc_ext_tbl = NULL,
.tf_dev_alloc_tbl = NULL,
+ .tf_dev_alloc_sram_tbl = NULL,
.tf_dev_free_ext_tbl = NULL,
.tf_dev_free_tbl = NULL,
+ .tf_dev_free_sram_tbl = NULL,
.tf_dev_set_tbl = NULL,
.tf_dev_set_ext_tbl = NULL,
+ .tf_dev_set_sram_tbl = NULL,
.tf_dev_get_tbl = NULL,
+ .tf_dev_get_sram_tbl = NULL,
.tf_dev_get_bulk_tbl = NULL,
+ .tf_dev_get_bulk_sram_tbl = NULL,
.tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment,
.tf_dev_get_tbl_resc_info = NULL,
.tf_dev_alloc_tcam = NULL,
const struct tf_dev_ops tf_dev_ops_p4 = {
.tf_dev_get_max_types = tf_dev_p4_get_max_types,
.tf_dev_get_resource_str = tf_dev_p4_get_resource_str,
+ .tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info,
.tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info,
.tf_dev_alloc_ident = tf_ident_alloc,
.tf_dev_free_ident = tf_ident_free,
.tf_dev_search_ident = tf_ident_search,
.tf_dev_get_ident_resc_info = tf_ident_get_resc_info,
.tf_dev_get_tbl_info = NULL,
+ .tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed,
.tf_dev_alloc_tbl = tf_tbl_alloc,
.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,
+ .tf_dev_alloc_sram_tbl = tf_tbl_alloc,
.tf_dev_free_tbl = tf_tbl_free,
.tf_dev_free_ext_tbl = tf_tbl_ext_free,
+ .tf_dev_free_sram_tbl = tf_tbl_free,
.tf_dev_set_tbl = tf_tbl_set,
.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,
+ .tf_dev_set_sram_tbl = NULL,
.tf_dev_get_tbl = tf_tbl_get,
+ .tf_dev_get_sram_tbl = NULL,
.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,
+ .tf_dev_get_bulk_sram_tbl = NULL,
.tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment,
.tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info,
#ifdef TF_TCAM_SHARED
struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = {
[TF_IDENT_TYPE_L2_CTXT_HIGH] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_L2_CTXT_LOW] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_PROF_FUNC] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_WC_PROF] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_EM_PROF] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID,
- 0, 0, 0
+ 0, 0
},
};
struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_PROF_TCAM] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_WC_TCAM] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_SP_TCAM] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM,
- 0, 0, 0
+ 0, 0
},
};
struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
[TF_TBL_TYPE_FULL_ACT_RECORD] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_MCAST_GROUPS] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_ENCAP_8B] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_ENCAP_16B] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_ENCAP_64B] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_SP_SMAC] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_STATS_64] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_ACT_MODIFY_IPV4] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_IPV4,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_METER_PROF] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_METER_INST] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_MIRROR_CONFIG] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR,
- 0, 0, 0
+ 0, 0
},
};
struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = {
[TF_EM_TBL_TYPE_TBL_SCOPE] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE,
- 0, 0, 0
+ 0, 0
},
};
struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = {
[TF_EM_TBL_TYPE_EM_RECORD] = {
TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC,
- 0, 0, 0
+ 0, 0
},
};
#include "tf_if_tbl.h"
#include "tfp.h"
#include "tf_msg_common.h"
+#include "tf_tbl_sram.h"
#define TF_DEV_P58_PARIF_MAX 16
#define TF_DEV_P58_PF_MASK 0xfUL
}
/**
- * Device specific function that retrieves the WC TCAM slices the
+ * Device specific function that sets the WC TCAM slices the
* device supports.
*
* [in] tfp
* Pointer to TF handle
*
- * [out] slice_size
- * Pointer to the WC TCAM slice size
+ * [in] num_slices_per_row
+ * The WC TCAM row slice configuration
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+static int
+tf_dev_p58_set_tcam_slice_info(struct tf *tfp __rte_unused,
+ enum tf_wc_num_slice num_slices_per_row)
+{
+ switch (num_slices_per_row) {
+ case TF_WC_TCAM_1_SLICE_PER_ROW:
+ case TF_WC_TCAM_2_SLICE_PER_ROW:
+ case TF_WC_TCAM_4_SLICE_PER_ROW:
+ g_wc_num_slices_per_row = num_slices_per_row;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Device specific function that retrieves the TCAM slices the
+ * device supports.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] type
+ * TF TCAM type
+ *
+ * [in] key_sz
+ * The key size
*
* [out] num_slices_per_row
* Pointer to the WC TCAM row slice configuration
*/
static int
tf_dev_p58_get_tcam_slice_info(struct tf *tfp __rte_unused,
- enum tf_tcam_tbl_type type,
- uint16_t key_sz,
- uint16_t *num_slices_per_row)
+ enum tf_tcam_tbl_type type,
+ uint16_t key_sz,
+ uint16_t *num_slices_per_row)
{
-#define CFA_P58_WC_TCAM_SLICES_PER_ROW 1
#define CFA_P58_WC_TCAM_SLICE_SIZE 24
-
if (type == TF_TCAM_TBL_TYPE_WC_TCAM) {
- /* only support single slice key size now */
- *num_slices_per_row = CFA_P58_WC_TCAM_SLICES_PER_ROW;
+ *num_slices_per_row = g_wc_num_slices_per_row;
if (key_sz > *num_slices_per_row * CFA_P58_WC_TCAM_SLICE_SIZE)
return -ENOTSUP;
} else { /* for other type of tcam */
return 0;
}
+/**
+ * Indicates whether the index table type is SRAM managed
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] type
+ * Truflow index table type, e.g. TF_TBL_TYPE_FULL_ACT_RECORD
+ *
+ * Returns
+ * - (0) if the table is not managed by the SRAM manager
+ * - (1) if the table is managed by the SRAM manager
+ */
+static bool tf_dev_p58_is_sram_managed(struct tf *tfp __rte_unused,
+ enum tf_tbl_type type)
+{
+ switch (type) {
+ case TF_TBL_TYPE_FULL_ACT_RECORD:
+ case TF_TBL_TYPE_COMPACT_ACT_RECORD:
+ case TF_TBL_TYPE_ACT_ENCAP_8B:
+ case TF_TBL_TYPE_ACT_ENCAP_16B:
+ case TF_TBL_TYPE_ACT_ENCAP_32B:
+ case TF_TBL_TYPE_ACT_ENCAP_64B:
+ case TF_TBL_TYPE_ACT_SP_SMAC:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+ case TF_TBL_TYPE_ACT_STATS_64:
+ case TF_TBL_TYPE_ACT_MODIFY_IPV4:
+ case TF_TBL_TYPE_ACT_MODIFY_8B:
+ case TF_TBL_TYPE_ACT_MODIFY_16B:
+ case TF_TBL_TYPE_ACT_MODIFY_32B:
+ case TF_TBL_TYPE_ACT_MODIFY_64B:
+ return true;
+ default:
+ return false;
+ }
+}
+
#define TF_DEV_P58_BANK_SZ_64B 2048
/**
* Get SRAM table information.
}
return 0;
}
+
/**
* Truflow P58 device specific functions
*/
const struct tf_dev_ops tf_dev_ops_p58_init = {
.tf_dev_get_max_types = tf_dev_p58_get_max_types,
.tf_dev_get_resource_str = tf_dev_p58_get_resource_str,
+ .tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info,
.tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info,
.tf_dev_alloc_ident = NULL,
.tf_dev_free_ident = NULL,
.tf_dev_search_ident = NULL,
.tf_dev_get_ident_resc_info = NULL,
.tf_dev_get_tbl_info = NULL,
+ .tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed,
.tf_dev_alloc_ext_tbl = NULL,
.tf_dev_alloc_tbl = NULL,
+ .tf_dev_alloc_sram_tbl = NULL,
.tf_dev_free_ext_tbl = NULL,
.tf_dev_free_tbl = NULL,
+ .tf_dev_free_sram_tbl = NULL,
.tf_dev_set_tbl = NULL,
.tf_dev_set_ext_tbl = NULL,
+ .tf_dev_set_sram_tbl = NULL,
.tf_dev_get_tbl = NULL,
+ .tf_dev_get_sram_tbl = NULL,
.tf_dev_get_bulk_tbl = NULL,
+ .tf_dev_get_bulk_sram_tbl = NULL,
.tf_dev_get_shared_tbl_increment = tf_dev_p58_get_shared_tbl_increment,
.tf_dev_get_tbl_resc_info = NULL,
.tf_dev_alloc_tcam = NULL,
const struct tf_dev_ops tf_dev_ops_p58 = {
.tf_dev_get_max_types = tf_dev_p58_get_max_types,
.tf_dev_get_resource_str = tf_dev_p58_get_resource_str,
+ .tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info,
.tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info,
.tf_dev_alloc_ident = tf_ident_alloc,
.tf_dev_free_ident = tf_ident_free,
.tf_dev_search_ident = tf_ident_search,
.tf_dev_get_ident_resc_info = tf_ident_get_resc_info,
+ .tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed,
.tf_dev_get_tbl_info = tf_dev_p58_get_sram_tbl_info,
.tf_dev_alloc_tbl = tf_tbl_alloc,
+ .tf_dev_alloc_sram_tbl = tf_tbl_sram_alloc,
.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,
.tf_dev_free_tbl = tf_tbl_free,
.tf_dev_free_ext_tbl = tf_tbl_ext_free,
+ .tf_dev_free_sram_tbl = tf_tbl_sram_free,
.tf_dev_set_tbl = tf_tbl_set,
.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,
+ .tf_dev_set_sram_tbl = tf_tbl_sram_set,
.tf_dev_get_tbl = tf_tbl_get,
+ .tf_dev_get_sram_tbl = tf_tbl_sram_get,
.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,
+ .tf_dev_get_bulk_sram_tbl = tf_tbl_sram_bulk_get,
.tf_dev_get_shared_tbl_increment = tf_dev_p58_get_shared_tbl_increment,
.tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info,
#ifdef TF_TCAM_SHARED
struct tf_rm_element_cfg tf_ident_p58[TF_IDENT_TYPE_MAX] = {
[TF_IDENT_TYPE_L2_CTXT_HIGH] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_L2_CTXT_LOW] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_PROF_FUNC] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_FUNC,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_WC_PROF] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID,
- 0, 0, 0
+ 0, 0
},
[TF_IDENT_TYPE_EM_PROF] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_PROF_ID,
- 0, 0, 0
+ 0, 0
},
};
struct tf_rm_element_cfg tf_tcam_p58[TF_TCAM_TBL_TYPE_MAX] = {
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_PROF_TCAM] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_TCAM,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_WC_TCAM] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM,
- 0, 0, 0
+ 0, 0
},
[TF_TCAM_TBL_TYPE_VEB_TCAM] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_VEB_TCAM,
- 0, 0, 0
+ 0, 0
},
};
struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {
[TF_TBL_TYPE_EM_FKB] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_FKB,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_WC_FKB] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_FKB,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_METER_PROF] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_PROF,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_METER_INST] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_METER_DROP_CNT] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_DROP_CNT,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_MIRROR_CONFIG] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR,
- 0, 0, 0
+ 0, 0
},
[TF_TBL_TYPE_METADATA] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METADATA,
- 0, 0, 0
+ 0, 0
},
/* Policy - ARs in bank 1 */
[TF_TBL_TYPE_FULL_ACT_RECORD] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
- .slices = 1,
+ .slices = 4,
},
[TF_TBL_TYPE_COMPACT_ACT_RECORD] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_FULL_ACT_RECORD,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
- .slices = 1,
+ .slices = 8,
},
/* Policy - Encaps in bank 2 */
[TF_TBL_TYPE_ACT_ENCAP_8B] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
- .slices = 1,
+ .slices = 8,
},
[TF_TBL_TYPE_ACT_ENCAP_16B] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
- .slices = 1,
+ .slices = 4,
},
[TF_TBL_TYPE_ACT_ENCAP_32B] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
- .slices = 1,
+ .slices = 2,
},
[TF_TBL_TYPE_ACT_ENCAP_64B] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
- .slices = 1,
+ .slices = 8,
},
[TF_TBL_TYPE_ACT_MODIFY_16B] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
- .slices = 1,
+ .slices = 4,
},
[TF_TBL_TYPE_ACT_MODIFY_32B] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
- .slices = 1,
+ .slices = 2,
},
[TF_TBL_TYPE_ACT_MODIFY_64B] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
[TF_TBL_TYPE_ACT_SP_SMAC] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
- .slices = 1,
+ .slices = 8,
},
[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_ACT_SP_SMAC,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
- .slices = 1,
+ .slices = 4,
},
[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
.parent_subtype = TF_TBL_TYPE_ACT_SP_SMAC,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
- .slices = 1,
+ .slices = 2,
},
/* Policy - Stats in bank 3 */
[TF_TBL_TYPE_ACT_STATS_64] = {
.cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_3,
- .slices = 1,
+ .slices = 8,
},
};
struct tf_rm_element_cfg tf_em_int_p58[TF_EM_TBL_TYPE_MAX] = {
[TF_EM_TBL_TYPE_EM_RECORD] = {
TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P58_EM_REC,
- 0, 0, 0
+ 0, 0
},
};
if (rc != 0)
return rc;
- tfp_memcpy(params->data, resp.data, req.size);
+ tfp_memcpy(&parms->data[0], resp.data, req.size);
return 0;
}
*/
uint16_t hcapi_type;
+ /**
+ * Resource slices. How many slices will fit in the
+ * resource pool chunk size.
+ */
+ uint8_t slices;
+
/**
* HCAPI RM allocated range information for the element.
*/
* - - Failure if negative
*/
static int
-tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
+tf_rm_update_parent_reservations(struct tf *tfp,
+ struct tf_dev_info *dev,
+ struct tf_rm_element_cfg *cfg,
uint16_t *alloc_cnt,
uint16_t num_elements,
uint16_t *req_cnt)
{
int parent, child;
+ const char *type_str;
/* Search through all the elements */
for (parent = 0; parent < num_elements; parent++) {
if (alloc_cnt[parent] % cfg[parent].slices)
combined_cnt++;
+ if (alloc_cnt[parent]) {
+ dev->ops->tf_dev_get_resource_str(tfp,
+ cfg[parent].hcapi_type,
+ &type_str);
+ }
+
/* Search again through all the elements */
for (child = 0; child < num_elements; child++) {
/* If this is one of my children */
if (cfg[child].cfg_type ==
TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
- cfg[child].parent_subtype == parent) {
+ cfg[child].parent_subtype == parent &&
+ alloc_cnt[child]) {
uint16_t cnt = 0;
RTE_ASSERT(cfg[child].slices);
+ dev->ops->tf_dev_get_resource_str(tfp,
+ cfg[child].hcapi_type,
+ &type_str);
/* Increment the parents combined count
* with each child's count adjusted for
* number of slices per RM allocated item.
/* Update the req_cnt based upon the element configuration
*/
- tf_rm_update_parent_reservations(parms->cfg,
+ tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
parms->alloc_cnt,
parms->num_elements,
req_cnt);
db[i].cfg_type = cfg->cfg_type;
db[i].hcapi_type = cfg->hcapi_type;
+ db[i].slices = cfg->slices;
/* Save the parent subtype for later use to find the pool
*/
return 0;
}
+int
+tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
+{
+ struct tf_rm_new_db *rm_db;
+ enum tf_rm_elem_cfg_type cfg_type;
+
+ TF_CHECK_PARMS2(parms, parms->rm_db);
+ rm_db = (struct tf_rm_new_db *)parms->rm_db;
+ TF_CHECK_PARMS1(rm_db->db);
+
+ cfg_type = rm_db->db[parms->subtype].cfg_type;
+
+ /* Bail out if not controlled by HCAPI */
+ if (cfg_type == TF_RM_ELEM_CFG_NULL)
+ return -ENOTSUP;
+
+ *parms->slices = rm_db->db[parms->subtype].slices;
+
+ return 0;
+}
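/*
 * Editor's note, illustrative sketch only (not part of this patch):
 * minimal caller of tf_rm_get_slices(); tbl_rm_db is assumed to be the
 * table module's RM DB handle for the chosen direction.
 */
uint16_t slices = 0;
struct tf_rm_get_slices_parms sparms = { 0 };

sparms.rm_db = tbl_rm_db;
sparms.subtype = TF_TBL_TYPE_FULL_ACT_RECORD;
sparms.slices = &slices;
rc = tf_rm_get_slices(&sparms);
if (!rc)
	TFP_DRV_LOG(INFO, "full AR: %d slices per RM element\n", slices);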
int
tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
* support module, not called directly.
*/
-/**
- * Resource reservation single entry result. Used when accessing HCAPI
- * RM on the firmware.
- */
-struct tf_rm_new_entry {
- /** Starting index of the allocated resource */
- uint16_t start;
- /** Number of allocated elements */
- uint16_t stride;
-};
/**
* RM Element configuration enumeration. Used by the Device to
*/
enum tf_rm_elem_cfg_type cfg_type;
- /* If a HCAPI to TF type conversion is required then TF type
- * can be added here.
- */
-
/**
* HCAPI RM Type for the element. Used for TF to HCAPI type
* conversion.
uint16_t hcapi_type;
/**
- * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD
+ * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT
*
* Parent Truflow module subtype associated with this resource type.
*/
uint16_t parent_subtype;
/**
- * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD
+ * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT
*
* Resource slices. How many slices will fit in the
* resource pool chunk size.
*/
uint8_t slices;
-
- /**
- * Pool element divider count
- * If 0 or 1, there is 1:1 correspondence between the RM
- * BA pool resource element and the HCAPI RM firmware
- * resource. If > 1, the RM BA pool element has a 1:n
- * correspondence to the HCAPI RM firmware resource.
- */
- uint8_t divider;
};
/**
* In case of dynamic allocation support this would have
* to be changed to linked list of tf_rm_entry instead.
*/
- struct tf_rm_new_entry entry;
+ struct tf_resource_info entry;
};
/**
*/
uint16_t *hcapi_type;
};
+/**
+ * Get Slices parameters for a single element
+ */
+struct tf_rm_get_slices_parms {
+ /**
+ * [in] RM DB Handle
+ */
+ void *rm_db;
+ /**
+ * [in] TF subtype indicates which DB entry to perform the
+ * action on. (e.g. TF_TBL_TYPE_FULL_ACTION subtype of module
+ * TF_MODULE_TYPE_TABLE)
+ */
+ uint16_t subtype;
+ /**
+ * [in/out] Pointer to number of slices for the given type
+ */
+ uint16_t *slices;
+};
/**
* Get InUse count parameters for single element
* @ref tf_rm_get_hcapi_type
*
* @ref tf_rm_get_inuse_count
+ *
+ * @ref tf_rm_get_slices
*/
/**
int
tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms);
+/**
+ * Get the number of slices per resource bit allocator for the resource type
+ *
+ * [in] parms
+ * Pointer to get slices parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int
+tf_rm_get_slices(struct tf_rm_get_slices_parms *parms);
#endif /* TF_RM_NEW_H_ */
parms->open_cfg->device_type,
session->shadow_copy,
&parms->open_cfg->resources,
+ parms->open_cfg->wc_num_slices,
&session->dev);
/* Logging handled by dev_bind */
return rc;
}
+int tf_session_get(struct tf *tfp,
+ struct tf_session **tfs,
+ struct tf_dev_info **tfd)
+{
+ int rc;
+ rc = tf_session_get_session_internal(tfp, tfs);
+
+ /* Logging done by tf_session_get_session_internal */
+ if (rc)
+ return rc;
+
+ rc = tf_session_get_device(*tfs, tfd);
+
+ return rc;
+}
+
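/*
 * Editor's note, illustrative sketch only (not part of this patch):
 * tf_session_get() collapses the common two-step session/device lookup
 * into one call.
 */
struct tf_session *tfs = NULL;
struct tf_dev_info *dev = NULL;

rc = tf_session_get(tfp, &tfs, &dev);
if (rc)
	return rc;
/* both tfs and dev are now valid for this tfp */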
struct tf_session_client *
tf_session_get_session_client(struct tf_session *tfs,
union tf_session_client_id session_client_id)
tfs->tcam_shared_db_handle = tcam_shared_db_handle;
return rc;
}
+
+int
+tf_session_get_sram_db(struct tf *tfp,
+ void **sram_handle)
+{
+ struct tf_session *tfs = NULL;
+ int rc = 0;
+
+ *sram_handle = NULL;
+
+ if (tfp == NULL)
+ return (-EINVAL);
+
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ *sram_handle = tfs->sram_handle;
+ return rc;
+}
+
+int
+tf_session_set_sram_db(struct tf *tfp,
+ void *sram_handle)
+{
+ struct tf_session *tfs = NULL;
+ int rc = 0;
+
+ if (tfp == NULL)
+ return (-EINVAL);
+
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ tfs->sram_handle = sram_handle;
+ return rc;
+}
+
#endif /* TF_TCAM_SHARED */
*/
void *tcam_shared_db_handle;
#endif /* TF_TCAM_SHARED */
+ /**
+ * SRAM db reference for the session
+ */
+ void *sram_handle;
};
/**
*
* @ref tf_session_set_tcam_shared_db
* #endif
+ *
+ * @ref tf_session_get_sram_db
+ *
+ * @ref tf_session_set_sram_db
*/
/**
/**
* Looks up the device information from the TF Session.
*
- * [in] tfp
- * Pointer to TF handle
+ * [in] tfs
+ * Pointer to session handle
*
* [out] tfd
- * Pointer pointer to the device
+ * Pointer to the device
*
* Returns
* - (0) if successful.
int tf_session_get_device(struct tf_session *tfs,
struct tf_dev_info **tfd);
+/**
+ * Returns the session and the device from the tfp.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] tfs
+ * Pointer to the session
+ *
+ * [out] tfd
+ * Pointer to the device
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_session_get(struct tf *tfp,
+ struct tf_session **tfs,
+ struct tf_dev_info **tfd);
+
/**
* Looks up the FW Session id the requested TF handle.
*
tf_session_get_tcam_shared_db(struct tf *tfp,
void **tcam_shared_db_handle);
+/**
+ * Set the pointer to the SRAM database
+ *
+ * [in] tfp, pointer to the TF handle
+ *
+ * [in] sram_handle, pointer to the SRAM database
+ *
+ * Returns:
+ * - (0) if successful, -EINVAL on failure
+ */
+int
+tf_session_set_sram_db(struct tf *tfp,
+ void *sram_handle);
+
+/**
+ * Get the pointer to the SRAM database
+ *
+ * [in] tfp, pointer to the TF handle
+ *
+ * [out] sram_handle, pointer to return the SRAM database
+ *
+ * Returns:
+ * - (0) if successful, -EINVAL on failure
+ */
+int
+tf_session_get_sram_db(struct tf *tfp,
+ void **sram_handle);
+
#endif /* _TF_SESSION_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include "tf_sram_mgr.h"
+#include "tf_core.h"
+#include "tf_rm.h"
+#include "tf_common.h"
+#include "assert.h"
+#include "tf_util.h"
+#include "tfp.h"
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+#include "tf_msg.h"
+#endif
+/***************************
+ * Internal Data Structures
+ ***************************/
+
+/**
+ * TF SRAM block info
+ *
+ * Contains all the information about a particular 64B SRAM
+ * block and the slices within it.
+ */
+struct tf_sram_block {
+ /* Previous block
+ */
+ struct tf_sram_block *prev;
+ /* Next block
+ */
+ struct tf_sram_block *next;
+
+ /** Bitmap indicating which slices are in use
+ * If a bit is set, it indicates the slice
+ * in the row is in use.
+ */
+ uint8_t in_use_mask;
+
+ /** Block id - this is a 64B offset
+ */
+ uint16_t block_id;
+};
+
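/*
 * Editor's note, worked example (not part of this patch): in_use_mask
 * carries one bit per slice of the 64B block, scaled by the list's
 * slice size.  For a 16B slice list only bits 0..3 are meaningful
 * (full mask 0x0f); e.g. in_use_mask == 0x05 means the slices at byte
 * offsets 0 and 32 are allocated while offsets 16 and 48 are free.
 */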
+/**
+ * TF SRAM block list
+ *
+ * List of 64B SRAM blocks used for fixed size slices (8, 16, 32, 64B)
+ */
+struct tf_sram_slice_list {
+ /** Pointer to head of linked list of blocks.
+ */
+ struct tf_sram_block *head;
+
+ /** Pointer to tail of linked list of blocks.
+ */
+ struct tf_sram_block *tail;
+
+ /** Total count of blocks
+ */
+ uint32_t cnt;
+
+ /** First non-full block in the list
+ */
+ struct tf_sram_block *first_not_full_block;
+
+ /** Entry slice size for this list
+ */
+ enum tf_sram_slice_size size;
+};
+
+
+/**
+ * TF SRAM bank info consists of lists of different slice sizes per bank
+ */
+struct tf_sram_bank_info {
+ struct tf_sram_slice_list slice[TF_SRAM_SLICE_SIZE_MAX];
+};
+
+/**
+ * SRAM banks consist of SRAM bank information
+ */
+struct tf_sram_bank {
+ struct tf_sram_bank_info bank[TF_SRAM_BANK_ID_MAX];
+};
+
+/**
+ * Complete SRAM state: one set of SRAM banks per direction
+ */
+struct tf_sram {
+ struct tf_sram_bank dir[TF_DIR_MAX];
+};
+
+/**********************
+ * Internal functions
+ **********************/
+
+/**
+ * Get slice size in string format
+ */
+const char
+*tf_sram_slice_2_str(enum tf_sram_slice_size slice_size)
+{
+ switch (slice_size) {
+ case TF_SRAM_SLICE_SIZE_8B:
+ return "8B slice";
+ case TF_SRAM_SLICE_SIZE_16B:
+ return "16B slice";
+ case TF_SRAM_SLICE_SIZE_32B:
+ return "32B slice";
+ case TF_SRAM_SLICE_SIZE_64B:
+ return "64B slice";
+ default:
+ return "Invalid slice size";
+ }
+}
+
+/**
+ * Get bank in string format
+ */
+const char
+*tf_sram_bank_2_str(enum tf_sram_bank_id bank_id)
+{
+ switch (bank_id) {
+ case TF_SRAM_BANK_ID_0:
+ return "bank_0";
+ case TF_SRAM_BANK_ID_1:
+ return "bank_1";
+ case TF_SRAM_BANK_ID_2:
+ return "bank_2";
+ case TF_SRAM_BANK_ID_3:
+ return "bank_3";
+ default:
+ return "Invalid bank_id";
+ }
+}
+
+/**
+ * TF SRAM get slice list
+ */
+static int
+tf_sram_get_slice_list(struct tf_sram *sram,
+ struct tf_sram_slice_list **slice_list,
+ enum tf_sram_slice_size slice_size,
+ enum tf_dir dir,
+ enum tf_sram_bank_id bank_id)
+{
+ int rc = 0;
+
+ TF_CHECK_PARMS2(sram, slice_list);
+
+ *slice_list = &sram->dir[dir].bank[bank_id].slice[slice_size];
+
+ return rc;
+}
+
+uint16_t tf_sram_bank_2_base_offset[TF_SRAM_BANK_ID_MAX] = {
+ 0,
+ 2048,
+ 4096,
+ 6144
+};
+
+/**
+ * Translate a block id and bank_id to an 8B offset
+ */
+static void
+tf_sram_block_id_2_offset(enum tf_sram_bank_id bank_id, uint16_t block_id,
+ uint16_t *offset)
+{
+ *offset = (block_id + tf_sram_bank_2_base_offset[bank_id]) << 3;
+}
+
+/**
+ * Translates an 8B offset and bank_id to a block_id
+ */
+static void
+tf_sram_offset_2_block_id(enum tf_sram_bank_id bank_id, uint16_t offset,
+ uint16_t *block_id, uint16_t *slice_offset)
+{
+ *slice_offset = offset & 0x7;
+ *block_id = ((offset & ~0x7) >> 3) -
+ tf_sram_bank_2_base_offset[bank_id];
+}
+
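/*
 * Editor's note, worked example (not part of this patch): a round trip
 * through the two helpers above for bank 1 (base 2048) and block_id 5:
 *
 *   offset       = (5 + 2048) << 3 = 16424   (8B word address)
 *   slice_offset = 16424 & 0x7     = 0
 *   block_id     = (16424 >> 3) - 2048 = 5
 */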
+/**
+ * Find a matching block_id within the slice list
+ */
+static struct tf_sram_block
+*tf_sram_find_block(uint16_t block_id, struct tf_sram_slice_list *slice_list)
+{
+ uint32_t cnt;
+ struct tf_sram_block *block;
+
+ cnt = slice_list->cnt;
+ block = slice_list->head;
+
+ while (cnt > 0 && block) {
+ if (block->block_id == block_id)
+ return block;
+ block = block->next;
+ cnt--;
+ }
+ return NULL;
+}
+
+/**
+ * Given the current block get the next block within the slice list
+ *
+ * List is not changed.
+ */
+static struct tf_sram_block
+*tf_sram_get_next_block(struct tf_sram_block *block)
+{
+ struct tf_sram_block *nblock;
+
+ if (block != NULL)
+ nblock = block->next;
+ else
+ nblock = NULL;
+ return nblock;
+}
+
+/**
+ * Free an allocated slice from a block and if the block is empty,
+ * return an indication so that the block can be freed.
+ */
+static int
+tf_sram_free_slice(enum tf_sram_slice_size slice_size,
+ uint16_t slice_offset, struct tf_sram_block *block,
+ bool *block_is_empty)
+{
+ int rc = 0;
+ uint8_t shift;
+ uint8_t slice_mask = 0;
+
+ TF_CHECK_PARMS2(block, block_is_empty);
+
+ switch (slice_size) {
+ case TF_SRAM_SLICE_SIZE_8B:
+ shift = slice_offset >> 0;
+ assert(shift < 8);
+ slice_mask = 1 << shift;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_16B:
+ shift = slice_offset >> 1;
+ assert(shift < 4);
+ slice_mask = 1 << shift;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_32B:
+ shift = slice_offset >> 2;
+ assert(shift < 2);
+ slice_mask = 1 << shift;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_64B:
+ default:
+ shift = slice_offset >> 0;
+ assert(shift < 1);
+ slice_mask = 1 << shift;
+ break;
+ }
+
+ if ((block->in_use_mask & slice_mask) == 0) {
+ rc = -EINVAL;
+ TFP_DRV_LOG(ERR, "block_id(0x%x) slice(%d) was not allocated\n",
+ block->block_id, slice_offset);
+ return rc;
+ }
+
+ block->in_use_mask &= ~slice_mask;
+
+ if (block->in_use_mask == 0)
+ *block_is_empty = true;
+ else
+ *block_is_empty = false;
+
+ return rc;
+}
+
+/**
+ * TF SRAM get next slice
+ *
+ * Gets the next slice_offset available in the block
+ * and updates the in_use_mask.
+ */
+static int
+tf_sram_get_next_slice_in_block(struct tf_sram_block *block,
+ enum tf_sram_slice_size slice_size,
+ uint16_t *slice_offset,
+ bool *block_is_full)
+{
+ int rc, free_id = -1;
+ uint8_t shift, max_slices, mask, i, full_mask;
+
+ TF_CHECK_PARMS3(block, slice_offset, block_is_full);
+
+ switch (slice_size) {
+ case TF_SRAM_SLICE_SIZE_8B:
+ shift = 0;
+ max_slices = 8;
+ full_mask = 0xff;
+ break;
+ case TF_SRAM_SLICE_SIZE_16B:
+ shift = 1;
+ max_slices = 4;
+ full_mask = 0xf;
+ break;
+ case TF_SRAM_SLICE_SIZE_32B:
+ shift = 2;
+ max_slices = 2;
+ full_mask = 0x3;
+ break;
+ case TF_SRAM_SLICE_SIZE_64B:
+ default:
+ shift = 0;
+ max_slices = 1;
+ full_mask = 1;
+ break;
+ }
+
+ mask = block->in_use_mask;
+
+ for (i = 0; i < max_slices; i++) {
+ if ((mask & 1) == 0) {
+ free_id = i;
+ block->in_use_mask |= 1 << free_id;
+ break;
+ }
+ mask = mask >> 1;
+ }
+
+ if (block->in_use_mask == full_mask)
+ *block_is_full = true;
+ else
+ *block_is_full = false;
+
+
+ if (free_id >= 0) {
+ *slice_offset = free_id << shift;
+ rc = 0;
+ } else {
+ *slice_offset = 0;
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
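/*
 * Editor's note, worked example (not part of this patch): first-fit
 * behaviour of the scan above for a 16B slice list (shift 1,
 * max_slices 4, full_mask 0xf).  Starting from in_use_mask == 0x0b
 * (slices 0, 1 and 3 taken), the scan picks free_id 2, the mask
 * becomes 0x0f, *slice_offset = 2 << 1 = 4 and *block_is_full is set.
 */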
+/**
+ * TF SRAM get indication as to whether the slice offset is
+ * allocated in the block.
+ *
+ */
+static int
+tf_sram_is_slice_allocated_in_block(struct tf_sram_block *block,
+ enum tf_sram_slice_size slice_size,
+ uint16_t slice_offset,
+ bool *is_allocated)
+{
+ int rc = 0;
+ uint8_t shift;
+ uint8_t slice_mask = 0;
+
+ TF_CHECK_PARMS2(block, is_allocated);
+
+ *is_allocated = false;
+
+ switch (slice_size) {
+ case TF_SRAM_SLICE_SIZE_8B:
+ shift = slice_offset >> 0;
+ assert(shift < 8);
+ slice_mask = 1 << shift;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_16B:
+ shift = slice_offset >> 1;
+ assert(shift < 4);
+ slice_mask = 1 << shift;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_32B:
+ shift = slice_offset >> 2;
+ assert(shift < 2);
+ slice_mask = 1 << shift;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_64B:
+ default:
+ shift = slice_offset >> 0;
+ assert(shift < 1);
+ slice_mask = 1 << shift;
+ break;
+ }
+
+ if ((block->in_use_mask & slice_mask) == 0) {
+ TFP_DRV_LOG(ERR, "block_id(0x%x) slice(%d) was not allocated\n",
+ block->block_id, slice_offset);
+ *is_allocated = false;
+ } else {
+ *is_allocated = true;
+ }
+
+ return rc;
+}
+
+/**
+ * Get the block count
+ */
+static uint32_t
+tf_sram_get_block_cnt(struct tf_sram_slice_list *slice_list)
+{
+ return slice_list->cnt;
+}
+
+
+/**
+ * Free a block data structure - does not free to the RM
+ */
+static void
+tf_sram_free_block(struct tf_sram_slice_list *slice_list,
+ struct tf_sram_block *block)
+{
+ if (slice_list->head == block && slice_list->tail == block) {
+ slice_list->head = NULL;
+ slice_list->tail = NULL;
+ } else if (slice_list->head == block) {
+ slice_list->head = block->next;
+ slice_list->head->prev = NULL;
+ } else if (slice_list->tail == block) {
+ slice_list->tail = block->prev;
+ slice_list->tail->next = NULL;
+ } else {
+ block->prev->next = block->next;
+ block->next->prev = block->prev;
+ }
+ tfp_free(block);
+ slice_list->cnt--;
+}
+
+/**
+ * Free the entire slice_list
+ */
+static void
+tf_sram_free_slice_list(struct tf_sram_slice_list *slice_list)
+{
+ uint32_t i, block_cnt;
+ struct tf_sram_block *nblock, *block;
+
+ block_cnt = tf_sram_get_block_cnt(slice_list);
+ block = slice_list->head;
+
+ for (i = 0; i < block_cnt; i++) {
+ nblock = block->next;
+ tf_sram_free_block(slice_list, block);
+ block = nblock;
+ }
+}
+
+/**
+ * Allocate a single SRAM block from memory and add it to the slice list
+ */
+static struct tf_sram_block
+*tf_sram_alloc_block(struct tf_sram_slice_list *slice_list,
+ uint16_t block_id)
+{
+ struct tf_sram_block *block;
+ struct tfp_calloc_parms cparms;
+ int rc;
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_sram_block);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "Failed to allocate block, rc:%s\n",
+ strerror(-rc));
+ return NULL;
+ }
+ block = (struct tf_sram_block *)cparms.mem_va;
+ block->block_id = block_id;
+
+ if (slice_list->head == NULL) {
+ slice_list->head = block;
+ slice_list->tail = block;
+ block->next = NULL;
+ block->prev = NULL;
+ } else {
+ block->next = slice_list->head;
+ block->prev = NULL;
+ block->next->prev = block;
+ slice_list->head = block->next->prev;
+ }
+ slice_list->cnt++;
+ return block;
+}
+
+/**
+ * Find the first not full block in the slice list
+ */
+static void
+tf_sram_find_first_not_full_block(struct tf_sram_slice_list *slice_list,
+ enum tf_sram_slice_size slice_size,
+ struct tf_sram_block **first_not_full_block)
+{
+ struct tf_sram_block *block = slice_list->head;
+ uint8_t slice_mask, mask;
+
+ switch (slice_size) {
+ case TF_SRAM_SLICE_SIZE_8B:
+ slice_mask = 0xff;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_16B:
+ slice_mask = 0xf;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_32B:
+ slice_mask = 0x3;
+ break;
+
+ case TF_SRAM_SLICE_SIZE_64B:
+ default:
+ slice_mask = 0x1;
+ break;
+ }
+
+ *first_not_full_block = NULL;
+
+ while (block) {
+ mask = block->in_use_mask & slice_mask;
+ if (mask != slice_mask) {
+ *first_not_full_block = block;
+ break;
+ }
+ block = block->next;
+ }
+}
+
+static void
+tf_sram_dump_block(struct tf_sram_block *block)
+{
+ TFP_DRV_LOG(INFO, "block_id(0x%x) in_use_mask(0x%02x)\n",
+ block->block_id,
+ block->in_use_mask);
+}
+
+/**********************
+ * External functions
+ **********************/
+int
+tf_sram_mgr_bind(void **sram_handle)
+{
+ int rc = 0;
+ struct tf_sram *sram;
+ struct tfp_calloc_parms cparms;
+
+ TF_CHECK_PARMS1(sram_handle);
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_sram);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "Failed to allocate SRAM mgmt data, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ sram = (struct tf_sram *)cparms.mem_va;
+ *sram_handle = sram;
+ return rc;
+}
+
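/*
 * Editor's note, illustrative sketch only (not part of this patch): the
 * expected wiring done by tf_tbl_sram_bind() elsewhere in this patch
 * (assumed; tf_tbl_sram.c is not shown in this excerpt) -- create the
 * per-session DB and hang it off the session.
 */
void *sram = NULL;

rc = tf_sram_mgr_bind(&sram);
if (!rc)
	rc = tf_session_set_sram_db(tfp, sram);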
+int
+tf_sram_mgr_unbind(void *sram_handle)
+{
+ int rc = 0;
+ struct tf_sram *sram;
+ enum tf_sram_bank_id bank_id;
+ enum tf_sram_slice_size slice_size;
+ enum tf_dir dir;
+ struct tf_sram_slice_list *slice_list;
+
+ TF_CHECK_PARMS1(sram_handle);
+
+ sram = (struct tf_sram *)sram_handle;
+
+ for (dir = 0; dir < TF_DIR_MAX; dir++) {
+ /* For each bank
+ */
+ for (bank_id = TF_SRAM_BANK_ID_0;
+ bank_id < TF_SRAM_BANK_ID_MAX;
+ bank_id++) {
+ /* For each slice size
+ */
+ for (slice_size = TF_SRAM_SLICE_SIZE_8B;
+ slice_size < TF_SRAM_SLICE_SIZE_MAX;
+ slice_size++) {
+ rc = tf_sram_get_slice_list(sram, &slice_list,
+ slice_size, dir,
+ bank_id);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "No SRAM slice list, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ if (tf_sram_get_block_cnt(slice_list))
+ tf_sram_free_slice_list(slice_list);
+ }
+ }
+ }
+
+ tfp_free(sram);
+ sram_handle = NULL;
+
+ /* Freeing of the RM resources is handled by the table manager */
+ return rc;
+}
+
+int tf_sram_mgr_alloc(void *sram_handle,
+ struct tf_sram_mgr_alloc_parms *parms)
+{
+ int rc = 0;
+ struct tf_sram *sram;
+ struct tf_sram_slice_list *slice_list;
+ uint16_t block_id, slice_offset = 0;
+ uint32_t index;
+ struct tf_sram_block *block;
+ struct tf_rm_allocate_parms aparms = { 0 };
+ bool block_is_full;
+ uint16_t block_offset;
+
+ TF_CHECK_PARMS3(sram_handle, parms, parms->sram_offset);
+
+ sram = (struct tf_sram *)sram_handle;
+
+ /* Check the current slice list
+ */
+ rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+ parms->dir, parms->bank_id);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "No SRAM slice list, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* If the list is empty or all entries are full allocate a new block
+ */
+ if (!slice_list->first_not_full_block) {
+ /* Allocate and insert a new block
+ */
+ aparms.index = &index;
+ aparms.subtype = parms->tbl_type;
+ aparms.rm_db = parms->rm_db;
+ rc = tf_rm_allocate(&aparms);
+ if (rc)
+ return rc;
+
+ block_id = index;
+ block = tf_sram_alloc_block(slice_list, block_id);
+ } else {
+ /* Block exists
+ */
+ block =
+ (struct tf_sram_block *)(slice_list->first_not_full_block);
+ }
+ rc = tf_sram_get_next_slice_in_block(block,
+ parms->slice_size,
+ &slice_offset,
+ &block_is_full);
+
+ /* Find the new first non-full block in the list
+ */
+ tf_sram_find_first_not_full_block(slice_list,
+ parms->slice_size,
+ &slice_list->first_not_full_block);
+
+ tf_sram_block_id_2_offset(parms->bank_id, block->block_id,
+ &block_offset);
+
+ *parms->sram_offset = block_offset + slice_offset;
+ return rc;
+}
+
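/*
 * Editor's note, illustrative sketch only (not part of this patch): a
 * minimal caller allocating a 16B encap slice from bank 2 (the bank the
 * P58 RM config above places encaps in); rm_db and sram_handle are
 * assumed to come from the table module and session respectively.
 */
uint16_t sram_offset = 0;
struct tf_sram_mgr_alloc_parms aparms = { 0 };

aparms.dir = TF_DIR_RX;
aparms.bank_id = TF_SRAM_BANK_ID_2;
aparms.slice_size = TF_SRAM_SLICE_SIZE_16B;
aparms.tbl_type = TF_TBL_TYPE_ACT_ENCAP_16B;
aparms.rm_db = rm_db;
aparms.sram_offset = &sram_offset;
rc = tf_sram_mgr_alloc(sram_handle, &aparms);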
+int
+tf_sram_mgr_free(void *sram_handle,
+ struct tf_sram_mgr_free_parms *parms)
+{
+ int rc = 0;
+ struct tf_sram *sram;
+ struct tf_sram_slice_list *slice_list;
+ uint16_t block_id, slice_offset;
+ struct tf_sram_block *block;
+ bool block_is_empty;
+ struct tf_rm_free_parms fparms = { 0 };
+
+ TF_CHECK_PARMS2(sram_handle, parms);
+
+ sram = (struct tf_sram *)sram_handle;
+
+ /* Check the current slice list
+ */
+ rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+ parms->dir, parms->bank_id);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "No SRAM slice list, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Determine the block id and slice offset from the SRAM offset
+ */
+ tf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id,
+ &slice_offset);
+
+ /* Search the list of blocks for the matching block id
+ */
+ block = tf_sram_find_block(block_id, slice_list);
+ if (block == NULL) {
+ TFP_DRV_LOG(ERR, "block not found 0x%x\n", block_id);
+ return -EINVAL;
+ }
+
+ /* If found, search for the matching SRAM slice in use.
+ */
+ rc = tf_sram_free_slice(parms->slice_size, slice_offset,
+ block, &block_is_empty);
+ if (rc) {
+ TFP_DRV_LOG(ERR, "Error freeing slice (%s)\n", strerror(-rc));
+ return rc;
+ }
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+ /* If this is a counter, clear it. In the future we need to switch to
+ * using the special access registers on Thor to automatically clear on
+ * read.
+ */
+ /* If this is counter table, clear the entry on free */
+ if (parms->tbl_type == TF_TBL_TYPE_ACT_STATS_64) {
+ uint8_t data[8] = { 0 };
+ uint16_t hcapi_type = 0;
+ struct tf_rm_get_hcapi_parms hparms = { 0 };
+
+ /* Get the hcapi type */
+ hparms.rm_db = parms->rm_db;
+ hparms.subtype = parms->tbl_type;
+ hparms.hcapi_type = &hcapi_type;
+ rc = tf_rm_get_hcapi_type(&hparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Failed type lookup, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->tbl_type),
+ strerror(-rc));
+ return rc;
+ }
+ /* Clear the counter
+ */
+ rc = tf_msg_set_tbl_entry(parms->tfp,
+ parms->dir,
+ hcapi_type,
+ sizeof(data),
+ data,
+ parms->sram_offset);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Set failed, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->tbl_type),
+ strerror(-rc));
+ return rc;
+ }
+ }
+#endif
+ /* If the block is empty, free the block to the RM
+ */
+ if (block_is_empty) {
+ fparms.rm_db = parms->rm_db;
+ fparms.subtype = parms->tbl_type;
+ fparms.index = block_id;
+ rc = tf_rm_free(&fparms);
+
+ if (rc) {
+ TFP_DRV_LOG(ERR, "Free block_id(%d) failed error(%s)\n",
+ block_id, strerror(-rc));
+ }
+ /* Free local entry regardless
+ */
+ tf_sram_free_block(slice_list, block);
+
+ /* Find the next non-full block in the list
+ */
+ tf_sram_find_first_not_full_block(slice_list,
+ parms->slice_size,
+ &slice_list->first_not_full_block);
+ }
+
+ return rc;
+}
+
+int
+tf_sram_mgr_dump(void *sram_handle,
+ struct tf_sram_mgr_dump_parms *parms)
+{
+ int rc = 0;
+ struct tf_sram *sram;
+ struct tf_sram_slice_list *slice_list;
+ uint32_t block_cnt, i;
+ struct tf_sram_block *block;
+
+ TF_CHECK_PARMS2(sram_handle, parms);
+
+ sram = (struct tf_sram *)sram_handle;
+
+ rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+ parms->dir, parms->bank_id);
+ if (rc)
+ return rc;
+
+ if (slice_list->cnt || slice_list->first_not_full_block) {
+ TFP_DRV_LOG(INFO, "\n********** %s: %s: %s ***********\n",
+ tf_sram_bank_2_str(parms->bank_id),
+ tf_dir_2_str(parms->dir),
+ tf_sram_slice_2_str(parms->slice_size));
+
+ block_cnt = tf_sram_get_block_cnt(slice_list);
+ TFP_DRV_LOG(INFO, "block_cnt(%d)\n", block_cnt);
+ if (slice_list->first_not_full_block)
+ TFP_DRV_LOG(INFO, "first_not_full_block(0x%x)\n",
+ slice_list->first_not_full_block->block_id);
+ block = slice_list->head;
+ for (i = 0; i < block_cnt; i++) {
+ tf_sram_dump_block(block);
+ block = tf_sram_get_next_block(block);
+ }
+ TFP_DRV_LOG(INFO, "*********************************\n");
+ }
+ return rc;
+}
+
+/**
+ * Validate an SRAM Slice is allocated
+ *
+ * Validate whether the SRAM slice is allocated
+ *
+ * [in] sram_handle
+ * Pointer to SRAM handle
+ *
+ * [in] parms
+ * Pointer to the SRAM is-allocated parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_is_allocated(void *sram_handle,
+ struct tf_sram_mgr_is_allocated_parms *parms)
+{
+ int rc = 0;
+ struct tf_sram *sram;
+ struct tf_sram_slice_list *slice_list;
+ uint16_t block_id, slice_offset;
+ struct tf_sram_block *block;
+
+ TF_CHECK_PARMS3(sram_handle, parms, parms->is_allocated);
+
+ sram = (struct tf_sram *)sram_handle;
+
+ /* Check the current slice list
+ */
+ rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,
+ parms->dir, parms->bank_id);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "No SRAM slice list, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* If the list is empty, then it cannot be allocated
+ */
+ if (!slice_list->cnt) {
+ TFP_DRV_LOG(ERR, "List is empty for %s:%s:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_sram_slice_2_str(parms->slice_size),
+ tf_sram_bank_2_str(parms->bank_id));
+
+ *parms->is_allocated = false;
+ goto done;
+ }
+
+ /* Determine the block id and slice offset from the SRAM offset
+ */
+ tf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id,
+ &slice_offset);
+
+ /* Search the list of blocks for the matching block id
+ */
+ block = tf_sram_find_block(block_id, slice_list);
+ if (block == NULL) {
+ TFP_DRV_LOG(ERR, "block not found in list 0x%x\n",
+ parms->sram_offset);
+ *parms->is_allocated = false;
+ goto done;
+ }
+
+ rc = tf_sram_is_slice_allocated_in_block(block,
+ parms->slice_size,
+ slice_offset,
+ parms->is_allocated);
+done:
+ return rc;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TF_SRAM_MGR_H_
+#define _TF_SRAM_MGR_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include "tf_core.h"
+#include "tf_rm.h"
+
+/* When special access registers are used to access the SRAM, stats can be
+ * automatically cleared on read by the hardware. This requires additional
+ * support to be added in the firmware to use these registers for statistics.
+ * The support entails using the special access registers to read the stats.
+ * These are stored in bank 3 currently but may move depending upon the
+ * policy defined in tf_device_p58.h
+ */
+#define STATS_CLEAR_ON_READ_SUPPORT 0
+
+#define TF_SRAM_MGR_BLOCK_SZ_BYTES 64
+#define TF_SRAM_MGR_MIN_SLICE_BYTES 8
+
+/**
+ * Bank identifier
+ */
+enum tf_sram_bank_id {
+ TF_SRAM_BANK_ID_0, /**< SRAM Bank 0 id */
+ TF_SRAM_BANK_ID_1, /**< SRAM Bank 1 id */
+ TF_SRAM_BANK_ID_2, /**< SRAM Bank 2 id */
+ TF_SRAM_BANK_ID_3, /**< SRAM Bank 3 id */
+ TF_SRAM_BANK_ID_MAX /**< SRAM Bank index limit */
+};
+
+/**
+ * TF slice size.
+ *
+ * A slice is part of a 64B row
+ *
+ * Each slice is a multiple of 8B
+ */
+enum tf_sram_slice_size {
+ TF_SRAM_SLICE_SIZE_8B, /**< 8 byte SRAM slice */
+ TF_SRAM_SLICE_SIZE_16B, /**< 16 byte SRAM slice */
+ TF_SRAM_SLICE_SIZE_32B, /**< 32 byte SRAM slice */
+ TF_SRAM_SLICE_SIZE_64B, /**< 64 byte SRAM slice */
+ TF_SRAM_SLICE_SIZE_MAX /**< slice limit */
+};
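+
+/*
+ * Geometry sketch (illustrative only): a 64B block managed at
+ * TF_SRAM_SLICE_SIZE_16B holds 64/16 = 4 slices. All offsets handed
+ * out by the manager are expressed in 8B (TF_SRAM_MGR_MIN_SLICE_BYTES)
+ * units, so consecutive 16B slices within a block are 2 units apart.
+ */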
+
+/** Initialize the SRAM slice manager
+ *
+ * The SRAM slice manager manages slices within 64B rows. Slices are of size
+ * tf_sram_slice_size. This function provides a handle to the SRAM manager
+ * data.
+ *
+ * The SRAM manager may dynamically allocate its data structures during
+ * initialization when running on the host.
+ *
+ * [in/out] sram_handle
+ * Pointer to SRAM handle
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ *
+ * Returns the handle for the SRAM slice manager
+ */
+int tf_sram_mgr_bind(void **sram_handle);
+
+/** Uninitialize the SRAM slice manager
+ *
+ * Frees any dynamically allocated data structures for SRAM slice management.
+ *
+ * [in] sram_handle
+ * Pointer to SRAM handle
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ */
+int tf_sram_mgr_unbind(void *sram_handle);
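+
+/*
+ * Lifecycle sketch (illustrative only):
+ *
+ *   void *sram_handle = NULL;
+ *
+ *   rc = tf_sram_mgr_bind(&sram_handle);
+ *   ... tf_sram_mgr_alloc()/tf_sram_mgr_free() against sram_handle ...
+ *   rc = tf_sram_mgr_unbind(sram_handle);
+ */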
+
+/**
+ * tf_sram_mgr_alloc_parms parameter definition
+ */
+struct tf_sram_mgr_alloc_parms {
+ /**
+ * [in] dir
+ */
+ enum tf_dir dir;
+ /**
+ * [in] bank
+ *
+ * the SRAM bank to allocate from
+ */
+ enum tf_sram_bank_id bank_id;
+ /**
+ * [in] slice_size
+ *
+ * the slice size to allocate
+ */
+ enum tf_sram_slice_size slice_size;
+ /**
+ * [in/out] sram_offset
+ *
+ * A pointer to be filled with the allocated SRAM slice offset (in 8B units)
+ */
+ uint16_t *sram_offset;
+ /**
+ * [in] RM DB Handle required for RM allocation
+ */
+ void *rm_db;
+ /**
+ * [in] tf table type
+ */
+ enum tf_tbl_type tbl_type;
+};
+
+/**
+ * Allocate an SRAM Slice
+ *
+ * Allocate an SRAM slice from the indicated bank. If successful, an SRAM
+ * offset expressed in 8B units is returned. Slices are variable sized.
+ * This may result in a row being allocated from the RM SRAM bank pool if
+ * required.
+ *
+ * [in] sram_handle
+ * Pointer to SRAM handle
+ *
+ * [in] parms
+ * Pointer to the SRAM alloc parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_alloc(void *sram_handle,
+ struct tf_sram_mgr_alloc_parms *parms);
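+
+/*
+ * Example usage (an illustrative sketch, not part of the API contract;
+ * assumes sram_handle came from tf_sram_mgr_bind() and rm_db is the
+ * table RM database for the direction):
+ *
+ *   struct tf_sram_mgr_alloc_parms ap = { 0 };
+ *   uint16_t offset;
+ *
+ *   ap.dir = TF_DIR_RX;
+ *   ap.bank_id = TF_SRAM_BANK_ID_0;
+ *   ap.slice_size = TF_SRAM_SLICE_SIZE_8B;
+ *   ap.tbl_type = TF_TBL_TYPE_ACT_STATS_64;
+ *   ap.rm_db = rm_db;
+ *   ap.sram_offset = &offset;
+ *   rc = tf_sram_mgr_alloc(sram_handle, &ap);
+ */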
+/**
+ * tf_sram_mgr_free_parms parameter definition
+ */
+struct tf_sram_mgr_free_parms {
+ /**
+ * [in] dir
+ */
+ enum tf_dir dir;
+ /**
+ * [in] bank
+ *
+ * the SRAM bank to free to
+ */
+ enum tf_sram_bank_id bank_id;
+ /**
+ * [in] slice_size
+ *
+ * the slice size to be returned
+ */
+ enum tf_sram_slice_size slice_size;
+ /**
+ * [in] sram_offset
+ *
+ * the SRAM slice offset (in 8B units) to be returned
+ */
+ uint16_t sram_offset;
+ /**
+ * [in] RM DB Handle required for RM free
+ */
+ void *rm_db;
+ /**
+ * [in] tf table type
+ */
+ enum tf_tbl_type tbl_type;
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+ /**
+ * [in] tfp
+ *
+ * A pointer to the tf handle
+ */
+ void *tfp;
+#endif
+};
+
+/**
+ * Free an SRAM Slice
+ *
+ * Free an SRAM slice to the indicated bank. This may result in a 64B row
+ * being returned to the RM SRAM bank pool.
+ *
+ * [in] sram_handle
+ * Pointer to SRAM handle
+ *
+ * [in] parms
+ * Pointer to the SRAM free parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_free(void *sram_handle,
+ struct tf_sram_mgr_free_parms *parms);
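+
+/*
+ * Example usage (an illustrative sketch mirroring the alloc example
+ * above; 'offset' is the value returned by tf_sram_mgr_alloc()):
+ *
+ *   struct tf_sram_mgr_free_parms fp = { 0 };
+ *
+ *   fp.dir = TF_DIR_RX;
+ *   fp.bank_id = TF_SRAM_BANK_ID_0;
+ *   fp.slice_size = TF_SRAM_SLICE_SIZE_8B;
+ *   fp.tbl_type = TF_TBL_TYPE_ACT_STATS_64;
+ *   fp.rm_db = rm_db;
+ *   fp.sram_offset = offset;
+ *   fp.tfp = tfp;  (required while STATS_CLEAR_ON_READ_SUPPORT == 0)
+ *   rc = tf_sram_mgr_free(sram_handle, &fp);
+ */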
+
+/**
+ * tf_sram_mgr_dump_parms parameter definition
+ */
+struct tf_sram_mgr_dump_parms {
+ /**
+ * [in] dir
+ */
+ enum tf_dir dir;
+ /**
+ * [in] bank
+ *
+ * the SRAM bank to dump
+ */
+ enum tf_sram_bank_id bank_id;
+ /**
+ * [in] slice_size
+ *
+ * the slice size list to be dumped
+ */
+ enum tf_sram_slice_size slice_size;
+};
+
+/**
+ * Dump a slice list
+ *
+ * Dump the slice list given the SRAM bank and the slice size
+ *
+ * [in] sram_handle
+ * Pointer to SRAM handle
+ *
+ * [in] parms
+ * Pointer to the SRAM dump parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_dump(void *sram_handle,
+ struct tf_sram_mgr_dump_parms *parms);
+
+/**
+ * tf_sram_mgr_is_allocated_parms parameter definition
+ */
+struct tf_sram_mgr_is_allocated_parms {
+ /**
+ * [in] dir
+ */
+ enum tf_dir dir;
+ /**
+ * [in] bank
+ *
+ * the SRAM bank to allocate from
+ */
+ enum tf_sram_bank_id bank_id;
+ /**
+ * [in] slice_size
+ *
+ * the slice size which was allocated
+ */
+ enum tf_sram_slice_size slice_size;
+ /**
+ * [in] sram_offset
+ *
+ * The sram slice offset to validate
+ */
+ uint16_t sram_offset;
+ /**
+ * [in/out] is_allocated
+ *
+ * Pointer passed in to be filled with indication of allocation
+ */
+ bool *is_allocated;
+};
+
+/**
+ * Validate an SRAM Slice is allocated
+ *
+ * Validate whether the SRAM slice is allocated
+ *
+ * [in] sram_handle
+ * Pointer to SRAM handle
+ *
+ * [in] parms
+ * Pointer to the SRAM is-allocated parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ *
+ */
+int tf_sram_mgr_is_allocated(void *sram_handle,
+ struct tf_sram_mgr_is_allocated_parms *parms);
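+
+/*
+ * Example usage (an illustrative sketch; checks the slice allocated in
+ * the alloc example above):
+ *
+ *   struct tf_sram_mgr_is_allocated_parms ip = { 0 };
+ *   bool in_use = false;
+ *
+ *   ip.dir = TF_DIR_RX;
+ *   ip.bank_id = TF_SRAM_BANK_ID_0;
+ *   ip.slice_size = TF_SRAM_SLICE_SIZE_8B;
+ *   ip.sram_offset = offset;
+ *   ip.is_allocated = &in_use;
+ *   rc = tf_sram_mgr_is_allocated(sram_handle, &ip);
+ */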
+
+/**
+ * Given the slice size, return a char string
+ */
+const char
+*tf_sram_slice_2_str(enum tf_sram_slice_size slice_size);
+
+/**
+ * Given the bank_id, return a char string
+ */
+const char
+*tf_sram_bank_2_str(enum tf_sram_bank_id bank_id);
+
+#endif /* _TF_SRAM_MGR_H_ */
#include "tf_session.h"
#include "tf_device.h"
-#define TF_TBL_RM_TO_PTR(new_idx, idx, base, shift) { \
- *(new_idx) = (((idx) + (base)) << (shift)); \
-}
-
-#define TF_TBL_PTR_TO_RM(new_idx, idx, base, shift) { \
- *(new_idx) = (((idx) >> (shift)) - (base)); \
-}
-
struct tf;
-/**
- * Shadow init flag, set on bind and cleared on unbind
- */
-static uint8_t shadow_init;
+#define TF_TBL_RM_TO_PTR(new_idx, idx, base, shift) { \
+ *(new_idx) = (((idx) + (base)) << (shift)); \
+}
int
tf_tbl_bind(struct tf *tfp,
tbl_db->tbl_db[i] = NULL;
}
- shadow_init = 0;
-
return 0;
}
struct tf_rm_allocate_parms aparms = { 0 };
struct tf_session *tfs;
struct tf_dev_info *dev;
- uint16_t base = 0, shift = 0;
struct tbl_rm_db *tbl_db;
void *tbl_db_ptr = NULL;
rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
if (rc) {
TFP_DRV_LOG(ERR,
- "Failed to get em_ext_db from session, rc:%s\n",
+ "Failed to get tbl_db from session, rc:%s\n",
strerror(-rc));
return rc;
}
tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
- /* Only get table info if required for the device */
- if (dev->ops->tf_dev_get_tbl_info) {
- rc = dev->ops->tf_dev_get_tbl_info(tfp,
- tbl_db->tbl_db[parms->dir],
- parms->type,
- &base,
- &shift);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Failed to get table info:%d\n",
- tf_dir_2_str(parms->dir),
- parms->type);
- return rc;
- }
- }
-
/* Allocate requested element */
aparms.rm_db = tbl_db->tbl_db[parms->dir];
aparms.subtype = parms->type;
rc = tf_rm_allocate(&aparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s: Failed allocate, type:%d\n",
+ "%s: Failed allocate, type:%s\n",
tf_dir_2_str(parms->dir),
- parms->type);
+ tf_tbl_type_2_str(parms->type));
return rc;
}
- TF_TBL_RM_TO_PTR(&idx, idx, base, shift);
*parms->idx = idx;
return 0;
int allocated = 0;
struct tf_session *tfs;
struct tf_dev_info *dev;
- uint16_t base = 0, shift = 0;
struct tbl_rm_db *tbl_db;
void *tbl_db_ptr = NULL;
}
tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
- /* Only get table info if required for the device */
- if (dev->ops->tf_dev_get_tbl_info) {
- rc = dev->ops->tf_dev_get_tbl_info(tfp,
- tbl_db->tbl_db[parms->dir],
- parms->type,
- &base,
- &shift);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Failed to get table info:%d\n",
- tf_dir_2_str(parms->dir),
- parms->type);
- return rc;
- }
- }
-
/* Check if element is in use */
aparms.rm_db = tbl_db->tbl_db[parms->dir];
aparms.subtype = parms->type;
-
- TF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);
-
+ aparms.index = parms->idx;
aparms.allocated = &allocated;
rc = tf_rm_is_allocated(&aparms);
if (rc)
if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {
TFP_DRV_LOG(ERR,
- "%s: Entry already free, type:%d, index:%d\n",
+ "%s: Entry already free, type:%s, index:%d\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
parms->idx);
return -EINVAL;
}
rc = tf_rm_get_hcapi_type(&hparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Failed type lookup, type:%d, rc:%s\n",
+ "%s, Failed type lookup, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
return rc;
}
parms->idx);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Set failed, type:%d, rc:%s\n",
+ "%s, Set failed, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
return rc;
}
/* Free requested element */
fparms.rm_db = tbl_db->tbl_db[parms->dir];
fparms.subtype = parms->type;
-
- TF_TBL_PTR_TO_RM(&fparms.index, parms->idx, base, shift);
-
+ fparms.index = parms->idx;
rc = tf_rm_free(&fparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s: Free failed, type:%d, index:%d\n",
+ "%s: Free failed, type:%s, index:%d\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
parms->idx);
return rc;
}
struct tf_rm_get_hcapi_parms hparms = { 0 };
struct tf_session *tfs;
struct tf_dev_info *dev;
- uint16_t base = 0, shift = 0;
struct tbl_rm_db *tbl_db;
void *tbl_db_ptr = NULL;
}
tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
- /* Only get table info if required for the device */
- if (dev->ops->tf_dev_get_tbl_info) {
- rc = dev->ops->tf_dev_get_tbl_info(tfp,
- tbl_db->tbl_db[parms->dir],
- parms->type,
- &base,
- &shift);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Failed to get table info:%d\n",
- tf_dir_2_str(parms->dir),
- parms->type);
- return rc;
- }
- }
/* Do not check meter drop counter because it is not allocated
* resources
/* Verify that the entry has been previously allocated */
aparms.rm_db = tbl_db->tbl_db[parms->dir];
aparms.subtype = parms->type;
- TF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);
-
aparms.allocated = &allocated;
+ aparms.index = parms->idx;
rc = tf_rm_is_allocated(&aparms);
if (rc)
return rc;
if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {
TFP_DRV_LOG(ERR,
- "%s, Invalid or not allocated index, type:%d, idx:%d\n",
- tf_dir_2_str(parms->dir),
- parms->type,
- parms->idx);
+ "%s, Invalid or not allocated, type:%s, idx:%d\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ parms->idx);
return -EINVAL;
}
}
rc = tf_rm_get_hcapi_type(&hparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Failed type lookup, type:%d, rc:%s\n",
+ "%s, Failed type lookup, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
return rc;
}
parms->idx);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Set failed, type:%d, rc:%s\n",
+ "%s, Set failed, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
return rc;
}
struct tf_rm_get_hcapi_parms hparms = { 0 };
struct tf_session *tfs;
struct tf_dev_info *dev;
- uint16_t base = 0, shift = 0;
struct tbl_rm_db *tbl_db;
void *tbl_db_ptr = NULL;
}
tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
- /* Only get table info if required for the device */
- if (dev->ops->tf_dev_get_tbl_info) {
- rc = dev->ops->tf_dev_get_tbl_info(tfp,
- tbl_db->tbl_db[parms->dir],
- parms->type,
- &base,
- &shift);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Failed to get table info:%d\n",
- tf_dir_2_str(parms->dir),
- parms->type);
- return rc;
- }
- }
-
/* Do not check meter drop counter because it is not allocated
* resources.
*/
/* Verify that the entry has been previously allocated */
aparms.rm_db = tbl_db->tbl_db[parms->dir];
aparms.subtype = parms->type;
- TF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);
-
+ aparms.index = parms->idx;
aparms.allocated = &allocated;
rc = tf_rm_is_allocated(&aparms);
if (rc)
if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {
TFP_DRV_LOG(ERR,
- "%s, Invalid or not allocated index, type:%d, idx:%d\n",
+ "%s, Invalid or not allocated index, type:%s, idx:%d\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
parms->idx);
return -EINVAL;
}
rc = tf_rm_get_hcapi_type(&hparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Failed type lookup, type:%d, rc:%s\n",
+ "%s, Failed type lookup, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
return rc;
}
parms->idx);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Get failed, type:%d, rc:%s\n",
+ "%s, Get failed, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
return rc;
}
struct tf_rm_check_indexes_in_range_parms cparms = { 0 };
struct tf_session *tfs;
struct tf_dev_info *dev;
- uint16_t base = 0, shift = 0;
struct tbl_rm_db *tbl_db;
void *tbl_db_ptr = NULL;
}
tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
- /* Only get table info if required for the device */
- if (dev->ops->tf_dev_get_tbl_info) {
- rc = dev->ops->tf_dev_get_tbl_info(tfp,
- tbl_db->tbl_db[parms->dir],
- parms->type,
- &base,
- &shift);
- if (rc) {
- TFP_DRV_LOG(ERR,
- "%s: Failed to get table info:%d\n",
- tf_dir_2_str(parms->dir),
- parms->type);
- return rc;
- }
- }
-
/* Verify that the entries are in the range of reserved resources. */
cparms.rm_db = tbl_db->tbl_db[parms->dir];
cparms.subtype = parms->type;
-
- TF_TBL_PTR_TO_RM(&cparms.starting_index, parms->starting_idx,
- base, shift);
-
cparms.num_entries = parms->num_entries;
+ cparms.starting_index = parms->starting_idx;
rc = tf_rm_check_indexes_in_range(&cparms);
if (rc) {
TFP_DRV_LOG(ERR,
"%s, Invalid or %d index starting from %d"
- " not in range, type:%d",
+ " not in range, type:%s",
tf_dir_2_str(parms->dir),
parms->starting_idx,
parms->num_entries,
- parms->type);
+ tf_tbl_type_2_str(parms->type));
return rc;
}
rc = tf_rm_get_hcapi_type(&hparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Failed type lookup, type:%d, rc:%s\n",
+ "%s, Failed type lookup, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
return rc;
}
parms->physical_mem_addr);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s, Bulk get failed, type:%d, rc:%s\n",
+ "%s, Bulk get failed, type:%s, rc:%s\n",
tf_dir_2_str(parms->dir),
- parms->type,
+ tf_tbl_type_2_str(parms->type),
strerror(-rc));
}
struct tf_rm_get_alloc_info_parms ainfo;
void *tbl_db_ptr = NULL;
struct tbl_rm_db *tbl_db;
- uint16_t base = 0, shift = 0;
struct tf_dev_info *dev;
struct tf_session *tfs;
+ uint16_t base = 0, shift = 0;
TF_CHECK_PARMS2(tfp, tbl);
tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
- /* check if reserved resource for WC is multiple of num_slices */
for (d = 0; d < TF_DIR_MAX; d++) {
ainfo.rm_db = tbl_db->tbl_db[d];
dinfo = tbl[d].info;
* Table Type element configuration array
*/
struct tf_rm_element_cfg *cfg;
- /**
- * Shadow table type configuration array
- */
- struct tf_shadow_tbl_cfg *shadow_cfg;
- /**
- * Boolean controlling the request shadow copy.
- */
- bool shadow_copy;
/**
* Session resource allocations
*/
*
* @ref tf_tbl_free
*
- * @ref tf_tbl_alloc_search
- *
* @ref tf_tbl_set
*
* @ref tf_tbl_get
struct tf_tbl_alloc_parms *parms);
/**
- * Free's the requested table type and returns it to the DB. If shadow
- * DB is enabled its searched first and if found the element refcount
- * is decremented. If refcount goes to 0 then its returned to the
- * table type DB.
+ * Frees the requested table type and returns it to the DB.
*
* [in] tfp
* Pointer to TF handle, used for HCAPI communication
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+/* Truflow Table APIs and supporting code */
+
+#include <rte_common.h>
+
+#include "tf_tbl.h"
+#include "tf_tbl_sram.h"
+#include "tf_sram_mgr.h"
+#include "tf_common.h"
+#include "tf_rm.h"
+#include "tf_util.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "tf_session.h"
+#include "tf_device.h"
+#include "cfa_resource_types.h"
+
+#define DBG_SRAM 0
+
+/**
+ * tf_tbl_sram_get_info_parms parameter definition
+ */
+struct tf_tbl_sram_get_info_parms {
+ /**
+ * [in] table RM database
+ */
+ void *rm_db;
+ /**
+ * [in] Receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] table_type
+ *
+ * the TF index table type
+ */
+ enum tf_tbl_type tbl_type;
+ /**
+ * [out] bank
+ *
+ * The SRAM bank associated with the type
+ */
+ enum tf_sram_bank_id bank_id;
+ /**
+ * [out] slice_size
+ *
+ * the slice size for the indicated table type
+ */
+ enum tf_sram_slice_size slice_size;
+};
+
+/**
+ * Translate HCAPI type to SRAM Manager bank
+ */
+const uint16_t tf_tbl_sram_hcapi_2_bank[CFA_RESOURCE_TYPE_P58_LAST] = {
+ [CFA_RESOURCE_TYPE_P58_SRAM_BANK_0] = TF_SRAM_BANK_ID_0,
+ [CFA_RESOURCE_TYPE_P58_SRAM_BANK_1] = TF_SRAM_BANK_ID_1,
+ [CFA_RESOURCE_TYPE_P58_SRAM_BANK_2] = TF_SRAM_BANK_ID_2,
+ [CFA_RESOURCE_TYPE_P58_SRAM_BANK_3] = TF_SRAM_BANK_ID_3
+};
+
+#define TF_TBL_SRAM_SLICES_MAX \
+ (TF_SRAM_MGR_BLOCK_SZ_BYTES / TF_SRAM_MGR_MIN_SLICE_BYTES)
+/**
+ * Translate the number of slices per 64B block to the SRAM Manager
+ * slice size
+ */
+const uint8_t tf_tbl_sram_slices_2_size[TF_TBL_SRAM_SLICES_MAX + 1] = {
+ [0] = TF_SRAM_SLICE_SIZE_64B, /* if 0 slices assume 1 64B block */
+ [1] = TF_SRAM_SLICE_SIZE_64B, /* 1 slice per 64B block */
+ [2] = TF_SRAM_SLICE_SIZE_32B, /* 2 slices per 64B block */
+ [4] = TF_SRAM_SLICE_SIZE_16B, /* 4 slices per 64B block */
+ [8] = TF_SRAM_SLICE_SIZE_8B /* 8 slices per 64B block */
+};
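+
+/*
+ * For example, a type whose RM DB reports 4 slices per 64B block maps to
+ * TF_SRAM_SLICE_SIZE_16B (64B / 4). Only power-of-two slice counts are
+ * expected; other indexes are left at their zero default.
+ */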
+
+/**
+ * Get SRAM Table Information for a given index table type
+ *
+ *
+ * [in] sram_handle
+ * Pointer to SRAM handle
+ *
+ * [in] parms
+ * Pointer to the SRAM get info parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure
+ *
+ */
+static int tf_tbl_sram_get_info(struct tf_tbl_sram_get_info_parms *parms)
+{
+ int rc = 0;
+ uint16_t hcapi_type;
+ uint16_t slices;
+ struct tf_rm_get_hcapi_parms hparms;
+ struct tf_rm_get_slices_parms sparms;
+
+ hparms.rm_db = parms->rm_db;
+ hparms.subtype = parms->tbl_type;
+ hparms.hcapi_type = &hcapi_type;
+
+ rc = tf_rm_get_hcapi_type(&hparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to get hcapi_type %s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->tbl_type),
+ strerror(-rc));
+ return rc;
+ }
+ parms->bank_id = tf_tbl_sram_hcapi_2_bank[hcapi_type];
+
+ sparms.rm_db = parms->rm_db;
+ sparms.subtype = parms->tbl_type;
+ sparms.slices = &slices;
+
+ rc = tf_rm_get_slices(&sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to get slice cnt %s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->tbl_type),
+ strerror(-rc));
+ return rc;
+ }
+ if (slices)
+ parms->slice_size = tf_tbl_sram_slices_2_size[slices];
+
+ TFP_DRV_LOG(INFO,
+ "(%s) bank(%s) slice_size(%s)\n",
+ tf_tbl_type_2_str(parms->tbl_type),
+ tf_sram_bank_2_str(parms->bank_id),
+ tf_sram_slice_2_str(parms->slice_size));
+ return rc;
+}
+
+int
+tf_tbl_sram_bind(struct tf *tfp)
+{
+ int rc = 0;
+ void *sram_handle = NULL;
+
+ TF_CHECK_PARMS1(tfp);
+
+ rc = tf_sram_mgr_bind(&sram_handle);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "SRAM table manager bind failed, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ tf_session_set_sram_db(tfp, sram_handle);
+
+ TFP_DRV_LOG(INFO,
+ "SRAM Table - initialized\n");
+
+ return rc;
+}
+
+int
+tf_tbl_sram_unbind(struct tf *tfp)
+{
+ int rc = 0;
+ void *sram_handle = NULL;
+
+ TF_CHECK_PARMS1(tfp);
+
+ rc = tf_session_get_sram_db(tfp, &sram_handle);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get sram_handle from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ if (sram_handle)
+ rc = tf_sram_mgr_unbind(sram_handle);
+
+ TFP_DRV_LOG(INFO,
+ "SRAM Table - deinitialized\n");
+ return rc;
+}
+
+int
+tf_tbl_sram_alloc(struct tf *tfp,
+ struct tf_tbl_alloc_parms *parms)
+{
+ int rc;
+ uint16_t idx;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tbl_sram_get_info_parms iparms = { 0 };
+ struct tf_sram_mgr_alloc_parms aparms = { 0 };
+ struct tbl_rm_db *tbl_db;
+ void *tbl_db_ptr = NULL;
+ void *sram_handle = NULL;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Retrieve the session information */
+ rc = tf_session_get(tfp, &tfs, &dev);
+ if (rc)
+ return rc;
+
+ rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get tbl_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+ rc = tf_session_get_sram_db(tfp, &sram_handle);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get sram_handle from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ iparms.rm_db = tbl_db->tbl_db[parms->dir];
+ iparms.dir = parms->dir;
+ iparms.tbl_type = parms->type;
+
+ rc = tf_tbl_sram_get_info(&iparms);
+
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to get SRAM info %s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type));
+ return rc;
+ }
+
+ aparms.dir = parms->dir;
+ aparms.bank_id = iparms.bank_id;
+ aparms.slice_size = iparms.slice_size;
+ aparms.sram_offset = &idx;
+ aparms.tbl_type = parms->type;
+ aparms.rm_db = tbl_db->tbl_db[parms->dir];
+
+ rc = tf_sram_mgr_alloc(sram_handle, &aparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to allocate SRAM table:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type));
+ return rc;
+ }
+ *parms->idx = idx;
+
+#if (DBG_SRAM == 1)
+ {
+ struct tf_sram_mgr_dump_parms dparms;
+
+ dparms.dir = parms->dir;
+ dparms.bank_id = iparms.bank_id;
+ dparms.slice_size = iparms.slice_size;
+
+ rc = tf_sram_mgr_dump(sram_handle, &dparms);
+ }
+#endif
+
+ return rc;
+}
+
+int
+tf_tbl_sram_free(struct tf *tfp,
+ struct tf_tbl_free_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tbl_rm_db *tbl_db;
+ void *tbl_db_ptr = NULL;
+ struct tf_tbl_sram_get_info_parms iparms = { 0 };
+ struct tf_sram_mgr_free_parms fparms = { 0 };
+ struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+ bool allocated = false;
+ void *sram_handle = NULL;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Retrieve the session information */
+ rc = tf_session_get(tfp, &tfs, &dev);
+ if (rc)
+ return rc;
+
+ rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get em_ext_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+ rc = tf_session_get_sram_db(tfp, &sram_handle);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get sram_handle from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ iparms.rm_db = tbl_db->tbl_db[parms->dir];
+ iparms.dir = parms->dir;
+ iparms.tbl_type = parms->type;
+
+ rc = tf_tbl_sram_get_info(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to get table info:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type));
+ return rc;
+ }
+
+#if (DBG_SRAM == 1)
+ {
+ struct tf_sram_mgr_dump_parms dparms;
+
+ printf("%s: %s: %s\n", tf_dir_2_str(parms->dir),
+ tf_sram_slice_2_str(iparms.slice_size),
+ tf_sram_bank_2_str(iparms.bank_id));
+
+ dparms.dir = parms->dir;
+ dparms.bank_id = iparms.bank_id;
+ dparms.slice_size = iparms.slice_size;
+
+ rc = tf_sram_mgr_dump(sram_handle, &dparms);
+ }
+#endif
+
+ aparms.sram_offset = parms->idx;
+ aparms.slice_size = iparms.slice_size;
+ aparms.bank_id = iparms.bank_id;
+ aparms.dir = parms->dir;
+ aparms.is_allocated = &allocated;
+
+ rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+ if (rc || !allocated) {
+ TFP_DRV_LOG(ERR,
+ "%s: Free of invalid entry:%s idx(%d):(%s)\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ parms->idx,
+ strerror(-rc));
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ fparms.rm_db = tbl_db->tbl_db[parms->dir];
+ fparms.tbl_type = parms->type;
+ fparms.sram_offset = parms->idx;
+ fparms.slice_size = iparms.slice_size;
+ fparms.bank_id = iparms.bank_id;
+ fparms.dir = parms->dir;
+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)
+ fparms.tfp = tfp;
+#endif
+ rc = tf_sram_mgr_free(sram_handle, &fparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to free entry:%s idx(%d)\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ parms->idx);
+ return rc;
+ }
+
+#if (DBG_SRAM == 1)
+ {
+ struct tf_sram_mgr_dump_parms dparms;
+
+ printf("%s: %s: %s\n", tf_dir_2_str(parms->dir),
+ tf_sram_slice_2_str(iparms.slice_size),
+ tf_sram_bank_2_str(iparms.bank_id));
+
+ dparms.dir = parms->dir;
+ dparms.bank_id = iparms.bank_id;
+ dparms.slice_size = iparms.slice_size;
+
+ rc = tf_sram_mgr_dump(sram_handle, &dparms);
+ }
+#endif
+ return rc;
+}
+
+int
+tf_tbl_sram_set(struct tf *tfp,
+ struct tf_tbl_set_parms *parms)
+{
+ int rc;
+ bool allocated = false;
+ uint16_t hcapi_type;
+ struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tbl_rm_db *tbl_db;
+ void *tbl_db_ptr = NULL;
+ struct tf_tbl_sram_get_info_parms iparms = { 0 };
+ struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+ void *sram_handle = NULL;
+
+ TF_CHECK_PARMS3(tfp, parms, parms->data);
+
+ /* Retrieve the session information */
+ rc = tf_session_get(tfp, &tfs, &dev);
+ if (rc)
+ return rc;
+
+ rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get em_ext_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+ rc = tf_session_get_sram_db(tfp, &sram_handle);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get sram_handle from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ iparms.rm_db = tbl_db->tbl_db[parms->dir];
+ iparms.dir = parms->dir;
+ iparms.tbl_type = parms->type;
+
+ rc = tf_tbl_sram_get_info(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to get table info:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type));
+ return rc;
+ }
+
+ aparms.sram_offset = parms->idx;
+ aparms.slice_size = iparms.slice_size;
+ aparms.bank_id = iparms.bank_id;
+ aparms.dir = parms->dir;
+ aparms.is_allocated = &allocated;
+ rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+ if (rc || !allocated) {
+ TFP_DRV_LOG(ERR,
+ "%s: Entry not allocated:%s idx(%d):(%s)\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ parms->idx,
+ strerror(-rc));
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ /* Set the entry */
+ hparms.rm_db = tbl_db->tbl_db[parms->dir];
+ hparms.subtype = parms->type;
+ hparms.hcapi_type = &hcapi_type;
+ rc = tf_rm_get_hcapi_type(&hparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Failed type lookup, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tf_msg_set_tbl_entry(tfp,
+ parms->dir,
+ hcapi_type,
+ parms->data_sz_in_bytes,
+ parms->data,
+ parms->idx);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Set failed, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ strerror(-rc));
+ return rc;
+ }
+ return rc;
+}
+
+int
+tf_tbl_sram_get(struct tf *tfp,
+ struct tf_tbl_get_parms *parms)
+{
+ int rc;
+ uint16_t hcapi_type;
+ bool allocated = false;
+ struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tbl_rm_db *tbl_db;
+ void *tbl_db_ptr = NULL;
+ struct tf_tbl_sram_get_info_parms iparms = { 0 };
+ struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+ void *sram_handle = NULL;
+
+ TF_CHECK_PARMS3(tfp, parms, parms->data);
+
+ /* Retrieve the session information */
+ rc = tf_session_get(tfp, &tfs, &dev);
+ if (rc)
+ return rc;
+
+ rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get em_ext_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+ rc = tf_session_get_sram_db(tfp, &sram_handle);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get sram_handle from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ iparms.rm_db = tbl_db->tbl_db[parms->dir];
+ iparms.dir = parms->dir;
+ iparms.tbl_type = parms->type;
+
+ rc = tf_tbl_sram_get_info(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to get table info:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type));
+ return rc;
+ }
+
+ aparms.sram_offset = parms->idx;
+ aparms.slice_size = iparms.slice_size;
+ aparms.bank_id = iparms.bank_id;
+ aparms.dir = parms->dir;
+ aparms.is_allocated = &allocated;
+
+ rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+ if (rc || !allocated) {
+ TFP_DRV_LOG(ERR,
+ "%s: Entry not allocated:%s idx(%d):(%s)\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ parms->idx,
+ strerror(-rc));
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ /* Get the entry */
+ hparms.rm_db = tbl_db->tbl_db[parms->dir];
+ hparms.subtype = parms->type;
+ hparms.hcapi_type = &hcapi_type;
+ rc = tf_rm_get_hcapi_type(&hparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Failed type lookup, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Get the entry */
+ rc = tf_msg_get_tbl_entry(tfp,
+ parms->dir,
+ hcapi_type,
+ parms->data_sz_in_bytes,
+ parms->data,
+ parms->idx);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Get failed, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ strerror(-rc));
+ return rc;
+ }
+ return rc;
+}
+
+int
+tf_tbl_sram_bulk_get(struct tf *tfp,
+ struct tf_tbl_get_bulk_parms *parms)
+{
+ int rc;
+ uint16_t hcapi_type;
+ struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_tbl_sram_get_info_parms iparms = { 0 };
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tbl_rm_db *tbl_db;
+ void *tbl_db_ptr = NULL;
+ uint16_t idx;
+ struct tf_sram_mgr_is_allocated_parms aparms = { 0 };
+ bool allocated = false;
+ void *sram_handle = NULL;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Retrieve the session information */
+ rc = tf_session_get(tfp, &tfs, &dev);
+ if (rc)
+ return rc;
+
+ rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get em_ext_db from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+ tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+ rc = tf_session_get_sram_db(tfp, &sram_handle);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Failed to get sram_handle from session, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ iparms.rm_db = tbl_db->tbl_db[parms->dir];
+ iparms.dir = parms->dir;
+ iparms.tbl_type = parms->type;
+
+ rc = tf_tbl_sram_get_info(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to get table info:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type));
+ return rc;
+ }
+
+ /* Validate that both the starting and ending offsets are allocated.
+ * This API is only used for statistics, which are allocated as 8B
+ * entries, so each end of the range is verified.
+ */
+ aparms.sram_offset = parms->starting_idx;
+ aparms.slice_size = iparms.slice_size;
+ aparms.bank_id = iparms.bank_id;
+ aparms.dir = parms->dir;
+ aparms.is_allocated = &allocated;
+ rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+ if (rc || !allocated) {
+ TFP_DRV_LOG(ERR,
+ "%s: Entry not allocated:%s starting_idx(%d):(%s)\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ parms->starting_idx,
+ strerror(-rc));
+ rc = -ENOMEM;
+ return rc;
+ }
+ idx = parms->starting_idx + parms->num_entries - 1;
+ aparms.sram_offset = idx;
+ rc = tf_sram_mgr_is_allocated(sram_handle, &aparms);
+ if (rc || !allocated) {
+ TFP_DRV_LOG(ERR,
+ "%s: Entry not allocated:%s last_idx(%d):(%s)\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ idx,
+ strerror(-rc));
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ hparms.rm_db = tbl_db->tbl_db[parms->dir];
+ hparms.subtype = parms->type;
+ hparms.hcapi_type = &hcapi_type;
+ rc = tf_rm_get_hcapi_type(&hparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Failed type lookup, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Get the entries */
+ rc = tf_msg_bulk_get_tbl_entry(tfp,
+ parms->dir,
+ hcapi_type,
+ parms->starting_idx,
+ parms->num_entries,
+ parms->entry_sz_in_bytes,
+ parms->physical_mem_addr);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s, Bulk get failed, type:%s, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ strerror(-rc));
+ }
+ return rc;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef TF_TBL_SRAM_H_
+#define TF_TBL_SRAM_H_
+
+#include "tf_core.h"
+#include "stack.h"
+
+
+/**
+ * The SRAM Table module provides processing of managed SRAM types.
+ */
+
+
+/**
+ * @page tblsram SRAM Table
+ *
+ * @ref tf_tbl_sram_bind
+ *
+ * @ref tf_tbl_sram_unbind
+ *
+ * @ref tf_tbl_sram_alloc
+ *
+ * @ref tf_tbl_sram_free
+ *
+ * @ref tf_tbl_sram_set
+ *
+ * @ref tf_tbl_sram_get
+ *
+ * @ref tf_tbl_sram_bulk_get
+ */
+
+/**
+ * Initializes the SRAM Table module. Must be invoked as the first
+ * thing before any of the access functions.
+ *
+ * [in] tfp
+ * Pointer to TF handle, used for HCAPI communication
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_bind(struct tf *tfp);
+
+/**
+ * Cleans up the private DBs and releases all the data.
+ *
+ * [in] tfp
+ * Pointer to TF handle, used for HCAPI communication
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_unbind(struct tf *tfp);
+
+/**
+ * Allocates the requested table type from the internal RM DB.
+ *
+ * [in] tfp
+ * Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ * Pointer to Table allocation parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_alloc(struct tf *tfp,
+ struct tf_tbl_alloc_parms *parms);
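+
+/*
+ * Example usage (an illustrative sketch; assumes an open session handle
+ * tfp and that the type is SRAM managed on this device; field types per
+ * struct tf_tbl_alloc_parms in tf_tbl.h):
+ *
+ *   struct tf_tbl_alloc_parms ap = { 0 };
+ *   uint32_t idx;
+ *
+ *   ap.dir = TF_DIR_RX;
+ *   ap.type = TF_TBL_TYPE_ACT_STATS_64;
+ *   ap.idx = &idx;
+ *   rc = tf_tbl_sram_alloc(tfp, &ap);
+ */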
+
+/**
+ * Frees the requested SRAM-managed table type and returns it to the DB.
+ *
+ * [in] tfp
+ * Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ * Pointer to Table free parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_free(struct tf *tfp,
+ struct tf_tbl_free_parms *parms);
+
+
+/**
+ * Configures the requested element by sending a firmware request which
+ * then installs it into the device internal structures.
+ *
+ * [in] tfp
+ * Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ * Pointer to Table set parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_set(struct tf *tfp,
+ struct tf_tbl_set_parms *parms);
+
+/**
+ * Retrieves the requested element by sending a firmware request to get
+ * the element.
+ *
+ * [in] tfp
+ * Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ * Pointer to Table get parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_get(struct tf *tfp,
+ struct tf_tbl_get_parms *parms);
+
+/**
+ * Retrieves bulk block of elements by sending a firmware request to
+ * get the elements.
+ *
+ * [in] tfp
+ * Pointer to TF handle, used for HCAPI communication
+ *
+ * [in] parms
+ * Pointer to Table get bulk parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_tbl_sram_bulk_get(struct tf *tfp,
+ struct tf_tbl_get_bulk_parms *parms);
+
+#endif /* TF_TBL_SRAM_H_ */
struct tf_shadow_tcam_free_db_parms fshadow;
struct tf_shadow_tcam_cfg_parms shadow_cfg;
struct tf_shadow_tcam_create_db_parms shadow_cdb;
- uint16_t num_slices = 1;
+ uint16_t num_slices = parms->wc_num_slices;
struct tf_session *tfs;
struct tf_dev_info *dev;
struct tcam_rm_db *tcam_db;
if (rc)
return rc;
- if (dev->ops->tf_dev_get_tcam_slice_info == NULL) {
+ if (dev->ops->tf_dev_set_tcam_slice_info == NULL) {
rc = -EOPNOTSUPP;
TFP_DRV_LOG(ERR,
"Operation not supported, rc:%s\n",
return rc;
}
- rc = dev->ops->tf_dev_get_tcam_slice_info(tfp,
- TF_TCAM_TBL_TYPE_WC_TCAM,
- 0,
- &num_slices);
+ rc = dev->ops->tf_dev_set_tcam_slice_info(tfp,
+ num_slices);
if (rc)
return rc;
* The TCAM module provides processing of Internal TCAM types.
*/
+/* Number of slices per row for WC TCAM */
+extern uint16_t g_wc_num_slices_per_row;
+
/**
* TCAM configuration parameters
*/
* Session resource allocations
*/
struct tf_session_resources *resources;
+ /**
+ * WC number of slices per row.
+ */
+ enum tf_wc_num_slice wc_num_slices;
};
/**
if (rc)
return rc;
- rc = tf_tcam_shared_get_slices(tfp,
- dev,
- &num_slices);
- if (rc)
- return rc;
-
- if (num_slices > 1) {
- TFP_DRV_LOG(ERR,
- "Only single slice supported\n");
- return -EOPNOTSUPP;
- }
-
tf_tcam_shared_create_db(&tcam_shared_wc);
tf_session_set_tcam_shared_db(tfp, (void *)tcam_shared_wc);
}
+
+ rc = tf_tcam_shared_get_slices(tfp,
+ dev,
+ &num_slices);
+ if (rc)
+ return rc;
+
+ if (num_slices > 1) {
+ TFP_DRV_LOG(ERR,
+ "Only single slice supported\n");
+ return -EOPNOTSUPP;
+ }
}
done:
return rc;
sparms.idx = dphy_idx;
sparms.key = gparms.key;
sparms.mask = gparms.mask;
- sparms.key_size = gparms.key_size;
+ sparms.key_size = key_sz_bytes;
sparms.result = gparms.result;
- sparms.result_size = gparms.result_size;
+ sparms.result_size = remap_sz_bytes;
rc = tf_msg_tcam_entry_set(tfp, dev, &sparms);
if (rc) {
switch (tbl_type) {
case TF_TBL_TYPE_FULL_ACT_RECORD:
return "Full Action record";
+ case TF_TBL_TYPE_COMPACT_ACT_RECORD:
+ return "Compact Action record";
case TF_TBL_TYPE_MCAST_GROUPS:
return "Multicast Groups";
case TF_TBL_TYPE_ACT_ENCAP_8B:
return "Stats 64B";
case TF_TBL_TYPE_ACT_MODIFY_IPV4:
return "Modify IPv4";
+ case TF_TBL_TYPE_ACT_MODIFY_8B:
+ return "Modify 8B";
+ case TF_TBL_TYPE_ACT_MODIFY_16B:
+ return "Modify 16B";
+ case TF_TBL_TYPE_ACT_MODIFY_32B:
+ return "Modify 32B";
+ case TF_TBL_TYPE_ACT_MODIFY_64B:
+ return "Modify 64B";
case TF_TBL_TYPE_METER_PROF:
return "Meter Profile";
case TF_TBL_TYPE_METER_INST:
size_t copy_nbytes;
uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
int32_t rc = 0;
+ uint8_t app_id;
/* only perform this if shared session is enabled. */
if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
if (rc)
return rc;
+ rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+ return -EINVAL;
+ }
+
rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
parms.shadow_copy = true;
parms.bp = bp;
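+ /* Select the WC TCAM slices per row based on the TF application id */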
+ if (app_id == 0 || app_id == 3)
+ parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
+ else
+ parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
/*
* Open the session here, but the collect the resources during the
struct tf_open_session_parms params;
struct tf_session_resources *resources;
uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
+ uint8_t app_id;
memset(¶ms, 0, sizeof(params));
params.shadow_copy = true;
+ rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+ return -EINVAL;
+ }
+
rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
return rc;
params.bp = bp;
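+ /* Select the WC TCAM slices per row based on the TF application id */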
+ if (app_id == 0 || app_id == 3)
+ params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
+ else
+ params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
+
rc = tf_open_session(&bp->tfp, ¶ms);
if (rc) {
BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",