X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Ftf_core%2Ftf_rm.c;h=19de6e4c631f07af24895e6bc3b9cb04eb4ac8be;hb=539931eab3a5f81adaaac583e792ac1e4237db20;hp=56767e73278c70a7c466813224050e3c67550aaa;hpb=f8b6392ad769670fc1f9219db1f2cb8e4c83a44c;p=dpdk.git diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c index 56767e7327..19de6e4c63 100644 --- a/drivers/net/bnxt/tf_core/tf_rm.c +++ b/drivers/net/bnxt/tf_core/tf_rm.c @@ -1,1731 +1,981 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019-2020 Broadcom + * Copyright(c) 2019-2021 Broadcom * All rights reserved. */ #include #include +#include + +#include #include "tf_rm.h" -#include "tf_core.h" +#include "tf_common.h" +#include "tf_util.h" #include "tf_session.h" -#include "tf_resources.h" +#include "tf_device.h" +#include "tfp.h" #include "tf_msg.h" -#include "bnxt.h" + +/* Logging defines */ +#define TF_RM_DEBUG 0 /** - * Internal macro to perform HW resource allocation check between what - * firmware reports vs what was statically requested. - * - * Parameters: - * struct tf_rm_hw_query *hquery - Pointer to the hw query result - * enum tf_dir dir - Direction to process - * enum tf_resource_type_hw hcapi_type - HCAPI type, the index element - * in the hw query structure - * define def_value - Define value to check against - * uint32_t *eflag - Result of the check + * Generic RM Element data type that an RM DB is build upon. */ -#define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do { \ - if ((dir) == TF_DIR_RX) { \ - if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \ - *(eflag) |= 1 << (hcapi_type); \ - } else { \ - if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \ - *(eflag) |= 1 << (hcapi_type); \ - } \ -} while (0) +struct tf_rm_element { + /** + * RM Element configuration type. If Private then the + * hcapi_type can be ignored. If Null then the element is not + * valid for the device. + */ + enum tf_rm_elem_cfg_type cfg_type; + + /** + * HCAPI RM Type for the element. + */ + uint16_t hcapi_type; + + /** + * HCAPI RM allocated range information for the element. + */ + struct tf_rm_alloc_info alloc; + + /** + * Bit allocator pool for the element. Pool size is controlled + * by the struct tf_session_resources at time of session creation. + * Null indicates that the element is not used for the device. + */ + struct bitalloc *pool; +}; /** - * Internal macro to perform HW resource allocation check between what - * firmware reports vs what was statically requested. - * - * Parameters: - * struct tf_rm_sram_query *squery - Pointer to the sram query result - * enum tf_dir dir - Direction to process - * enum tf_resource_type_sram hcapi_type - HCAPI type, the index element - * in the hw query structure - * define def_value - Define value to check against - * uint32_t *eflag - Result of the check + * TF RM DB definition */ -#define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \ - if ((dir) == TF_DIR_RX) { \ - if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\ - *(eflag) |= 1 << (hcapi_type); \ - } else { \ - if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\ - *(eflag) |= 1 << (hcapi_type); \ - } \ -} while (0) +struct tf_rm_new_db { + /** + * Number of elements in the DB + */ + uint16_t num_entries; + + /** + * Direction this DB controls. + */ + enum tf_dir dir; + + /** + * Module type, used for logging purposes. 
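+	 * (e.g. TF_DEVICE_MODULE_TYPE_EM for the EM module).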
+	 */
+	enum tf_device_module_type type;
+
+	/**
+	 * The DB consists of an array of elements
+	 */
+	struct tf_rm_element *db;
+};

 /**
- * Internal macro to convert a reserved resource define name to be
- * direction specific.
+ * Counts the number of HCAPI reservations requested in a DB
+ * configuration, i.e. the number of elements whose cfg type is
+ * HCAPI backed and whose reservation value is greater than 0.
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] type
+ *   Device module type, used for logging
+ *
+ * [in] cfg
+ *   Pointer to the DB configuration
  *
- * Parameters:
- *    enum tf_dir    dir - Direction to process
- *    string         type - Type name to append RX or TX to
- *    string         dtype - Direction specific type
+ * [in] reservations
+ *   Pointer to the allocation values associated with the module
  *
+ * [in] count
+ *   Number of DB configuration elements
  *
+ * [out] valid_count
+ *   Number of HCAPI entries with a reservation value greater than 0
  */
-#define TF_RESC_RSVD(dir, type, dtype) do {	\
-		if ((dir) == TF_DIR_RX)		\
-			(dtype) = type ## _RX;	\
-		else				\
-			(dtype) = type ## _TX;	\
-	} while (0)
-
-const char
-*tf_dir_2_str(enum tf_dir dir)
+static void
+tf_rm_count_hcapi_reservations(enum tf_dir dir,
+			       enum tf_device_module_type type,
+			       struct tf_rm_element_cfg *cfg,
+			       uint16_t *reservations,
+			       uint16_t count,
+			       uint16_t *valid_count)
 {
-	switch (dir) {
-	case TF_DIR_RX:
-		return "RX";
-	case TF_DIR_TX:
-		return "TX";
-	default:
-		return "Invalid direction";
+	int i;
+	uint16_t cnt = 0;
+
+	for (i = 0; i < count; i++) {
+		if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
+		     cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
+		    reservations[i] > 0)
+			cnt++;
+
+		/* Only log a msg if a reservation is attempted for a
+		 * type that is not supported. We ignore the EM module
+		 * as it's using a split configuration array and would
+		 * fail this type of check.
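+		 *
+		 * As an illustration (values assumed, not from a real
+		 * device): a cfg entry of type TF_RM_ELEM_CFG_NULL
+		 * with reservations[i] == 2 is reported below, since
+		 * two entries were requested for an element type the
+		 * device does not support.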
+ */ + if (type != TF_DEVICE_MODULE_TYPE_EM && + cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL && + reservations[i] > 0) { + TFP_DRV_LOG(ERR, + "%s, %s, %s allocation of %d not supported\n", + tf_device_module_type_2_str(type), + tf_dir_2_str(dir), + tf_device_module_type_subtype_2_str(type, i), + reservations[i]); + } } -} -const char -*tf_ident_2_str(enum tf_identifier_type id_type) -{ - switch (id_type) { - case TF_IDENT_TYPE_L2_CTXT: - return "l2_ctxt_remap"; - case TF_IDENT_TYPE_PROF_FUNC: - return "prof_func"; - case TF_IDENT_TYPE_WC_PROF: - return "wc_prof"; - case TF_IDENT_TYPE_EM_PROF: - return "em_prof"; - case TF_IDENT_TYPE_L2_FUNC: - return "l2_func"; - default: - break; - } - return "Invalid identifier"; + *valid_count = cnt; } -const char -*tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type) -{ - switch (sram_type) { - case TF_RESC_TYPE_SRAM_FULL_ACTION: - return "Full action"; - case TF_RESC_TYPE_SRAM_MCG: - return "MCG"; - case TF_RESC_TYPE_SRAM_ENCAP_8B: - return "Encap 8B"; - case TF_RESC_TYPE_SRAM_ENCAP_16B: - return "Encap 16B"; - case TF_RESC_TYPE_SRAM_ENCAP_64B: - return "Encap 64B"; - case TF_RESC_TYPE_SRAM_SP_SMAC: - return "Source properties SMAC"; - case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4: - return "Source properties SMAC IPv4"; - case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6: - return "Source properties IPv6"; - case TF_RESC_TYPE_SRAM_COUNTER_64B: - return "Counter 64B"; - case TF_RESC_TYPE_SRAM_NAT_SPORT: - return "NAT source port"; - case TF_RESC_TYPE_SRAM_NAT_DPORT: - return "NAT destination port"; - case TF_RESC_TYPE_SRAM_NAT_S_IPV4: - return "NAT source IPv4"; - case TF_RESC_TYPE_SRAM_NAT_D_IPV4: - return "NAT destination IPv4"; - default: - return "Invalid identifier"; - } -} +/** + * Resource Manager Adjust of base index definitions. + */ +enum tf_rm_adjust_type { + TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */ + TF_RM_ADJUST_RM_BASE /**< Removes base from the index */ +}; /** - * Helper function to perform a SRAM HCAPI resource type lookup - * against the reserved value of the same static type. + * Adjust an index according to the allocation information. + * + * All resources are controlled in a 0 based pool. Some resources, by + * design, are not 0 based, i.e. Full Action Records (SRAM) thus they + * need to be adjusted before they are handed out. 
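+ *
+ * A worked example with assumed, illustrative numbers: for an
+ * element reserved with alloc.entry.start == 1000,
+ * TF_RM_ADJUST_ADD_BASE maps pool index 5 to HCAPI index 1005 and
+ * TF_RM_ADJUST_RM_BASE maps HCAPI index 1005 back to pool index 5.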
+ * + * [in] db + * Pointer to the db, used for the lookup + * + * [in] action + * Adjust action + * + * [in] db_index + * DB index for the element type + * + * [in] index + * Index to convert + * + * [out] adj_index + * Adjusted index * * Returns: - * -EOPNOTSUPP - Reserved resource type not supported - * Value - Integer value of the reserved value for the requested type + * 0 - Success + * - EOPNOTSUPP - Operation not supported */ static int -tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index) +tf_rm_adjust_index(struct tf_rm_element *db, + enum tf_rm_adjust_type action, + uint32_t db_index, + uint32_t index, + uint32_t *adj_index) { - uint32_t value = -EOPNOTSUPP; + int rc = 0; + uint32_t base_index; - switch (index) { - case TF_RESC_TYPE_SRAM_FULL_ACTION: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value); - break; - case TF_RESC_TYPE_SRAM_MCG: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value); - break; - case TF_RESC_TYPE_SRAM_ENCAP_8B: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value); - break; - case TF_RESC_TYPE_SRAM_ENCAP_16B: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value); - break; - case TF_RESC_TYPE_SRAM_ENCAP_64B: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value); - break; - case TF_RESC_TYPE_SRAM_SP_SMAC: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value); - break; - case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value); - break; - case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value); - break; - case TF_RESC_TYPE_SRAM_COUNTER_64B: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value); - break; - case TF_RESC_TYPE_SRAM_NAT_SPORT: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value); - break; - case TF_RESC_TYPE_SRAM_NAT_DPORT: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value); - break; - case TF_RESC_TYPE_SRAM_NAT_S_IPV4: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value); + base_index = db[db_index].alloc.entry.start; + + switch (action) { + case TF_RM_ADJUST_RM_BASE: + *adj_index = index - base_index; break; - case TF_RESC_TYPE_SRAM_NAT_D_IPV4: - TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value); + case TF_RM_ADJUST_ADD_BASE: + *adj_index = index + base_index; break; default: - break; + return -EOPNOTSUPP; } - return value; + return rc; } /** - * Helper function to print all the SRAM resource qcaps errors - * reported in the error_flag. + * Logs an array of found residual entries to the console. * * [in] dir * Receive or transmit direction * - * [in] error_flag - * Pointer to the sram error flags created at time of the query check + * [in] type + * Type of Device Module + * + * [in] count + * Number of entries in the residual array + * + * [in] residuals + * Pointer to an array of residual entries. Array is index same as + * the DB in which this function is used. Each entry holds residual + * value for that entry. 
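+ *
+ *   A hypothetical example: residuals[2] == 3 means that three
+ *   entries of DB index 2 were still allocated when the scan ran,
+ *   producing one log line for that type.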
*/ static void -tf_rm_print_sram_qcaps_error(enum tf_dir dir, - struct tf_rm_sram_query *sram_query, - uint32_t *error_flag) +tf_rm_log_residuals(enum tf_dir dir, + enum tf_device_module_type type, + uint16_t count, + uint16_t *residuals) { int i; - PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n"); - PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir)); - PMD_DRV_LOG(ERR, " Elements:\n"); - - for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) { - if (*error_flag & 1 << i) - PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n", - tf_hcapi_sram_2_str(i), - sram_query->sram_query[i].max, - tf_rm_rsvd_sram_value(dir, i)); + /* Walk the residual array and log the types that wasn't + * cleaned up to the console. + */ + for (i = 0; i < count; i++) { + if (residuals[i] != 0) + TFP_DRV_LOG(ERR, + "%s, %s was not cleaned up, %d outstanding\n", + tf_dir_2_str(dir), + tf_device_module_type_subtype_2_str(type, i), + residuals[i]); } } /** - * Performs a HW resource check between what firmware capability - * reports and what the core expects is available. + * Performs a check of the passed in DB for any lingering elements. If + * a resource type was found to not have been cleaned up by the caller + * then its residual values are recorded, logged and passed back in an + * allocate reservation array that the caller can pass to the FW for + * cleanup. * - * Firmware performs the resource carving at AFM init time and the - * resource capability is reported in the TruFlow qcaps msg. + * [in] db + * Pointer to the db, used for the lookup * - * [in] query - * Pointer to HW Query data structure. Query holds what the firmware - * offers of the HW resources. + * [out] resv_size + * Pointer to the reservation size of the generated reservation + * array. * - * [in] dir - * Receive or transmit direction + * [in/out] resv + * Pointer Pointer to a reservation array. The reservation array is + * allocated after the residual scan and holds any found residual + * entries. Thus it can be smaller than the DB that the check was + * performed on. Array must be freed by the caller. * - * [in/out] error_flag - * Pointer to a bit array indicating the error of a single HCAPI - * resource type. When a bit is set to 1, the HCAPI resource type - * failed static allocation. + * [out] residuals_present + * Pointer to a bool flag indicating if residual was present in the + * DB * * Returns: - * 0 - Success - * -ENOMEM - Failure on one of the allocated resources. Check the - * error_flag for what types are flagged errored. + * 0 - Success + * - EOPNOTSUPP - Operation not supported */ static int -tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query, - enum tf_dir dir, - uint32_t *error_flag) +tf_rm_check_residuals(struct tf_rm_new_db *rm_db, + uint16_t *resv_size, + struct tf_rm_resc_entry **resv, + bool *residuals_present) { - *error_flag = 0; - TF_RM_CHECK_HW_ALLOC(query, - dir, - TF_RESC_TYPE_HW_RANGE_ENTRY, - TF_RSVD_RANGE_ENTRY, - error_flag); - - if (*error_flag != 0) - return -ENOMEM; - - return 0; -} - -/** - * Performs a SRAM resource check between what firmware capability - * reports and what the core expects is available. - * - * Firmware performs the resource carving at AFM init time and the - * resource capability is reported in the TruFlow qcaps msg. - * - * [in] query - * Pointer to SRAM Query data structure. Query holds what the - * firmware offers of the SRAM resources. 
- * - * [in] dir - * Receive or transmit direction - * - * [in/out] error_flag - * Pointer to a bit array indicating the error of a single HCAPI - * resource type. When a bit is set to 1, the HCAPI resource type - * failed static allocation. - * - * Returns: - * 0 - Success - * -ENOMEM - Failure on one of the allocated resources. Check the - * error_flag for what types are flagged errored. - */ -static int -tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query, - enum tf_dir dir, - uint32_t *error_flag) -{ - *error_flag = 0; - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_FULL_ACTION, - TF_RSVD_SRAM_FULL_ACTION, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_MCG, - TF_RSVD_SRAM_MCG, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_ENCAP_8B, - TF_RSVD_SRAM_ENCAP_8B, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_ENCAP_16B, - TF_RSVD_SRAM_ENCAP_16B, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_ENCAP_64B, - TF_RSVD_SRAM_ENCAP_64B, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_SP_SMAC, - TF_RSVD_SRAM_SP_SMAC, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_SP_SMAC_IPV4, - TF_RSVD_SRAM_SP_SMAC_IPV4, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_SP_SMAC_IPV6, - TF_RSVD_SRAM_SP_SMAC_IPV6, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_COUNTER_64B, - TF_RSVD_SRAM_COUNTER_64B, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_NAT_SPORT, - TF_RSVD_SRAM_NAT_SPORT, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_NAT_DPORT, - TF_RSVD_SRAM_NAT_DPORT, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_NAT_S_IPV4, - TF_RSVD_SRAM_NAT_S_IPV4, - error_flag); - - TF_RM_CHECK_SRAM_ALLOC(query, - dir, - TF_RESC_TYPE_SRAM_NAT_D_IPV4, - TF_RSVD_SRAM_NAT_D_IPV4, - error_flag); - - if (*error_flag != 0) - return -ENOMEM; - - return 0; -} - -/** - * Internal function to mark pool entries used. 
- */ -static void -tf_rm_reserve_range(uint32_t count, - uint32_t rsv_begin, - uint32_t rsv_end, - uint32_t max, - struct bitalloc *pool) -{ - uint32_t i; + int rc; + int i; + int f; + uint16_t count; + uint16_t found; + uint16_t *residuals = NULL; + uint16_t hcapi_type; + struct tf_rm_get_inuse_count_parms iparms; + struct tf_rm_get_alloc_info_parms aparms; + struct tf_rm_get_hcapi_parms hparms; + struct tf_rm_alloc_info info; + struct tfp_calloc_parms cparms; + struct tf_rm_resc_entry *local_resv = NULL; + + /* Create array to hold the entries that have residuals */ + cparms.nitems = rm_db->num_entries; + cparms.size = sizeof(uint16_t); + cparms.alignment = 0; + rc = tfp_calloc(&cparms); + if (rc) + return rc; - /* If no resources has been requested we mark everything - * 'used' - */ - if (count == 0) { - for (i = 0; i < max; i++) - ba_alloc_index(pool, i); - } else { - /* Support 2 main modes - * Reserved range starts from bottom up (with - * pre-reserved value or not) - * - begin = 0 to end xx - * - begin = 1 to end xx - * - * Reserved range starts from top down - * - begin = yy to end max - */ + residuals = (uint16_t *)cparms.mem_va; + + /* Traverse the DB and collect any residual elements */ + iparms.rm_db = rm_db; + iparms.count = &count; + for (i = 0, found = 0; i < rm_db->num_entries; i++) { + iparms.db_index = i; + rc = tf_rm_get_inuse_count(&iparms); + /* Not a device supported entry, just skip */ + if (rc == -ENOTSUP) + continue; + if (rc) + goto cleanup_residuals; - /* Bottom up check, start from 0 */ - if (rsv_begin == 0) { - for (i = rsv_end + 1; i < max; i++) - ba_alloc_index(pool, i); + if (count) { + found++; + residuals[i] = count; + *residuals_present = true; } + } - /* Bottom up check, start from 1 or higher OR - * Top Down + if (*residuals_present) { + /* Populate a reduced resv array with only the entries + * that have residuals. */ - if (rsv_begin >= 1) { - /* Allocate from 0 until start */ - for (i = 0; i < rsv_begin; i++) - ba_alloc_index(pool, i); - - /* Skip and then do the remaining */ - if (rsv_end < max - 1) { - for (i = rsv_end; i < max; i++) - ba_alloc_index(pool, i); - } + cparms.nitems = found; + cparms.size = sizeof(struct tf_rm_resc_entry); + cparms.alignment = 0; + rc = tfp_calloc(&cparms); + if (rc) + return rc; + + local_resv = (struct tf_rm_resc_entry *)cparms.mem_va; + + aparms.rm_db = rm_db; + hparms.rm_db = rm_db; + hparms.hcapi_type = &hcapi_type; + for (i = 0, f = 0; i < rm_db->num_entries; i++) { + if (residuals[i] == 0) + continue; + aparms.db_index = i; + aparms.info = &info; + rc = tf_rm_get_info(&aparms); + if (rc) + goto cleanup_all; + + hparms.db_index = i; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) + goto cleanup_all; + + local_resv[f].type = hcapi_type; + local_resv[f].start = info.entry.start; + local_resv[f].stride = info.entry.stride; + f++; } + *resv_size = found; } -} -/** - * Internal function to mark all the l2 ctxt allocated that Truflow - * does not own. 
- */ -static void -tf_rm_rsvd_l2_ctxt(struct tf_session *tfs) -{ - uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM; - uint32_t end = 0; - - /* l2 ctxt rx direction */ - if (tfs->resc.rx.hw_entry[index].stride > 0) - end = tfs->resc.rx.hw_entry[index].start + - tfs->resc.rx.hw_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, - tfs->resc.rx.hw_entry[index].start, - end, - TF_NUM_L2_CTXT_TCAM, - tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX); - - /* l2 ctxt tx direction */ - if (tfs->resc.tx.hw_entry[index].stride > 0) - end = tfs->resc.tx.hw_entry[index].start + - tfs->resc.tx.hw_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, - tfs->resc.tx.hw_entry[index].start, - end, - TF_NUM_L2_CTXT_TCAM, - tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX); -} + tf_rm_log_residuals(rm_db->dir, + rm_db->type, + rm_db->num_entries, + residuals); -/** - * Internal function to mark all the l2 func resources allocated that - * Truflow does not own. - */ -static void -tf_rm_rsvd_l2_func(struct tf_session *tfs) -{ - uint32_t index = TF_RESC_TYPE_HW_L2_FUNC; - uint32_t end = 0; - - /* l2 func rx direction */ - if (tfs->resc.rx.hw_entry[index].stride > 0) - end = tfs->resc.rx.hw_entry[index].start + - tfs->resc.rx.hw_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride, - tfs->resc.rx.hw_entry[index].start, - end, - TF_NUM_L2_FUNC, - tfs->TF_L2_FUNC_POOL_NAME_RX); - - /* l2 func tx direction */ - if (tfs->resc.tx.hw_entry[index].stride > 0) - end = tfs->resc.tx.hw_entry[index].start + - tfs->resc.tx.hw_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride, - tfs->resc.tx.hw_entry[index].start, - end, - TF_NUM_L2_FUNC, - tfs->TF_L2_FUNC_POOL_NAME_TX); -} + tfp_free((void *)residuals); + *resv = local_resv; -/** - * Internal function to mark all the full action resources allocated - * that Truflow does not own. - */ -static void -tf_rm_rsvd_sram_full_action(struct tf_session *tfs) -{ - uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION; - uint16_t end = 0; - - /* full action rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_FULL_ACTION_RX, - tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX); - - /* full action tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_FULL_ACTION_TX, - tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX); -} - -/** - * Internal function to mark all the multicast group resources - * allocated that Truflow does not own. 
- */ -static void -tf_rm_rsvd_sram_mcg(struct tf_session *tfs) -{ - uint32_t index = TF_RESC_TYPE_SRAM_MCG; - uint16_t end = 0; - - /* multicast group rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; + return 0; - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_MCG_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_MCG_RX, - tfs->TF_SRAM_MCG_POOL_NAME_RX); + cleanup_all: + tfp_free((void *)local_resv); + *resv = NULL; + cleanup_residuals: + tfp_free((void *)residuals); - /* Multicast Group on TX is not supported */ + return rc; } -/** - * Internal function to mark all the encap resources allocated that - * Truflow does not own. - */ -static void -tf_rm_rsvd_sram_encap(struct tf_session *tfs) +int +tf_rm_create_db(struct tf *tfp, + struct tf_rm_create_db_parms *parms) { - uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B; - uint16_t end = 0; - - /* encap 8b rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_ENCAP_8B_RX, - tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX); - - /* encap 8b tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_ENCAP_8B_TX, - tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX); - - index = TF_RESC_TYPE_SRAM_ENCAP_16B; - - /* encap 16b rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_ENCAP_16B_RX, - tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX); - - /* encap 16b tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_ENCAP_16B_TX, - tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX); - - index = TF_RESC_TYPE_SRAM_ENCAP_64B; - - /* Encap 64B not supported on RX */ - - /* Encap 64b tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_ENCAP_64B_TX, - tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX); -} + int rc; + int i; + int j; + struct tf_session *tfs; + struct tf_dev_info *dev; + uint16_t max_types; + struct tfp_calloc_parms cparms; + struct tf_rm_resc_req_entry *query; + enum tf_rm_resc_resv_strategy resv_strategy; + struct tf_rm_resc_req_entry *req; + struct tf_rm_resc_entry *resv; + struct tf_rm_new_db *rm_db; + struct tf_rm_element *db; + uint32_t pool_size; + uint16_t hcapi_items; + + TF_CHECK_PARMS2(tfp, parms); + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; -/** - * Internal function to mark all the sp resources allocated that - * Truflow does not own. 
- */ -static void -tf_rm_rsvd_sram_sp(struct tf_session *tfs) -{ - uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC; - uint16_t end = 0; - - /* sp smac rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_SP_SMAC_RX, - tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX); - - /* sp smac tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_SP_SMAC_TX, - tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX); - - index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4; - - /* SP SMAC IPv4 not supported on RX */ - - /* sp smac ipv4 tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_SP_SMAC_IPV4_TX, - tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX); - - index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6; - - /* SP SMAC IPv6 not supported on RX */ - - /* sp smac ipv6 tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_SP_SMAC_IPV6_TX, - tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX); -} + /* Retrieve device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; -/** - * Internal function to mark all the stat resources allocated that - * Truflow does not own. - */ -static void -tf_rm_rsvd_sram_stats(struct tf_session *tfs) -{ - uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B; - uint16_t end = 0; - - /* counter 64b rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_COUNTER_64B_RX, - tfs->TF_SRAM_STATS_64B_POOL_NAME_RX); - - /* counter 64b tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_COUNTER_64B_TX, - tfs->TF_SRAM_STATS_64B_POOL_NAME_TX); -} + /* Need device max number of elements for the RM QCAPS */ + rc = dev->ops->tf_dev_get_max_types(tfp, &max_types); + if (rc) + return rc; -/** - * Internal function to mark all the nat resources allocated that - * Truflow does not own. 
- */ -static void -tf_rm_rsvd_sram_nat(struct tf_session *tfs) -{ - uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT; - uint16_t end = 0; - - /* nat source port rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_NAT_SPORT_RX, - tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX); - - /* nat source port tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_NAT_SPORT_TX, - tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX); - - index = TF_RESC_TYPE_SRAM_NAT_DPORT; - - /* nat destination port rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_NAT_DPORT_RX, - tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX); - - /* nat destination port tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_NAT_DPORT_TX, - tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX); - - index = TF_RESC_TYPE_SRAM_NAT_S_IPV4; - - /* nat source port ipv4 rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_NAT_S_IPV4_RX, - tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX); - - /* nat source ipv4 port tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_NAT_S_IPV4_TX, - tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX); - - index = TF_RESC_TYPE_SRAM_NAT_D_IPV4; - - /* nat destination port ipv4 rx direction */ - if (tfs->resc.rx.sram_entry[index].stride > 0) - end = tfs->resc.rx.sram_entry[index].start + - tfs->resc.rx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX, - end, - TF_RSVD_SRAM_NAT_D_IPV4_RX, - tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX); - - /* nat destination ipv4 port tx direction */ - if (tfs->resc.tx.sram_entry[index].stride > 0) - end = tfs->resc.tx.sram_entry[index].start + - tfs->resc.tx.sram_entry[index].stride - 1; - - tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride, - TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX, - end, - TF_RSVD_SRAM_NAT_D_IPV4_TX, - tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX); -} + cparms.nitems = max_types; + cparms.size = sizeof(struct tf_rm_resc_req_entry); + cparms.alignment = 0; + rc = tfp_calloc(&cparms); + if (rc) + return rc; -/** - * Internal function used to validate the HW allocated resources - * against the requested values. 
- */ -static int -tf_rm_hw_alloc_validate(enum tf_dir dir, - struct tf_rm_hw_alloc *hw_alloc, - struct tf_rm_entry *hw_entry) -{ - int error = 0; - int i; + query = (struct tf_rm_resc_req_entry *)cparms.mem_va; - for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) { - if (hw_entry[i].stride != hw_alloc->hw_num[i]) { - PMD_DRV_LOG(ERR, - "%s, Alloc failed id:%d expect:%d got:%d\n", - tf_dir_2_str(dir), - i, - hw_alloc->hw_num[i], - hw_entry[i].stride); - error = -1; - } - } + /* Get Firmware Capabilities */ + rc = tf_msg_session_resc_qcaps(tfp, + dev, + parms->dir, + max_types, + query, + &resv_strategy); + if (rc) + return rc; - return error; -} + /* Process capabilities against DB requirements. However, as a + * DB can hold elements that are not HCAPI we can reduce the + * req msg content by removing those out of the request yet + * the DB holds them all as to give a fast lookup. We can also + * remove entries where there are no request for elements. + */ + tf_rm_count_hcapi_reservations(parms->dir, + parms->type, + parms->cfg, + parms->alloc_cnt, + parms->num_elements, + &hcapi_items); + + /* Handle the case where a DB create request really ends up + * being empty. Unsupported (if not rare) case but possible + * that no resources are necessary for a 'direction'. + */ + if (hcapi_items == 0) { + TFP_DRV_LOG(ERR, + "%s: DB create request for Zero elements, DB Type:%s\n", + tf_dir_2_str(parms->dir), + tf_device_module_type_2_str(parms->type)); -/** - * Internal function used to validate the SRAM allocated resources - * against the requested values. - */ -static int -tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused, - struct tf_rm_sram_alloc *sram_alloc, - struct tf_rm_entry *sram_entry) -{ - int error = 0; - int i; + parms->rm_db = NULL; + return -ENOMEM; + } - for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) { - if (sram_entry[i].stride != sram_alloc->sram_num[i]) { - PMD_DRV_LOG(ERR, - "%s, Alloc failed idx:%d expect:%d got:%d\n", - tf_dir_2_str(dir), - i, - sram_alloc->sram_num[i], - sram_entry[i].stride); - error = -1; + /* Alloc request, alignment already set */ + cparms.nitems = (size_t)hcapi_items; + cparms.size = sizeof(struct tf_rm_resc_req_entry); + rc = tfp_calloc(&cparms); + if (rc) + return rc; + req = (struct tf_rm_resc_req_entry *)cparms.mem_va; + + /* Alloc reservation, alignment and nitems already set */ + cparms.size = sizeof(struct tf_rm_resc_entry); + rc = tfp_calloc(&cparms); + if (rc) + return rc; + resv = (struct tf_rm_resc_entry *)cparms.mem_va; + + /* Build the request */ + for (i = 0, j = 0; i < parms->num_elements; i++) { + /* Skip any non HCAPI cfg elements */ + if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI || + parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) { + /* Only perform reservation for entries that + * has been requested + */ + if (parms->alloc_cnt[i] == 0) + continue; + + /* Verify that we can get the full amount + * allocated per the qcaps availability. 
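+			 *
+			 * Illustrative numbers only: a request of
+			 * alloc_cnt[i] == 64 against a reported
+			 * query max of 128 is encoded with
+			 * min == max == 64 (no partial grants),
+			 * while a reported max of 32 takes the
+			 * error path below.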
+ */ + if (parms->alloc_cnt[i] <= + query[parms->cfg[i].hcapi_type].max) { + req[j].type = parms->cfg[i].hcapi_type; + req[j].min = parms->alloc_cnt[i]; + req[j].max = parms->alloc_cnt[i]; + j++; + } else { + const char *type_str; + uint16_t hcapi_type = parms->cfg[i].hcapi_type; + + dev->ops->tf_dev_get_resource_str(tfp, + hcapi_type, + &type_str); + TFP_DRV_LOG(ERR, + "%s: Resource failure, type:%d:%s\n", + tf_dir_2_str(parms->dir), + hcapi_type, type_str); + TFP_DRV_LOG(ERR, + "req:%d, avail:%d\n", + parms->alloc_cnt[i], + query[hcapi_type].max); + return -EINVAL; + } } } - return error; -} - -/** - * Internal function used to mark all the HW resources allocated that - * Truflow does not own. - */ -static void -tf_rm_reserve_hw(struct tf *tfp) -{ - struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + rc = tf_msg_session_resc_alloc(tfp, + dev, + parms->dir, + hcapi_items, + req, + resv); + if (rc) + return rc; - /* TBD - * There is no direct AFM resource allocation as it is carved - * statically at AFM boot time. Thus the bit allocators work - * on the full HW resource amount and we just mark everything - * used except the resources that Truflow took ownership off. - */ - tf_rm_rsvd_l2_ctxt(tfs); - tf_rm_rsvd_l2_func(tfs); -} + /* Build the RM DB per the request */ + cparms.nitems = 1; + cparms.size = sizeof(struct tf_rm_new_db); + rc = tfp_calloc(&cparms); + if (rc) + return rc; + rm_db = (void *)cparms.mem_va; -/** - * Internal function used to mark all the SRAM resources allocated - * that Truflow does not own. - */ -static void -tf_rm_reserve_sram(struct tf *tfp) -{ - struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); + /* Build the DB within RM DB */ + cparms.nitems = parms->num_elements; + cparms.size = sizeof(struct tf_rm_element); + rc = tfp_calloc(&cparms); + if (rc) + return rc; + rm_db->db = (struct tf_rm_element *)cparms.mem_va; - /* TBD - * There is no direct AFM resource allocation as it is carved - * statically at AFM boot time. Thus the bit allocators work - * on the full HW resource amount and we just mark everything - * used except the resources that Truflow took ownership off. - */ - tf_rm_rsvd_sram_full_action(tfs); - tf_rm_rsvd_sram_mcg(tfs); - tf_rm_rsvd_sram_encap(tfs); - tf_rm_rsvd_sram_sp(tfs); - tf_rm_rsvd_sram_stats(tfs); - tf_rm_rsvd_sram_nat(tfs); -} + db = rm_db->db; + for (i = 0, j = 0; i < parms->num_elements; i++) { + db[i].cfg_type = parms->cfg[i].cfg_type; + db[i].hcapi_type = parms->cfg[i].hcapi_type; -/** - * Internal function used to allocate and validate all HW resources. - */ -static int -tf_rm_allocate_validate_hw(struct tf *tfp, - enum tf_dir dir) -{ - int rc; - int i; - struct tf_rm_hw_query hw_query; - struct tf_rm_hw_alloc hw_alloc; - struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); - struct tf_rm_entry *hw_entries; - uint32_t error_flag; - - if (dir == TF_DIR_RX) - hw_entries = tfs->resc.rx.hw_entry; - else - hw_entries = tfs->resc.tx.hw_entry; + /* Skip any non HCAPI types as we didn't include them + * in the reservation request. 
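+		 *
+		 * Note the two cursors here: 'i' walks the full cfg
+		 * array while 'j' advances only over entries that
+		 * were put in the reservation request, keeping
+		 * resv[j] paired with the current HCAPI-backed
+		 * cfg[i].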
+ */ + if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI && + parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA) + continue; - /* Query for Session HW Resources */ - rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, HW qcaps message send failed\n", - tf_dir_2_str(dir)); - goto cleanup; - } + /* If the element didn't request an allocation no need + * to create a pool nor verify if we got a reservation. + */ + if (parms->alloc_cnt[i] == 0) + continue; - rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, HW QCAPS validation failed, error_flag:0x%x\n", - tf_dir_2_str(dir), - error_flag); - goto cleanup; + /* If the element had requested an allocation and that + * allocation was a success (full amount) then + * allocate the pool. + */ + if (parms->alloc_cnt[i] == resv[j].stride) { + db[i].alloc.entry.start = resv[j].start; + db[i].alloc.entry.stride = resv[j].stride; + + /* Only allocate BA pool if so requested */ + if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) { + /* Create pool */ + pool_size = (BITALLOC_SIZEOF(resv[j].stride) / + sizeof(struct bitalloc)); + /* Alloc request, alignment already set */ + cparms.nitems = pool_size; + cparms.size = sizeof(struct bitalloc); + rc = tfp_calloc(&cparms); + if (rc) { + TFP_DRV_LOG(ERR, + "%s: Pool alloc failed, type:%d\n", + tf_dir_2_str(parms->dir), + db[i].cfg_type); + goto fail; + } + db[i].pool = (struct bitalloc *)cparms.mem_va; + + rc = ba_init(db[i].pool, resv[j].stride); + if (rc) { + TFP_DRV_LOG(ERR, + "%s: Pool init failed, type:%d\n", + tf_dir_2_str(parms->dir), + db[i].cfg_type); + goto fail; + } + } + j++; + } else { + /* Bail out as we want what we requested for + * all elements, not any less. + */ + TFP_DRV_LOG(ERR, + "%s: Alloc failed, type:%d\n", + tf_dir_2_str(parms->dir), + db[i].cfg_type); + TFP_DRV_LOG(ERR, + "req:%d, alloc:%d\n", + parms->alloc_cnt[i], + resv[j].stride); + goto fail; + } } - /* Post process HW capability */ - for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) - hw_alloc.hw_num[i] = hw_query.hw_query[i].max; + rm_db->num_entries = parms->num_elements; + rm_db->dir = parms->dir; + rm_db->type = parms->type; + *parms->rm_db = (void *)rm_db; - /* Allocate Session HW Resources */ - rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, HW alloc message send failed\n", - tf_dir_2_str(dir)); - goto cleanup; - } - - /* Perform HW allocation validation as its possible the - * resource availability changed between qcaps and alloc - */ - rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, HW Resource validation failed\n", - tf_dir_2_str(dir)); - goto cleanup; - } + tfp_free((void *)req); + tfp_free((void *)resv); return 0; - cleanup: - return -1; + fail: + tfp_free((void *)req); + tfp_free((void *)resv); + tfp_free((void *)db->pool); + tfp_free((void *)db); + tfp_free((void *)rm_db); + parms->rm_db = NULL; + + return -EINVAL; } -/** - * Internal function used to allocate and validate all SRAM resources. 
- * - * [in] tfp - * Pointer to TF handle - * - * [in] dir - * Receive or transmit direction - * - * Returns: - * 0 - Success - * -1 - Internal error - */ -static int -tf_rm_allocate_validate_sram(struct tf *tfp, - enum tf_dir dir) +int +tf_rm_free_db(struct tf *tfp, + struct tf_rm_free_db_parms *parms) { int rc; int i; - struct tf_rm_sram_query sram_query; - struct tf_rm_sram_alloc sram_alloc; - struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data); - struct tf_rm_entry *sram_entries; - uint32_t error_flag; - - if (dir == TF_DIR_RX) - sram_entries = tfs->resc.rx.sram_entry; - else - sram_entries = tfs->resc.tx.sram_entry; + uint16_t resv_size = 0; + struct tf_rm_new_db *rm_db; + struct tf_rm_resc_entry *resv; + bool residuals_found = false; - /* Query for Session SRAM Resources */ - rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, SRAM qcaps message send failed\n", - tf_dir_2_str(dir)); - goto cleanup; - } + TF_CHECK_PARMS2(parms, parms->rm_db); - rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, SRAM QCAPS validation failed, error_flag:%x\n", - tf_dir_2_str(dir), - error_flag); - tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag); - goto cleanup; - } + /* Device unbind happens when the TF Session is closed and the + * session ref count is 0. Device unbind will cleanup each of + * its support modules, i.e. Identifier, thus we're ending up + * here to close the DB. + * + * On TF Session close it is assumed that the session has already + * cleaned up all its resources, individually, while + * destroying its flows. + * + * To assist in the 'cleanup checking' the DB is checked for any + * remaining elements and logged if found to be the case. + * + * Any such elements will need to be 'cleared' ahead of + * returning the resources to the HCAPI RM. + * + * RM will signal FW to flush the DB resources. FW will + * perform the invalidation. TF Session close will return the + * previous allocated elements to the RM and then close the + * HCAPI RM registration. That then saves several 'free' msgs + * from being required. + */ - /* Post process SRAM capability */ - for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) - sram_alloc.sram_num[i] = sram_query.sram_query[i].max; + rm_db = (struct tf_rm_new_db *)parms->rm_db; - /* Allocate Session SRAM Resources */ - rc = tf_msg_session_sram_resc_alloc(tfp, - dir, - &sram_alloc, - sram_entries); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, SRAM alloc message send failed\n", - tf_dir_2_str(dir)); - goto cleanup; - } + /* Check for residuals that the client didn't clean up */ + rc = tf_rm_check_residuals(rm_db, + &resv_size, + &resv, + &residuals_found); + if (rc) + return rc; - /* Perform SRAM allocation validation as its possible the - * resource availability changed between qcaps and alloc + /* Invalidate any residuals followed by a DB traversal for + * pool cleanup. */ - rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries); - if (rc) { - /* Log error */ - PMD_DRV_LOG(ERR, - "%s, SRAM Resource allocation validation failed\n", - tf_dir_2_str(dir)); - goto cleanup; + if (residuals_found) { + rc = tf_msg_session_resc_flush(tfp, + parms->dir, + resv_size, + resv); + tfp_free((void *)resv); + /* On failure we still have to cleanup so we can only + * log that FW failed. 
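+		 *
+		 * The error code is kept in 'rc' and returned once
+		 * the pool cleanup below has run, rather than
+		 * returning here.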
+ */ + if (rc) + TFP_DRV_LOG(ERR, + "%s: Internal Flush error, module:%s\n", + tf_dir_2_str(parms->dir), + tf_device_module_type_2_str(rm_db->type)); } - return 0; + /* No need to check for configuration type, even if we do not + * have a BA pool we just delete on a null ptr, no harm + */ + for (i = 0; i < rm_db->num_entries; i++) + tfp_free((void *)rm_db->db[i].pool); - cleanup: - return -1; + tfp_free((void *)parms->rm_db); + + return rc; } -/** - * Helper function used to prune a SRAM resource array to only hold - * elements that needs to be flushed. - * - * [in] tfs - * Session handle - * - * [in] dir - * Receive or transmit direction - * - * [in] hw_entries - * Master SRAM Resource data base - * - * [in/out] flush_entries - * Pruned SRAM Resource database of entries to be flushed. This - * array should be passed in as a complete copy of the master SRAM - * Resource database. The outgoing result will be a pruned version - * based on the result of the requested checking - * - * Returns: - * 0 - Success, no flush required - * 1 - Success, flush required - * -1 - Internal error - */ -static int -tf_rm_sram_to_flush(struct tf_session *tfs, - enum tf_dir dir, - struct tf_rm_entry *sram_entries, - struct tf_rm_entry *flush_entries) +int +tf_rm_allocate(struct tf_rm_allocate_parms *parms) { int rc; - int flush_rc = 0; - int free_cnt; - struct bitalloc *pool; - - /* Check all the sram resource pools and check for left over - * elements. Any found will result in the complete pool of a - * type to get invalidated. - */ - - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_FULL_ACTION_POOL_NAME, - rc); - if (rc) + int id; + uint32_t index; + struct tf_rm_new_db *rm_db; + enum tf_rm_elem_cfg_type cfg_type; + + TF_CHECK_PARMS2(parms, parms->rm_db); + + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + cfg_type = rm_db->db[parms->db_index].cfg_type; + + /* Bail out if not controlled by RM */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA) + return -ENOTSUP; + + /* Bail out if the pool is not valid, should never happen */ + if (rm_db->db[parms->db_index].pool == NULL) { + rc = -ENOTSUP; + TFP_DRV_LOG(ERR, + "%s: Invalid pool for this type:%d, rc:%s\n", + tf_dir_2_str(rm_db->dir), + parms->db_index, + strerror(-rc)); return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) { - flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0; - } else { - flush_rc = 1; } - /* Only pools for RX direction */ - if (dir == TF_DIR_RX) { - TF_RM_GET_POOLS_RX(tfs, &pool, - TF_SRAM_MCG_POOL_NAME); - if (rc) - return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) { - flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0; - } else { - flush_rc = 1; - } - } else { - /* Always prune TX direction */ - flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0; - } - - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_ENCAP_8B_POOL_NAME, - rc); - if (rc) - return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) { - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0; - } else { - flush_rc = 1; - } - - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_ENCAP_16B_POOL_NAME, - rc); - if (rc) + /* + * priority 0: allocate from top of the tcam i.e. 
high + * priority !0: allocate index from bottom i.e lowest + */ + if (parms->priority) + id = ba_alloc_reverse(rm_db->db[parms->db_index].pool); + else + id = ba_alloc(rm_db->db[parms->db_index].pool); + if (id == BA_FAIL) { + rc = -ENOMEM; + TFP_DRV_LOG(ERR, + "%s: Allocation failed, rc:%s\n", + tf_dir_2_str(rm_db->dir), + strerror(-rc)); return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) { - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0; - } else { - flush_rc = 1; } - /* Only pools for TX direction */ - if (dir == TF_DIR_TX) { - TF_RM_GET_POOLS_TX(tfs, &pool, - TF_SRAM_ENCAP_64B_POOL_NAME); - if (rc) - return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == - sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) { - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0; - } else { - flush_rc = 1; - } - } else { - /* Always prune RX direction */ - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0; - } - - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_SP_SMAC_POOL_NAME, - rc); - if (rc) - return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) { - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0; - } else { - flush_rc = 1; + /* Adjust for any non zero start value */ + rc = tf_rm_adjust_index(rm_db->db, + TF_RM_ADJUST_ADD_BASE, + parms->db_index, + id, + &index); + if (rc) { + TFP_DRV_LOG(ERR, + "%s: Alloc adjust of base index failed, rc:%s\n", + tf_dir_2_str(rm_db->dir), + strerror(-rc)); + return -EINVAL; } - /* Only pools for TX direction */ - if (dir == TF_DIR_TX) { - TF_RM_GET_POOLS_TX(tfs, &pool, - TF_SRAM_SP_SMAC_IPV4_POOL_NAME); - if (rc) - return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == - sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) { - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = - 0; - } else { - flush_rc = 1; - } - } else { - /* Always prune RX direction */ - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0; - } + *parms->index = index; + if (parms->base_index) + *parms->base_index = id; - /* Only pools for TX direction */ - if (dir == TF_DIR_TX) { - TF_RM_GET_POOLS_TX(tfs, &pool, - TF_SRAM_SP_SMAC_IPV6_POOL_NAME); - if (rc) - return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == - sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) { - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = - 0; - } else { - flush_rc = 1; - } - } else { - /* Always prune RX direction */ - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0; - } + return rc; +} - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_STATS_64B_POOL_NAME, - rc); - if (rc) +int +tf_rm_free(struct tf_rm_free_parms *parms) +{ + int rc; + uint32_t adj_index; + struct tf_rm_new_db *rm_db; + enum tf_rm_elem_cfg_type cfg_type; + + TF_CHECK_PARMS2(parms, parms->rm_db); + + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + cfg_type = rm_db->db[parms->db_index].cfg_type; + + /* Bail out if not controlled by RM */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA) + return 
-ENOTSUP; + + /* Bail out if the pool is not valid, should never happen */ + if (rm_db->db[parms->db_index].pool == NULL) { + rc = -ENOTSUP; + TFP_DRV_LOG(ERR, + "%s: Invalid pool for this type:%d, rc:%s\n", + tf_dir_2_str(rm_db->dir), + parms->db_index, + strerror(-rc)); return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) { - flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0; - } else { - flush_rc = 1; } - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_NAT_SPORT_POOL_NAME, - rc); + /* Adjust for any non zero start value */ + rc = tf_rm_adjust_index(rm_db->db, + TF_RM_ADJUST_RM_BASE, + parms->db_index, + parms->index, + &adj_index); if (rc) return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) { - flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0; - } else { - flush_rc = 1; - } - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_NAT_DPORT_POOL_NAME, - rc); + rc = ba_free(rm_db->db[parms->db_index].pool, adj_index); + /* No logging direction matters and that is not available here */ if (rc) return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) { - flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0; - } else { - flush_rc = 1; - } - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_NAT_S_IPV4_POOL_NAME, - rc); - if (rc) + return rc; +} + +int +tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms) +{ + int rc; + uint32_t adj_index; + struct tf_rm_new_db *rm_db; + enum tf_rm_elem_cfg_type cfg_type; + + TF_CHECK_PARMS2(parms, parms->rm_db); + + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + cfg_type = rm_db->db[parms->db_index].cfg_type; + + /* Bail out if not controlled by RM */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA) + return -ENOTSUP; + + /* Bail out if the pool is not valid, should never happen */ + if (rm_db->db[parms->db_index].pool == NULL) { + rc = -ENOTSUP; + TFP_DRV_LOG(ERR, + "%s: Invalid pool for this type:%d, rc:%s\n", + tf_dir_2_str(rm_db->dir), + parms->db_index, + strerror(-rc)); return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) { - flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0; - } else { - flush_rc = 1; } - TF_RM_GET_POOLS(tfs, dir, &pool, - TF_SRAM_NAT_D_IPV4_POOL_NAME, - rc); + /* Adjust for any non zero start value */ + rc = tf_rm_adjust_index(rm_db->db, + TF_RM_ADJUST_RM_BASE, + parms->db_index, + parms->index, + &adj_index); if (rc) return rc; - free_cnt = ba_free_count(pool); - if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) { - flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0; - flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0; - } else { - flush_rc = 1; - } - return flush_rc; -} + if (parms->base_index) + *parms->base_index = adj_index; + *parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool, + adj_index); -/** - * Helper function used to generate an error log for the SRAM types - * that needs to be flushed. The types should have been cleaned up - * ahead of invoking tf_close_session. 
- * - * [in] sram_entries - * SRAM Resource database holding elements to be flushed - */ -static void -tf_rm_log_sram_flush(enum tf_dir dir, - struct tf_rm_entry *sram_entries) -{ - int i; - - /* Walk the sram flush array and log the types that wasn't - * cleaned up. - */ - for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) { - if (sram_entries[i].stride != 0) - PMD_DRV_LOG(ERR, - "%s: %s was not cleaned up\n", - tf_dir_2_str(dir), - tf_hcapi_sram_2_str(i)); - } + return rc; } -void -tf_rm_init(struct tf *tfp __rte_unused) +int +tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms) { - struct tf_session *tfs = - (struct tf_session *)(tfp->session->core_data); - - /* This version is host specific and should be checked against - * when attaching as there is no guarantee that a secondary - * would run from same image version. - */ - tfs->ver.major = TF_SESSION_VER_MAJOR; - tfs->ver.minor = TF_SESSION_VER_MINOR; - tfs->ver.update = TF_SESSION_VER_UPDATE; + struct tf_rm_new_db *rm_db; + enum tf_rm_elem_cfg_type cfg_type; - tfs->session_id.id = 0; - tfs->ref_count = 0; + TF_CHECK_PARMS2(parms, parms->rm_db); - /* Initialization of Table Scopes */ - /* ll_init(&tfs->tbl_scope_ll); */ + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + cfg_type = rm_db->db[parms->db_index].cfg_type; - /* Initialization of HW and SRAM resource DB */ - memset(&tfs->resc, 0, sizeof(struct tf_rm_db)); + /* Bail out if not controlled by HCAPI */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA) + return -ENOTSUP; - /* Initialization of HW Resource Pools */ - ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM); - ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM); + memcpy(parms->info, + &rm_db->db[parms->db_index].alloc, + sizeof(struct tf_rm_alloc_info)); - /* Initialization of SRAM Resource Pools - * These pools are set to the TFLIB defined MAX sizes not - * AFM's HW max as to limit the memory consumption - */ - ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX, - TF_RSVD_SRAM_FULL_ACTION_RX); - ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX, - TF_RSVD_SRAM_FULL_ACTION_TX); - /* Only Multicast Group on RX is supported */ - ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX, - TF_RSVD_SRAM_MCG_RX); - ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX, - TF_RSVD_SRAM_ENCAP_8B_RX); - ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX, - TF_RSVD_SRAM_ENCAP_8B_TX); - ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX, - TF_RSVD_SRAM_ENCAP_16B_RX); - ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX, - TF_RSVD_SRAM_ENCAP_16B_TX); - /* Only Encap 64B on TX is supported */ - ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX, - TF_RSVD_SRAM_ENCAP_64B_TX); - ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX, - TF_RSVD_SRAM_SP_SMAC_RX); - ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX, - TF_RSVD_SRAM_SP_SMAC_TX); - /* Only SP SMAC IPv4 on TX is supported */ - ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX, - TF_RSVD_SRAM_SP_SMAC_IPV4_TX); - /* Only SP SMAC IPv6 on TX is supported */ - ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX, - TF_RSVD_SRAM_SP_SMAC_IPV6_TX); - ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX, - TF_RSVD_SRAM_COUNTER_64B_RX); - ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX, - TF_RSVD_SRAM_COUNTER_64B_TX); - ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX, - TF_RSVD_SRAM_NAT_SPORT_RX); - ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX, - TF_RSVD_SRAM_NAT_SPORT_TX); - ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX, - TF_RSVD_SRAM_NAT_DPORT_RX); - ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX, - 

 int
-tf_rm_allocate_validate(struct tf *tfp)
+tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
 {
-        int rc;
-        int i;
+        struct tf_rm_new_db *rm_db;
+        enum tf_rm_elem_cfg_type cfg_type;

-        for (i = 0; i < TF_DIR_MAX; i++) {
-                rc = tf_rm_allocate_validate_hw(tfp, i);
-                if (rc)
-                        return rc;
-                rc = tf_rm_allocate_validate_sram(tfp, i);
-                if (rc)
-                        return rc;
-        }
+        TF_CHECK_PARMS2(parms, parms->rm_db);

-        /* With both HW and SRAM allocated and validated we can
-         * 'scrub' the reservation on the pools.
-         */
-        tf_rm_reserve_hw(tfp);
-        tf_rm_reserve_sram(tfp);
+        rm_db = (struct tf_rm_new_db *)parms->rm_db;
+        if (!rm_db->db)
+                return -EINVAL;
+        cfg_type = rm_db->db[parms->db_index].cfg_type;

-        return rc;
+        /* Bail out if not controlled by HCAPI */
+        if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+            cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+                return -ENOTSUP;
+
+        *parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
+
+        return 0;
 }
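tf_rm_get_info() and tf_rm_get_hcapi_type() share one gating rule: query operations are legal for both HCAPI flavors, while pool operations such as free, is-allocated, or in-use counting need a bit allocator and therefore require HCAPI_BA. A minimal sketch of that rule, with a hypothetical enum mirroring the driver's tf_rm_elem_cfg_type:

#include <errno.h>

/* Hypothetical mirror of the driver's element configuration types. */
enum elem_cfg_type {
        ELEM_CFG_NULL,     /* element not valid for the device */
        ELEM_CFG_HCAPI,    /* HCAPI managed, no local bit allocator */
        ELEM_CFG_HCAPI_BA, /* HCAPI managed with a local bit allocator */
        ELEM_CFG_PRIVATE,  /* locally managed, hcapi_type ignored */
};

/* Query ops (get info, get HCAPI type) accept both HCAPI flavors. */
static int check_query_ok(enum elem_cfg_type t)
{
        return (t == ELEM_CFG_HCAPI || t == ELEM_CFG_HCAPI_BA) ? 0 : -ENOTSUP;
}

/* Pool ops (alloc/free/is-allocated/in-use count) need a bit
 * allocator, so only the BA flavor qualifies.
 */
static int check_pool_ok(enum elem_cfg_type t)
{
        return (t == ELEM_CFG_HCAPI_BA) ? 0 : -ENOTSUP;
}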

 int
-tf_rm_close(struct tf *tfp)
+tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
 {
-        int rc;
-        int rc_close = 0;
-        int i;
-        struct tf_rm_entry *hw_entries;
-        struct tf_rm_entry *sram_entries;
-        struct tf_rm_entry *sram_flush_entries;
-        struct tf_session *tfs __rte_unused =
-                (struct tf_session *)(tfp->session->core_data);
-
-        struct tf_rm_db flush_resc = tfs->resc;
-
-        /* On close it is assumed that the session has already cleaned
-         * up all its resources, individually, while destroying its
-         * flows. No checking is performed, thus the behavior is as
-         * follows.
-         *
-         * Session RM will signal FW to release session resources. FW
-         * will perform invalidation of all the allocated entries
-         * (assures any outstanding resources have been cleared), then
-         * free the FW RM instance.
-         *
-         * Session will then be freed by tf_close_session() thus there
-         * is no need to clean each resource pool as the whole session
-         * is going away.
-         */
+        int rc = 0;
+        struct tf_rm_new_db *rm_db;
+        enum tf_rm_elem_cfg_type cfg_type;

-        for (i = 0; i < TF_DIR_MAX; i++) {
-                if (i == TF_DIR_RX) {
-                        hw_entries = tfs->resc.rx.hw_entry;
-                        sram_entries = tfs->resc.rx.sram_entry;
-                        sram_flush_entries = flush_resc.rx.sram_entry;
-                } else {
-                        hw_entries = tfs->resc.tx.hw_entry;
-                        sram_entries = tfs->resc.tx.sram_entry;
-                        sram_flush_entries = flush_resc.tx.sram_entry;
-                }
+        TF_CHECK_PARMS2(parms, parms->rm_db);

-                /* Check for any not previously freed SRAM resources
-                 * and flush if required.
-                 */
-                rc = tf_rm_sram_to_flush(tfs,
-                                         i,
-                                         sram_entries,
-                                         sram_flush_entries);
-                if (rc) {
-                        rc_close = -ENOTEMPTY;
-                        /* Log error */
-                        PMD_DRV_LOG(ERR,
-                                    "%s, lingering SRAM resources\n",
-                                    tf_dir_2_str(i));
-
-                        /* Log the entries to be flushed */
-                        tf_rm_log_sram_flush(i, sram_flush_entries);
-
-                        rc = tf_msg_session_sram_resc_flush(tfp,
-                                                            i,
-                                                            sram_flush_entries);
-                        if (rc) {
-                                rc_close = rc;
-                                /* Log error */
-                                PMD_DRV_LOG(ERR,
-                                            "%s, HW flush failed\n",
-                                            tf_dir_2_str(i));
-                        }
-                }
+        rm_db = (struct tf_rm_new_db *)parms->rm_db;
+        if (!rm_db->db)
+                return -EINVAL;
+        cfg_type = rm_db->db[parms->db_index].cfg_type;

-                rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
-                if (rc) {
-                        rc_close = rc;
-                        /* Log error */
-                        PMD_DRV_LOG(ERR,
-                                    "%s, HW free failed\n",
-                                    tf_dir_2_str(i));
-                }
+        /* Bail out if not controlled by RM */
+        if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+                return -ENOTSUP;

-                rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
-                if (rc) {
-                        rc_close = rc;
-                        /* Log error */
-                        PMD_DRV_LOG(ERR,
-                                    "%s, SRAM free failed\n",
-                                    tf_dir_2_str(i));
-                }
+        /* Bail silently (no logging); if the pool is not valid there
+         * were no elements allocated for it.
+         */
+        if (rm_db->db[parms->db_index].pool == NULL) {
+                *parms->count = 0;
+                return 0;
         }

-        return rc_close;
-}
-
-int
-tf_rm_convert_tbl_type(enum tf_tbl_type type,
-                       uint32_t *hcapi_type)
-{
-        int rc = 0;
-
-        switch (type) {
-        case TF_TBL_TYPE_FULL_ACT_RECORD:
-                *hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
-                break;
-        case TF_TBL_TYPE_MCAST_GROUPS:
-                *hcapi_type = TF_RESC_TYPE_SRAM_MCG;
-                break;
-        case TF_TBL_TYPE_ACT_ENCAP_8B:
-                *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
-                break;
-        case TF_TBL_TYPE_ACT_ENCAP_16B:
-                *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
-                break;
-        case TF_TBL_TYPE_ACT_ENCAP_64B:
-                *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
-                break;
-        case TF_TBL_TYPE_ACT_SP_SMAC:
-                *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
-                break;
-        case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
-                *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
-                break;
-        case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
-                *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
-                break;
-        case TF_TBL_TYPE_ACT_STATS_64:
-                *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
-                break;
-        case TF_TBL_TYPE_ACT_MODIFY_SPORT:
-                *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
-                break;
-        case TF_TBL_TYPE_ACT_MODIFY_DPORT:
-                *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
-                break;
-        case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
-                *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
-                break;
-        case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
-                *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
-                break;
-        case TF_TBL_TYPE_METER_PROF:
-                *hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
-                break;
-        case TF_TBL_TYPE_METER_INST:
-                *hcapi_type = TF_RESC_TYPE_HW_METER_INST;
-                break;
-        case TF_TBL_TYPE_MIRROR_CONFIG:
-                *hcapi_type = TF_RESC_TYPE_HW_MIRROR;
-                break;
-        case TF_TBL_TYPE_UPAR:
-                *hcapi_type = TF_RESC_TYPE_HW_UPAR;
-                break;
-        case TF_TBL_TYPE_EPOCH0:
-                *hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
-                break;
-        case TF_TBL_TYPE_EPOCH1:
-                *hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
-                break;
-        case TF_TBL_TYPE_METADATA:
-                *hcapi_type = TF_RESC_TYPE_HW_METADATA;
-                break;
-        case TF_TBL_TYPE_CT_STATE:
-                *hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
-                break;
-        case TF_TBL_TYPE_RANGE_PROF:
-                *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
-                break;
-        case TF_TBL_TYPE_RANGE_ENTRY:
-                *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
-                break;
-        case TF_TBL_TYPE_LAG:
-                *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
-                break;
-        /* Not yet supported */
-        case TF_TBL_TYPE_ACT_ENCAP_32B:
-        case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
-        case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
-        case TF_TBL_TYPE_VNIC_SVIF:
-        case TF_TBL_TYPE_EXT:   /* No pools for this type */
-        case TF_TBL_TYPE_EXT_0: /* No pools for this type */
-        default:
-                *hcapi_type = -1;
-                rc = -EOPNOTSUPP;
-        }
+        *parms->count = ba_inuse_count(rm_db->db[parms->db_index].pool);

         return rc;
+
 }
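tf_rm_get_inuse_count() above is a thin wrapper around ba_inuse_count(); counting in-use entries in a bit-allocator pool reduces to counting set bits. A stand-alone toy analogue over a plain word array (the layout is hypothetical, the driver's struct bitalloc differs, and the GCC-style __builtin_popcountll() builtin is assumed):

#include <stdint.h>

/* Count allocated entries in a toy bitmap pool where a set bit means
 * "in use". This only illustrates the counting idea behind
 * ba_inuse_count(); it is not the driver's implementation.
 */
static int toy_inuse_count(const uint64_t *bmap, int nwords)
{
        int i;
        int count = 0;

        for (i = 0; i < nwords; i++)
                count += __builtin_popcountll(bmap[i]);

        return count;
}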

 int
-tf_rm_convert_index(struct tf_session *tfs,
-                    enum tf_dir dir,
-                    enum tf_tbl_type type,
-                    enum tf_rm_convert_type c_type,
-                    uint32_t index,
-                    uint32_t *convert_index)
+tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
 {
-        int rc;
-        struct tf_rm_resc *resc;
-        uint32_t hcapi_type;
+        struct tf_rm_new_db *rm_db;
+        enum tf_rm_elem_cfg_type cfg_type;
         uint32_t base_index;
+        uint32_t stride;
+        int rc = 0;

-        if (dir == TF_DIR_RX)
-                resc = &tfs->resc.rx;
-        else if (dir == TF_DIR_TX)
-                resc = &tfs->resc.tx;
-        else
-                return -EOPNOTSUPP;
-
-        rc = tf_rm_convert_tbl_type(type, &hcapi_type);
-        if (rc)
-                return -1;
-
-        switch (type) {
-        case TF_TBL_TYPE_FULL_ACT_RECORD:
-        case TF_TBL_TYPE_MCAST_GROUPS:
-        case TF_TBL_TYPE_ACT_ENCAP_8B:
-        case TF_TBL_TYPE_ACT_ENCAP_16B:
-        case TF_TBL_TYPE_ACT_ENCAP_32B:
-        case TF_TBL_TYPE_ACT_ENCAP_64B:
-        case TF_TBL_TYPE_ACT_SP_SMAC:
-        case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
-        case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
-        case TF_TBL_TYPE_ACT_STATS_64:
-        case TF_TBL_TYPE_ACT_MODIFY_SPORT:
-        case TF_TBL_TYPE_ACT_MODIFY_DPORT:
-        case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
-        case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
-                base_index = resc->sram_entry[hcapi_type].start;
-                break;
-        case TF_TBL_TYPE_MIRROR_CONFIG:
-        case TF_TBL_TYPE_METER_PROF:
-        case TF_TBL_TYPE_METER_INST:
-        case TF_TBL_TYPE_UPAR:
-        case TF_TBL_TYPE_EPOCH0:
-        case TF_TBL_TYPE_EPOCH1:
-        case TF_TBL_TYPE_METADATA:
-        case TF_TBL_TYPE_CT_STATE:
-        case TF_TBL_TYPE_RANGE_PROF:
-        case TF_TBL_TYPE_RANGE_ENTRY:
-        case TF_TBL_TYPE_LAG:
-                base_index = resc->hw_entry[hcapi_type].start;
-                break;
-        /* Not yet supported */
-        case TF_TBL_TYPE_VNIC_SVIF:
-        case TF_TBL_TYPE_EXT:   /* No pools for this type */
-        case TF_TBL_TYPE_EXT_0: /* No pools for this type */
-        default:
-                return -EOPNOTSUPP;
+        TF_CHECK_PARMS2(parms, parms->rm_db);
+
+        rm_db = (struct tf_rm_new_db *)parms->rm_db;
+        if (!rm_db->db)
+                return -EINVAL;
+        cfg_type = rm_db->db[parms->db_index].cfg_type;
+
+        /* Bail out if not controlled by RM */
+        if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+                return -ENOTSUP;
+
+        /* Bail out if the pool is not valid, should never happen */
+        if (rm_db->db[parms->db_index].pool == NULL) {
+                rc = -ENOTSUP;
+                TFP_DRV_LOG(ERR,
+                            "%s: Invalid pool for this type:%d, rc:%s\n",
+                            tf_dir_2_str(rm_db->dir),
+                            parms->db_index,
+                            strerror(-rc));
+                return rc;
         }

-        switch (c_type) {
-        case TF_RM_CONVERT_RM_BASE:
-                *convert_index = index - base_index;
-                break;
-        case TF_RM_CONVERT_ADD_BASE:
-                *convert_index = index + base_index;
-                break;
-        default:
-                return -EOPNOTSUPP;
-        }
+        base_index = rm_db->db[parms->db_index].alloc.entry.start;
+        stride = rm_db->db[parms->db_index].alloc.entry.stride;

-        return 0;
+        if (parms->starting_index < base_index ||
+            parms->starting_index + parms->num_entries > base_index + stride)
+                return -EINVAL;
+
+        return rc;
 }
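The final check above accepts the span [starting_index, starting_index + num_entries) only when it lies inside the element's reserved range [base_index, base_index + stride). A stand-alone restatement of that predicate; the 64-bit sum is an assumption added here to sidestep the wrap-around a 32-bit addition could hit, whereas the driver itself computes in 32 bits:

#include <stdbool.h>
#include <stdint.h>

/* True when [start, start + num) lies within [base, base + stride). */
static bool indexes_in_range(uint32_t start, uint32_t num,
                             uint32_t base, uint32_t stride)
{
        uint64_t end = (uint64_t)start + num;

        return start >= base && end <= (uint64_t)base + stride;
}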