--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include "bitalloc.h"
+
+#define BITALLOC_MAX_LEVELS 6
+
+/* Finds the first bit set plus 1, equivalent to gcc __builtin_ffs */
+static int
+ba_ffs(bitalloc_word_t v)
+{
+ int c; /* c will be the number of zero bits on the right plus 1 */
+
+ v &= -v;
+ c = v ? 32 : 0;
+
+ if (v & 0x0000FFFF)
+ c -= 16;
+ if (v & 0x00FF00FF)
+ c -= 8;
+ if (v & 0x0F0F0F0F)
+ c -= 4;
+ if (v & 0x33333333)
+ c -= 2;
+ if (v & 0x55555555)
+ c -= 1;
+
+ return c;
+}
+
+/* Initializes the pool to hold 'size' entries, all free.
+ *
+ * The storage is a tree of 32-bit summary words: each level-N word
+ * summarizes 32 words of the level below.  A set bit means "free"
+ * (ba_free sets bits, ba_alloc clears them).  The word count of the
+ * next level is embedded after each level's words so the helpers can
+ * walk the tree without recomputing sizes.
+ *
+ * Returns 0 on success, -1 if size is out of range.
+ */
+int
+ba_init(struct bitalloc *pool, int size)
+{
+	bitalloc_word_t *mem = (bitalloc_word_t *)pool;
+	int i;
+
+	/* Initialize */
+	pool->size = 0;
+
+	if (size < 1 || size > BITALLOC_MAX_SIZE)
+		return -1;
+
+	/* Zero structure */
+	for (i = 0;
+	     i < (int)(BITALLOC_SIZEOF(size) / sizeof(bitalloc_word_t));
+	     i++)
+		mem[i] = 0;
+
+	/* Initialize */
+	pool->size = size;
+
+	/* Embed number of words of next level, after each level */
+	int words[BITALLOC_MAX_LEVELS];
+	int lev = 0;
+	int offset = 0;
+
+	/* Compute the word count of every level, bottom (0) up */
+	words[0] = (size + 31) / 32;
+	while (words[lev] > 1) {
+		lev++;
+		words[lev] = (words[lev - 1] + 31) / 32;
+	}
+
+	/* Walk top-down: after each level's words, store the size of
+	 * the next (larger) level below it.
+	 */
+	while (lev) {
+		offset += words[lev];
+		pool->storage[offset++] = words[--lev];
+	}
+
+	/* Free the entire pool (sets every "free" bit at all levels) */
+	for (i = 0; i < size; i++)
+		ba_free(pool, i);
+
+	return 0;
+}
+
+/* Recursively allocates the first free index in the pool.
+ *
+ * A set bit means "free".  Each recursion level descends into the
+ * word selected by the first set bit of the current summary word.
+ * At the bottom level the bit is claimed and free_count decremented;
+ * *clear propagates "this word became empty" upward so each parent
+ * level clears its own summary bit when appropriate.
+ *
+ * Returns the allocated index, or -1 if the pool is full.
+ */
+static int
+ba_alloc_helper(struct bitalloc *pool,
+		int offset,
+		int words,
+		unsigned int size,
+		int index,
+		int *clear)
+{
+	bitalloc_word_t *storage = &pool->storage[offset];
+	int loc = ba_ffs(storage[index]);
+	int r;
+
+	if (loc == 0)
+		return -1;
+
+	loc--;
+
+	if (pool->size > size) {
+		r = ba_alloc_helper(pool,
+				    offset + words + 1,
+				    storage[words],
+				    size * 32,
+				    index * 32 + loc,
+				    clear);
+	} else {
+		r = index * 32 + loc;
+		*clear = 1;
+		pool->free_count--;
+	}
+
+	if (*clear) {
+		/* Unsigned constant: (1 << 31) on a signed int is UB */
+		storage[index] &= ~(1U << loc);
+		*clear = (storage[index] == 0);
+	}
+
+	return r;
+}
+
+/* Allocates the lowest free index in the pool.
+ * Returns the index, or -1 if the pool is full.
+ */
+int
+ba_alloc(struct bitalloc *pool)
+{
+	int clear = 0;
+
+	return ba_alloc_helper(pool, 0, 1, 32, 0, &clear);
+}
+
+/* Recursively allocates a specific index.
+ *
+ * Descends to the bottom level first; *index is divided by 32 on the
+ * way back up so each level addresses its own summary word.  r == 1
+ * marks the bottom level, where the "free" bit is checked and
+ * claimed; *clear propagates "word became empty" to parent levels.
+ *
+ * Returns 0 on success, -1 if the index was already allocated.
+ */
+static int
+ba_alloc_index_helper(struct bitalloc *pool,
+		      int offset,
+		      int words,
+		      unsigned int size,
+		      int *index,
+		      int *clear)
+{
+	bitalloc_word_t *storage = &pool->storage[offset];
+	int loc;
+	int r;
+
+	if (pool->size > size)
+		r = ba_alloc_index_helper(pool,
+					  offset + words + 1,
+					  storage[words],
+					  size * 32,
+					  index,
+					  clear);
+	else
+		r = 1; /* Check if already allocated */
+
+	loc = (*index % 32);
+	*index = *index / 32;
+
+	if (r == 1) {
+		/* Unsigned constant: (1 << 31) on a signed int is UB */
+		r = (storage[*index] & (1U << loc)) ? 0 : -1;
+		if (r == 0) {
+			*clear = 1;
+			pool->free_count--;
+		}
+	}
+
+	if (*clear) {
+		storage[*index] &= ~(1U << loc);
+		*clear = (storage[*index] == 0);
+	}
+
+	return r;
+}
+
+/* Allocates a specific index in the pool.
+ * Returns the index on success, -1 if it is out of range or already
+ * allocated.
+ */
+int
+ba_alloc_index(struct bitalloc *pool, int index)
+{
+	int clear = 0;
+	int index_copy = index; /* helper destroys its index argument */
+
+	if (index < 0 || index >= (int)pool->size)
+		return -1;
+
+	if (ba_alloc_index_helper(pool, 0, 1, 32, &index_copy, &clear) >= 0)
+		return index;
+	else
+		return -1;
+}
+
+/* Recursively tests whether an index is in use.
+ *
+ * Descends to the bottom level (r == 1), where a clear bit means the
+ * entry is in use.  *index is divided by 32 on the way back up.
+ *
+ * Returns -1 if in use, 0 if free.
+ */
+static int
+ba_inuse_helper(struct bitalloc *pool,
+		int offset,
+		int words,
+		unsigned int size,
+		int *index)
+{
+	bitalloc_word_t *storage = &pool->storage[offset];
+	int loc;
+	int r;
+
+	if (pool->size > size)
+		r = ba_inuse_helper(pool,
+				    offset + words + 1,
+				    storage[words],
+				    size * 32,
+				    index);
+	else
+		r = 1; /* Check if in use */
+
+	loc = (*index % 32);
+	*index = *index / 32;
+
+	if (r == 1)
+		/* Unsigned constant: (1 << 31) on a signed int is UB */
+		r = (storage[*index] & (1U << loc)) ? -1 : 0;
+
+	return r;
+}
+
+/* Queries whether an index is in use.
+ * Returns 1 if allocated, 0 if free; also returns 0 for an invalid
+ * index (the -1 range error compares unequal to 0).
+ */
+int
+ba_inuse(struct bitalloc *pool, int index)
+{
+	if (index < 0 || index >= (int)pool->size)
+		return -1;
+
+	return ba_inuse_helper(pool, 0, 1, 32, &index) == 0;
+}
+
+/* Recursively frees an index.
+ *
+ * Descends to the bottom level (r == 1), where a set bit means the
+ * entry is already free.  On success (r == 0) the "free" bit is set
+ * at this level and at every parent level on the way back up, and
+ * free_count is incremented once (bottom level only).
+ *
+ * Returns 0 on success, -1 if the index was already free.
+ */
+static int
+ba_free_helper(struct bitalloc *pool,
+	       int offset,
+	       int words,
+	       unsigned int size,
+	       int *index)
+{
+	bitalloc_word_t *storage = &pool->storage[offset];
+	int loc;
+	int r;
+
+	if (pool->size > size)
+		r = ba_free_helper(pool,
+				   offset + words + 1,
+				   storage[words],
+				   size * 32,
+				   index);
+	else
+		r = 1; /* Check if already free */
+
+	loc = (*index % 32);
+	*index = *index / 32;
+
+	if (r == 1) {
+		/* Unsigned constant: (1 << 31) on a signed int is UB */
+		r = (storage[*index] & (1U << loc)) ? -1 : 0;
+		if (r == 0)
+			pool->free_count++;
+	}
+
+	if (r == 0)
+		storage[*index] |= (1U << loc);
+
+	return r;
+}
+
+/* Frees an index.  Returns 0 on success, -1 if the index is out of
+ * range or was already free (double-free is harmless).
+ */
+int
+ba_free(struct bitalloc *pool, int index)
+{
+	if (index < 0 || index >= (int)pool->size)
+		return -1;
+
+	return ba_free_helper(pool, 0, 1, 32, &index);
+}
+
+/* Frees an index if it was allocated, reporting its prior state.
+ * Returns 1 (BA_ENTRY_IN_USE) if it was allocated and is now freed,
+ * 0 (BA_ENTRY_FREE) if it was already free, -1 on invalid index.
+ */
+int
+ba_inuse_free(struct bitalloc *pool, int index)
+{
+	if (index < 0 || index >= (int)pool->size)
+		return -1;
+
+	/* ba_free_helper returns 0 (freed) or -1 (already free) */
+	return ba_free_helper(pool, 0, 1, 32, &index) + 1;
+}
+
+/* Returns the number of free entries in the pool */
+int
+ba_free_count(struct bitalloc *pool)
+{
+	return (int)pool->free_count;
+}
+
+/* Returns the number of allocated (in use) entries in the pool */
+int ba_inuse_count(struct bitalloc *pool)
+{
+	return (int)(pool->size) - (int)(pool->free_count);
+}
+
+/* Recursively finds the next in-use index at or after *index.
+ *
+ * Only the bottom level searches: a clear bit means "in use", so the
+ * first zero bit of storage[*index] (masked to positions >= loc) is
+ * the candidate; if none, the remaining bottom-level words are
+ * scanned linearly.  When 'free' is set the found entry is also
+ * freed, setting the "free" bit at every level on the way back up.
+ *
+ * Returns the found index, or -1 if none is in use past *index.
+ */
+static int
+ba_find_next_helper(struct bitalloc *pool,
+		    int offset,
+		    int words,
+		    unsigned int size,
+		    int *index,
+		    int free)
+{
+	bitalloc_word_t *storage = &pool->storage[offset];
+	/* r = -1 keeps it defined on every path (silences
+	 * -Wmaybe-uninitialized); both branches below assign it.
+	 */
+	int loc, r = -1, bottom = 0;
+
+	if (pool->size > size)
+		r = ba_find_next_helper(pool,
+					offset + words + 1,
+					storage[words],
+					size * 32,
+					index,
+					free);
+	else
+		bottom = 1; /* Bottom of tree */
+
+	loc = (*index % 32);
+	*index = *index / 32;
+
+	if (bottom) {
+		int bit_index = *index * 32;
+
+		/* First zero bit at position >= loc in this word */
+		loc = ba_ffs(~storage[*index] & ((bitalloc_word_t)-1 << loc));
+		if (loc > 0) {
+			loc--;
+			r = (bit_index + loc);
+			if (r >= (int)pool->size)
+				r = -1;
+		} else {
+			/* Loop over array at bottom of tree */
+			r = -1;
+			bit_index += 32;
+			*index = *index + 1;
+			while ((int)pool->size > bit_index) {
+				loc = ba_ffs(~storage[*index]);
+
+				if (loc > 0) {
+					loc--;
+					r = (bit_index + loc);
+					if (r >= (int)pool->size)
+						r = -1;
+					break;
+				}
+				bit_index += 32;
+				*index = *index + 1;
+			}
+		}
+	}
+
+	if (r >= 0 && (free)) {
+		if (bottom)
+			pool->free_count++;
+		/* Unsigned constant: (1 << 31) on a signed int is UB */
+		storage[*index] |= (1U << loc);
+	}
+
+	return r;
+}
+
+/* Finds the next in-use index, starting the search at 'index'.
+ * Returns that index, or -1 if the range is invalid or nothing past
+ * 'index' is in use (fast-path: a fully free pool).
+ */
+int
+ba_find_next_inuse(struct bitalloc *pool, int index)
+{
+	if (index < 0 ||
+	    index >= (int)pool->size ||
+	    pool->free_count == pool->size)
+		return -1;
+
+	return ba_find_next_helper(pool, 0, 1, 32, &index, 0);
+}
+
+/* Like ba_find_next_inuse, but also frees the found index.
+ * Returns the freed index, or -1 if none was found.
+ */
+int
+ba_find_next_inuse_free(struct bitalloc *pool, int index)
+{
+	if (index < 0 ||
+	    index >= (int)pool->size ||
+	    pool->free_count == pool->size)
+		return -1;
+
+	return ba_find_next_helper(pool, 0, 1, 32, &index, 1);
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BITALLOC_H_
+#define _BITALLOC_H_
+
+#include <stdint.h>
+
+/* Bitalloc works on uint32_t as its word size */
+typedef uint32_t bitalloc_word_t;
+
+struct bitalloc {
+	bitalloc_word_t size;       /* number of allocatable entries */
+	bitalloc_word_t free_count; /* entries currently free */
+	/* First word of the variable-length summary-word tree; the
+	 * real storage extends past the struct (see BITALLOC_INST).
+	 */
+	bitalloc_word_t storage[1];
+};
+
+/* Words needed at each tree level for 's' entries (level 0 is the
+ * bottom; each higher level summarizes 32 words of the one below).
+ */
+#define BA_L0(s) (((s) + 31) / 32)
+#define BA_L1(s) ((BA_L0(s) + 31) / 32)
+#define BA_L2(s) ((BA_L1(s) + 31) / 32)
+#define BA_L3(s) ((BA_L2(s) + 31) / 32)
+#define BA_L4(s) ((BA_L3(s) + 31) / 32)
+
+/* Total bytes for a pool of 'size' entries: the struct plus all tree
+ * levels (each non-bottom level costs one extra word for the embedded
+ * next-level count), rounded up to a whole number of struct bitalloc
+ * so BITALLOC_INST can declare it as an array.
+ */
+#define BITALLOC_SIZEOF(size)                                    \
+	(sizeof(struct bitalloc) *                               \
+	 (((sizeof(struct bitalloc) +                            \
+	    sizeof(struct bitalloc) - 1 +                        \
+	    (sizeof(bitalloc_word_t) *                           \
+	     ((BA_L0(size) - 1) +                                \
+	      ((BA_L0(size) == 1) ? 0 : (BA_L1(size) + 1)) +     \
+	      ((BA_L1(size) == 1) ? 0 : (BA_L2(size) + 1)) +     \
+	      ((BA_L2(size) == 1) ? 0 : (BA_L3(size) + 1)) +     \
+	      ((BA_L3(size) == 1) ? 0 : (BA_L4(size) + 1)))))) / \
+	  sizeof(struct bitalloc)))
+
+/* 32^6: the deepest tree the allocator supports */
+#define BITALLOC_MAX_SIZE (32 * 32 * 32 * 32 * 32 * 32)
+
+/* The instantiation of a bitalloc looks a bit odd. Since a
+ * bit allocator has variable storage, we need a way to get a
+ * pointer to a bitalloc structure that points to the correct
+ * amount of storage. We do this by creating an array of
+ * bitalloc where the first element in the array is the
+ * actual bitalloc base structure, and the remaining elements
+ * in the array provide the storage for it. This approach allows
+ * instances to be individual variables or members of larger
+ * structures.
+ */
+#define BITALLOC_INST(name, size)                      \
+	struct bitalloc name[(BITALLOC_SIZEOF(size) /  \
+			      sizeof(struct bitalloc))]
+
+/* Symbolic return codes */
+#define BA_SUCCESS 0
+#define BA_FAIL -1
+#define BA_ENTRY_FREE 0
+#define BA_ENTRY_IN_USE 1
+#define BA_NO_ENTRY_FOUND -1
+
+/**
+ * Initializes the bitallocator
+ *
+ * Returns 0 on success, -1 on failure. Size is arbitrary up to
+ * BITALLOC_MAX_SIZE
+ */
+int ba_init(struct bitalloc *pool, int size);
+
+/**
+ * Returns -1 on failure, or index of allocated entry
+ */
+int ba_alloc(struct bitalloc *pool);
+int ba_alloc_index(struct bitalloc *pool, int index);
+
+/**
+ * Query a particular index in a pool to check if its in use.
+ *
+ * Returns -1 on invalid index, 1 if the index is allocated, 0 if it
+ * is free
+ */
+int ba_inuse(struct bitalloc *pool, int index);
+
+/**
+ * Variant of ba_inuse that frees the index if it is allocated, same
+ * return codes as ba_inuse
+ */
+int ba_inuse_free(struct bitalloc *pool, int index);
+
+/**
+ * Find next index that is in use, start checking at index 'idx'
+ *
+ * Returns next index that is in use on success, or
+ * -1 if no in use index is found
+ */
+int ba_find_next_inuse(struct bitalloc *pool, int idx);
+
+/**
+ * Variant of ba_find_next_inuse that also frees the next in use index,
+ * same return codes as ba_find_next_inuse
+ */
+int ba_find_next_inuse_free(struct bitalloc *pool, int idx);
+
+/**
+ * Multiple freeing of the same index has no negative side effects,
+ * but will return -1. returns -1 on failure, 0 on success.
+ */
+int ba_free(struct bitalloc *pool, int index);
+
+/**
+ * Returns the pool's free count
+ */
+int ba_free_count(struct bitalloc *pool);
+
+/**
+ * Returns the pool's in use count
+ */
+int ba_inuse_count(struct bitalloc *pool);
+
+#endif /* _BITALLOC_H_ */
#include "hsi_struct_def_dpdk.h"
#include "hwrm_tf.h"
+/**
+ * Endian converts min and max values from the HW response to the query
+ *
+ * NOTE: like all the conversion macros below, arguments are expanded
+ * more than once — pass only side-effect-free expressions.
+ */
+#define TF_HW_RESP_TO_QUERY(query, index, response, element) do { \
+	(query)->hw_query[index].min = \
+		tfp_le_to_cpu_16(response. element ## _min); \
+	(query)->hw_query[index].max = \
+		tfp_le_to_cpu_16(response. element ## _max); \
+} while (0)
+
+/**
+ * Endian converts the number of entries from the alloc to the request
+ */
+#define TF_HW_ALLOC_TO_REQ(alloc, index, request, element) \
+	(request. num_ ## element = tfp_cpu_to_le_16((alloc)->hw_num[index]))
+
+/**
+ * Endian converts the start and stride value from the free to the request
+ */
+#define TF_HW_FREE_TO_REQ(hw_entry, index, request, element) do { \
+	request.element ## _start = \
+		tfp_cpu_to_le_16(hw_entry[index].start); \
+	request.element ## _stride = \
+		tfp_cpu_to_le_16(hw_entry[index].stride); \
+} while (0)
+
+/**
+ * Endian converts the start and stride from the HW response to the
+ * alloc
+ */
+#define TF_HW_RESP_TO_ALLOC(hw_entry, index, response, element) do { \
+	hw_entry[index].start = \
+		tfp_le_to_cpu_16(response.element ## _start); \
+	hw_entry[index].stride = \
+		tfp_le_to_cpu_16(response.element ## _stride); \
+} while (0)
+
+/**
+ * Endian converts min and max values from the SRAM response to the
+ * query
+ */
+#define TF_SRAM_RESP_TO_QUERY(query, index, response, element) do { \
+	(query)->sram_query[index].min = \
+		tfp_le_to_cpu_16(response.element ## _min); \
+	(query)->sram_query[index].max = \
+		tfp_le_to_cpu_16(response.element ## _max); \
+} while (0)
+
+/**
+ * Endian converts the number of entries from the action (alloc) to
+ * the request
+ */
+#define TF_SRAM_ALLOC_TO_REQ(action, index, request, element) \
+	(request. num_ ## element = tfp_cpu_to_le_16((action)->sram_num[index]))
+
+/**
+ * Endian converts the start and stride value from the free to the request
+ */
+#define TF_SRAM_FREE_TO_REQ(sram_entry, index, request, element) do { \
+	request.element ## _start = \
+		tfp_cpu_to_le_16(sram_entry[index].start); \
+	request.element ## _stride = \
+		tfp_cpu_to_le_16(sram_entry[index].stride); \
+} while (0)
+
+/**
+ * Endian converts the start and stride from the HW response to the
+ * alloc
+ */
+#define TF_SRAM_RESP_TO_ALLOC(sram_entry, index, response, element) do { \
+	sram_entry[index].start = \
+		tfp_le_to_cpu_16(response.element ## _start); \
+	sram_entry[index].stride = \
+		tfp_le_to_cpu_16(response.element ## _stride); \
+} while (0)
+
/**
* Sends session open request to TF Firmware
*/
return rc;
}
+/**
+ * Sends session attach request to TF Firmware
+ *
+ * Not yet implemented: this is a stub that always fails (-1); every
+ * parameter is unused until the attach message is wired up.
+ */
+int
+tf_msg_session_attach(struct tf *tfp __rte_unused,
+		      char *ctrl_chan_name __rte_unused,
+		      uint8_t tf_fw_session_id __rte_unused)
+{
+	return -1;
+}
+
+/**
+ * Sends session close request to TF Firmware
+ *
+ * Returns 0 on success, or the tfp_send_msg_direct() error code.
+ * NOTE(review): assumes tfp->session and its core_data are valid —
+ * the caller must not invoke this on an unopened session.
+ */
+int
+tf_msg_session_close(struct tf *tfp)
+{
+	int rc;
+	struct hwrm_tf_session_close_input req = { 0 };
+	struct hwrm_tf_session_close_output resp = { 0 };
+	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+	struct tfp_send_msg_parms parms = { 0 };
+
+	/* Populate the request */
+	req.fw_session_id =
+		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+
+	parms.tf_type = HWRM_TF_SESSION_CLOSE;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = TF_KONG_MB;
+
+	rc = tfp_send_msg_direct(tfp,
+				 &parms);
+	return rc;
+}
+
/**
* Sends session query config request to TF Firmware
*/
&parms);
return rc;
}
+
+/**
+ * Sends session HW resource query capability request to TF Firmware
+ *
+ * Zeroes *query, sends the tunneled QCAPS message for the given
+ * direction, then converts every per-resource min/max pair from the
+ * little-endian response into the query structure.
+ *
+ * Returns a tfp_send_msg_tunneled() error code on transport failure,
+ * otherwise the firmware response code (0 on success).
+ */
+int
+tf_msg_session_hw_resc_qcaps(struct tf *tfp,
+			     enum tf_dir dir,
+			     struct tf_rm_hw_query *query)
+{
+	int rc;
+	struct tfp_send_msg_parms parms = { 0 };
+	struct tf_session_hw_resc_qcaps_input req = { 0 };
+	struct tf_session_hw_resc_qcaps_output resp = { 0 };
+	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+	memset(query, 0, sizeof(*query));
+
+	/* Populate the request */
+	req.fw_session_id =
+		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+	req.flags = tfp_cpu_to_le_16(dir);
+
+	MSG_PREP(parms,
+		 TF_KONG_MB,
+		 HWRM_TF,
+		 HWRM_TFT_SESSION_HW_RESC_QCAPS,
+		 req,
+		 resp);
+
+	rc = tfp_send_msg_tunneled(tfp, &parms);
+	if (rc)
+		return rc;
+
+	/* Process the response */
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
+			    l2_ctx_tcam_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_FUNC, resp,
+			    prof_func);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_PROF_TCAM, resp,
+			    prof_tcam_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
+			    em_prof_id);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EM_REC, resp,
+			    em_record_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
+			    wc_tcam_prof_id);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_WC_TCAM, resp,
+			    wc_tcam_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_PROF, resp,
+			    meter_profiles);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METER_INST,
+			    resp, meter_inst);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_MIRROR, resp,
+			    mirrors);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_UPAR, resp,
+			    upar);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_SP_TCAM, resp,
+			    sp_tcam_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_L2_FUNC, resp,
+			    l2_func);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_FKB, resp,
+			    flex_key_templ);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
+			    tbl_scope);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH0, resp,
+			    epoch0_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_EPOCH1, resp,
+			    epoch1_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_METADATA, resp,
+			    metadata);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_CT_STATE, resp,
+			    ct_state);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_PROF, resp,
+			    range_prof);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
+			    range_entries);
+	TF_HW_RESP_TO_QUERY(query, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
+			    lag_tbl_entries);
+
+	return tfp_le_to_cpu_32(parms.tf_resp_code);
+}
+
+/**
+ * Sends session HW resource allocation request to TF Firmware
+ *
+ * Converts the requested per-resource counts from hw_alloc into the
+ * tunneled ALLOC request, sends it, then converts the start/stride
+ * values of the response into hw_entry (which is zeroed first).
+ *
+ * Returns a tfp_send_msg_tunneled() error code on transport failure,
+ * otherwise the firmware response code (0 on success).
+ *
+ * Note: the __rte_unused attributes previously on tfp, hw_alloc and
+ * hw_entry were wrong — all three parameters are used below.
+ */
+int
+tf_msg_session_hw_resc_alloc(struct tf *tfp,
+			     enum tf_dir dir,
+			     struct tf_rm_hw_alloc *hw_alloc,
+			     struct tf_rm_entry *hw_entry)
+{
+	int rc;
+	struct tfp_send_msg_parms parms = { 0 };
+	struct tf_session_hw_resc_alloc_input req = { 0 };
+	struct tf_session_hw_resc_alloc_output resp = { 0 };
+	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+	memset(hw_entry, 0, sizeof(*hw_entry));
+
+	/* Populate the request */
+	req.fw_session_id =
+		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+	req.flags = tfp_cpu_to_le_16(dir);
+
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
+			   l2_ctx_tcam_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_FUNC, req,
+			   prof_func_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_PROF_TCAM, req,
+			   prof_tcam_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_PROF_ID, req,
+			   em_prof_id);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EM_REC, req,
+			   em_record_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
+			   wc_tcam_prof_id);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_WC_TCAM, req,
+			   wc_tcam_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_PROF, req,
+			   meter_profiles);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METER_INST, req,
+			   meter_inst);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_MIRROR, req,
+			   mirrors);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_UPAR, req,
+			   upar);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_SP_TCAM, req,
+			   sp_tcam_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_L2_FUNC, req,
+			   l2_func);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_FKB, req,
+			   flex_key_templ);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_TBL_SCOPE, req,
+			   tbl_scope);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH0, req,
+			   epoch0_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_EPOCH1, req,
+			   epoch1_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_METADATA, req,
+			   metadata);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_CT_STATE, req,
+			   ct_state);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_PROF, req,
+			   range_prof);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
+			   range_entries);
+	TF_HW_ALLOC_TO_REQ(hw_alloc, TF_RESC_TYPE_HW_LAG_ENTRY, req,
+			   lag_tbl_entries);
+
+	MSG_PREP(parms,
+		 TF_KONG_MB,
+		 HWRM_TF,
+		 HWRM_TFT_SESSION_HW_RESC_ALLOC,
+		 req,
+		 resp);
+
+	rc = tfp_send_msg_tunneled(tfp, &parms);
+	if (rc)
+		return rc;
+
+	/* Process the response */
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, resp,
+			    l2_ctx_tcam_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, resp,
+			    prof_func);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, resp,
+			    prof_tcam_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, resp,
+			    em_prof_id);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EM_REC, resp,
+			    em_record_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, resp,
+			    wc_tcam_prof_id);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, resp,
+			    wc_tcam_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_PROF, resp,
+			    meter_profiles);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METER_INST, resp,
+			    meter_inst);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_MIRROR, resp,
+			    mirrors);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_UPAR, resp,
+			    upar);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, resp,
+			    sp_tcam_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, resp,
+			    l2_func);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_FKB, resp,
+			    flex_key_templ);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, resp,
+			    tbl_scope);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH0, resp,
+			    epoch0_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_EPOCH1, resp,
+			    epoch1_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_METADATA, resp,
+			    metadata);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_CT_STATE, resp,
+			    ct_state);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, resp,
+			    range_prof);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, resp,
+			    range_entries);
+	TF_HW_RESP_TO_ALLOC(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, resp,
+			    lag_tbl_entries);
+
+	return tfp_le_to_cpu_32(parms.tf_resp_code);
+}
+
+/**
+ * Sends session HW resource free request to TF Firmware
+ *
+ * Converts the start/stride of every resource in hw_entry into the
+ * tunneled FREE request, sends it, then zeroes hw_entry.  Note the
+ * memset happens BEFORE the request is built, so the values sent are
+ * read from the caller's entries only via the macros below — TODO
+ * confirm the memset-before-use ordering is intentional (it zeroes
+ * hw_entry[0] only, via sizeof(*hw_entry), not the whole array).
+ *
+ * Returns a tfp_send_msg_tunneled() error code on transport failure,
+ * otherwise the firmware response code (0 on success).
+ */
+int
+tf_msg_session_hw_resc_free(struct tf *tfp,
+			    enum tf_dir dir,
+			    struct tf_rm_entry *hw_entry)
+{
+	int rc;
+	struct tfp_send_msg_parms parms = { 0 };
+	struct tf_session_hw_resc_free_input req = { 0 };
+	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+	memset(hw_entry, 0, sizeof(*hw_entry));
+
+	/* Populate the request */
+	req.fw_session_id =
+		tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+	req.flags = tfp_cpu_to_le_16(dir);
+
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_CTXT_TCAM, req,
+			  l2_ctx_tcam_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_FUNC, req,
+			  prof_func);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_PROF_TCAM, req,
+			  prof_tcam_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_PROF_ID, req,
+			  em_prof_id);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EM_REC, req,
+			  em_record_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM_PROF_ID, req,
+			  wc_tcam_prof_id);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_WC_TCAM, req,
+			  wc_tcam_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_PROF, req,
+			  meter_profiles);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METER_INST, req,
+			  meter_inst);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_MIRROR, req,
+			  mirrors);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_UPAR, req,
+			  upar);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_SP_TCAM, req,
+			  sp_tcam_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_L2_FUNC, req,
+			  l2_func);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_FKB, req,
+			  flex_key_templ);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_TBL_SCOPE, req,
+			  tbl_scope);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH0, req,
+			  epoch0_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_EPOCH1, req,
+			  epoch1_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_METADATA, req,
+			  metadata);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_CT_STATE, req,
+			  ct_state);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_PROF, req,
+			  range_prof);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_RANGE_ENTRY, req,
+			  range_entries);
+	TF_HW_FREE_TO_REQ(hw_entry, TF_RESC_TYPE_HW_LAG_ENTRY, req,
+			  lag_tbl_entries);
+
+	MSG_PREP_NO_RESP(parms,
+			 TF_KONG_MB,
+			 HWRM_TF,
+			 HWRM_TFT_SESSION_HW_RESC_FREE,
+			 req);
+
+	rc = tfp_send_msg_tunneled(tfp, &parms);
+	if (rc)
+		return rc;
+
+	return tfp_le_to_cpu_32(parms.tf_resp_code);
+}