SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_msg.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/rand.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_rm.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_tbl.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tfp.c
#
} tf_session_sram_resc_flush_input_t, *ptf_session_sram_resc_flush_input_t;
BUILD_BUG_ON(sizeof(tf_session_sram_resc_flush_input_t) <= TF_MAX_REQ_SIZE);
+/* Input params for table type set */
+typedef struct tf_tbl_type_set_input {
+ /* Session Id */
+ uint32_t fw_session_id;
+ /* flags */
+ uint16_t flags;
+ /* When set to 0, indicates the set applies to RX */
+#define TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX (0x0)
+ /* When set to 1, indicates the set applies to TX */
+#define TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX (0x1)
+ /* Type of the object to set */
+ uint32_t type;
+ /* Size of the data to set in bytes */
+ uint16_t size;
+ /* Data to set */
+ uint8_t data[TF_BULK_SEND];
+ /* Index to set */
+ uint32_t index;
+} tf_tbl_type_set_input_t, *ptf_tbl_type_set_input_t;
+BUILD_BUG_ON(sizeof(tf_tbl_type_set_input_t) <= TF_MAX_REQ_SIZE);
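
/*
 * Illustrative sketch (hypothetical snippet, assumes the tfp.h helpers
 * are in scope): selecting the TX direction in the flags word of a set
 * request; the message layer fills in the remaining fields.
 */
static inline void
example_tbl_type_set_dir_tx(tf_tbl_type_set_input_t *req)
{
	/* Direction is carried in the low bit of the flags field */
	req->flags = tfp_cpu_to_le_16(TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX);
}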
+
/* Input params for table type get */
typedef struct tf_tbl_type_get_input {
/* Session Id */
#include "tf_core.h"
#include "tf_session.h"
+#include "tf_tbl.h"
#include "tf_rm.h"
#include "tf_msg.h"
#include "tfp.h"
/* Setup hash seeds */
tf_seeds_init(session);
+ /* Initialize external pool data structures */
+ tf_init_tbl_pool(session);
+
session->ref_count++;
/* Return session ID */
int tf_free_identifier(struct tf *tfp,
struct tf_free_identifier_parms *parms);
+/**
+ * @page dram_table DRAM Table Scope Interface
+ *
+ * @ref tf_alloc_tbl_scope
+ *
+ * @ref tf_free_tbl_scope
+ *
+ * If we allocate the EEM memory from the core, we need to store it in
+ * the shared session data structure to make sure it can be freed later
+ * (for example, if the PF goes away).
+ *
+ * The current approach is that this memory is allocated within the core.
+ */
+
+
+/** tf_alloc_tbl_scope_parms definition
+ */
+struct tf_alloc_tbl_scope_parms {
+ /**
+ * [in] Maximum key size required.
+ */
+ uint16_t rx_max_key_sz_in_bits;
+ /**
+ * [in] Maximum Action size required (includes inlined items)
+ */
+ uint16_t rx_max_action_entry_sz_in_bits;
+ /**
+ * [in] Memory size in Megabytes
+ * Total memory size allocated by user to be divided
+ * up for actions, hash, counters. Only inline external actions.
+ * Use this variable or the number of flows, do not set both.
+ */
+ uint32_t rx_mem_size_in_mb;
+ /**
+ * [in] Number of flows * 1000. If set, rx_mem_size_in_mb must equal 0.
+ */
+ uint32_t rx_num_flows_in_k;
+ /**
+ * [in] SR2 only receive table access interface id
+ */
+ uint32_t rx_tbl_if_id;
+ /**
+ * [in] Maximum key size required.
+ */
+ uint16_t tx_max_key_sz_in_bits;
+ /**
+ * [in] Maximum Action size required (includes inlined items)
+ */
+ uint16_t tx_max_action_entry_sz_in_bits;
+ /**
+ * [in] Memory size in Megabytes
+ * Total memory size allocated by user to be divided
+ * up for actions, hash, counters. Only inline external actions.
+ * Use this variable or the number of flows, do not set both.
+ */
+ uint32_t tx_mem_size_in_mb;
+ /**
+ * [in] Number of flows * 1000. If set, tx_mem_size_in_mb must equal 0.
+ */
+ uint32_t tx_num_flows_in_k;
+ /**
+ * [in] SR2 only transmit table access interface id
+ */
+ uint32_t tx_tbl_if_id;
+ /**
+ * [out] table scope identifier
+ */
+ uint32_t tbl_scope_id;
+};
+
+struct tf_free_tbl_scope_parms {
+ /**
+ * [in] table scope identifier
+ */
+ uint32_t tbl_scope_id;
+};
+
+/**
+ * allocate a table scope
+ *
+ * On SR2, firmware will allocate a scope ID. On other devices, the scope
+ * is a software construct to identify an EEM table. This function will
+ * divide the hash memory/buckets and records according to the device
+ * constraints, based upon calculations using either the number of flows
+ * requested or the size of memory indicated. Other parameters passed in
+ * determine the configuration (maximum key size, maximum external action
+ * record size).
+ *
+ * This API will allocate the table region in
+ * DRAM, program the PTU page table entries, and program the number of static
+ * buckets (if SR2) in the RX and TX CFAs. Buckets are assumed to start at
+ * 0 in the EM memory for the scope. Upon successful completion of this API,
+ * hash tables are fully initialized and ready for entries to be inserted.
+ *
+ * A single API is used to allocate a common table scope identifier in both
+ * the receive and transmit CFA. The scope identifier is common due to the
+ * nature of connection tracking, which sends notifications between the RX
+ * and TX directions.
+ *
+ * The receive and transmit table access identifiers specify which rings will
+ * be used to initialize table DRAM. The application must ensure mutual
+ * exclusivity of ring usage for table scope allocation and any table update
+ * operations.
+ *
+ * The hash table buckets, EM keys, and EM lookup results are stored in the
+ * memory allocated based on the rx_mem_size_in_mb/tx_mem_size_in_mb (or
+ * the corresponding flow count) parameters. The hash table buckets are
+ * stored at the beginning of that memory.
+ *
+ * NOTES: No EM internal setup is done here. On-chip EM records are managed
+ * internally by the TruFlow core.
+ *
+ * Returns success or failure code.
+ */
+int tf_alloc_tbl_scope(struct tf *tfp,
+ struct tf_alloc_tbl_scope_parms *parms);
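
/*
 * Illustrative sketch (hypothetical caller, placeholder sizes): sizing a
 * table scope by flow count rather than by memory size. Both directions
 * use the same key/action limits here.
 */
static inline int
example_alloc_tbl_scope(struct tf *tfp, uint32_t *tbl_scope_id)
{
	struct tf_alloc_tbl_scope_parms parms = { 0 };
	int rc;

	parms.rx_max_key_sz_in_bits = 448;
	parms.rx_max_action_entry_sz_in_bits = 256;
	parms.rx_num_flows_in_k = 512;	/* 512K flows; rx_mem_size_in_mb stays 0 */
	parms.tx_max_key_sz_in_bits = 448;
	parms.tx_max_action_entry_sz_in_bits = 256;
	parms.tx_num_flows_in_k = 512;	/* likewise tx_mem_size_in_mb stays 0 */

	rc = tf_alloc_tbl_scope(tfp, &parms);
	if (rc)
		return rc;

	*tbl_scope_id = parms.tbl_scope_id;
	return 0;
}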
+
+
+/**
+ * free a table scope
+ *
+ * Firmware checks that the table scope ID is owned by the TruFlow
+ * session, verifies that no references to this table scope remain
+ * (SR2 ILT or Profile TCAM entries) in either CFA (RX/TX) direction,
+ * and then frees the table scope ID.
+ *
+ * Returns success or failure code.
+ */
+int tf_free_tbl_scope(struct tf *tfp,
+ struct tf_free_tbl_scope_parms *parms);
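
/*
 * Illustrative sketch (hypothetical caller): releasing the scope once no
 * flows reference it.
 */
static inline int
example_free_tbl_scope(struct tf *tfp, uint32_t tbl_scope_id)
{
	struct tf_free_tbl_scope_parms parms = { 0 };

	parms.tbl_scope_id = tbl_scope_id;
	return tf_free_tbl_scope(tfp, &parms);
}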
+
/**
* TCAM table type
*/
return tfp_le_to_cpu_32(parms.tf_resp_code);
}
+int
+tf_msg_set_tbl_entry(struct tf *tfp,
+ enum tf_dir dir,
+ enum tf_tbl_type type,
+ uint16_t size,
+ uint8_t *data,
+ uint32_t index)
+{
+ int rc;
+ struct tfp_send_msg_parms parms = { 0 };
+ struct tf_tbl_type_set_input req = { 0 };
+ struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+ /* Populate the request */
+ req.fw_session_id =
+ tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+ req.flags = tfp_cpu_to_le_16(dir);
+ req.type = tfp_cpu_to_le_32(type);
+ req.size = tfp_cpu_to_le_16(size);
+ req.index = tfp_cpu_to_le_32(index);
+
+ tfp_memcpy(&req.data,
+ data,
+ size);
+
+ MSG_PREP_NO_RESP(parms,
+ TF_KONG_MB,
+ HWRM_TF,
+ HWRM_TFT_TBL_TYPE_SET,
+ req);
+
+ rc = tfp_send_msg_tunneled(tfp, &parms);
+ if (rc)
+ return rc;
+
+ return tfp_le_to_cpu_32(parms.tf_resp_code);
+}
+
+int
+tf_msg_get_tbl_entry(struct tf *tfp,
+ enum tf_dir dir,
+ enum tf_tbl_type type,
+ uint16_t size,
+ uint8_t *data,
+ uint32_t index)
+{
+ int rc;
+ struct tfp_send_msg_parms parms = { 0 };
+ struct tf_tbl_type_get_input req = { 0 };
+ struct tf_tbl_type_get_output resp = { 0 };
+ struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+ /* Populate the request */
+ req.fw_session_id =
+ tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
+ req.flags = tfp_cpu_to_le_16(dir);
+ req.type = tfp_cpu_to_le_32(type);
+ req.index = tfp_cpu_to_le_32(index);
+
+ MSG_PREP(parms,
+ TF_KONG_MB,
+ HWRM_TF,
+ HWRM_TFT_TBL_TYPE_GET,
+ req,
+ resp);
+
+ rc = tfp_send_msg_tunneled(tfp, &parms);
+ if (rc)
+ return rc;
+
+	/* Verify that the response holds at least the requested data */
+	if (resp.size < size)
+		return -EINVAL;
+
+	/* Copy only the requested amount into the caller's buffer */
+	tfp_memcpy(data,
+		   &resp.data,
+		   size);
+
+ return tfp_le_to_cpu_32(parms.tf_resp_code);
+}
+
#define TF_BYTES_PER_SLICE(tfp) 12
#define NUM_SLICES(tfp, bytes) \
(((bytes) + TF_BYTES_PER_SLICE(tfp) - 1) / TF_BYTES_PER_SLICE(tfp))
#ifndef _TF_MSG_H_
#define _TF_MSG_H_
+#include "tf_tbl.h"
#include "tf_rm.h"
struct tf;
int tf_msg_tcam_entry_free(struct tf *tfp,
struct tf_free_tcam_entry_parms *parms);
+/**
+ * Sends a Set message of a Table Type element to the firmware.
+ *
+ * [in] tfp
+ * Pointer to session handle
+ *
+ * [in] dir
+ * Direction location of the element to set
+ *
+ * [in] type
+ * Type of the object to set
+ *
+ * [in] size
+ * Size of the data to set in bytes
+ *
+ * [in] data
+ * Data to set
+ *
+ * [in] index
+ * Index to set
+ *
+ * Returns:
+ * 0 - Success
+ */
+int tf_msg_set_tbl_entry(struct tf *tfp,
+ enum tf_dir dir,
+ enum tf_tbl_type type,
+ uint16_t size,
+ uint8_t *data,
+ uint32_t index);
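
/*
 * Illustrative sketch (hypothetical wrapper, assumes the tfp.h helpers
 * are visible here): writing a 64-bit value to a table entry in the TX
 * direction; type would be one of the enum tf_tbl_type values.
 */
static inline int
example_set_tbl_entry_u64(struct tf *tfp,
			  enum tf_tbl_type type,
			  uint32_t index,
			  uint64_t value)
{
	uint8_t data[sizeof(value)];

	/* The message layer takes the entry as a raw byte buffer */
	tfp_memcpy(data, &value, sizeof(value));

	return tf_msg_set_tbl_entry(tfp, TF_DIR_TX, type,
				    sizeof(value), data, index);
}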
+
+/**
+ * Sends a Get message of a Table Type element to the firmware.
+ *
+ * [in] tfp
+ * Pointer to session handle
+ *
+ * [in] dir
+ * Direction location of the element to get
+ *
+ * [in] type
+ * Type of the object to get
+ *
+ * [in] size
+ * Size of the data to read, in bytes
+ *
+ * [out] data
+ * Buffer that receives the data read
+ *
+ * [in] index
+ * Index to get
+ *
+ * Returns:
+ * 0 - Success
+ */
+int tf_msg_get_tbl_entry(struct tf *tfp,
+ enum tf_dir dir,
+ enum tf_tbl_type type,
+ uint16_t size,
+ uint8_t *data,
+ uint32_t index);
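
/*
 * Illustrative sketch (hypothetical wrapper, mirrors the set example
 * above): reading a 64-bit table entry back from the firmware.
 */
static inline int
example_get_tbl_entry_u64(struct tf *tfp,
			  enum tf_tbl_type type,
			  uint32_t index,
			  uint64_t *value)
{
	uint8_t data[sizeof(*value)] = { 0 };
	int rc;

	rc = tf_msg_get_tbl_entry(tfp, TF_DIR_TX, type,
				  sizeof(*value), data, index);
	if (rc)
		return rc;

	tfp_memcpy(value, data, sizeof(*value));
	return 0;
}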
+
#endif /* _TF_MSG_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+/* TruFlow Table APIs and supporting code */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include <math.h>
+#include <sys/param.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include "hsi_struct_def_dpdk.h"
+
+#include "tf_core.h"
+#include "tf_session.h"
+#include "tf_msg.h"
+#include "tfp.h"
+#include "hwrm_tf.h"
+#include "bnxt.h"
+#include "tf_resources.h"
+#include "tf_rm.h"
+
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+/* Number of pointers per page_size */
+#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
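
/*
 * Worked example: with 4 KiB pages and 8-byte pointers (64-bit target),
 * MAX_PAGE_PTRS(4096) == 4096 / sizeof(void *) == 512, i.e. one
 * page-directory page can reference 512 lower-level pages.
 */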
+
+/* API defined in tf_tbl.h */
+void
+tf_init_tbl_pool(struct tf_session *session)
+{
+ enum tf_dir dir;
+
+ for (dir = 0; dir < TF_DIR_MAX; dir++) {
+ session->ext_pool_2_scope[dir][TF_EXT_POOL_0] =
+ TF_TBL_SCOPE_INVALID;
+ }
+}
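
/*
 * Illustrative sketch (hypothetical helper): after tf_init_tbl_pool()
 * every external pool maps to TF_TBL_SCOPE_INVALID, so a caller can
 * test whether a table scope has been attached for a given direction.
 */
static bool
example_ext_pool_has_scope(struct tf_session *session, enum tf_dir dir)
{
	return session->ext_pool_2_scope[dir][TF_EXT_POOL_0] !=
	       TF_TBL_SCOPE_INVALID;
}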