#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>
+#include <rte_spinlock.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "ulp_mapper.h"
#include "ulp_port_db.h"
#include "ulp_tun.h"
+#include "ulp_ha_mgr.h"
+#include "bnxt_tf_pmd_shim.h"
/* Linked list of all TF sessions. */
STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
/* Mutex to synchronize bnxt_ulp_session_list operations. */
static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+/* Spin lock to protect context global list */
+rte_spinlock_t bnxt_ulp_ctxt_lock;
+TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
+static struct cntx_list_entry_list ulp_cntx_list =
+ TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
+
+/* Static function declarations */
+static int32_t bnxt_ulp_cntxt_list_init(void);
+static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
+static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
+
/*
* Allow the deletion of context only for the bnxt device that
* created the session.
enum bnxt_ulp_device_id *ulp_dev_id)
{
if (BNXT_CHIP_P5(bp)) {
- /* TBD: needs to accommodate even SR2 */
*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
return 0;
}
return 0;
}
+/*
+ * Return the application capability table.
+ * num_entries [out] - set to the table size; must be non-NULL.
+ * Returns the table pointer, or NULL on bad input.
+ */
+struct bnxt_ulp_app_capabilities_info *
+bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
+{
+ if (!num_entries)
+ return NULL;
+ *num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
+ return ulp_app_cap_info_list;
+}
+
+/*
+ * Return the app (shared-session) resource reservation table.
+ * num_entries [out] - set to the table size; must be non-NULL.
+ * Returns the table pointer, or NULL on bad input.
+ */
+static struct bnxt_ulp_resource_resv_info *
+bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
+{
+ if (num_entries == NULL)
+ return NULL;
+ *num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
+ return ulp_app_resource_resv_list;
+}
+
+/*
+ * Return the regular-session resource reservation table.
+ * num_entries [out] - set to the table size; must be non-NULL.
+ * Returns the table pointer, or NULL on bad input.
+ */
+struct bnxt_ulp_resource_resv_info *
+bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
+{
+ if (!num_entries)
+ return NULL;
+ *num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
+ return ulp_resource_resv_list;
+}
+
+/*
+ * Return the app global (named) resource info table.
+ * num_entries [out] - set to the table size; must be non-NULL.
+ * Returns the table pointer, or NULL on bad input.
+ */
+struct bnxt_ulp_glb_resource_info *
+bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
+{
+ if (!num_entries)
+ return NULL;
+ *num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
+ return ulp_app_glb_resource_tbl;
+}
+
static int32_t
-bnxt_ulp_tf_session_resources_get(struct bnxt *bp,
- struct tf_session_resources *res)
+bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
+ struct bnxt_ulp_glb_resource_info *info,
+ uint32_t num,
+ struct tf_session_resources *res)
{
- uint32_t dev_id;
+ uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
+ enum tf_dir dir;
+ uint8_t app_id;
+ int32_t rc = 0;
+
+ if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
+ BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (dev_id != info[i].device_id || app_id != info[i].app_id)
+ continue;
+ dir = info[i].direction;
+ res_type = info[i].resource_type;
+
+ switch (info[i].resource_func) {
+ case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
+ res->ident_cnt[dir].cnt[res_type]++;
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
+ res->tbl_cnt[dir].cnt[res_type]++;
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
+ res->tcam_cnt[dir].cnt[res_type]++;
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
+ res->em_cnt[dir].cnt[res_type]++;
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n,",
+ info[i].resource_func);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Set unnamed resource counts in the session resource request from the
+ * reservation table, for entries matching this context's app/dev id.
+ * Unlike the named calc this assigns absolute counts (info[i].count)
+ * rather than incrementing.
+ * Returns 0 on success, -EINVAL on bad input or app/dev id lookup failure.
+ */
+static int32_t
+bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
+ struct bnxt_ulp_resource_resv_info *info,
+ uint32_t num,
+ struct tf_session_resources *res)
+{
+ uint32_t dev_id, res_type, i;
+ enum tf_dir dir;
+ uint8_t app_id;
+ int32_t rc = 0;
+
+ if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
+ BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (app_id != info[i].app_id || dev_id != info[i].device_id)
+ continue;
+ dir = info[i].direction;
+ res_type = info[i].resource_type;
+
+ switch (info[i].resource_func) {
+ case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
+ res->ident_cnt[dir].cnt[res_type] = info[i].count;
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
+ res->tbl_cnt[dir].cnt[res_type] = info[i].count;
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
+ res->tcam_cnt[dir].cnt[res_type] = info[i].count;
+ break;
+ case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
+ res->em_cnt[dir].cnt[res_type] = info[i].count;
+ break;
+ default:
+ /* NOTE(review): unknown funcs are silently skipped here
+ * but logged in the named calc — confirm intent.
+ */
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Compute the TF session resource request for a regular (non-shared)
+ * session, using only the unnamed resource reservation table.
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int32_t
+bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
+ struct tf_session_resources *res)
+{
+ struct bnxt_ulp_resource_resv_info *unnamed = NULL;
+ uint32_t unum;
+ int32_t rc = 0;
+
+ if (ulp_ctx == NULL || res == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
+ return -EINVAL;
+ }
+
+ unnamed = bnxt_ulp_resource_resv_list_get(&unum);
+ if (unnamed == NULL) {
+ BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
+
+ return rc;
+}
+
+/*
+ * Compute the TF session resource request for the shared session.
+ * The result is the sum of the app unnamed reservation counts (baseline)
+ * and the app named/global resource counts; res is zeroed first.
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int32_t
+bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
+ struct tf_session_resources *res)
+{
+ struct bnxt_ulp_resource_resv_info *unnamed;
+ struct bnxt_ulp_glb_resource_info *named;
+ uint32_t unum, nnum;
int32_t rc;
- uint16_t *tmp_cnt;
- rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
+ if (ulp_ctx == NULL || res == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
+ return -EINVAL;
+ }
+
+ /* Make sure the resources are zero before accumulating. */
+ memset(res, 0, sizeof(struct tf_session_resources));
+
+ /*
+ * Shared resources are comprised of both named and unnamed resources.
+ * First get the unnamed counts, and then add the named to the result.
+ */
+ /* Get the baseline counts */
+ unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
+ if (unnamed == NULL) {
+ BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
+ return -EINVAL;
+ }
+ rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
if (rc) {
- BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
+ BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");
return -EINVAL;
}
- switch (dev_id) {
- case BNXT_ULP_DEVICE_ID_WH_PLUS:
- /** RX **/
- /* Identifiers */
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
-
- /* Table Types */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
-
- /* ENCAP */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
-
- /* TCAMs */
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
- 422;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
- 6;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;
-
- /* EM */
- res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13168;
-
- /* EEM */
- res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
-
- /* SP */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 255;
-
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 1;
-
- /** TX **/
- /* Identifiers */
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 148;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
-
- /* Table Types */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
-
- /* ENCAP */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
-
- /* TCAMs */
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
- 292;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
- 144;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
-
- /* EM */
- res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
-
- /* EEM */
- res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
-
- /* SP */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 511;
-
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 1;
+ /* Get the named list and add the totals */
+ named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
+ if (named == NULL) {
+ BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
+ return -EINVAL;
+ }
+ rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
+ return rc;
+}
+
+/*
+ * Translate the (app_id, dev_id) capability table entry into ulp_flags
+ * (shared session, hot upgrade/HA, unicast-only). Marks the context
+ * unsupported and fails if no matching entry exists.
+ * Returns 0 on success, -EINVAL on bad context or unsupported app/dev.
+ */
+int32_t
+bnxt_ulp_cntxt_app_caps_init(struct bnxt_ulp_context *ulp_ctx,
+ uint8_t app_id, uint32_t dev_id)
+{
+ struct bnxt_ulp_app_capabilities_info *info;
+ uint32_t num = 0;
+ uint16_t i;
+ bool found = false;
+
+ /* Guard added: cfg_data is dereferenced below. */
+ if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
+ return -EINVAL;
+ }
+
+ if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
+ BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
+ app_id, dev_id);
+ return -EINVAL;
+ }
+
+ info = bnxt_ulp_app_cap_list_get(&num);
+ if (!info || !num) {
+ BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (info[i].app_id != app_id || info[i].device_id != dev_id)
+ continue;
+ found = true;
+ if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_SHARED_SESSION_ENABLED;
+ if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_HIGH_AVAIL_ENABLED;
+ if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_APP_UNICAST_ONLY;
+ }
+ if (!found) {
+ BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
+ app_id, dev_id);
+ ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Close the shared TF session if shared sessions are enabled.
+ * Clears both the context's shared tfp and the session-state global
+ * shared session pointer. Best effort: failures are only logged.
+ */
+static void
+ulp_ctx_shared_session_close(struct bnxt *bp,
+ struct bnxt_ulp_session_state *session)
+{
+ struct tf *tfp;
+ int32_t rc;
+
+ if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
+ return;
+
+ tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
+ if (!tfp) {
+ /*
+ * Log it under debug since this is likely a case of the
+ * shared session not being created. For example, a failed
+ * initialization.
+ */
+ BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");
+ return;
+ }
+ rc = tf_close_session(tfp);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
+ rc);
+ /* Also decrements the shared client count (see tfp_set). */
+ (void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
+
+ session->g_shared_tfp.session = NULL;
+}
+
+/*
+ * Open (or attach to) the shared TF session for this port. No-op when
+ * shared sessions are disabled. Builds a "-tf_shared" control channel
+ * name (plus "-wc_tcam" under HA), computes the shared resource request,
+ * opens the session and publishes it in the ulp context and the
+ * session-state global. Returns 0 on success, negative errno on failure.
+ */
+static int32_t
+ulp_ctx_shared_session_open(struct bnxt *bp,
+ struct bnxt_ulp_session_state *session)
+{
+ struct rte_eth_dev *ethdev = bp->eth_dev;
+ struct tf_session_resources *resources;
+ struct tf_open_session_parms parms;
+ size_t copy_nbytes;
+ uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
+ int32_t rc = 0;
+
+ /* only perform this if shared session is enabled. */
+ if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
+ return 0;
+
+ memset(&parms, 0, sizeof(parms));
+
+ rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
+ parms.ctrl_chan_name);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
+ ethdev->data->port_id, rc);
+ return rc;
+ }
+ resources = &parms.resources;
+
+ /*
+ * Need to account for size of ctrl_chan_name and 1 extra for Null
+ * terminator
+ */
+ copy_nbytes = sizeof(parms.ctrl_chan_name) -
+ strlen(parms.ctrl_chan_name) - 1;
+
+ /*
+ * Build the ctrl_chan_name with shared token.
+ * When HA is enabled, the WC TCAM needs extra management by the core,
+ * so add the wc_tcam string to the control channel.
+ */
+ if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
+ strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
+ copy_nbytes);
+ else
+ strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
+
+ rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
+ if (rc)
+ return rc;
+
+ rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
+ return rc;
+ }
+
+ switch (ulp_dev_id) {
+ case BNXT_ULP_DEVICE_ID_WH_PLUS:
+ parms.device_type = TF_DEVICE_TYPE_WH;
break;
case BNXT_ULP_DEVICE_ID_STINGRAY:
- /** RX **/
- /* Identifiers */
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 315;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
-
- /* Table Types */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
-
- /* ENCAP */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
-
- /* TCAMs */
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
- 315;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
- 6;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 112;
-
- /* EM */
- res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13200;
-
- /* EEM */
- res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
-
- /* SP */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 256;
-
- /** TX **/
- /* Identifiers */
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 127;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
-
- /* Table Types */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
-
- /* ENCAP */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 367;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
-
- /* TCAMs */
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
- 292;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
- 127;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
-
- /* EM */
- res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
-
- /* EEM */
- res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
-
- /* SP */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 512;
+ parms.device_type = TF_DEVICE_TYPE_SR;
break;
case BNXT_ULP_DEVICE_ID_THOR:
- /** RX **/
- /* Identifiers */
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 26;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 32;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 32;
- res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 32;
-
- /* Table Types */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 1024;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 512;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 14;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_EM_FKB] = 32;
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_WC_FKB] = 32;
-
- /* ENCAP */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 64;
-
- /* TCAMs */
- tmp_cnt = &res->tcam_cnt[TF_DIR_RX].cnt[0];
- tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = 300;
- tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = 6;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 128;
- res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 112;
-
- /* EM */
- res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13200;
-
- /* SP */
- res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 64;
-
- /** TX **/
- /* Identifiers */
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 26;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 26;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 32;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 63;
- res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 32;
-
- /* Table Types */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 1024;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 512;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 14;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_EM_FKB] = 32;
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_WC_FKB] = 32;
-
- /* ENCAP */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 64;
-
- /* TCAMs */
- tmp_cnt = &res->tcam_cnt[TF_DIR_TX].cnt[0];
-
- tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = 200;
- tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = 110;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 128;
- res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 128;
-
- /* EM */
- res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
-
- /* SP */
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 100;
-
- res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 1;
-
+ parms.device_type = TF_DEVICE_TYPE_THOR;
break;
default:
- return -EINVAL;
+ BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");
+ /* Fixed: rc is 0 here, so 'return rc' reported false success. */
+ return -EINVAL;
}
- return 0;
+ parms.shadow_copy = true;
+ parms.bp = bp;
+
+ /*
+ * Open the session here, but the collect the resources during the
+ * mapper initialization.
+ */
+ rc = tf_open_session(&bp->tfp_shared, &parms);
+ if (rc)
+ return rc;
+
+ if (parms.shared_session_creator)
+ BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
+ else
+ BNXT_TF_DBG(DEBUG, "Shared session attached.\n");
+
+ /* Save the shared session in global data */
+ if (!session->g_shared_tfp.session)
+ session->g_shared_tfp.session = bp->tfp_shared.session;
+
+ rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
+
+ return rc;
+}
+
+/*
+ * Attach a secondary port to an already-created shared session by
+ * copying the global session pointer and re-opening (attaching) via
+ * ulp_ctx_shared_session_open. Returns 0 when shared sessions are
+ * disabled or on successful attach.
+ */
+static int32_t
+ulp_ctx_shared_session_attach(struct bnxt *bp,
+ struct bnxt_ulp_session_state *session)
+{
+ int32_t rc = 0;
+
+ /* Simply return success if shared session not enabled */
+ if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
+ bp->tfp_shared.session = session->g_shared_tfp.session;
+ rc = ulp_ctx_shared_session_open(bp, session);
+ }
+
+ return rc;
+}
+
+/*
+ * Detach this port from the shared session: close the local client
+ * handle and clear the cached session pointer. No-op when shared
+ * sessions are disabled or no session is attached.
+ */
+static void
+ulp_ctx_shared_session_detach(struct bnxt *bp)
+{
+ if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
+ if (bp->tfp_shared.session) {
+ tf_close_session(&bp->tfp_shared);
+ bp->tfp_shared.session = NULL;
+ }
+ }
+}
/*
int32_t rc = 0;
struct tf_open_session_parms params;
struct tf_session_resources *resources;
- uint32_t ulp_dev_id;
+ uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
memset(¶ms, 0, sizeof(params));
params.device_type = TF_DEVICE_TYPE_THOR;
break;
default:
- BNXT_TF_DBG(ERR, "Unable to determine device for "
- "opening session.\n");
+ BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
return rc;
}
resources = ¶ms.resources;
- rc = bnxt_ulp_tf_session_resources_get(bp, resources);
- if (rc) {
- BNXT_TF_DBG(ERR, "Unable to determine tf resources for "
- "session open.\n");
+ rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
+ if (rc)
return rc;
- }
params.bp = bp;
rc = tf_open_session(&bp->tfp, ¶ms);
if (!ulp_ctx || !ulp_ctx->cfg_data)
return -EINVAL;
- tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
+ tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
if (!tfp) {
BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
return -EINVAL;
/* close the tf session */
ulp_ctx_session_close(bp, session);
+ /* The shared session must be closed last. */
+ ulp_ctx_shared_session_close(bp, session);
+
/* Free the contents */
if (session->cfg_data) {
rte_free(session->cfg_data);
int32_t rc = 0;
enum bnxt_ulp_device_id devid;
+ /* Initialize the context entries list */
+ bnxt_ulp_cntxt_list_init();
+
+ /* Add the context to the context entries list */
+ rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
+ return -ENOMEM;
+ }
+
/* Allocate memory to hold ulp context data. */
ulp_data = rte_zmalloc("bnxt_ulp_data",
sizeof(struct bnxt_ulp_data), 0);
goto error_deinit;
}
+ rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
+ goto error_deinit;
+ }
+
+ rc = bnxt_ulp_cntxt_app_caps_init(bp->ulp_ctx, bp->app_id, devid);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
+ bp->app_id, devid);
+ goto error_deinit;
+ }
+
+ /*
+ * Shared session must be created before first regular session but after
+ * the ulp_ctx is valid.
+ */
+ rc = ulp_ctx_shared_session_open(bp, session);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);
+ goto error_deinit;
+ }
+
/* Open the ulp session. */
rc = ulp_ctx_session_open(bp, session);
if (rc)
ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
{
struct bnxt_ulp_device_params *dparms;
- uint32_t dev_id;
+ uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
if (!bp->max_num_kflows) {
/* Defaults to Internal */
struct bnxt_ulp_session_state *session)
{
int32_t rc = 0;
+ uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
+ uint8_t app_id;
/* Increment the ulp context data reference count usage. */
bp->ulp_ctx->cfg_data = session->cfg_data;
/* update the session details in bnxt tfp */
bp->tfp.session = session->g_tfp->session;
+ /* Add the context to the context entries list */
+ rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The supported flag will be set during the init. Use it now to
+ * know if we should go through the attach.
+ */
+ rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+ return -EINVAL;
+ }
+
+ rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable do get the dev_id.\n");
+ return -EINVAL;
+ }
+
+ flags = bp->ulp_ctx->cfg_data->ulp_flags;
+ if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
+ BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
+ app_id, dev_id);
+ return -EINVAL;
+ }
+
/* Create a TF Client */
rc = ulp_ctx_session_open(bp, session);
if (rc) {
bnxt_ulp_deinit(struct bnxt *bp,
struct bnxt_ulp_session_state *session)
{
+ bool ha_enabled;
+
if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
return;
+ ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
+ if (ha_enabled && session->session_opened) {
+ int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
+ }
+
/* clean up default flows */
bnxt_ulp_destroy_df_rules(bp, true);
/* free the flow db lock */
pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
+ if (ha_enabled)
+ ulp_ha_mgr_deinit(bp->ulp_ctx);
+
/* Delete the ulp context and tf session and free the ulp context */
ulp_ctx_deinit(bp, session);
BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
goto jump_to_error;
}
+
+ if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx)) {
+ rc = ulp_ha_mgr_init(bp->ulp_ctx);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to initialize HA %d\n", rc);
+ goto jump_to_error;
+ }
+ rc = ulp_ha_mgr_open(bp->ulp_ctx);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to Process HA Open %d\n", rc);
+ goto jump_to_error;
+ }
+ }
BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
return rc;
{
struct bnxt_ulp_session_state *session;
bool initialized;
+ enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
+ uint32_t ulp_flags;
int32_t rc = 0;
- if (!bp || !BNXT_TRUFLOW_EN(bp))
- return rc;
-
if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
BNXT_TF_DBG(ERR,
"Skip ulp init for port: %d, not a TVF or PF\n",
- bp->eth_dev->data->port_id);
+ bp->eth_dev->data->port_id);
+ return rc;
+ }
+
+ if (!BNXT_TRUFLOW_EN(bp)) {
+ BNXT_TF_DBG(DEBUG,
+ "Skip ulp init for port: %d, truflow is not enabled\n",
+ bp->eth_dev->data->port_id);
return rc;
}
BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
goto jump_to_error;
}
+
+ /*
+ * Attach to the shared session, must be called after the
+ * ulp_ctx_attach in order to ensure that ulp data is available
+ * for attaching.
+ */
+ rc = ulp_ctx_shared_session_attach(bp, session);
+ if (rc) {
+ BNXT_TF_DBG(ERR,
+ "Failed attach to shared session (%d)", rc);
+ goto jump_to_error;
+ }
} else {
rc = bnxt_ulp_init(bp, session);
if (rc) {
goto jump_to_error;
}
- if (BNXT_ACCUM_STATS_EN(bp))
+ rc = bnxt_ulp_devid_get(bp, &devid);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
+ goto jump_to_error;
+ }
+
+ if (devid != BNXT_ULP_DEVICE_ID_THOR && BNXT_ACCUM_STATS_EN(bp))
bp->ulp_ctx->cfg_data->accum_stats = true;
- BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
- bp->eth_dev->data->port_id);
+ BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init, accum_stats:%d\n",
+ bp->eth_dev->data->port_id,
+ bp->ulp_ctx->cfg_data->accum_stats);
+
+ /* set the unicast mode */
+ if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
+ BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
+ goto jump_to_error;
+ }
+ if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
+ if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
+ BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
+ goto jump_to_error;
+ }
+ }
+
return rc;
jump_to_error:
struct rte_pci_device *pci_dev;
struct rte_pci_addr *pci_addr;
- if (!BNXT_TRUFLOW_EN(bp))
- return;
-
if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
BNXT_TF_DBG(ERR,
"Skip ULP deinit port:%d, not a TVF or PF\n",
return;
}
+ if (!BNXT_TRUFLOW_EN(bp)) {
+ BNXT_TF_DBG(DEBUG,
+ "Skip ULP deinit for port:%d, truflow is not enabled\n",
+ bp->eth_dev->data->port_id);
+ return;
+ }
+
if (!bp->ulp_ctx) {
BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
return;
BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
bp->eth_dev->data->port_id);
+ /* Free the ulp context in the context entry list */
+ bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
+
/* Get the session details */
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
/* close the session associated with this port */
ulp_ctx_detach(bp);
+
+ /* always detach/close shared after the session. */
+ ulp_ctx_shared_session_detach(bp);
} else {
/* Perform ulp ctx deinit */
bnxt_ulp_deinit(bp, session);
return ulp_ctx->cfg_data->mark_tbl;
}
+/* Return true if the shared session flag is set in the context.
+ * NOTE(review): unlike sibling accessors this does not NULL-check
+ * ulp_ctx/cfg_data — callers must pass a valid context; confirm.
+ */
+bool
+bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
+{
+ return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
+}
+
+/* Store the application id in the ulp context config data.
+ * Returns 0 on success, -EINVAL on a NULL context or config data.
+ */
+int32_t
+bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
+{
+ /* cfg_data guard added: it is dereferenced below. */
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return -EINVAL;
+ ulp_ctx->cfg_data->app_id = app_id;
+ return 0;
+}
+
+/* Fetch the application id from the ulp context config data.
+ * Returns 0 on success, -EINVAL on NULL context, config data or out arg.
+ */
+int32_t
+bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
+{
+ /* Default APP id is zero */
+ /* cfg_data guard added: it is dereferenced below. */
+ if (!ulp_ctx || !ulp_ctx->cfg_data || !app_id)
+ return -EINVAL;
+ *app_id = ulp_ctx->cfg_data->app_id;
+ return 0;
+}
+
/* Function to set the device id of the hardware. */
int32_t
bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
*dev_id = ulp_ctx->cfg_data->dev_id;
return 0;
}
-
+ *dev_id = BNXT_ULP_DEVICE_ID_LAST;
BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
return -EINVAL;
}
*mem_type = ulp_ctx->cfg_data->mem_type;
return 0;
}
+ *mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
return -EINVAL;
}
return -EINVAL;
}
+/* Function to set the shared tfp session details in the ulp context.
+ * Passing NULL clears the shared tfp and drops one shared-client
+ * reference; a non-NULL tfp adds one. Returns 0 or -EINVAL.
+ */
+int32_t
+bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
+{
+ /* cfg_data guard added: it is dereferenced below. */
+ if (!ulp || !ulp->cfg_data) {
+ BNXT_TF_DBG(ERR, "Invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if (tfp == NULL) {
+ if (ulp->cfg_data->num_shared_clients > 0)
+ ulp->cfg_data->num_shared_clients--;
+ } else {
+ ulp->cfg_data->num_shared_clients++;
+ }
+
+ ulp->g_shared_tfp = tfp;
+ return 0;
+}
+
+/* Function to get the shared tfp session details from the ulp context. */
+/* Returns the cached shared tfp, or NULL on a NULL context. */
+struct tf *
+bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
+{
+ if (!ulp) {
+ BNXT_TF_DBG(ERR, "Invalid arguments\n");
+ return NULL;
+ }
+ return ulp->g_shared_tfp;
+}
+
+/* Function to get the number of shared clients attached */
+/* Returns 0 both on error and when no clients are attached. */
+uint8_t
+bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
+{
+ if (ulp == NULL || ulp->cfg_data == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid arguments\n");
+ return 0;
+ }
+ return ulp->cfg_data->num_shared_clients;
+}
+
/* Function to set the tfp session details from the ulp context. */
int32_t
bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
/* Function to get the tfp session details from the ulp context. */
+/* 'shared' (non-zero) selects the shared-session tfp over the regular. */
struct tf *
-bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
+bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
+ enum bnxt_ulp_shared_session shared)
{
if (!ulp) {
BNXT_TF_DBG(ERR, "Invalid arguments\n");
return NULL;
}
- return ulp->g_tfp;
+ if (shared)
+ return ulp->g_shared_tfp;
+ else
+ return ulp->g_tfp;
}
/*
pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
}
+
+/* Function to set the ha info into the context */
+/* Stores the HA manager info pointer; returns 0 or -EINVAL. */
+int32_t
+bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
+ struct bnxt_ulp_ha_mgr_info *ulp_ha_info)
+{
+ if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
+ return -EINVAL;
+ }
+ ulp_ctx->cfg_data->ha_info = ulp_ha_info;
+ return 0;
+}
+
+/* Function to retrieve the ha info from the context. */
+/* Returns the stored HA info pointer, or NULL on an invalid context. */
+struct bnxt_ulp_ha_mgr_info *
+bnxt_ulp_cntxt_ptr2_ha_info_get(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
+ return NULL;
+ return ulp_ctx->cfg_data->ha_info;
+}
+
+/* Return true if high availability is enabled for this context;
+ * false on an invalid context.
+ */
+bool
+bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
+ return false;
+ return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
+}
+
+/* Initialize the global context-list spinlock. Always returns 0.
+ * NOTE(review): this is called on every ulp init; re-initializing the
+ * lock while another port holds it would corrupt the lock state —
+ * confirm callers serialize under bnxt_ulp_global_mutex.
+ */
+static int32_t
+bnxt_ulp_cntxt_list_init(void)
+{
+ /* Create the cntxt spin lock */
+ rte_spinlock_init(&bnxt_ulp_ctxt_lock);
+
+ return 0;
+}
+
+/* Append a ulp context to the global context list.
+ * The entry is allocated outside the lock; the list is mutated under
+ * the context spinlock. Returns 0 on success, -ENOMEM on allocation
+ * failure.
+ */
+static int32_t
+bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
+{
+ struct ulp_context_list_entry *entry;
+
+ entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
+ if (entry == NULL) {
+ BNXT_TF_DBG(ERR, "unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
+ entry->ulp_ctx = ulp_ctx;
+ TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
+ rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
+ return 0;
+}
+
+/* Remove (and free) the list entry for the given ulp context, if any.
+ * Uses the _SAFE iterator because the matched entry is freed while
+ * walking the list.
+ */
+static void
+bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
+{
+ struct ulp_context_list_entry *entry, *temp;
+
+ rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
+ TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
+ if (entry->ulp_ctx == ulp_ctx) {
+ TAILQ_REMOVE(&ulp_cntx_list, entry, next);
+ rte_free(entry);
+ break;
+ }
+ }
+ rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
+}
+
+/*
+ * Acquire the first available ulp context from the global list.
+ * On success the context spinlock is intentionally held; the caller
+ * must release it via bnxt_ulp_cntxt_entry_release(). Returns NULL
+ * (with the lock released) when the lock is busy or no context exists.
+ */
+struct bnxt_ulp_context *
+bnxt_ulp_cntxt_entry_acquire(void)
+{
+ struct ulp_context_list_entry *entry;
+
+ /* take a lock and get the first ulp context available */
+ if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
+ TAILQ_FOREACH(entry, &ulp_cntx_list, next)
+ if (entry->ulp_ctx)
+ return entry->ulp_ctx;
+ /*
+ * Fixed: no usable context found — the lock must be
+ * dropped here, otherwise it is leaked and every later
+ * acquire/list operation deadlocks.
+ */
+ rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
+ }
+ return NULL;
+}
+
+/* Release the context-list spinlock taken by a successful
+ * bnxt_ulp_cntxt_entry_acquire(). Must only be called while holding it.
+ */
+void
+bnxt_ulp_cntxt_entry_release(void)
+{
+ rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
+}