#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>
+#include <rte_spinlock.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "ulp_port_db.h"
#include "ulp_tun.h"
#include "ulp_ha_mgr.h"
+#include "bnxt_tf_pmd_shim.h"
/* Linked list of all TF sessions. */
STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
/* Mutex to synchronize bnxt_ulp_session_list operations. */
static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+/* Spin lock to protect context global list */
+uint32_t bnxt_ulp_ctxt_lock_created;
+rte_spinlock_t bnxt_ulp_ctxt_lock;
+TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
+static struct cntx_list_entry_list ulp_cntx_list =
+ TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
+
+/* Static function declarations */
+static int32_t bnxt_ulp_cntxt_list_init(void);
+static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
+static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
+
/*
* Allow the deletion of context only for the bnxt device that
* created the session.
enum bnxt_ulp_device_id *ulp_dev_id)
{
if (BNXT_CHIP_P5(bp)) {
- /* TBD: needs to accommodate even SR2 */
*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
return 0;
}
uint32_t num,
struct tf_session_resources *res)
{
- uint32_t dev_id, res_type, i;
+ uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
enum tf_dir dir;
uint8_t app_id;
int32_t rc = 0;
}
int32_t
-bnxt_ulp_cntxt_app_caps_init(struct bnxt_ulp_context *ulp_ctx,
+bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
uint8_t app_id, uint32_t dev_id)
{
struct bnxt_ulp_app_capabilities_info *info;
uint32_t num = 0;
uint16_t i;
bool found = false;
+ struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx;
if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
ulp_ctx->cfg_data->ulp_flags |=
BNXT_ULP_HIGH_AVAIL_ENABLED;
+ if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_APP_UNICAST_ONLY;
+ if (info[i].flags & BNXT_ULP_APP_CAP_SOCKET_DIRECT) {
+ /* Enable socket direction only if MR is enabled in fw*/
+ if (BNXT_MULTIROOT_EN(bp)) {
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_APP_SOCKET_DIRECT;
+ BNXT_TF_DBG(DEBUG,
+ "Socket Direct feature is enabled");
+ }
+ }
}
if (!found) {
BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
struct rte_eth_dev *ethdev = bp->eth_dev;
struct tf_session_resources *resources;
struct tf_open_session_parms parms;
- size_t copy_num_bytes;
- uint32_t ulp_dev_id;
+ size_t copy_nbytes;
+ uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
int32_t rc = 0;
+ uint8_t app_id;
/* only perform this if shared session is enabled. */
if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
* Need to account for size of ctrl_chan_name and 1 extra for Null
* terminator
*/
- copy_num_bytes = sizeof(parms.ctrl_chan_name) -
+ copy_nbytes = sizeof(parms.ctrl_chan_name) -
strlen(parms.ctrl_chan_name) - 1;
- /* Build the ctrl_chan_name with shared token */
- strncat(parms.ctrl_chan_name, "-tf_shared", copy_num_bytes);
+ /*
+ * Build the ctrl_chan_name with shared token.
+ * When HA is enabled, the WC TCAM needs extra management by the core,
+ * so add the wc_tcam string to the control channel.
+ */
+ if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
+ strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
+ copy_nbytes);
+ else
+ strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
if (rc)
return rc;
+ rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+ return -EINVAL;
+ }
+
rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
parms.shadow_copy = true;
parms.bp = bp;
+ if (app_id == 0)
+ parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
+ else
+ parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
/*
* Open the session here, but the collect the resources during the
int32_t rc = 0;
struct tf_open_session_parms params;
struct tf_session_resources *resources;
- uint32_t ulp_dev_id;
+ uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
+ uint8_t app_id;
memset(¶ms, 0, sizeof(params));
params.shadow_copy = true;
+ rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
+ return -EINVAL;
+ }
+
rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
return rc;
params.bp = bp;
+ if (app_id == 0)
+ params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
+ else
+ params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
+
rc = tf_open_session(&bp->tfp, ¶ms);
if (rc) {
BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
int32_t rc = 0;
enum bnxt_ulp_device_id devid;
+ /* Initialize the context entries list */
+ bnxt_ulp_cntxt_list_init();
+
+ /* Add the context to the context entries list */
+ rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
+ return -ENOMEM;
+ }
+
/* Allocate memory to hold ulp context data. */
ulp_data = rte_zmalloc("bnxt_ulp_data",
sizeof(struct bnxt_ulp_data), 0);
BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
goto error_deinit;
}
+ BNXT_TF_DBG(DEBUG, "Ulp initialized with app id %d\n", bp->app_id);
- rc = bnxt_ulp_cntxt_app_caps_init(bp->ulp_ctx, bp->app_id, devid);
+ rc = bnxt_ulp_cntxt_app_caps_init(bp, bp->app_id, devid);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
bp->app_id, devid);
if (rc)
goto error_deinit;
- ulp_tun_tbl_init(ulp_data->tun_tbl);
-
bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
return rc;
ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
{
struct bnxt_ulp_device_params *dparms;
- uint32_t dev_id;
+ uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
if (!bp->max_num_kflows) {
/* Defaults to Internal */
dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
/* GFID = 2 * num_flows */
dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
- BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
+ BNXT_TF_DBG(DEBUG, "Set the number of flows = %" PRIu64 "\n",
dparms->ext_flow_db_num_entries);
return 0;
struct bnxt_ulp_session_state *session)
{
int32_t rc = 0;
- uint32_t flags, dev_id;
+ uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
uint8_t app_id;
/* Increment the ulp context data reference count usage. */
/* update the session details in bnxt tfp */
bp->tfp.session = session->g_tfp->session;
+ /* Add the context to the context entries list */
+ rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
+ return -EINVAL;
+ }
+
/*
* The supported flag will be set during the init. Use it now to
* know if we should go through the attach.
* pointer, otherwise allocate a new session.
*/
static struct bnxt_ulp_session_state *
-ulp_get_session(struct rte_pci_addr *pci_addr)
+ulp_get_session(struct bnxt *bp, struct rte_pci_addr *pci_addr)
{
struct bnxt_ulp_session_state *session;
+ /* if multi root capability is enabled, then ignore the pci bus id */
STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
if (session->pci_info.domain == pci_addr->domain &&
- session->pci_info.bus == pci_addr->bus) {
+ (BNXT_MULTIROOT_EN(bp) ||
+ session->pci_info.bus == pci_addr->bus)) {
return session;
}
}
pthread_mutex_lock(&bnxt_ulp_global_mutex);
- session = ulp_get_session(pci_addr);
+ session = ulp_get_session(bp, pci_addr);
if (!session) {
/* Not Found the session Allocate a new one */
session = rte_zmalloc("bnxt_ulp_session",
{
struct bnxt_ulp_session_state *session;
bool initialized;
+ enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
+ uint32_t ulp_flags;
int32_t rc = 0;
- if (!bp || !BNXT_TRUFLOW_EN(bp))
+ if (!BNXT_TRUFLOW_EN(bp)) {
+ BNXT_TF_DBG(DEBUG,
+ "Skip ulp init for port: %d, TF is not enabled\n",
+ bp->eth_dev->data->port_id);
return rc;
+ }
if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
- BNXT_TF_DBG(ERR,
+ BNXT_TF_DBG(DEBUG,
"Skip ulp init for port: %d, not a TVF or PF\n",
- bp->eth_dev->data->port_id);
+ bp->eth_dev->data->port_id);
return rc;
}
goto jump_to_error;
}
- if (BNXT_ACCUM_STATS_EN(bp))
- bp->ulp_ctx->cfg_data->accum_stats = true;
+ rc = bnxt_ulp_devid_get(bp, &devid);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
+ goto jump_to_error;
+ }
+
+ /* set the unicast mode */
+ if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
+ BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
+ goto jump_to_error;
+ }
+ if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
+ if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
+ BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
+ goto jump_to_error;
+ }
+ }
- BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
- bp->eth_dev->data->port_id);
return rc;
jump_to_error:
struct rte_pci_device *pci_dev;
struct rte_pci_addr *pci_addr;
- if (!BNXT_TRUFLOW_EN(bp))
+ if (!BNXT_TRUFLOW_EN(bp)) {
+ BNXT_TF_DBG(DEBUG,
+ "Skip ULP deinit for port:%d, TF is not enabled\n",
+ bp->eth_dev->data->port_id);
return;
+ }
if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
- BNXT_TF_DBG(ERR,
+ BNXT_TF_DBG(DEBUG,
"Skip ULP deinit port:%d, not a TVF or PF\n",
bp->eth_dev->data->port_id);
return;
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
pthread_mutex_lock(&bnxt_ulp_global_mutex);
- session = ulp_get_session(pci_addr);
+ session = ulp_get_session(bp, pci_addr);
pthread_mutex_unlock(&bnxt_ulp_global_mutex);
/* session not found then just exit */
}
}
+ /* Free the ulp context in the context entry list */
+ bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
+
/* clean up the session */
ulp_session_deinit(session);
*dev_id = ulp_ctx->cfg_data->dev_id;
return 0;
}
-
+ *dev_id = BNXT_ULP_DEVICE_ID_LAST;
BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
return -EINVAL;
}
*mem_type = ulp_ctx->cfg_data->mem_type;
return 0;
}
+ *mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
return -EINVAL;
}
return -EINVAL;
}
+ if (tfp == NULL) {
+ if (ulp->cfg_data->num_shared_clients > 0)
+ ulp->cfg_data->num_shared_clients--;
+ } else {
+ ulp->cfg_data->num_shared_clients++;
+ }
+
ulp->g_shared_tfp = tfp;
return 0;
}
return ulp->g_shared_tfp;
}
+/* Function to get the number of shared clients attached */
+uint8_t
+bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
+{
+ if (ulp == NULL || ulp->cfg_data == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid arguments\n");
+ return 0;
+ }
+ return ulp->cfg_data->num_shared_clients;
+}
+
/* Function to set the tfp session details from the ulp context. */
int32_t
bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
return false;
return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
}
+
+static int32_t
+bnxt_ulp_cntxt_list_init(void)
+{
+ /* Create the cntxt spin lock only once*/
+ if (!bnxt_ulp_ctxt_lock_created)
+ rte_spinlock_init(&bnxt_ulp_ctxt_lock);
+ bnxt_ulp_ctxt_lock_created = 1;
+ return 0;
+}
+
+static int32_t
+bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
+{
+ struct ulp_context_list_entry *entry;
+
+ entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
+ if (entry == NULL) {
+ BNXT_TF_DBG(ERR, "unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
+ entry->ulp_ctx = ulp_ctx;
+ TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
+ rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
+ return 0;
+}
+
+static void
+bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
+{
+ struct ulp_context_list_entry *entry, *temp;
+
+ rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
+ RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
+ if (entry->ulp_ctx == ulp_ctx) {
+ TAILQ_REMOVE(&ulp_cntx_list, entry, next);
+ rte_free(entry);
+ break;
+ }
+ }
+ rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
+}
+
/*
 * Look up the ULP context whose cfg_data pointer matches @arg.
 *
 * Locking contract: on a successful match this returns with the context
 * list spin lock STILL HELD; the caller must release it via
 * bnxt_ulp_cntxt_entry_release(). On failure (no match, or the lock is
 * currently contended) the lock is not held and NULL is returned.
 * Using trylock avoids blocking the caller; a NULL return may simply
 * mean "busy, try again".
 */
struct bnxt_ulp_context *
bnxt_ulp_cntxt_entry_acquire(void *arg)
{
	struct ulp_context_list_entry *entry;

	/* take a lock and get the first ulp context available */
	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
		TAILQ_FOREACH(entry, &ulp_cntx_list, next)
			/* Deliberately keep the lock held on a match. */
			if (entry->ulp_ctx->cfg_data == arg)
				return entry->ulp_ctx;
		rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
	}
	return NULL;
}
+
/*
 * Release the context list spin lock taken by a successful
 * bnxt_ulp_cntxt_entry_acquire(). Must only be called while the lock
 * is held (i.e. after acquire returned non-NULL).
 */
void
bnxt_ulp_cntxt_entry_release(void)
{
	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
}
+
+/* Function to get the app tunnel details from the ulp context. */
+struct bnxt_flow_app_tun_ent *
+bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp)
+{
+ if (!ulp || !ulp->cfg_data)
+ return NULL;
+
+ return ulp->cfg_data->app_tun;
+}