1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
14 #include "bnxt_tf_common.h"
16 #include "tf_ext_flow_handle.h"
18 #include "ulp_template_db_enum.h"
19 #include "ulp_template_struct.h"
20 #include "ulp_mark_mgr.h"
21 #include "ulp_fc_mgr.h"
22 #include "ulp_flow_db.h"
23 #include "ulp_mapper.h"
24 #include "ulp_port_db.h"
27 /* Linked list of all TF sessions. */
28 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
29 STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
31 /* Mutex to synchronize bnxt_ulp_session_list operations. */
32 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
 * Allow the deletion of context only for the bnxt device that
 * created the session.
 */
ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
	/* No context or no config data: nothing to deinit. */
	if (!ulp_ctx || !ulp_ctx->cfg_data)

	/* A zero reference count means this device owns the final deinit. */
	if (!ulp_ctx->cfg_data->ref_cnt) {
		BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
/*
 * Map the bnxt device to a ULP device id.
 * P5 chips map to Thor, Stingray to its own id, everything else is
 * treated as Whitney+.
 */
bnxt_ulp_devid_get(struct bnxt *bp,
		   enum bnxt_ulp_device_id *ulp_dev_id)
	if (BNXT_CHIP_P5(bp)) {
		/* TBD: needs to accommodate even SR2 */
		*ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;

	if (BNXT_STINGRAY(bp))
		*ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
	/* Assuming Whitney */
	*ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
/* Return the app capabilities table and report its entry count. */
struct bnxt_ulp_app_capabilities_info *
bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
	*num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
	return ulp_app_cap_info_list;
/* Return the per-app resource reservation table and its entry count. */
static struct bnxt_ulp_resource_resv_info *
bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
	/* Caller must supply a place to store the count. */
	if (num_entries == NULL)
	*num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
	return ulp_app_resource_resv_list;
/* Return the regular-session resource reservation table and its count. */
struct bnxt_ulp_resource_resv_info *
bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
	*num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
	return ulp_resource_resv_list;
/* Return the app global (named) resource table and its entry count. */
struct bnxt_ulp_glb_resource_info *
bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
	*num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
	return ulp_app_glb_resource_tbl;
108 bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
109 struct bnxt_ulp_glb_resource_info *info,
111 struct tf_session_resources *res)
113 uint32_t dev_id, res_type, i;
118 if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
119 BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
123 rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
125 BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
129 rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
131 BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
135 for (i = 0; i < num; i++) {
136 if (dev_id != info[i].device_id || app_id != info[i].app_id)
138 dir = info[i].direction;
139 res_type = info[i].resource_type;
141 switch (info[i].resource_func) {
142 case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
143 res->ident_cnt[dir].cnt[res_type]++;
145 case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
146 res->tbl_cnt[dir].cnt[res_type]++;
148 case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
149 res->tcam_cnt[dir].cnt[res_type]++;
151 case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
152 res->em_cnt[dir].cnt[res_type]++;
155 BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n,",
156 info[i].resource_func);
/*
 * Fill @res with the unnamed (reserved) resource counts.
 * Unlike the named calculation, counts are assigned from the table entry
 * rather than accumulated.
 */
bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
				struct bnxt_ulp_resource_resv_info *info,
				struct tf_session_resources *res)
	uint32_t dev_id, res_type, i;

	if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");

	rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");

	rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
		BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");

	for (i = 0; i < num; i++) {
		/* Only apply entries for this app/device pairing. */
		if (app_id != info[i].app_id || dev_id != info[i].device_id)
		dir = info[i].direction;
		res_type = info[i].resource_type;

		switch (info[i].resource_func) {
		case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
			res->ident_cnt[dir].cnt[res_type] = info[i].count;
		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
			res->tbl_cnt[dir].cnt[res_type] = info[i].count;
		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
			res->tcam_cnt[dir].cnt[res_type] = info[i].count;
		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
			res->em_cnt[dir].cnt[res_type] = info[i].count;
/* Compute the TF session resource counts for a regular (non-shared) session. */
bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
			  struct tf_session_resources *res)
	struct bnxt_ulp_resource_resv_info *unnamed = NULL;

	if (ulp_ctx == NULL || res == NULL) {
		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");

	/* Regular sessions only use the unnamed reservation list. */
	unnamed = bnxt_ulp_resource_resv_list_get(&unum);
	if (unnamed == NULL) {
		BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");

	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
		BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
/*
 * Compute the TF resource counts for the shared session.
 * Shared resources are the sum of the unnamed (baseline) counts and the
 * named (global) counts.
 */
bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
					 struct tf_session_resources *res)
	struct bnxt_ulp_resource_resv_info *unnamed;
	struct bnxt_ulp_glb_resource_info *named;

	if (ulp_ctx == NULL || res == NULL) {
		BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");

	/* Make sure the resources are zero before accumulating. */
	memset(res, 0, sizeof(struct tf_session_resources));

	/*
	 * Shared resources are comprised of both named and unnamed resources.
	 * First get the unnamed counts, and then add the named to the result.
	 */
	/* Get the baseline counts */
	unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
	if (unnamed == NULL) {
		BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");

	rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
		BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");

	/* Get the named list and add the totals */
	named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
		BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");

	rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
		BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
/*
 * Initialize the context flags from the app capabilities table for the
 * given app/device pair; marks the context unsupported when no table
 * entry matches.
 */
bnxt_ulp_cntxt_app_caps_init(struct bnxt_ulp_context *ulp_ctx,
			     uint8_t app_id, uint32_t dev_id)
	struct bnxt_ulp_app_capabilities_info *info;

	/* Already flagged unsupported: fail fast. */
	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",

	info = bnxt_ulp_app_cap_list_get(&num);
		BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");

	for (i = 0; i < num; i++) {
		if (info[i].app_id != app_id || info[i].device_id != dev_id)

		/* Matched entry: latch the shared-session capability. */
		if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
			ulp_ctx->cfg_data->ulp_flags |=
				BNXT_ULP_SHARED_SESSION_ENABLED;

	BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
	ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
/* Close the TF shared session, if one is enabled for this context. */
ulp_ctx_shared_session_close(struct bnxt *bp,
			     struct bnxt_ulp_session_state *session)
	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))

	tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
	/*
	 * Log it under debug since this is likely a case of the
	 * shared session not being created. For example, a failed
	 */
	BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");

	rc = tf_close_session(tfp);
		BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
	/* Clear both the context pointer and the global session handle. */
	(void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
	session->g_shared_tfp.session = NULL;
/*
 * Open (or attach to) the TF shared session.
 * No-op unless the shared-session capability is enabled in the context.
 * The control channel name is the port name with a "-tf_shared" suffix.
 */
ulp_ctx_shared_session_open(struct bnxt *bp,
			    struct bnxt_ulp_session_state *session)
	struct rte_eth_dev *ethdev = bp->eth_dev;
	struct tf_session_resources *resources;
	struct tf_open_session_parms parms;
	size_t copy_num_bytes;

	/* only perform this if shared session is enabled. */
	if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))

	memset(&parms, 0, sizeof(parms));

	rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
					  parms.ctrl_chan_name);
		BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
			    ethdev->data->port_id, rc);

	resources = &parms.resources;

	/*
	 * Need to account for size of ctrl_chan_name and 1 extra for Null
	 */
	copy_num_bytes = sizeof(parms.ctrl_chan_name) -
		strlen(parms.ctrl_chan_name) - 1;

	/* Build the ctrl_chan_name with shared token */
	strncat(parms.ctrl_chan_name, "-tf_shared", copy_num_bytes);

	rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);

	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");

	/* Map the ULP device id onto the TF device type. */
	switch (ulp_dev_id) {
	case BNXT_ULP_DEVICE_ID_WH_PLUS:
		parms.device_type = TF_DEVICE_TYPE_WH;
	case BNXT_ULP_DEVICE_ID_STINGRAY:
		parms.device_type = TF_DEVICE_TYPE_SR;
	case BNXT_ULP_DEVICE_ID_THOR:
		parms.device_type = TF_DEVICE_TYPE_THOR;
		BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");

	parms.shadow_copy = true;

	/*
	 * Open the session here, but the collect the resources during the
	 * mapper initialization.
	 */
	rc = tf_open_session(&bp->tfp_shared, &parms);

	if (parms.shared_session_creator)
		BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
		BNXT_TF_DBG(DEBUG, "Shared session attached.\n");

	/* Save the shared session in global data */
	if (!session->g_shared_tfp.session)
		session->g_shared_tfp.session = bp->tfp_shared.session;

	rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
		BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
/*
 * Attach this port to an already-created shared session by reusing the
 * global session handle, then (re)opening against it.
 */
ulp_ctx_shared_session_attach(struct bnxt *bp,
			      struct bnxt_ulp_session_state *session)
	/* Simply return success if shared session not enabled */
	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
		bp->tfp_shared.session = session->g_shared_tfp.session;
		rc = ulp_ctx_shared_session_open(bp, session);
/* Detach this port from the shared session, closing its local handle. */
ulp_ctx_shared_session_detach(struct bnxt *bp)
	if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
		if (bp->tfp_shared.session) {
			tf_close_session(&bp->tfp_shared);
			bp->tfp_shared.session = NULL;
473 * Initialize an ULP session.
474 * An ULP session will contain all the resources needed to support rte flow
475 * offloads. A session is initialized as part of rte_eth_device start.
476 * A single vswitch instance can have multiple uplinks which means
477 * rte_eth_device start will be called for each of these devices.
478 * ULP session manager will make sure that a single ULP session is only
479 * initialized once. Apart from this, it also initializes MARK database,
480 * EEM table & flow database. ULP session manager also manages a list of
481 * all opened ULP sessions.
484 ulp_ctx_session_open(struct bnxt *bp,
485 struct bnxt_ulp_session_state *session)
487 struct rte_eth_dev *ethdev = bp->eth_dev;
489 struct tf_open_session_parms params;
490 struct tf_session_resources *resources;
493 memset(¶ms, 0, sizeof(params));
495 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
496 params.ctrl_chan_name);
498 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
499 ethdev->data->port_id, rc);
503 params.shadow_copy = true;
505 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
507 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
511 switch (ulp_dev_id) {
512 case BNXT_ULP_DEVICE_ID_WH_PLUS:
513 params.device_type = TF_DEVICE_TYPE_WH;
515 case BNXT_ULP_DEVICE_ID_STINGRAY:
516 params.device_type = TF_DEVICE_TYPE_SR;
518 case BNXT_ULP_DEVICE_ID_THOR:
519 params.device_type = TF_DEVICE_TYPE_THOR;
522 BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
526 resources = ¶ms.resources;
527 rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
532 rc = tf_open_session(&bp->tfp, ¶ms);
534 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
535 params.ctrl_chan_name, rc);
538 if (!session->session_opened) {
539 session->session_opened = 1;
540 session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
541 sizeof(struct tf), 0);
542 session->g_tfp->session = bp->tfp.session;
/*
 * Close the ULP session.
 * It takes the ulp context pointer.
 */
ulp_ctx_session_close(struct bnxt *bp,
		      struct bnxt_ulp_session_state *session)
	/* close the session in the hardware */
	if (session->session_opened)
		tf_close_session(&bp->tfp);
	session->session_opened = 0;
	/* rte_free(NULL) is a no-op, so no guard is required. */
	rte_free(session->g_tfp);
	session->g_tfp = NULL;
/*
 * Populate the EEM table-scope allocation parameters.
 * Two sizing paths are visible below: fixed defaults (BNXT_ULP_*_NUM_FLOWS)
 * and a dparms-derived sizing (ext_flow_db_num_entries / 1024); they are the
 * arms of a branch whose condition lines are not shown in this extract.
 */
bnxt_init_tbl_scope_parms(struct bnxt *bp,
			  struct tf_alloc_tbl_scope_parms *params)
	struct bnxt_ulp_device_params *dparms;

	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
	/* TBD: For now, just use default. */
	dparms = bnxt_ulp_device_params_get(dev_id);
	/*
	 * Set the flush timer for EEM entries. The value is in 100ms intervals,
	 */
	params->hw_flow_cache_flush_timer = 100;

	/* Default sizing path. */
	params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
	params->rx_max_action_entry_sz_in_bits =
		BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
	params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
	params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;

	params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
	params->tx_max_action_entry_sz_in_bits =
		BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
	params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
	params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;

	/* Device-parameter sizing path (flow counts in units of 1K). */
	params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
	params->rx_max_action_entry_sz_in_bits =
		BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
	params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
	params->rx_num_flows_in_k =
		dparms->ext_flow_db_num_entries / 1024;

	params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
	params->tx_max_action_entry_sz_in_bits =
		BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
	params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
	params->tx_num_flows_in_k =
		dparms->ext_flow_db_num_entries / 1024;

	BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
		    params->rx_num_flows_in_k);
615 /* Initialize Extended Exact Match host memory. */
617 ulp_eem_tbl_scope_init(struct bnxt *bp)
619 struct tf_alloc_tbl_scope_parms params = {0};
620 struct bnxt_ulp_device_params *dparms;
621 enum bnxt_ulp_flow_mem_type mtype;
625 /* Get the dev specific number of flows that needed to be supported. */
626 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
627 BNXT_TF_DBG(ERR, "Invalid device id\n");
631 dparms = bnxt_ulp_device_params_get(dev_id);
633 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
637 if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
639 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
640 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
644 bnxt_init_tbl_scope_parms(bp, ¶ms);
645 rc = tf_alloc_tbl_scope(&bp->tfp, ¶ms);
647 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
651 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
653 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
660 /* Free Extended Exact Match host memory */
662 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
664 struct tf_free_tbl_scope_parms params = {0};
667 struct bnxt_ulp_device_params *dparms;
668 enum bnxt_ulp_flow_mem_type mtype;
671 if (!ulp_ctx || !ulp_ctx->cfg_data)
674 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
676 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
680 /* Get the dev specific number of flows that needed to be supported. */
681 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
682 BNXT_TF_DBG(ERR, "Invalid device id\n");
686 dparms = bnxt_ulp_device_params_get(dev_id);
688 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
692 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
694 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
695 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
699 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, ¶ms.tbl_scope_id);
701 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
705 rc = tf_free_tbl_scope(tfp, ¶ms);
707 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
/* The function to free and deinit the ulp context data. */
ulp_ctx_deinit(struct bnxt *bp,
	       struct bnxt_ulp_session_state *session)
	/* close the tf session */
	ulp_ctx_session_close(bp, session);

	/* The shared session must be closed last. */
	ulp_ctx_shared_session_close(bp, session);

	/* Free the contents */
	if (session->cfg_data) {
		rte_free(session->cfg_data);
		/* Clear both owners of the freed cfg_data pointer. */
		bp->ulp_ctx->cfg_data = NULL;
		session->cfg_data = NULL;
/* The function to allocate and initialize the ulp context data. */
ulp_ctx_init(struct bnxt *bp,
	     struct bnxt_ulp_session_state *session)
	struct bnxt_ulp_data *ulp_data;
	enum bnxt_ulp_device_id devid;

	/* Allocate memory to hold ulp context data. */
	ulp_data = rte_zmalloc("bnxt_ulp_data",
			       sizeof(struct bnxt_ulp_data), 0);
		BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");

	/* Increment the ulp context data reference count usage. */
	bp->ulp_ctx->cfg_data = ulp_data;
	session->cfg_data = ulp_data;

	ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;

	rc = bnxt_ulp_devid_get(bp, &devid);
		BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");

	rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
		BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");

	rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
		BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");

	rc = bnxt_ulp_cntxt_app_caps_init(bp->ulp_ctx, bp->app_id, devid);
		BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",

	/*
	 * Shared session must be created before first regular session but after
	 * the ulp_ctx is valid.
	 */
	rc = ulp_ctx_shared_session_open(bp, session);
		BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);

	/* Open the ulp session. */
	rc = ulp_ctx_session_open(bp, session);

	ulp_tun_tbl_init(ulp_data->tun_tbl);

	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);

	/* Error path: mark opened so deinit closes the session, then unwind. */
	session->session_opened = 1;
	(void)ulp_ctx_deinit(bp, session);
/* The function to initialize ulp dparms with devargs */
ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
	struct bnxt_ulp_device_params *dparms;

	if (!bp->max_num_kflows) {
		/* Defaults to Internal */
		bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
					    BNXT_ULP_FLOW_MEM_TYPE_INT);

	/* The max_num_kflows were set, so move to external */
	if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))

	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");

	dparms = bnxt_ulp_device_params_get(dev_id);
		BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");

	/* num_flows = max_num_kflows * 1024 */
	dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
	/* GFID = 2 * num_flows */
	dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
	BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
		    dparms->ext_flow_db_num_entries);
/* The function to initialize bp flags with truflow features */
ulp_dparms_dev_port_intf_update(struct bnxt *bp,
				struct bnxt_ulp_context *ulp_ctx)
	enum bnxt_ulp_flow_mem_type mtype;

	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
	/* Update the bp flag with gfid flag */
	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
		bp->flags |= BNXT_FLAG_GFID_ENABLE;
/*
 * Attach a later-starting port to an existing ULP context: bump the
 * ref count, reuse the global session, and open a TF client for this port.
 */
ulp_ctx_attach(struct bnxt *bp,
	       struct bnxt_ulp_session_state *session)
	uint32_t flags, dev_id;

	/* Increment the ulp context data reference count usage. */
	bp->ulp_ctx->cfg_data = session->cfg_data;
	bp->ulp_ctx->cfg_data->ref_cnt++;

	/* update the session details in bnxt tfp */
	/* NOTE(review): session->g_tfp dereferenced without NULL check here. */
	bp->tfp.session = session->g_tfp->session;

	/*
	 * The supported flag will be set during the init. Use it now to
	 * know if we should go through the attach.
	 */
	rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
		BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");

	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
		BNXT_TF_DBG(ERR, "Unable do get the dev_id.\n");

	flags = bp->ulp_ctx->cfg_data->ulp_flags;
	if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
		BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",

	/* Create a TF Client */
	rc = ulp_ctx_session_open(bp, session);
		PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
		bp->tfp.session = NULL;

	bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Detach this port from the ULP context by closing its TF session. */
ulp_ctx_detach(struct bnxt *bp)
	if (bp->tfp.session) {
		tf_close_session(&bp->tfp);
		bp->tfp.session = NULL;
/*
 * Initialize the state of an ULP session.
 * If the state of an ULP session is not initialized, set it's state to
 * initialized. If the state is already initialized, do nothing.
 */
ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
	pthread_mutex_lock(&session->bnxt_ulp_mutex);

	/* First caller wins; *init reports the prior state to the caller. */
	if (!session->bnxt_ulp_init) {
		session->bnxt_ulp_init = true;

	pthread_mutex_unlock(&session->bnxt_ulp_mutex);
/*
 * Check if an ULP session is already allocated for a specific PCI
 * domain & bus. If it is already allocated simply return the session
 * pointer, otherwise allocate a new session.
 */
static struct bnxt_ulp_session_state *
ulp_get_session(struct rte_pci_addr *pci_addr)
	struct bnxt_ulp_session_state *session;

	/* Sessions are keyed by PCI domain + bus only. */
	STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
		if (session->pci_info.domain == pci_addr->domain &&
		    session->pci_info.bus == pci_addr->bus) {
/*
 * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
 * If it's already initialized simply return the already existing session.
 */
static struct bnxt_ulp_session_state *
ulp_session_init(struct bnxt *bp,
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr *pci_addr;
	struct bnxt_ulp_session_state *session;

	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
	pci_addr = &pci_dev->addr;

	/* The global mutex guards the session list lookup/insert. */
	pthread_mutex_lock(&bnxt_ulp_global_mutex);

	session = ulp_get_session(pci_addr);
	/* Not Found the session Allocate a new one */
	session = rte_zmalloc("bnxt_ulp_session",
			      sizeof(struct bnxt_ulp_session_state),
		"Allocation failed for bnxt_ulp_session\n");
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);

	/* Add it to the queue */
	session->pci_info.domain = pci_addr->domain;
	session->pci_info.bus = pci_addr->bus;
	rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
		BNXT_TF_DBG(ERR, "mutex create failed\n");
		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
	STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,

	ulp_context_initialized(session, init);
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
/*
 * When a device is closed, remove it's associated session from the global
 */
ulp_session_deinit(struct bnxt_ulp_session_state *session)
	/* Only tear down once the cfg_data has been released. */
	if (!session->cfg_data) {
		pthread_mutex_lock(&bnxt_ulp_global_mutex);
		STAILQ_REMOVE(&bnxt_ulp_session_list, session,
			      bnxt_ulp_session_state, next);
		pthread_mutex_destroy(&session->bnxt_ulp_mutex);
		pthread_mutex_unlock(&bnxt_ulp_global_mutex);
/*
 * Internal api to enable NAT feature.
 * Set set_flag to 1 to set the value or zero to reset the value.
 * returns 0 on success.
 */
bnxt_ulp_global_cfg_update(struct bnxt *bp,
			   enum tf_global_config_type type,
	uint32_t global_cfg = 0;
	struct tf_global_cfg_parms parms = { 0 };

	/* Initialize the params */
	/* Note: comma-separated assignments, matching the original source. */
	parms.offset = offset,
	parms.config = (uint8_t *)&global_cfg,
	parms.config_sz_in_bytes = sizeof(global_cfg);

	/* Read-modify-write of the global configuration word. */
	rc = tf_get_global_cfg(&bp->tfp, &parms);
		BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",

	global_cfg |= value;
	global_cfg &= ~value;

	/* SET the register RE_CFA_REG_ACT_TECT */
	rc = tf_set_global_cfg(&bp->tfp, &parms);
		BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
/* Internal function to delete all the flows belonging to the given port */
bnxt_ulp_flush_port_flows(struct bnxt *bp)
	/* it is assumed that port is either TVF or PF */
	if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
					 bp->eth_dev->data->port_id,
		BNXT_TF_DBG(ERR, "Invalid argument\n");

	/* Best-effort flush; the return value is intentionally ignored. */
	(void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
/* Internal function to delete the VFR default flows */
bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
	struct bnxt_ulp_vfr_rule_info *info;
	struct rte_eth_dev *vfr_eth_dev;
	struct bnxt_representor *vfr_bp;

	if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))

	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)

	/* Delete default rules for all ports */
	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
		info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];

		/* Non-global: only remove rules parented to this port. */
		if (!global && info->parent_port_id !=
		    bp->eth_dev->data->port_id)

		/* Destroy the flows */
		ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
		/* Clean up the tx action pointer */
		vfr_eth_dev = &rte_eth_devices[port_id];
		vfr_bp = vfr_eth_dev->data->dev_private;
		vfr_bp->vfr_tx_cfa_action = 0;
		/* Forget the rule bookkeeping for this port. */
		memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
/*
 * When a port is deinit'ed by dpdk. This function is called
 * and this function clears the ULP context and rest of the
 * infrastructure associated with it.
 */
bnxt_ulp_deinit(struct bnxt *bp,
		struct bnxt_ulp_session_state *session)
	if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)

	/* clean up default flows */
	bnxt_ulp_destroy_df_rules(bp, true);

	/* clean up default VFR flows */
	bnxt_ulp_destroy_vfr_default_rules(bp, true);

	/* clean up regular flows */
	ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);

	/* cleanup the eem table scope */
	ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);

	/* cleanup the flow database */
	ulp_flow_db_deinit(bp->ulp_ctx);

	/* Delete the Mark database */
	ulp_mark_db_deinit(bp->ulp_ctx);

	/* cleanup the ulp mapper */
	ulp_mapper_deinit(bp->ulp_ctx);

	/* Delete the Flow Counter Manager */
	ulp_fc_mgr_deinit(bp->ulp_ctx);

	/* Delete the Port database */
	ulp_port_db_deinit(bp->ulp_ctx);

	/* Disable NAT feature */
	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
					 TF_TUNNEL_ENCAP_NAT,
					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);

	(void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
					 TF_TUNNEL_ENCAP_NAT,
					 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);

	/* free the flow db lock */
	pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);

	/* Delete the ulp context and tf session and free the ulp context */
	ulp_ctx_deinit(bp, session);
	BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
/*
 * When a port is initialized by dpdk. This functions is called
 * and this function initializes the ULP context and rest of the
 * infrastructure associated with it.
 */
bnxt_ulp_init(struct bnxt *bp,
	      struct bnxt_ulp_session_state *session)
	/* Allocate and Initialize the ulp context. */
	rc = ulp_ctx_init(bp, session);
		BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");

	rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
		BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");

	/* Initialize ulp dparms with values devargs passed */
	rc = ulp_dparms_init(bp, bp->ulp_ctx);
		BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");

	/* create the port database */
	rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
		BNXT_TF_DBG(ERR, "Failed to create the port database\n");

	/* Create the Mark database. */
	rc = ulp_mark_db_init(bp->ulp_ctx);
		BNXT_TF_DBG(ERR, "Failed to create the mark database\n");

	/* Create the flow database. */
	rc = ulp_flow_db_init(bp->ulp_ctx);
		BNXT_TF_DBG(ERR, "Failed to create the flow database\n");

	/* Create the eem table scope. */
	rc = ulp_eem_tbl_scope_init(bp);
		BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");

	rc = ulp_mapper_init(bp->ulp_ctx);
		BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");

	rc = ulp_fc_mgr_init(bp->ulp_ctx);
		BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");

	/*
	 * Enable NAT feature. Set the global configuration register
	 * Tunnel encap to enable NAT with the reuse of existing inner
	 * L2 header smac and dmac
	 */
	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
					TF_TUNNEL_ENCAP_NAT,
					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
		BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");

	rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
					TF_TUNNEL_ENCAP_NAT,
					BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
		BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
	BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");

	/* Shared error path: unwind everything initialized above. */
	bnxt_ulp_deinit(bp, session);
/*
 * When a port is initialized by dpdk. This functions sets up
 * the port specific details.
 */
bnxt_ulp_port_init(struct bnxt *bp)
	struct bnxt_ulp_session_state *session;

	if (!bp || !BNXT_TRUFLOW_EN(bp))

	/* ULP is only set up on the PF or a trusted VF. */
	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
			"Skip ulp init for port: %d, not a TVF or PF\n",
			bp->eth_dev->data->port_id);

	BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");

	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
				  sizeof(struct bnxt_ulp_context), 0);
		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");

	/*
	 * Multiple uplink ports can be associated with a single vswitch.
	 * Make sure only the port that is started first will initialize
	 */
	session = ulp_session_init(bp, &initialized);
		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");

	/*
	 * If ULP is already initialized for a specific domain then
	 * simply assign the ulp context to this rte_eth_dev.
	 */
	rc = ulp_ctx_attach(bp, session);
		BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");

	/*
	 * Attach to the shared session, must be called after the
	 * ulp_ctx_attach in order to ensure that ulp data is available
	 */
	rc = ulp_ctx_shared_session_attach(bp, session);
		"Failed attach to shared session (%d)", rc);

	/* First port for this domain: full initialization path. */
	rc = bnxt_ulp_init(bp, session);
		BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");

	/* Update bnxt driver flags */
	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");

	/* update the port database for the given interface */
	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
		BNXT_TF_DBG(ERR, "Failed to update port database\n");
	/* create the default rules */
	rc = bnxt_ulp_create_df_rules(bp);
		BNXT_TF_DBG(ERR, "Failed to create default flow\n");

	if (BNXT_ACCUM_STATS_EN(bp))
		bp->ulp_ctx->cfg_data->accum_stats = true;

	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
		    bp->eth_dev->data->port_id);

	/* Error path: undo the port-level setup. */
	bnxt_ulp_port_deinit(bp);
/*
 * When a port is de-initialized by dpdk. This functions clears up
 * the port specific details.
 */
bnxt_ulp_port_deinit(struct bnxt *bp)
	struct bnxt_ulp_session_state *session;
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr *pci_addr;

	if (!BNXT_TRUFLOW_EN(bp))

	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
			"Skip ULP deinit port:%d, not a TVF or PF\n",
			bp->eth_dev->data->port_id);

	BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");

	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
		    bp->eth_dev->data->port_id);

	/* Get the session details */
	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
	pci_addr = &pci_dev->addr;
	pthread_mutex_lock(&bnxt_ulp_global_mutex);
	session = ulp_get_session(pci_addr);
	pthread_mutex_unlock(&bnxt_ulp_global_mutex);

	/* session not found then just exit */
	/* Free the ulp context */
	rte_free(bp->ulp_ctx);

	/* Check the reference count to deinit or deattach*/
	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
		bp->ulp_ctx->cfg_data->ref_cnt--;
		/* Other ports still attached: only detach this port. */
		if (bp->ulp_ctx->cfg_data->ref_cnt) {
			/* free the port details */
			/* Free the default flow rule associated to this port */
			bnxt_ulp_destroy_df_rules(bp, false);
			bnxt_ulp_destroy_vfr_default_rules(bp, false);

			/* free flows associated with this port */
			bnxt_ulp_flush_port_flows(bp);

			/* close the session associated with this port */

			/* always detach/close shared after the session. */
			ulp_ctx_shared_session_detach(bp);

			/* Last reference: perform ulp ctx deinit */
			bnxt_ulp_deinit(bp, session);

	/* clean up the session */
	ulp_session_deinit(session);

	/* Free the ulp context */
	rte_free(bp->ulp_ctx);
1468 /* Below are the access functions to access internal data of ulp context. */
1469 /* Function to set the Mark DB into the context */
1471 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1472 struct bnxt_ulp_mark_tbl *mark_tbl)
1474 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1475 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1479 ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1484 /* Function to retrieve the Mark DB from the context. */
1485 struct bnxt_ulp_mark_tbl *
1486 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1488 if (!ulp_ctx || !ulp_ctx->cfg_data)
1491 return ulp_ctx->cfg_data->mark_tbl;
1495 bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
1497 return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
1501 bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
1505 ulp_ctx->cfg_data->app_id = app_id;
1510 bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
1512 /* Default APP id is zero */
1513 if (!ulp_ctx || !app_id)
1515 *app_id = ulp_ctx->cfg_data->app_id;
1519 /* Function to set the device id of the hardware. */
1521 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1524 if (ulp_ctx && ulp_ctx->cfg_data) {
1525 ulp_ctx->cfg_data->dev_id = dev_id;
1532 /* Function to get the device id of the hardware. */
1534 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1537 if (ulp_ctx && ulp_ctx->cfg_data) {
1538 *dev_id = ulp_ctx->cfg_data->dev_id;
1542 BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
1547 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1548 enum bnxt_ulp_flow_mem_type mem_type)
1550 if (ulp_ctx && ulp_ctx->cfg_data) {
1551 ulp_ctx->cfg_data->mem_type = mem_type;
1554 BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
1559 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1560 enum bnxt_ulp_flow_mem_type *mem_type)
1562 if (ulp_ctx && ulp_ctx->cfg_data) {
1563 *mem_type = ulp_ctx->cfg_data->mem_type;
1566 BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1570 /* Function to get the table scope id of the EEM table. */
1572 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1573 uint32_t *tbl_scope_id)
1575 if (ulp_ctx && ulp_ctx->cfg_data) {
1576 *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1583 /* Function to set the table scope id of the EEM table. */
1585 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1586 uint32_t tbl_scope_id)
1588 if (ulp_ctx && ulp_ctx->cfg_data) {
1589 ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1596 /* Function to set the shared tfp session details from the ulp context. */
1598 bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1601 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1605 ulp->g_shared_tfp = tfp;
1609 /* Function to get the shared tfp session details from the ulp context. */
1611 bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
1614 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1617 return ulp->g_shared_tfp;
1620 /* Function to set the tfp session details from the ulp context. */
1622 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
	/* Guard: a NULL context is an error. */
1625 BNXT_TF_DBG(ERR, "Invalid arguments\n");
	/* NOTE(review): the actual store of `tfp` into the context (the
	 * success path of this setter) is not visible in this extract —
	 * presumably a per-context tfp member is assigned here; confirm
	 * against the full source before relying on this summary.
	 */
1633 /* Function to get the tfp session details from the ulp context. */
1635 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
1636 enum bnxt_ulp_shared_session shared)
	/* Guard: a NULL context is an error. */
1639 BNXT_TF_DBG(ERR, "Invalid arguments\n");
	/* Shared-session case: return the shared tfp handle.
	 * NOTE(review): the non-shared branch selected by `shared` is
	 * elided in this extract — confirm against the full source.
	 */
1643 return ulp->g_shared_tfp;
1649 * Get the device table entry based on the device id.
1651 * dev_id [in] The device id of the hardware
1653 * Returns the pointer to the device parameters.
1655 struct bnxt_ulp_device_params *
1656 bnxt_ulp_device_params_get(uint32_t dev_id)
1658 if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1659 return &ulp_device_params[dev_id];
1663 /* Function to set the flow database to the ulp context. */
1665 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1666 struct bnxt_ulp_flow_db *flow_db)
1668 if (!ulp_ctx || !ulp_ctx->cfg_data)
1671 ulp_ctx->cfg_data->flow_db = flow_db;
1675 /* Function to get the flow database from the ulp context. */
1676 struct bnxt_ulp_flow_db *
1677 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1679 if (!ulp_ctx || !ulp_ctx->cfg_data)
1682 return ulp_ctx->cfg_data->flow_db;
1685 /* Function to get the tunnel cache table info from the ulp context. */
1686 struct bnxt_tun_cache_entry *
1687 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1689 if (!ulp_ctx || !ulp_ctx->cfg_data)
1692 return ulp_ctx->cfg_data->tun_tbl;
1695 /* Function to get the ulp context from eth device. */
1696 struct bnxt_ulp_context *
1697 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1699 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1701 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1702 struct bnxt_representor *vfr = dev->data->dev_private;
1704 bp = vfr->parent_dev->data->dev_private;
1708 BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1715 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1718 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1719 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1723 ulp_ctx->cfg_data->mapper_data = mapper_data;
1728 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1730 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1731 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1735 return ulp_ctx->cfg_data->mapper_data;
1738 /* Function to set the port database to the ulp context. */
1740 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1741 struct bnxt_ulp_port_db *port_db)
1743 if (!ulp_ctx || !ulp_ctx->cfg_data)
1746 ulp_ctx->cfg_data->port_db = port_db;
1750 /* Function to get the port database from the ulp context. */
1751 struct bnxt_ulp_port_db *
1752 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1754 if (!ulp_ctx || !ulp_ctx->cfg_data)
1757 return ulp_ctx->cfg_data->port_db;
1760 /* Function to set the flow counter info into the context */
1762 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1763 struct bnxt_ulp_fc_info *ulp_fc_info)
1765 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1766 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1770 ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1775 /* Function to retrieve the flow counter info from the context. */
1776 struct bnxt_ulp_fc_info *
1777 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1779 if (!ulp_ctx || !ulp_ctx->cfg_data)
1782 return ulp_ctx->cfg_data->fc_info;
1785 /* Function to get the ulp flags from the ulp context. */
1787 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1790 if (!ulp_ctx || !ulp_ctx->cfg_data)
1793 *flags = ulp_ctx->cfg_data->ulp_flags;
1797 /* Function to get the ulp vfr info from the ulp context. */
1798 struct bnxt_ulp_vfr_rule_info*
1799 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1802 if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1805 return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1808 /* Function to acquire the flow database lock from the ulp context. */
1810 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1812 if (!ulp_ctx || !ulp_ctx->cfg_data)
1815 if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1816 BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1822 /* Function to release the flow database lock from the ulp context. */
1824 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1826 if (!ulp_ctx || !ulp_ctx->cfg_data)
1829 pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);