1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
11 #include <rte_spinlock.h>
15 #include "bnxt_tf_common.h"
17 #include "tf_ext_flow_handle.h"
19 #include "ulp_template_db_enum.h"
20 #include "ulp_template_struct.h"
21 #include "ulp_mark_mgr.h"
22 #include "ulp_fc_mgr.h"
23 #include "ulp_flow_db.h"
24 #include "ulp_mapper.h"
25 #include "ulp_port_db.h"
27 #include "ulp_ha_mgr.h"
28 #include "bnxt_tf_pmd_shim.h"
30 /* Linked list of all TF sessions. */
31 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
32 STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
34 /* Mutex to synchronize bnxt_ulp_session_list operations. */
35 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
37 /* Spin lock to protect context global list */
38 rte_spinlock_t bnxt_ulp_ctxt_lock;
/* Head type and instance for the global list of ULP context entries. */
39 TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
40 static struct cntx_list_entry_list ulp_cntx_list =
41 TAILQ_HEAD_INITIALIZER(ulp_cntx_list);
43 /* Static function declarations */
/* Forward declarations for context-list helpers defined later in this file. */
44 static int32_t bnxt_ulp_cntxt_list_init(void);
45 static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
46 static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
/*
 * NOTE(review): interior lines (return type, braces, return statements)
 * are elided in this excerpt; comments describe only the visible logic.
 */
49 * Allow the deletion of context only for the bnxt device that
50 * created the session.
53 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
/* Guard: no context or cfg_data means deinit cannot be decided here. */
55 if (!ulp_ctx || !ulp_ctx->cfg_data)
/* A zero reference count means no other port still uses this context. */
58 if (!ulp_ctx->cfg_data->ref_cnt) {
59 BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
/*
 * Map the bnxt chip variant to a ULP device id: Thor for P5 chips,
 * Stingray when detected, otherwise Whitney+ is assumed.
 * NOTE(review): return statements are elided in this excerpt.
 */
67 bnxt_ulp_devid_get(struct bnxt *bp,
68 enum bnxt_ulp_device_id *ulp_dev_id)
70 if (BNXT_CHIP_P5(bp)) {
71 *ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
75 if (BNXT_STINGRAY(bp))
76 *ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
78 /* Assuming Whitney */
79 *ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
/* Return the static app-capabilities table and report its entry count. */
84 struct bnxt_ulp_app_capabilities_info *
85 bnxt_ulp_app_cap_list_get(uint32_t *num_entries)
89 *num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ;
90 return ulp_app_cap_info_list;
/*
 * Return the app (shared-session) resource reservation table and report
 * its entry count; NULL-checks the out-parameter first.
 */
93 static struct bnxt_ulp_resource_resv_info *
94 bnxt_ulp_app_resource_resv_list_get(uint32_t *num_entries)
96 if (num_entries == NULL)
98 *num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ;
99 return ulp_app_resource_resv_list;
/* Return the regular-session resource reservation table and its size. */
102 struct bnxt_ulp_resource_resv_info *
103 bnxt_ulp_resource_resv_list_get(uint32_t *num_entries)
107 *num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ;
108 return ulp_resource_resv_list;
/* Return the app global (named) resource table and its entry count. */
111 struct bnxt_ulp_glb_resource_info *
112 bnxt_ulp_app_glb_resource_info_list_get(uint32_t *num_entries)
116 *num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ;
117 return ulp_app_glb_resource_tbl;
/*
 * Accumulate the named (global) resource requirements into *res.
 * For every table entry matching this context's app id and device id,
 * increment the per-direction, per-type counter selected by the entry's
 * resource_func.
 * FIX(review): the "Unknown resource func" log had a stray comma inside
 * the format string ("...\n,") — moved outside the string.
 * NOTE(review): braces/returns/breaks are elided in this excerpt.
 */
121 bnxt_ulp_named_resources_calc(struct bnxt_ulp_context *ulp_ctx,
122 struct bnxt_ulp_glb_resource_info *info,
124 struct tf_session_resources *res)
126 uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i;
131 if (ulp_ctx == NULL || info == NULL || res == NULL || num == 0) {
132 BNXT_TF_DBG(ERR, "Invalid parms to named resources calc.\n");
136 rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
138 BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
142 rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
144 BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
/* Only entries for this exact app/device pairing contribute. */
148 for (i = 0; i < num; i++) {
149 if (dev_id != info[i].device_id || app_id != info[i].app_id)
151 dir = info[i].direction;
152 res_type = info[i].resource_type;
154 switch (info[i].resource_func) {
155 case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
156 res->ident_cnt[dir].cnt[res_type]++;
158 case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
159 res->tbl_cnt[dir].cnt[res_type]++;
161 case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
162 res->tcam_cnt[dir].cnt[res_type]++;
164 case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
165 res->em_cnt[dir].cnt[res_type]++;
168 BNXT_TF_DBG(ERR, "Unknown resource func (0x%x)\n",
169 info[i].resource_func);
/*
 * Fill *res with the unnamed (reserved) resource counts: for every table
 * entry matching this context's app id and device id, assign info[i].count
 * into the per-direction, per-type counter selected by resource_func.
 * Unlike the named variant this SETS counts rather than incrementing.
 * NOTE(review): braces/returns/breaks are elided in this excerpt.
 */
178 bnxt_ulp_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx,
179 struct bnxt_ulp_resource_resv_info *info,
181 struct tf_session_resources *res)
183 uint32_t dev_id, res_type, i;
188 if (ulp_ctx == NULL || res == NULL || info == NULL || num == 0) {
189 BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
193 rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id);
195 BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
199 rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
201 BNXT_TF_DBG(ERR, "Unable to get the dev id from ulp.\n");
/* Only entries for this exact app/device pairing contribute. */
205 for (i = 0; i < num; i++) {
206 if (app_id != info[i].app_id || dev_id != info[i].device_id)
208 dir = info[i].direction;
209 res_type = info[i].resource_type;
211 switch (info[i].resource_func) {
212 case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER:
213 res->ident_cnt[dir].cnt[res_type] = info[i].count;
215 case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
216 res->tbl_cnt[dir].cnt[res_type] = info[i].count;
218 case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
219 res->tcam_cnt[dir].cnt[res_type] = info[i].count;
221 case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
222 res->em_cnt[dir].cnt[res_type] = info[i].count;
/*
 * Compute the resource requirements for a regular TF session by applying
 * the unnamed resource reservation list to *res.
 * NOTE(review): braces/returns are elided in this excerpt.
 */
232 bnxt_ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx,
233 struct tf_session_resources *res)
235 struct bnxt_ulp_resource_resv_info *unnamed = NULL;
239 if (ulp_ctx == NULL || res == NULL) {
240 BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n")
244 unnamed = bnxt_ulp_resource_resv_list_get(&unum);
245 if (unnamed == NULL) {
246 BNXT_TF_DBG(ERR, "Unable to get resource resv list.\n");
250 rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
252 BNXT_TF_DBG(ERR, "Unable to calc resources for session.\n");
/*
 * Compute the resource requirements for the shared TF session:
 * zero *res, apply the unnamed (app) reservation list as the baseline,
 * then add the named (app global) resource counts on top.
 * NOTE(review): braces/returns are elided in this excerpt.
 */
258 bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
259 struct tf_session_resources *res)
261 struct bnxt_ulp_resource_resv_info *unnamed;
262 struct bnxt_ulp_glb_resource_info *named;
266 if (ulp_ctx == NULL || res == NULL) {
267 BNXT_TF_DBG(ERR, "Invalid arguments to get resources.\n");
271 /* Make sure the resources are zero before accumulating. */
272 memset(res, 0, sizeof(struct tf_session_resources));
275 * Shared resources are comprised of both named and unnamed resources.
276 * First get the unnamed counts, and then add the named to the result.
278 /* Get the baseline counts */
279 unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
280 if (unnamed == NULL) {
281 BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
284 rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, res);
286 BNXT_TF_DBG(ERR, "Unable to calc resources for shared session.\n");
290 /* Get the named list and add the totals */
291 named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
293 BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
296 rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, res);
298 BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
/*
 * Apply the capability flags for the given (app_id, dev_id) pair to the
 * context's ulp_flags: shared session, hot upgrade (HA) and unicast-only.
 * If no table entry matches, the pair is marked unsupported.
 * NOTE(review): braces/returns and the loop-exit lines are elided here.
 */
304 bnxt_ulp_cntxt_app_caps_init(struct bnxt_ulp_context *ulp_ctx,
305 uint8_t app_id, uint32_t dev_id)
307 struct bnxt_ulp_app_capabilities_info *info;
/* Bail out early if this pair was already flagged unsupported. */
312 if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) {
313 BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
318 info = bnxt_ulp_app_cap_list_get(&num);
320 BNXT_TF_DBG(ERR, "Failed to get app capabilities.\n");
324 for (i = 0; i < num; i++) {
325 if (info[i].app_id != app_id || info[i].device_id != dev_id)
/* Translate each capability bit into the corresponding ulp flag. */
328 if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN)
329 ulp_ctx->cfg_data->ulp_flags |=
330 BNXT_ULP_SHARED_SESSION_ENABLED;
331 if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN)
332 ulp_ctx->cfg_data->ulp_flags |=
333 BNXT_ULP_HIGH_AVAIL_ENABLED;
334 if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY)
335 ulp_ctx->cfg_data->ulp_flags |=
336 BNXT_ULP_APP_UNICAST_ONLY;
/* No matching table entry: remember that this pair is unsupported. */
339 BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
341 ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED;
/*
 * Close the shared TF session, if shared sessions are enabled for this
 * context; clears both the context's shared tfp and the global session
 * pointer in the session state.
 * NOTE(review): braces/returns are elided in this excerpt.
 */
349 ulp_ctx_shared_session_close(struct bnxt *bp,
350 struct bnxt_ulp_session_state *session)
355 if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
358 tfp = bnxt_ulp_cntxt_shared_tfp_get(bp->ulp_ctx);
361 * Log it under debug since this is likely a case of the
362 * shared session not being created. For example, a failed
365 BNXT_TF_DBG(DEBUG, "Failed to get shared tfp on close.\n");
368 rc = tf_close_session(tfp);
370 BNXT_TF_DBG(ERR, "Failed to close the shared session rc=%d.\n",
372 (void)bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, NULL);
374 session->g_shared_tfp.session = NULL;
/*
 * Open (or attach to) the shared TF session when the context enables it.
 * Builds the control-channel name from the port name plus a "-tf_shared"
 * suffix ("-tf_shared-wc_tcam" when HA is on), computes the shared
 * resource requirements, and opens the session with a device type
 * matching the ULP device id.
 * NOTE(review): braces/returns/breaks are elided in this excerpt.
 */
378 ulp_ctx_shared_session_open(struct bnxt *bp,
379 struct bnxt_ulp_session_state *session)
381 struct rte_eth_dev *ethdev = bp->eth_dev;
382 struct tf_session_resources *resources;
383 struct tf_open_session_parms parms;
385 uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
389 /* only perform this if shared session is enabled. */
390 if (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))
393 memset(&parms, 0, sizeof(parms));
395 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
396 parms.ctrl_chan_name);
398 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
399 ethdev->data->port_id, rc);
402 resources = &parms.resources;
405 * Need to account for size of ctrl_chan_name and 1 extra for Null
/* Remaining space in ctrl_chan_name for the suffix (keeps NUL room). */
408 copy_nbytes = sizeof(parms.ctrl_chan_name) -
409 strlen(parms.ctrl_chan_name) - 1;
412 * Build the ctrl_chan_name with shared token.
413 * When HA is enabled, the WC TCAM needs extra management by the core,
414 * so add the wc_tcam string to the control channel.
416 if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx))
417 strncat(parms.ctrl_chan_name, "-tf_shared-wc_tcam",
420 strncat(parms.ctrl_chan_name, "-tf_shared", copy_nbytes);
422 rc = bnxt_ulp_tf_shared_session_resources_get(bp->ulp_ctx, resources);
426 rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
428 BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
432 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
434 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
/* Translate ULP device id to the TF device type for session open. */
438 switch (ulp_dev_id) {
439 case BNXT_ULP_DEVICE_ID_WH_PLUS:
440 parms.device_type = TF_DEVICE_TYPE_WH;
442 case BNXT_ULP_DEVICE_ID_STINGRAY:
443 parms.device_type = TF_DEVICE_TYPE_SR;
445 case BNXT_ULP_DEVICE_ID_THOR:
446 parms.device_type = TF_DEVICE_TYPE_THOR;
449 BNXT_TF_DBG(ERR, "Unable to determine dev for opening session.\n");
453 parms.shadow_copy = true;
/* App ids 0 and 3 use 2-slice WC TCAM rows; others use 1-slice. */
455 if (app_id == 0 || app_id == 3)
456 parms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
458 parms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
461 * Open the session here, but the collect the resources during the
462 * mapper initialization.
464 rc = tf_open_session(&bp->tfp_shared, &parms);
468 if (parms.shared_session_creator)
469 BNXT_TF_DBG(DEBUG, "Shared session creator.\n");
471 BNXT_TF_DBG(DEBUG, "Shared session attached.\n");
473 /* Save the shared session in global data */
474 if (!session->g_shared_tfp.session)
475 session->g_shared_tfp.session = bp->tfp_shared.session;
477 rc = bnxt_ulp_cntxt_shared_tfp_set(bp->ulp_ctx, &bp->tfp_shared);
479 BNXT_TF_DBG(ERR, "Failed to add shared tfp to ulp (%d)\n", rc);
/*
 * Attach this port to the already-created shared session: copy the
 * global session pointer into bp->tfp_shared, then re-run the shared
 * open path (which behaves as an attach for an existing session).
 * Returns success untouched when shared sessions are disabled.
 */
485 ulp_ctx_shared_session_attach(struct bnxt *bp,
486 struct bnxt_ulp_session_state *session)
490 /* Simply return success if shared session not enabled */
491 if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
492 bp->tfp_shared.session = session->g_shared_tfp.session;
493 rc = ulp_ctx_shared_session_open(bp, session);
/*
 * Detach this port from the shared session: close the local handle and
 * clear the pointer. No-op when shared sessions are disabled.
 */
500 ulp_ctx_shared_session_detach(struct bnxt *bp)
502 if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) {
503 if (bp->tfp_shared.session) {
504 tf_close_session(&bp->tfp_shared);
505 bp->tfp_shared.session = NULL;
511 * Initialize an ULP session.
512 * An ULP session will contain all the resources needed to support rte flow
513 * offloads. A session is initialized as part of rte_eth_device start.
514 * A single vswitch instance can have multiple uplinks which means
515 * rte_eth_device start will be called for each of these devices.
516 * ULP session manager will make sure that a single ULP session is only
517 * initialized once. Apart from this, it also initializes MARK database,
518 * EEM table & flow database. ULP session manager also manages a list of
519 * all opened ULP sessions.
522 ulp_ctx_session_open(struct bnxt *bp,
523 struct bnxt_ulp_session_state *session)
525 struct rte_eth_dev *ethdev = bp->eth_dev;
527 struct tf_open_session_parms params;
528 struct tf_session_resources *resources;
529 uint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;
/* FIX(review): "&params" had been corrupted to "¶ms" by stray
 * HTML-entity decoding (&para -> ¶); restored on all three call sites. */
532 memset(&params, 0, sizeof(params));
534 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
535 params.ctrl_chan_name);
537 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
538 ethdev->data->port_id, rc);
542 params.shadow_copy = true;
544 rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
546 BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
550 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
552 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
/* Translate ULP device id to the TF device type for session open. */
556 switch (ulp_dev_id) {
557 case BNXT_ULP_DEVICE_ID_WH_PLUS:
558 params.device_type = TF_DEVICE_TYPE_WH;
560 case BNXT_ULP_DEVICE_ID_STINGRAY:
561 params.device_type = TF_DEVICE_TYPE_SR;
563 case BNXT_ULP_DEVICE_ID_THOR:
564 params.device_type = TF_DEVICE_TYPE_THOR;
567 BNXT_TF_DBG(ERR, "Unable to determine device for opening session.\n");
571 resources = &params.resources;
572 rc = bnxt_ulp_tf_resources_get(bp->ulp_ctx, resources);
/* App ids 0 and 3 use 2-slice WC TCAM rows; others use 1-slice. */
577 if (app_id == 0 || app_id == 3)
578 params.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;
580 params.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;
582 rc = tf_open_session(&bp->tfp, &params);
584 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
585 params.ctrl_chan_name, rc);
/* First opener records the session handle in the shared session state.
 * NOTE(review): rte_zmalloc result is dereferenced without a NULL
 * check on the next line — confirm against the elided lines. */
588 if (!session->session_opened) {
589 session->session_opened = 1;
590 session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
591 sizeof(struct tf), 0);
592 session->g_tfp->session = bp->tfp.session;
598 * Close the ULP session.
599 * It takes the ulp context pointer.
/* Closes the hardware session if opened, then frees the global tfp. */
602 ulp_ctx_session_close(struct bnxt *bp,
603 struct bnxt_ulp_session_state *session)
605 /* close the session in the hardware */
606 if (session->session_opened)
607 tf_close_session(&bp->tfp);
608 session->session_opened = 0;
609 rte_free(session->g_tfp);
610 session->g_tfp = NULL;
/*
 * Populate the EEM table-scope allocation parameters with defaults for
 * key/action sizes and memory, and with flow counts either from compile
 * time defaults or from the device params' ext_flow_db_num_entries.
 * NOTE(review): the branch structure (if/else on dparms) is partially
 * elided in this excerpt.
 */
614 bnxt_init_tbl_scope_parms(struct bnxt *bp,
615 struct tf_alloc_tbl_scope_parms *params)
617 struct bnxt_ulp_device_params *dparms;
621 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
623 /* TBD: For now, just use default. */
626 dparms = bnxt_ulp_device_params_get(dev_id);
629 * Set the flush timer for EEM entries. The value is in 100ms intervals,
632 params->hw_flow_cache_flush_timer = 100;
/* Default flow counts (no device params available). */
635 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
636 params->rx_max_action_entry_sz_in_bits =
637 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
638 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
639 params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
641 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
642 params->tx_max_action_entry_sz_in_bits =
643 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
644 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
645 params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
/* Device-params path: flow counts derived from ext_flow_db_num_entries. */
647 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
648 params->rx_max_action_entry_sz_in_bits =
649 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
650 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
651 params->rx_num_flows_in_k =
652 dparms->ext_flow_db_num_entries / 1024;
654 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
655 params->tx_max_action_entry_sz_in_bits =
656 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
657 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
658 params->tx_num_flows_in_k =
659 dparms->ext_flow_db_num_entries / 1024;
661 BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
662 params->rx_num_flows_in_k);
665 /* Initialize Extended Exact Match host memory. */
/*
 * Allocate an EEM table scope when the context uses external flow memory;
 * skipped for internal memory. On success the table scope id is stored
 * in the ulp context.
 * FIX(review): "&params" had been corrupted to "¶ms" by stray
 * HTML-entity decoding; restored on both call sites below.
 * NOTE(review): braces/returns are elided in this excerpt.
 */
667 ulp_eem_tbl_scope_init(struct bnxt *bp)
669 struct tf_alloc_tbl_scope_parms params = {0};
670 struct bnxt_ulp_device_params *dparms;
671 enum bnxt_ulp_flow_mem_type mtype;
675 /* Get the dev specific number of flows that needed to be supported. */
676 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
677 BNXT_TF_DBG(ERR, "Invalid device id\n");
681 dparms = bnxt_ulp_device_params_get(dev_id);
683 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
687 if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
689 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
690 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
694 bnxt_init_tbl_scope_parms(bp, &params);
695 rc = tf_alloc_tbl_scope(&bp->tfp, &params);
697 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
701 #ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
702 BNXT_TF_DBG(DEBUG, "TableScope=0x%0x %d\n",
704 params.tbl_scope_id);
706 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
708 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
715 /* Free Extended Exact Match host memory */
/*
 * Free the EEM table scope for contexts using external flow memory;
 * skipped for internal memory. Uses the non-shared tfp from the context.
 * FIX(review): "&params" had been corrupted to "¶ms" by stray
 * HTML-entity decoding; restored on both call sites below.
 * NOTE(review): braces/returns are elided in this excerpt.
 */
717 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
719 struct tf_free_tbl_scope_parms params = {0};
722 struct bnxt_ulp_device_params *dparms;
723 enum bnxt_ulp_flow_mem_type mtype;
726 if (!ulp_ctx || !ulp_ctx->cfg_data)
729 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SHARED_SESSION_NO);
731 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
735 /* Get the dev specific number of flows that needed to be supported. */
736 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
737 BNXT_TF_DBG(ERR, "Invalid device id\n");
741 dparms = bnxt_ulp_device_params_get(dev_id);
743 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
747 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
749 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
750 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
754 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
756 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
760 rc = tf_free_tbl_scope(tfp, &params);
762 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
768 /* The function to free and deinit the ulp context data. */
/*
 * Close the regular session first, then the shared session, then free
 * the cfg_data and clear both pointers to it.
 */
770 ulp_ctx_deinit(struct bnxt *bp,
771 struct bnxt_ulp_session_state *session)
773 /* close the tf session */
774 ulp_ctx_session_close(bp, session);
776 /* The shared session must be closed last. */
777 ulp_ctx_shared_session_close(bp, session);
779 /* Free the contents */
780 if (session->cfg_data) {
781 rte_free(session->cfg_data);
782 bp->ulp_ctx->cfg_data = NULL;
783 session->cfg_data = NULL;
788 /* The function to allocate and initialize the ulp context data. */
/*
 * Sequence: register the context in the global list, allocate cfg_data,
 * set device/app ids and capability flags, open the shared session
 * (must precede the regular session), then open the regular session and
 * publish the tfp into the context. On failure the cleanup path marks
 * the session opened and runs ulp_ctx_deinit.
 * NOTE(review): braces/returns/goto labels are elided in this excerpt.
 */
790 ulp_ctx_init(struct bnxt *bp,
791 struct bnxt_ulp_session_state *session)
793 struct bnxt_ulp_data *ulp_data;
795 enum bnxt_ulp_device_id devid;
797 /* Initialize the context entries list */
798 bnxt_ulp_cntxt_list_init();
800 /* Add the context to the context entries list */
801 rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
803 BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
807 /* Allocate memory to hold ulp context data. */
808 ulp_data = rte_zmalloc("bnxt_ulp_data",
809 sizeof(struct bnxt_ulp_data), 0);
811 BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
815 /* Increment the ulp context data reference count usage. */
816 bp->ulp_ctx->cfg_data = ulp_data;
817 session->cfg_data = ulp_data;
819 ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
821 rc = bnxt_ulp_devid_get(bp, &devid);
823 BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
827 rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
829 BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
833 rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
835 BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
838 BNXT_TF_DBG(DEBUG, "Ulp initialized with app id %d\n", bp->app_id);
840 rc = bnxt_ulp_cntxt_app_caps_init(bp->ulp_ctx, bp->app_id, devid);
842 BNXT_TF_DBG(ERR, "Unable to set caps for app(%x)/dev(%x)\n",
848 * Shared session must be created before first regular session but after
849 * the ulp_ctx is valid.
851 rc = ulp_ctx_shared_session_open(bp, session);
853 BNXT_TF_DBG(ERR, "Unable to open shared session (%d)\n", rc);
857 /* Open the ulp session. */
858 rc = ulp_ctx_session_open(bp, session);
862 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Error path: force deinit even though open may have partially failed. */
866 session->session_opened = 1;
867 (void)ulp_ctx_deinit(bp, session);
871 /* The function to initialize ulp dparms with devargs */
/*
 * If max_num_kflows was not passed via devargs, stay on internal flow
 * memory. Otherwise switch to external memory and scale the device
 * params: num_flows = max_num_kflows * 1024, GFID entries = 2x flows.
 * NOTE(review): braces/returns are elided in this excerpt.
 */
873 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
875 struct bnxt_ulp_device_params *dparms;
876 uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST;
878 if (!bp->max_num_kflows) {
879 /* Defaults to Internal */
880 bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
881 BNXT_ULP_FLOW_MEM_TYPE_INT);
885 /* The max_num_kflows were set, so move to external */
886 if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
889 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
890 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
894 dparms = bnxt_ulp_device_params_get(dev_id);
896 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
900 /* num_flows = max_num_kflows * 1024 */
901 dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
902 /* GFID = 2 * num_flows */
903 dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
904 BNXT_TF_DBG(DEBUG, "Set the number of flows = %" PRIu64 "\n",
905 dparms->ext_flow_db_num_entries);
910 /* The function to initialize bp flags with truflow features */
/* Sets BNXT_FLAG_GFID_ENABLE when the context uses external flow memory. */
912 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
913 struct bnxt_ulp_context *ulp_ctx)
915 enum bnxt_ulp_flow_mem_type mtype;
917 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
919 /* Update the bp flag with gfid flag */
920 if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
921 bp->flags |= BNXT_FLAG_GFID_ENABLE;
/*
 * Attach an additional port to an existing ULP session: share cfg_data
 * (bumping its refcount), reuse the global tfp session, register in the
 * context list, verify the app/dev pair is supported, then open a TF
 * client session and publish the tfp into the context.
 * FIX(review): error message typo "Unable do get" -> "Unable to get".
 * NOTE(review): braces/returns are elided in this excerpt.
 */
927 ulp_ctx_attach(struct bnxt *bp,
928 struct bnxt_ulp_session_state *session)
931 uint32_t flags, dev_id = BNXT_ULP_DEVICE_ID_LAST;
934 /* Increment the ulp context data reference count usage. */
935 bp->ulp_ctx->cfg_data = session->cfg_data;
936 bp->ulp_ctx->cfg_data->ref_cnt++;
938 /* update the session details in bnxt tfp */
939 bp->tfp.session = session->g_tfp->session;
941 /* Add the context to the context entries list */
942 rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx);
944 BNXT_TF_DBG(ERR, "Failed to add the context list entry\n");
949 * The supported flag will be set during the init. Use it now to
950 * know if we should go through the attach.
952 rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);
954 BNXT_TF_DBG(ERR, "Unable to get the app id from ulp.\n");
958 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
960 BNXT_TF_DBG(ERR, "Unable to get the dev_id.\n");
964 flags = bp->ulp_ctx->cfg_data->ulp_flags;
965 if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) {
966 BNXT_TF_DBG(ERR, "APP ID %d, Device ID: 0x%x not supported.\n",
971 /* Create a TF Client */
972 rc = ulp_ctx_session_open(bp, session);
974 PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
975 bp->tfp.session = NULL;
979 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Detach this port from the session: close its TF handle and clear it. */
984 ulp_ctx_detach(struct bnxt *bp)
986 if (bp->tfp.session) {
987 tf_close_session(&bp->tfp);
988 bp->tfp.session = NULL;
993 * Initialize the state of an ULP session.
994 * If the state of an ULP session is not initialized, set it's state to
995 * initialized. If the state is already initialized, do nothing.
998 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
1000 pthread_mutex_lock(&session->bnxt_ulp_mutex);
/* First caller flips the flag; *init reports prior state to the caller. */
1002 if (!session->bnxt_ulp_init) {
1003 session->bnxt_ulp_init = true;
1009 pthread_mutex_unlock(&session->bnxt_ulp_mutex);
1013 * Check if an ULP session is already allocated for a specific PCI
1014 * domain & bus. If it is already allocated simply return the session
1015 * pointer, otherwise allocate a new session.
1017 static struct bnxt_ulp_session_state *
1018 ulp_get_session(struct rte_pci_addr *pci_addr)
1020 struct bnxt_ulp_session_state *session;
/* Sessions are keyed on PCI (domain, bus) only — functions share one. */
1022 STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
1023 if (session->pci_info.domain == pci_addr->domain &&
1024 session->pci_info.bus == pci_addr->bus) {
1032 * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
1033 * If it's already initialized simply return the already existing session.
1035 static struct bnxt_ulp_session_state *
1036 ulp_session_init(struct bnxt *bp,
1039 struct rte_pci_device *pci_dev;
1040 struct rte_pci_addr *pci_addr;
1041 struct bnxt_ulp_session_state *session;
1047 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1048 pci_addr = &pci_dev->addr;
/* Global mutex serializes lookup/insert on the session list. */
1050 pthread_mutex_lock(&bnxt_ulp_global_mutex);
1052 session = ulp_get_session(pci_addr);
1054 /* Not Found the session Allocate a new one */
1055 session = rte_zmalloc("bnxt_ulp_session",
1056 sizeof(struct bnxt_ulp_session_state),
1060 "Allocation failed for bnxt_ulp_session\n");
1061 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1065 /* Add it to the queue */
1066 session->pci_info.domain = pci_addr->domain;
1067 session->pci_info.bus = pci_addr->bus;
1068 rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
1070 BNXT_TF_DBG(ERR, "mutex create failed\n");
1071 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1074 STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
/* Report (via *init) whether this caller is the first initializer. */
1078 ulp_context_initialized(session, init);
1079 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1084 * When a device is closed, remove it's associated session from the global
/* Only removed once cfg_data is gone, i.e. the last user has deinited. */
1088 ulp_session_deinit(struct bnxt_ulp_session_state *session)
1093 if (!session->cfg_data) {
1094 pthread_mutex_lock(&bnxt_ulp_global_mutex);
1095 STAILQ_REMOVE(&bnxt_ulp_session_list, session,
1096 bnxt_ulp_session_state, next);
1097 pthread_mutex_destroy(&session->bnxt_ulp_mutex);
1099 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1104 * Internal api to enable NAT feature.
1105 * Set set_flag to 1 to set the value or zero to reset the value.
1106 * returns 0 on success.
/* Read-modify-write of a TF global config word: fetch, OR in or mask out
 * the requested bits, then write back. */
1109 bnxt_ulp_global_cfg_update(struct bnxt *bp,
1111 enum tf_global_config_type type,
1116 uint32_t global_cfg = 0;
1118 struct tf_global_cfg_parms parms = { 0 };
1120 /* Initialize the params */
/* NOTE(review): comma operators below chain the field assignments. */
1123 parms.offset = offset,
1124 parms.config = (uint8_t *)&global_cfg,
1125 parms.config_sz_in_bytes = sizeof(global_cfg);
1127 rc = tf_get_global_cfg(&bp->tfp, &parms);
1129 BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
1135 global_cfg |= value;
1137 global_cfg &= ~value;
1139 /* SET the register RE_CFA_REG_ACT_TECT */
1140 rc = tf_set_global_cfg(&bp->tfp, &parms);
1142 BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
1149 /* Internal function to delete all the flows belonging to the given port */
/* Resolves the port's function id, then flushes all flows owned by it. */
1151 bnxt_ulp_flush_port_flows(struct bnxt *bp)
1155 /* it is assumed that port is either TVF or PF */
1156 if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
1157 bp->eth_dev->data->port_id,
1159 BNXT_TF_DBG(ERR, "Invalid argument\n");
1162 (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
1165 /* Internal function to delete the VFR default flows */
/*
 * Walk every port's VFR rule-info slot; destroy the default flow, clear
 * the representor's tx cfa action, and zero the slot. When 'global' is
 * false only entries parented to this bp's port are touched.
 * NOTE(review): braces/returns and loop-skip lines are elided here.
 */
1167 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
1169 struct bnxt_ulp_vfr_rule_info *info;
1171 struct rte_eth_dev *vfr_eth_dev;
1172 struct bnxt_representor *vfr_bp;
1174 if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
1177 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1180 /* Delete default rules for all ports */
1181 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
1182 info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
1186 if (!global && info->parent_port_id !=
1187 bp->eth_dev->data->port_id)
1190 /* Destroy the flows */
1191 ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
1192 /* Clean up the tx action pointer */
1193 vfr_eth_dev = &rte_eth_devices[port_id];
1195 vfr_bp = vfr_eth_dev->data->dev_private;
1196 vfr_bp->vfr_tx_cfa_action = 0;
1198 memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
1203 * When a port is deinit'ed by dpdk. This function is called
1204 * and this function clears the ULP context and rest of the
1205 * infrastructure associated with it.
/*
 * Teardown order: HA close (if enabled) -> default & VFR flows ->
 * regular flows -> EEM table scope -> flow DB -> mark DB -> mapper ->
 * FC manager -> port DB -> NAT global cfg disable -> flow-db lock ->
 * HA manager -> context/session. Order mirrors bnxt_ulp_init in reverse.
 */
1208 bnxt_ulp_deinit(struct bnxt *bp,
1209 struct bnxt_ulp_session_state *session)
1213 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
1216 ha_enabled = bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx);
1217 if (ha_enabled && session->session_opened) {
1218 int32_t rc = ulp_ha_mgr_close(bp->ulp_ctx);
1220 BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
1223 /* clean up default flows */
1224 bnxt_ulp_destroy_df_rules(bp, true);
1226 /* clean up default VFR flows */
1227 bnxt_ulp_destroy_vfr_default_rules(bp, true);
1229 /* clean up regular flows */
1230 ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
1232 /* cleanup the eem table scope */
1233 ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
1235 /* cleanup the flow database */
1236 ulp_flow_db_deinit(bp->ulp_ctx);
1238 /* Delete the Mark database */
1239 ulp_mark_db_deinit(bp->ulp_ctx);
1241 /* cleanup the ulp mapper */
1242 ulp_mapper_deinit(bp->ulp_ctx);
1244 /* Delete the Flow Counter Manager */
1245 ulp_fc_mgr_deinit(bp->ulp_ctx);
1247 /* Delete the Port database */
1248 ulp_port_db_deinit(bp->ulp_ctx);
1250 /* Disable NAT feature */
1251 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1252 TF_TUNNEL_ENCAP_NAT,
1253 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1255 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1256 TF_TUNNEL_ENCAP_NAT,
1257 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
1259 /* free the flow db lock */
1260 pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
1263 ulp_ha_mgr_deinit(bp->ulp_ctx);
1265 /* Delete the ulp context and tf session and free the ulp context */
1266 ulp_ctx_deinit(bp, session);
1267 BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
1271 * When a port is initialized by dpdk. This functions is called
1272 * and this function initializes the ULP context and rest of the
1273 * infrastructure associated with it.
/*
 * Build-up order: context/session -> flow-db lock -> dparms -> port DB
 * -> mark DB -> flow DB -> EEM table scope -> mapper -> FC manager ->
 * NAT global cfg enable (RX then TX) -> HA init/open when enabled.
 * Any failure falls through to bnxt_ulp_deinit (reverse teardown).
 * NOTE(review): braces/returns/goto labels are elided in this excerpt.
 */
1276 bnxt_ulp_init(struct bnxt *bp,
1277 struct bnxt_ulp_session_state *session)
1281 /* Allocate and Initialize the ulp context. */
1282 rc = ulp_ctx_init(bp, session);
1284 BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
1288 rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
1290 BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
1294 /* Initialize ulp dparms with values devargs passed */
1295 rc = ulp_dparms_init(bp, bp->ulp_ctx);
1297 BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
1301 /* create the port database */
1302 rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
1304 BNXT_TF_DBG(ERR, "Failed to create the port database\n");
1308 /* Create the Mark database. */
1309 rc = ulp_mark_db_init(bp->ulp_ctx);
1311 BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
1315 /* Create the flow database. */
1316 rc = ulp_flow_db_init(bp->ulp_ctx);
1318 BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
1322 /* Create the eem table scope. */
1323 rc = ulp_eem_tbl_scope_init(bp);
1325 BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
1329 rc = ulp_mapper_init(bp->ulp_ctx);
1331 BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
1335 rc = ulp_fc_mgr_init(bp->ulp_ctx);
1337 BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
1342 * Enable NAT feature. Set the global configuration register
1343 * Tunnel encap to enable NAT with the reuse of existing inner
1344 * L2 header smac and dmac
1346 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1347 TF_TUNNEL_ENCAP_NAT,
1348 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1350 BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
1354 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1355 TF_TUNNEL_ENCAP_NAT,
1356 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1358 BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
1362 if (bnxt_ulp_cntxt_ha_enabled(bp->ulp_ctx)) {
1363 rc = ulp_ha_mgr_init(bp->ulp_ctx);
1365 BNXT_TF_DBG(ERR, "Failed to initialize HA %d\n", rc);
1368 rc = ulp_ha_mgr_open(bp->ulp_ctx);
1370 BNXT_TF_DBG(ERR, "Failed to Process HA Open %d\n", rc);
1374 BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
/* Error path: unwind everything that was set up above. */
1378 bnxt_ulp_deinit(bp, session);
1383  * When a port is initialized by dpdk. This function sets up
1384  * the port specific details.
/*
 * NOTE(review): this extract elides many lines (braces, rc checks, gotos,
 * returns, labels); comments below describe only the statements visible here.
 */
1387 bnxt_ulp_port_init(struct bnxt *bp)
1389 	struct bnxt_ulp_session_state *session;
1391 	enum bnxt_ulp_device_id devid = BNXT_ULP_DEVICE_ID_LAST;
/* ULP offload is only supported on a PF or a trusted VF. */
1395 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1397 			    "Skip ulp init for port: %d, not a TVF or PF\n",
1398 			    bp->eth_dev->data->port_id);
/* TruFlow must be enabled on the device for ULP to be initialized. */
1402 	if (!BNXT_TRUFLOW_EN(bp)) {
1404 			    "Skip ulp init for port: %d, truflow is not enabled\n",
1405 			    bp->eth_dev->data->port_id);
1410 		BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
/* Per-port ULP context; zeroed so all cfg pointers start NULL. */
1414 	bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1415 				  sizeof(struct bnxt_ulp_context), 0);
1417 		BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1422 	 * Multiple uplink ports can be associated with a single vswitch.
1423 	 * Make sure only the port that is started first will initialize
/* Session is shared per PCI domain; 'initialized' tells attach vs. init. */
1426 	session = ulp_session_init(bp, &initialized);
1428 		BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1435 	 * If ULP is already initialized for a specific domain then
1436 	 * simply assign the ulp context to this rte_eth_dev.
1438 		rc = ulp_ctx_attach(bp, session);
1440 			BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1445 		 * Attach to the shared session, must be called after the
1446 		 * ulp_ctx_attach in order to ensure that ulp data is available
1449 		rc = ulp_ctx_shared_session_attach(bp, session);
1452 				    "Failed attach to shared session (%d)", rc);
/* First port on this session: perform the full ULP bring-up. */
1456 		rc = bnxt_ulp_init(bp, session);
1458 			BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
1463 	/* Update bnxt driver flags */
1464 	rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1466 		BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1470 	/* update the port database for the given interface */
1471 	rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1473 		BNXT_TF_DBG(ERR, "Failed to update port database\n");
1476 	/* create the default rules */
1477 	rc = bnxt_ulp_create_df_rules(bp);
1479 		BNXT_TF_DBG(ERR, "Failed to create default flow\n");
1483 	rc = bnxt_ulp_devid_get(bp, &devid);
1485 		BNXT_TF_DBG(ERR, "Unable to determine device for ULP port init.\n");
/* Accumulated stats supported on all but Thor, when enabled via flags. */
1489 	if (devid != BNXT_ULP_DEVICE_ID_THOR && BNXT_ACCUM_STATS_EN(bp))
1490 		bp->ulp_ctx->cfg_data->accum_stats = true;
1492 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init, accum_stats:%d\n",
1493 		    bp->eth_dev->data->port_id,
1494 		    bp->ulp_ctx->cfg_data->accum_stats);
1496 	/* set the unicast mode */
1497 	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) {
1498 		BNXT_TF_DBG(ERR, "Error in getting ULP context flags\n");
/* Some app profiles restrict the port to unicast-only rx. */
1501 	if (ulp_flags & BNXT_ULP_APP_UNICAST_ONLY) {
1502 		if (bnxt_pmd_set_unicast_rxmask(bp->eth_dev)) {
1503 			BNXT_TF_DBG(ERR, "Error in setting unicast rxmode\n");
/* Error path: undo any partial port init. */
1511 	bnxt_ulp_port_deinit(bp);
1516  * When a port is de-initialized by dpdk. This function clears up
1517  * the port specific details.
/*
 * NOTE(review): extract elides lines (braces, early returns, session-close
 * call); comments cover only the visible statements.
 */
1520 bnxt_ulp_port_deinit(struct bnxt *bp)
1522 	struct bnxt_ulp_session_state *session;
1523 	struct rte_pci_device *pci_dev;
1524 	struct rte_pci_addr *pci_addr;
/* Mirror of port_init: only PF / trusted-VF ports carry a ULP context. */
1526 	if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1528 			    "Skip ULP deinit port:%d, not a TVF or PF\n",
1529 			    bp->eth_dev->data->port_id);
1533 	if (!BNXT_TRUFLOW_EN(bp)) {
1535 			    "Skip ULP deinit for port:%d, truflow is not enabled\n",
1536 			    bp->eth_dev->data->port_id);
1541 		BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1545 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1546 		    bp->eth_dev->data->port_id);
1548 	/* Free the ulp context in the context entry list */
1549 	bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
/* Session lookup is keyed by the PCI address; list protected by the
 * global mutex. */
1551 	/* Get the session details */
1552 	pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1553 	pci_addr = &pci_dev->addr;
1554 	pthread_mutex_lock(&bnxt_ulp_global_mutex);
1555 	session = ulp_get_session(pci_addr);
1556 	pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1558 	/* session not found then just exit */
1560 		/* Free the ulp context */
1561 		rte_free(bp->ulp_ctx);
/* Last reference performs the full teardown; otherwise this port only
 * detaches (per-port rules and flows are flushed either way). */
1566 	/* Check the reference count to deinit or deattach*/
1567 	if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1568 		bp->ulp_ctx->cfg_data->ref_cnt--;
1569 		if (bp->ulp_ctx->cfg_data->ref_cnt) {
1570 			/* free the port details */
1571 			/* Free the default flow rule associated to this port */
1572 			bnxt_ulp_destroy_df_rules(bp, false);
1573 			bnxt_ulp_destroy_vfr_default_rules(bp, false);
1575 			/* free flows associated with this port */
1576 			bnxt_ulp_flush_port_flows(bp);
1578 			/* close the session associated with this port */
1581 			/* always detach/close shared after the session. */
1582 			ulp_ctx_shared_session_detach(bp);
1584 			/* Perform ulp ctx deinit */
1585 			bnxt_ulp_deinit(bp, session);
1589 	/* clean up the session */
1590 	ulp_session_deinit(session);
1592 	/* Free the ulp context */
1593 	rte_free(bp->ulp_ctx);
1597 /* Below are the access functions to access internal data of ulp context. */
1598 /* Function to set the Mark DB into the context */
/* Stores the mark-table pointer in the shared cfg_data; error return on a
 * NULL context is presumably elided from this extract — TODO confirm. */
1600 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1601 				struct bnxt_ulp_mark_tbl *mark_tbl)
1603 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1604 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1608 	ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1613 /* Function to retrieve the Mark DB from the context. */
/* Returns the mark table, or (presumably) NULL on an invalid context —
 * the early-return line is elided in this extract. */
1614 struct bnxt_ulp_mark_tbl *
1615 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1617 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1620 	return ulp_ctx->cfg_data->mark_tbl;
/* Report whether the shared-session flag is set in the context's ulp_flags.
 * NOTE(review): no NULL guard is visible here, unlike the sibling accessors —
 * caller must pass a valid context (or the guard is elided in this extract). */
1624 bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx)
1626 	return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
/* Record the application id in the shared cfg_data (validation lines are
 * elided in this extract). */
1630 bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, uint8_t app_id)
1634 	ulp_ctx->cfg_data->app_id = app_id;
/* Fetch the application id into *app_id; fails (lines elided) when the
 * context or output pointer is NULL. */
1639 bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *app_id)
1641 	/* Default APP id is zero */
1642 	if (!ulp_ctx || !app_id)
1644 	*app_id = ulp_ctx->cfg_data->app_id;
1648 /* Function to set the device id of the hardware. */
/* Writes dev_id into cfg_data only when both pointers are valid. */
1650 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1653 	if (ulp_ctx && ulp_ctx->cfg_data) {
1654 		ulp_ctx->cfg_data->dev_id = dev_id;
1661 /* Function to get the device id of the hardware. */
/* On an invalid context *dev_id is set to the LAST sentinel and an error
 * is logged (the error return line is elided in this extract). */
1663 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1666 	if (ulp_ctx && ulp_ctx->cfg_data) {
1667 		*dev_id = ulp_ctx->cfg_data->dev_id;
1670 	*dev_id = BNXT_ULP_DEVICE_ID_LAST;
1671 	BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
/* Store the flow memory type (internal/external EEM) in the context;
 * logs and fails on an invalid context. */
1676 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1677 			    enum bnxt_ulp_flow_mem_type mem_type)
1679 	if (ulp_ctx && ulp_ctx->cfg_data) {
1680 		ulp_ctx->cfg_data->mem_type = mem_type;
1683 	BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
/* Read the flow memory type; on an invalid context *mem_type is set to
 * the LAST sentinel and an error is logged. */
1688 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1689 			    enum bnxt_ulp_flow_mem_type *mem_type)
1691 	if (ulp_ctx && ulp_ctx->cfg_data) {
1692 		*mem_type = ulp_ctx->cfg_data->mem_type;
1695 	*mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST;
1696 	BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1700 /* Function to get the table scope id of the EEM table. */
/* Copies the cached table-scope id into *tbl_scope_id when valid. */
1702 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1703 				uint32_t *tbl_scope_id)
1705 	if (ulp_ctx && ulp_ctx->cfg_data) {
1706 		*tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1713 /* Function to set the table scope id of the EEM table. */
/* Caches the EEM table-scope id in cfg_data when the context is valid. */
1715 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1716 				uint32_t tbl_scope_id)
1718 	if (ulp_ctx && ulp_ctx->cfg_data) {
1719 		ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1726 /* Function to set the shared tfp session details from the ulp context. */
/* Sets or clears the shared TF session pointer and keeps the shared-client
 * count in step: a NULL tfp decrements, a non-NULL tfp increments.
 * NOTE(review): the branch structure (if/else around the count updates) is
 * partially elided in this extract. */
1728 bnxt_ulp_cntxt_shared_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1731 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1736 		if (ulp->cfg_data->num_shared_clients > 0)
1737 			ulp->cfg_data->num_shared_clients--;
1739 		ulp->cfg_data->num_shared_clients++;
1742 	ulp->g_shared_tfp = tfp;
1746 /* Function to get the shared tfp session details from the ulp context. */
/* Returns the shared TF session pointer; logs on invalid arguments
 * (guard condition line elided in this extract). */
1748 bnxt_ulp_cntxt_shared_tfp_get(struct bnxt_ulp_context *ulp)
1751 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1754 	return ulp->g_shared_tfp;
1757 /* Function to get the number of shared clients attached */
/* Returns the current shared-client count; error value on NULL context is
 * elided in this extract. */
1759 bnxt_ulp_cntxt_num_shared_clients_get(struct bnxt_ulp_context *ulp)
1761 	if (ulp == NULL || ulp->cfg_data == NULL) {
1762 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1765 	return ulp->cfg_data->num_shared_clients;
1768 /* Function to set the tfp session details from the ulp context. */
/* NOTE(review): the assignment body is elided in this extract; only the
 * argument-validation log line is visible. */
1770 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1773 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1781 /* Function to get the tfp session details from the ulp context. */
/* Selects between the shared and the regular TF session based on 'shared';
 * only the shared-session return is visible — the non-shared branch is
 * elided in this extract. */
1783 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp,
1784 		       enum bnxt_ulp_shared_session shared)
1787 		BNXT_TF_DBG(ERR, "Invalid arguments\n");
1791 		return ulp->g_shared_tfp;
1797  * Get the device table entry based on the device id.
1799  * dev_id [in] The device id of the hardware
1801  * Returns the pointer to the device parameters, or (presumably) NULL for an
1801  * out-of-range id — the NULL return line is elided in this extract.
1803 struct bnxt_ulp_device_params *
1804 bnxt_ulp_device_params_get(uint32_t dev_id)
1806 	if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1807 		return &ulp_device_params[dev_id];
1811 /* Function to set the flow database to the ulp context. */
/* Stores the flow-database pointer in the shared cfg_data. */
1813 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1814 				struct bnxt_ulp_flow_db *flow_db)
1816 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1819 	ulp_ctx->cfg_data->flow_db = flow_db;
1823 /* Function to get the flow database from the ulp context. */
/* Returns the flow database, or (presumably) NULL on invalid context —
 * early-return line elided in this extract. */
1824 struct bnxt_ulp_flow_db *
1825 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1827 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1830 	return ulp_ctx->cfg_data->flow_db;
1833 /* Function to get the tunnel cache table info from the ulp context. */
/* Returns the tunnel cache table stored in the shared cfg_data. */
1834 struct bnxt_tun_cache_entry *
1835 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1837 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1840 	return ulp_ctx->cfg_data->tun_tbl;
1843 /* Function to get the ulp context from eth device. */
/* For a VF representor the ULP context lives on the parent device, so the
 * bp pointer is redirected to the parent's private data first.
 * NOTE(review): the final 'return bp->ulp_ctx' (and NULL path) is elided
 * in this extract. */
1844 struct bnxt_ulp_context *
1845 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1847 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1849 	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1850 		struct bnxt_representor *vfr = dev->data->dev_private;
1852 		bp = vfr->parent_dev->data->dev_private;
1856 		BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
/* Store the mapper private data pointer in the shared cfg_data. */
1863 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1866 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1867 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1871 	ulp_ctx->cfg_data->mapper_data = mapper_data;
/* Retrieve the mapper private data from the context; logs and (presumably)
 * returns NULL on an invalid context — return line elided in extract. */
1876 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1878 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1879 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1883 	return ulp_ctx->cfg_data->mapper_data;
1886 /* Function to set the port database to the ulp context. */
/* Stores the port-database pointer in the shared cfg_data. */
1888 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1889 				struct bnxt_ulp_port_db *port_db)
1891 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1894 	ulp_ctx->cfg_data->port_db = port_db;
1898 /* Function to get the port database from the ulp context. */
/* Returns the port database held in the shared cfg_data. */
1899 struct bnxt_ulp_port_db *
1900 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1902 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1905 	return ulp_ctx->cfg_data->port_db;
1908 /* Function to set the flow counter info into the context */
/* Stores the flow-counter manager pointer in the shared cfg_data. */
1910 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1911 				struct bnxt_ulp_fc_info *ulp_fc_info)
1913 	if (!ulp_ctx || !ulp_ctx->cfg_data) {
1914 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1918 	ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1923 /* Function to retrieve the flow counter info from the context. */
/* Returns the flow-counter manager info held in the shared cfg_data. */
1924 struct bnxt_ulp_fc_info *
1925 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1927 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1930 	return ulp_ctx->cfg_data->fc_info;
1933 /* Function to get the ulp flags from the ulp context. */
/* Copies cfg_data->ulp_flags into *flags; fails on invalid context. */
1935 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1938 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1941 	*flags =  ulp_ctx->cfg_data->ulp_flags;
1945 /* Function to get the ulp vfr info from the ulp context. */
/* Returns the per-port VFR rule slot; port_id is bounds-checked against
 * RTE_MAX_ETHPORTS (NULL return line elided in this extract). */
1946 struct bnxt_ulp_vfr_rule_info*
1947 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1950 	if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1953 	return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1956 /* Function to acquire the flow database lock from the ulp context. */
/* Takes the flow-db mutex; logs and fails if the lock cannot be taken.
 * Must be paired with bnxt_ulp_cntxt_release_fdb_lock(). */
1958 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1960 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1963 	if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1964 		BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1970 /* Function to release the flow database lock from the ulp context. */
/* Drops the flow-db mutex taken by bnxt_ulp_cntxt_acquire_fdb_lock(). */
1972 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx)
1974 	if (!ulp_ctx || !ulp_ctx->cfg_data)
1977 	pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);
1980 /* Function to set the ha info into the context */
/* Stores the high-availability manager info in the shared cfg_data. */
1982 bnxt_ulp_cntxt_ptr2_ha_info_set(struct bnxt_ulp_context *ulp_ctx,
1983 				struct bnxt_ulp_ha_mgr_info *ulp_ha_info)
1985 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL) {
1986 		BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1989 	ulp_ctx->cfg_data->ha_info = ulp_ha_info;
1993 /* Function to retrieve the ha info from the context. */
/* Returns the HA manager info, or (presumably) NULL on invalid context —
 * early-return line elided in this extract. */
1994 struct bnxt_ulp_ha_mgr_info *
1995 bnxt_ulp_cntxt_ptr2_ha_info_get(struct bnxt_ulp_context *ulp_ctx)
1997 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
1999 	return ulp_ctx->cfg_data->ha_info;
/* Report whether high availability is enabled in the context's ulp_flags;
 * the !! normalizes the flag test to 0/1. */
2003 bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
2005 	if (ulp_ctx == NULL || ulp_ctx->cfg_data == NULL)
2007 	return !!ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags);
/* One-time initialization of the spinlock protecting the global
 * ulp_cntx_list (the list itself is statically initialized above). */
2011 bnxt_ulp_cntxt_list_init(void)
2013 	/* Create the cntxt spin lock */
2014 	rte_spinlock_init(&bnxt_ulp_ctxt_lock);
/* Append a context to the global list under the context spinlock.
 * Entry memory is owned by the list until bnxt_ulp_cntxt_list_del(). */
2020 bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx)
2022 	struct ulp_context_list_entry *entry;
2024 	entry = rte_zmalloc(NULL, sizeof(struct ulp_context_list_entry), 0);
2025 	if (entry == NULL) {
2026 		BNXT_TF_DBG(ERR, "unable to allocate memory\n");
/* Insert at tail so iteration order matches registration order. */
2030 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2031 	entry->ulp_ctx = ulp_ctx;
2032 	TAILQ_INSERT_TAIL(&ulp_cntx_list, entry, next);
2033 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
/* Remove (and presumably free — rte_free line elided in this extract) the
 * list entry matching ulp_ctx. Safe-iteration macro allows unlinking
 * while walking. */
2038 bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
2040 	struct ulp_context_list_entry	*entry, *temp;
2042 	rte_spinlock_lock(&bnxt_ulp_ctxt_lock);
2043 	RTE_TAILQ_FOREACH_SAFE(entry, &ulp_cntx_list, next, temp) {
2044 		if (entry->ulp_ctx == ulp_ctx) {
2045 			TAILQ_REMOVE(&ulp_cntx_list, entry, next);
2050 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
/* Return the first context in the global list, holding the spinlock on
 * success; the caller must call bnxt_ulp_cntxt_entry_release() to drop it.
 * NOTE(review): the NULL-return path when trylock fails or the list is
 * empty is elided in this extract. */
2053 struct bnxt_ulp_context	*
2054 bnxt_ulp_cntxt_entry_acquire(void)
2056 	struct ulp_context_list_entry	*entry;
2058 	/* take a lock and get the first ulp context available */
2059 	if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
2060 		TAILQ_FOREACH(entry, &ulp_cntx_list, next)
2062 				return entry->ulp_ctx;
/* Release the spinlock taken by bnxt_ulp_cntxt_entry_acquire(). */
2068 bnxt_ulp_cntxt_entry_release(void)
2070 	rte_spinlock_unlock(&bnxt_ulp_ctxt_lock);
2073 /* Function to get the app tunnel details from the ulp context. */
/* Returns the application tunnel entry list from the shared cfg_data. */
2074 struct bnxt_flow_app_tun_ent *
2075 bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp)
2077 	if (!ulp || !ulp->cfg_data)
2080 	return ulp->cfg_data->app_tun;