1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
14 #include "bnxt_tf_common.h"
16 #include "tf_ext_flow_handle.h"
18 #include "ulp_template_db_enum.h"
19 #include "ulp_template_struct.h"
20 #include "ulp_mark_mgr.h"
21 #include "ulp_fc_mgr.h"
22 #include "ulp_flow_db.h"
23 #include "ulp_mapper.h"
24 #include "ulp_port_db.h"
/*
 * Linked list of all TF sessions. Shared by every uplink port managed by
 * this driver instance; guarded by bnxt_ulp_global_mutex below.
 */
STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
			STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);

/* Mutex to synchronize bnxt_ulp_session_list operations. */
static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
34 * Allow the deletion of context only for the bnxt device that
35 * created the session.
38 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
40 if (!ulp_ctx || !ulp_ctx->cfg_data)
43 if (!ulp_ctx->cfg_data->ref_cnt) {
44 BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
52 bnxt_ulp_devid_get(struct bnxt *bp,
53 enum bnxt_ulp_device_id *ulp_dev_id)
58 if (BNXT_STINGRAY(bp))
59 *ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
61 /* Assuming Whitney */
62 *ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
/*
 * Fill in the per-direction TF session resource requests (identifiers,
 * action/encap tables, TCAM entries, EM/EEM records) for the device
 * backing this ULP context.
 *
 * NOTE(review): this extract is truncated — the return-type line, the
 * switch/break/default skeleton and the numeric values of assignments
 * ending in a bare "=" are missing and must be restored from the
 * complete upstream file before this can compile.
 */
bnxt_ulp_tf_session_resources_get(struct bnxt *bp,
				  struct tf_session_resources *res)
	/* Resource sizing is device specific; look up the device id. */
	rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
		BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");

	case BNXT_ULP_DEVICE_ID_WH_PLUS:
		/* Whitney+ RX identifiers */
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
		/* RX table types */
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
		/* RX encap records */
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
		/* RX TCAMs — NOTE(review): two values elided in extract */
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;
		/* RX EM / EEM */
		res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13168;
		res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
		/* RX source properties */
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 255;

		/* Whitney+ TX identifiers */
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 148;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
		/* TX table types */
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
		/* TX encap records */
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
		/* TX TCAMs — NOTE(review): two values elided in extract */
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
		/* TX EM / EEM */
		res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
		res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
		/* TX source properties */
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 511;

	case BNXT_ULP_DEVICE_ID_STINGRAY:
		/* Stingray RX identifiers */
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 315;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
		res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
		/* RX table types */
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
		/* RX encap records */
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
		/* RX TCAMs — NOTE(review): two values elided in extract */
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
		res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 112;
		/* RX EM / EEM */
		res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13200;
		res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
		/* RX source properties */
		res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 256;

		/* Stingray TX identifiers */
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 127;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
		res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
		/* TX table types */
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
		/* TX encap records */
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 367;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
		/* TX TCAMs — NOTE(review): two values elided in extract */
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
		res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
		/* TX EM / EEM */
		res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
		res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
		/* TX source properties */
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
		res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 512;
231 * Initialize an ULP session.
232 * An ULP session will contain all the resources needed to support rte flow
233 * offloads. A session is initialized as part of rte_eth_device start.
234 * A single vswitch instance can have multiple uplinks which means
235 * rte_eth_device start will be called for each of these devices.
236 * ULP session manager will make sure that a single ULP session is only
237 * initialized once. Apart from this, it also initializes MARK database,
238 * EEM table & flow database. ULP session manager also manages a list of
239 * all opened ULP sessions.
242 ulp_ctx_session_open(struct bnxt *bp,
243 struct bnxt_ulp_session_state *session)
245 struct rte_eth_dev *ethdev = bp->eth_dev;
247 struct tf_open_session_parms params;
248 struct tf_session_resources *resources;
251 memset(¶ms, 0, sizeof(params));
253 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
254 params.ctrl_chan_name);
256 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
257 ethdev->data->port_id, rc);
261 params.shadow_copy = true;
263 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
265 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
269 switch (ulp_dev_id) {
270 case BNXT_ULP_DEVICE_ID_WH_PLUS:
271 params.device_type = TF_DEVICE_TYPE_WH;
273 case BNXT_ULP_DEVICE_ID_STINGRAY:
274 params.device_type = TF_DEVICE_TYPE_SR;
277 BNXT_TF_DBG(ERR, "Unable to determine device for "
278 "opening session.\n");
282 resources = ¶ms.resources;
283 rc = bnxt_ulp_tf_session_resources_get(bp, resources);
285 BNXT_TF_DBG(ERR, "Unable to determine tf resources for "
291 rc = tf_open_session(&bp->tfp, ¶ms);
293 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
294 params.ctrl_chan_name, rc);
297 if (!session->session_opened) {
298 session->session_opened = 1;
299 session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
300 sizeof(struct tf), 0);
301 session->g_tfp->session = bp->tfp.session;
307 * Close the ULP session.
308 * It takes the ulp context pointer.
311 ulp_ctx_session_close(struct bnxt *bp,
312 struct bnxt_ulp_session_state *session)
314 /* close the session in the hardware */
315 if (session->session_opened)
316 tf_close_session(&bp->tfp);
317 session->session_opened = 0;
318 rte_free(session->g_tfp);
319 session->g_tfp = NULL;
323 bnxt_init_tbl_scope_parms(struct bnxt *bp,
324 struct tf_alloc_tbl_scope_parms *params)
326 struct bnxt_ulp_device_params *dparms;
330 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
332 /* TBD: For now, just use default. */
335 dparms = bnxt_ulp_device_params_get(dev_id);
338 * Set the flush timer for EEM entries. The value is in 100ms intervals,
341 params->hw_flow_cache_flush_timer = 100;
344 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
345 params->rx_max_action_entry_sz_in_bits =
346 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
347 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
348 params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
349 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
351 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
352 params->tx_max_action_entry_sz_in_bits =
353 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
354 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
355 params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
356 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
358 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
359 params->rx_max_action_entry_sz_in_bits =
360 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
361 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
362 params->rx_num_flows_in_k =
363 dparms->ext_flow_db_num_entries / 1024;
364 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
366 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
367 params->tx_max_action_entry_sz_in_bits =
368 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
369 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
370 params->tx_num_flows_in_k =
371 dparms->ext_flow_db_num_entries / 1024;
372 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
374 BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
375 params->rx_num_flows_in_k);
378 /* Initialize Extended Exact Match host memory. */
380 ulp_eem_tbl_scope_init(struct bnxt *bp)
382 struct tf_alloc_tbl_scope_parms params = {0};
383 struct bnxt_ulp_device_params *dparms;
384 enum bnxt_ulp_flow_mem_type mtype;
388 /* Get the dev specific number of flows that needed to be supported. */
389 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
390 BNXT_TF_DBG(ERR, "Invalid device id\n");
394 dparms = bnxt_ulp_device_params_get(dev_id);
396 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
400 if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
402 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
403 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
407 bnxt_init_tbl_scope_parms(bp, ¶ms);
408 rc = tf_alloc_tbl_scope(&bp->tfp, ¶ms);
410 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
414 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
416 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
423 /* Free Extended Exact Match host memory */
425 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
427 struct tf_free_tbl_scope_parms params = {0};
430 struct bnxt_ulp_device_params *dparms;
431 enum bnxt_ulp_flow_mem_type mtype;
434 if (!ulp_ctx || !ulp_ctx->cfg_data)
437 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
439 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
443 /* Get the dev specific number of flows that needed to be supported. */
444 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
445 BNXT_TF_DBG(ERR, "Invalid device id\n");
449 dparms = bnxt_ulp_device_params_get(dev_id);
451 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
455 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
457 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
458 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
462 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, ¶ms.tbl_scope_id);
464 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
468 rc = tf_free_tbl_scope(tfp, ¶ms);
470 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
476 /* The function to free and deinit the ulp context data. */
478 ulp_ctx_deinit(struct bnxt *bp,
479 struct bnxt_ulp_session_state *session)
481 /* close the tf session */
482 ulp_ctx_session_close(bp, session);
484 /* Free the contents */
485 if (session->cfg_data) {
486 rte_free(session->cfg_data);
487 bp->ulp_ctx->cfg_data = NULL;
488 session->cfg_data = NULL;
493 /* The function to allocate and initialize the ulp context data. */
495 ulp_ctx_init(struct bnxt *bp,
496 struct bnxt_ulp_session_state *session)
498 struct bnxt_ulp_data *ulp_data;
500 enum bnxt_ulp_device_id devid;
502 /* Allocate memory to hold ulp context data. */
503 ulp_data = rte_zmalloc("bnxt_ulp_data",
504 sizeof(struct bnxt_ulp_data), 0);
506 BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
510 /* Increment the ulp context data reference count usage. */
511 bp->ulp_ctx->cfg_data = ulp_data;
512 session->cfg_data = ulp_data;
514 ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
516 rc = bnxt_ulp_devid_get(bp, &devid);
518 BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
522 rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
524 BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
528 /* Open the ulp session. */
529 rc = ulp_ctx_session_open(bp, session);
533 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
537 session->session_opened = 1;
538 (void)ulp_ctx_deinit(bp, session);
542 /* The function to initialize ulp dparms with devargs */
544 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
546 struct bnxt_ulp_device_params *dparms;
549 if (!bp->max_num_kflows) {
550 /* Defaults to Internal */
551 bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
552 BNXT_ULP_FLOW_MEM_TYPE_INT);
556 /* The max_num_kflows were set, so move to external */
557 if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
560 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
561 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
565 dparms = bnxt_ulp_device_params_get(dev_id);
567 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
571 /* num_flows = max_num_kflows * 1024 */
572 dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
573 /* GFID = 2 * num_flows */
574 dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
575 BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
576 dparms->ext_flow_db_num_entries);
581 /* The function to initialize bp flags with truflow features */
583 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
584 struct bnxt_ulp_context *ulp_ctx)
586 enum bnxt_ulp_flow_mem_type mtype;
588 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
590 /* Update the bp flag with gfid flag */
591 if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
592 bp->flags |= BNXT_FLAG_GFID_ENABLE;
598 ulp_ctx_attach(struct bnxt *bp,
599 struct bnxt_ulp_session_state *session)
603 /* Increment the ulp context data reference count usage. */
604 bp->ulp_ctx->cfg_data = session->cfg_data;
605 bp->ulp_ctx->cfg_data->ref_cnt++;
607 /* update the session details in bnxt tfp */
608 bp->tfp.session = session->g_tfp->session;
610 /* Create a TF Client */
611 rc = ulp_ctx_session_open(bp, session);
613 PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
614 bp->tfp.session = NULL;
618 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
623 ulp_ctx_detach(struct bnxt *bp)
625 if (bp->tfp.session) {
626 tf_close_session(&bp->tfp);
627 bp->tfp.session = NULL;
632 * Initialize the state of an ULP session.
633 * If the state of an ULP session is not initialized, set it's state to
634 * initialized. If the state is already initialized, do nothing.
637 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
639 pthread_mutex_lock(&session->bnxt_ulp_mutex);
641 if (!session->bnxt_ulp_init) {
642 session->bnxt_ulp_init = true;
648 pthread_mutex_unlock(&session->bnxt_ulp_mutex);
652 * Check if an ULP session is already allocated for a specific PCI
653 * domain & bus. If it is already allocated simply return the session
654 * pointer, otherwise allocate a new session.
656 static struct bnxt_ulp_session_state *
657 ulp_get_session(struct rte_pci_addr *pci_addr)
659 struct bnxt_ulp_session_state *session;
661 STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
662 if (session->pci_info.domain == pci_addr->domain &&
663 session->pci_info.bus == pci_addr->bus) {
671 * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
672 * If it's already initialized simply return the already existing session.
674 static struct bnxt_ulp_session_state *
675 ulp_session_init(struct bnxt *bp,
678 struct rte_pci_device *pci_dev;
679 struct rte_pci_addr *pci_addr;
680 struct bnxt_ulp_session_state *session;
686 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
687 pci_addr = &pci_dev->addr;
689 pthread_mutex_lock(&bnxt_ulp_global_mutex);
691 session = ulp_get_session(pci_addr);
693 /* Not Found the session Allocate a new one */
694 session = rte_zmalloc("bnxt_ulp_session",
695 sizeof(struct bnxt_ulp_session_state),
699 "Allocation failed for bnxt_ulp_session\n");
700 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
704 /* Add it to the queue */
705 session->pci_info.domain = pci_addr->domain;
706 session->pci_info.bus = pci_addr->bus;
707 rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
709 BNXT_TF_DBG(ERR, "mutex create failed\n");
710 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
713 STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
717 ulp_context_initialized(session, init);
718 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
723 * When a device is closed, remove it's associated session from the global
727 ulp_session_deinit(struct bnxt_ulp_session_state *session)
732 if (!session->cfg_data) {
733 pthread_mutex_lock(&bnxt_ulp_global_mutex);
734 STAILQ_REMOVE(&bnxt_ulp_session_list, session,
735 bnxt_ulp_session_state, next);
736 pthread_mutex_destroy(&session->bnxt_ulp_mutex);
738 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
743 * Internal api to enable NAT feature.
744 * Set set_flag to 1 to set the value or zero to reset the value.
745 * returns 0 on success.
748 bnxt_ulp_global_cfg_update(struct bnxt *bp,
750 enum tf_global_config_type type,
755 uint32_t global_cfg = 0;
757 struct tf_global_cfg_parms parms = { 0 };
759 /* Initialize the params */
762 parms.offset = offset,
763 parms.config = (uint8_t *)&global_cfg,
764 parms.config_sz_in_bytes = sizeof(global_cfg);
766 rc = tf_get_global_cfg(&bp->tfp, &parms);
768 BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
776 global_cfg &= ~value;
778 /* SET the register RE_CFA_REG_ACT_TECT */
779 rc = tf_set_global_cfg(&bp->tfp, &parms);
781 BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
788 /* Internal function to delete all the flows belonging to the given port */
790 bnxt_ulp_flush_port_flows(struct bnxt *bp)
794 /* it is assumed that port is either TVF or PF */
795 if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
796 bp->eth_dev->data->port_id,
798 BNXT_TF_DBG(ERR, "Invalid argument\n");
801 (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
804 /* Internal function to delete the VFR default flows */
806 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
808 struct bnxt_ulp_vfr_rule_info *info;
810 struct rte_eth_dev *vfr_eth_dev;
811 struct bnxt_representor *vfr_bp;
813 if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
816 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
819 /* Delete default rules for all ports */
820 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
821 info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
825 if (!global && info->parent_port_id !=
826 bp->eth_dev->data->port_id)
829 /* Destroy the flows */
830 ulp_default_flow_destroy(bp->eth_dev, info->rep2vf_flow_id);
831 ulp_default_flow_destroy(bp->eth_dev, info->vf2rep_flow_id);
832 /* Clean up the tx action pointer */
833 vfr_eth_dev = &rte_eth_devices[port_id];
835 vfr_bp = vfr_eth_dev->data->dev_private;
836 vfr_bp->vfr_tx_cfa_action = 0;
838 memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
843 * When a port is deinit'ed by dpdk. This function is called
844 * and this function clears the ULP context and rest of the
845 * infrastructure associated with it.
848 bnxt_ulp_deinit(struct bnxt *bp,
849 struct bnxt_ulp_session_state *session)
851 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
854 /* clean up default flows */
855 bnxt_ulp_destroy_df_rules(bp, true);
857 /* clean up default VFR flows */
858 bnxt_ulp_destroy_vfr_default_rules(bp, true);
860 /* clean up regular flows */
861 ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
863 /* cleanup the eem table scope */
864 ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
866 /* cleanup the flow database */
867 ulp_flow_db_deinit(bp->ulp_ctx);
869 /* Delete the Mark database */
870 ulp_mark_db_deinit(bp->ulp_ctx);
872 /* cleanup the ulp mapper */
873 ulp_mapper_deinit(bp->ulp_ctx);
875 /* Delete the Flow Counter Manager */
876 ulp_fc_mgr_deinit(bp->ulp_ctx);
878 /* Delete the Port database */
879 ulp_port_db_deinit(bp->ulp_ctx);
881 /* Disable NAT feature */
882 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
884 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
886 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
888 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
890 /* free the flow db lock */
891 pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
893 /* Delete the ulp context and tf session and free the ulp context */
894 ulp_ctx_deinit(bp, session);
895 BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
899 * When a port is initialized by dpdk. This functions is called
900 * and this function initializes the ULP context and rest of the
901 * infrastructure associated with it.
904 bnxt_ulp_init(struct bnxt *bp,
905 struct bnxt_ulp_session_state *session)
909 /* Allocate and Initialize the ulp context. */
910 rc = ulp_ctx_init(bp, session);
912 BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
916 rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
918 BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
922 /* Initialize ulp dparms with values devargs passed */
923 rc = ulp_dparms_init(bp, bp->ulp_ctx);
925 BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
929 /* create the port database */
930 rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
932 BNXT_TF_DBG(ERR, "Failed to create the port database\n");
936 /* Create the Mark database. */
937 rc = ulp_mark_db_init(bp->ulp_ctx);
939 BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
943 /* Create the flow database. */
944 rc = ulp_flow_db_init(bp->ulp_ctx);
946 BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
950 /* Create the eem table scope. */
951 rc = ulp_eem_tbl_scope_init(bp);
953 BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
957 rc = ulp_mapper_init(bp->ulp_ctx);
959 BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
963 rc = ulp_fc_mgr_init(bp->ulp_ctx);
965 BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
970 * Enable NAT feature. Set the global configuration register
971 * Tunnel encap to enable NAT with the reuse of existing inner
972 * L2 header smac and dmac
974 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
976 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
978 BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
982 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
984 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
986 BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
989 BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
993 bnxt_ulp_deinit(bp, session);
998 * When a port is initialized by dpdk. This functions sets up
999 * the port specific details.
1002 bnxt_ulp_port_init(struct bnxt *bp)
1004 struct bnxt_ulp_session_state *session;
1008 if (!bp || !BNXT_TRUFLOW_EN(bp))
1011 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1013 "Skip ulp init for port: %d, not a TVF or PF\n",
1014 bp->eth_dev->data->port_id);
1019 BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1023 bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1024 sizeof(struct bnxt_ulp_context), 0);
1026 BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1031 * Multiple uplink ports can be associated with a single vswitch.
1032 * Make sure only the port that is started first will initialize
1035 session = ulp_session_init(bp, &initialized);
1037 BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1044 * If ULP is already initialized for a specific domain then
1045 * simply assign the ulp context to this rte_eth_dev.
1047 rc = ulp_ctx_attach(bp, session);
1049 BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1053 rc = bnxt_ulp_init(bp, session);
1055 BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
1060 /* Update bnxt driver flags */
1061 rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1063 BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1067 /* update the port database for the given interface */
1068 rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1070 BNXT_TF_DBG(ERR, "Failed to update port database\n");
1073 /* create the default rules */
1074 bnxt_ulp_create_df_rules(bp);
1076 if (BNXT_ACCUM_STATS_EN(bp))
1077 bp->ulp_ctx->cfg_data->accum_stats = true;
1079 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
1080 bp->eth_dev->data->port_id);
1084 bnxt_ulp_port_deinit(bp);
1089 * When a port is de-initialized by dpdk. This functions clears up
1090 * the port specific details.
1093 bnxt_ulp_port_deinit(struct bnxt *bp)
1095 struct bnxt_ulp_session_state *session;
1096 struct rte_pci_device *pci_dev;
1097 struct rte_pci_addr *pci_addr;
1099 if (!BNXT_TRUFLOW_EN(bp))
1102 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1104 "Skip ULP deinit port:%d, not a TVF or PF\n",
1105 bp->eth_dev->data->port_id);
1110 BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1114 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1115 bp->eth_dev->data->port_id);
1117 /* Get the session details */
1118 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1119 pci_addr = &pci_dev->addr;
1120 pthread_mutex_lock(&bnxt_ulp_global_mutex);
1121 session = ulp_get_session(pci_addr);
1122 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1124 /* session not found then just exit */
1126 /* Free the ulp context */
1127 rte_free(bp->ulp_ctx);
1132 /* Check the reference count to deinit or deattach*/
1133 if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1134 bp->ulp_ctx->cfg_data->ref_cnt--;
1135 if (bp->ulp_ctx->cfg_data->ref_cnt) {
1136 /* free the port details */
1137 /* Free the default flow rule associated to this port */
1138 bnxt_ulp_destroy_df_rules(bp, false);
1139 bnxt_ulp_destroy_vfr_default_rules(bp, false);
1141 /* free flows associated with this port */
1142 bnxt_ulp_flush_port_flows(bp);
1144 /* close the session associated with this port */
1147 /* Perform ulp ctx deinit */
1148 bnxt_ulp_deinit(bp, session);
1152 /* clean up the session */
1153 ulp_session_deinit(session);
1155 /* Free the ulp context */
1156 rte_free(bp->ulp_ctx);
1160 /* Below are the access functions to access internal data of ulp context. */
1161 /* Function to set the Mark DB into the context */
1163 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1164 struct bnxt_ulp_mark_tbl *mark_tbl)
1166 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1167 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1171 ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1176 /* Function to retrieve the Mark DB from the context. */
1177 struct bnxt_ulp_mark_tbl *
1178 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1180 if (!ulp_ctx || !ulp_ctx->cfg_data)
1183 return ulp_ctx->cfg_data->mark_tbl;
1186 /* Function to set the device id of the hardware. */
1188 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1191 if (ulp_ctx && ulp_ctx->cfg_data) {
1192 ulp_ctx->cfg_data->dev_id = dev_id;
1199 /* Function to get the device id of the hardware. */
1201 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1204 if (ulp_ctx && ulp_ctx->cfg_data) {
1205 *dev_id = ulp_ctx->cfg_data->dev_id;
1209 BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
1214 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1215 enum bnxt_ulp_flow_mem_type mem_type)
1217 if (ulp_ctx && ulp_ctx->cfg_data) {
1218 ulp_ctx->cfg_data->mem_type = mem_type;
1221 BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
1226 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1227 enum bnxt_ulp_flow_mem_type *mem_type)
1229 if (ulp_ctx && ulp_ctx->cfg_data) {
1230 *mem_type = ulp_ctx->cfg_data->mem_type;
1233 BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1237 /* Function to get the table scope id of the EEM table. */
1239 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1240 uint32_t *tbl_scope_id)
1242 if (ulp_ctx && ulp_ctx->cfg_data) {
1243 *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1250 /* Function to set the table scope id of the EEM table. */
1252 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1253 uint32_t tbl_scope_id)
1255 if (ulp_ctx && ulp_ctx->cfg_data) {
1256 ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1263 /* Function to set the tfp session details from the ulp context. */
1265 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1268 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1276 /* Function to get the tfp session details from the ulp context. */
1278 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
1281 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1288 * Get the device table entry based on the device id.
1290 * dev_id [in] The device id of the hardware
1292 * Returns the pointer to the device parameters.
1294 struct bnxt_ulp_device_params *
1295 bnxt_ulp_device_params_get(uint32_t dev_id)
1297 if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1298 return &ulp_device_params[dev_id];
1302 /* Function to set the flow database to the ulp context. */
1304 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1305 struct bnxt_ulp_flow_db *flow_db)
1307 if (!ulp_ctx || !ulp_ctx->cfg_data)
1310 ulp_ctx->cfg_data->flow_db = flow_db;
1314 /* Function to get the flow database from the ulp context. */
1315 struct bnxt_ulp_flow_db *
1316 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1318 if (!ulp_ctx || !ulp_ctx->cfg_data)
1321 return ulp_ctx->cfg_data->flow_db;
1324 /* Function to get the tunnel cache table info from the ulp context. */
1325 struct bnxt_tun_cache_entry *
1326 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1328 if (!ulp_ctx || !ulp_ctx->cfg_data)
1331 return ulp_ctx->cfg_data->tun_tbl;
1334 /* Function to get the ulp context from eth device. */
1335 struct bnxt_ulp_context *
1336 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1338 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1340 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1341 struct bnxt_representor *vfr = dev->data->dev_private;
1343 bp = vfr->parent_dev->data->dev_private;
1347 BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1354 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1357 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1358 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1362 ulp_ctx->cfg_data->mapper_data = mapper_data;
1367 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1369 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1370 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1374 return ulp_ctx->cfg_data->mapper_data;
1377 /* Function to set the port database to the ulp context. */
1379 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1380 struct bnxt_ulp_port_db *port_db)
1382 if (!ulp_ctx || !ulp_ctx->cfg_data)
1385 ulp_ctx->cfg_data->port_db = port_db;
1389 /* Function to get the port database from the ulp context. */
1390 struct bnxt_ulp_port_db *
1391 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1393 if (!ulp_ctx || !ulp_ctx->cfg_data)
1396 return ulp_ctx->cfg_data->port_db;
1399 /* Function to set the flow counter info into the context */
1401 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1402 struct bnxt_ulp_fc_info *ulp_fc_info)
1404 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1405 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1409 ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1414 /* Function to retrieve the flow counter info from the context. */
1415 struct bnxt_ulp_fc_info *
1416 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1418 if (!ulp_ctx || !ulp_ctx->cfg_data)
1421 return ulp_ctx->cfg_data->fc_info;
1424 /* Function to get the ulp flags from the ulp context. */
1426 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1429 if (!ulp_ctx || !ulp_ctx->cfg_data)
1432 *flags = ulp_ctx->cfg_data->ulp_flags;
1436 /* Function to get the ulp vfr info from the ulp context. */
1437 struct bnxt_ulp_vfr_rule_info*
1438 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1441 if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1444 return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1447 /* Function to acquire the flow database lock from the ulp context. */
1449 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1451 if (!ulp_ctx || !ulp_ctx->cfg_data)
1454 if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1455 BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1461 /* Function to release the flow database lock from the ulp context. */
1463 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1465 if (!ulp_ctx || !ulp_ctx->cfg_data)
1468 pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);