1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
14 #include "bnxt_tf_common.h"
16 #include "tf_ext_flow_handle.h"
18 #include "ulp_template_db_enum.h"
19 #include "ulp_template_struct.h"
20 #include "ulp_mark_mgr.h"
21 #include "ulp_fc_mgr.h"
22 #include "ulp_flow_db.h"
23 #include "ulp_mapper.h"
24 #include "ulp_port_db.h"
27 /* Linked list of all TF sessions. */
28 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
29 STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
31 /* Mutex to synchronize bnxt_ulp_session_list operations. */
32 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
35 * Allow the deletion of context only for the bnxt device that
36 * created the session.
/*
 * Returns whether this port may deinitialize the shared ULP context:
 * only when cfg_data->ref_cnt shows no remaining users.
 * NOTE(review): fragment has elided lines; return paths not visible here.
 */
39 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
/* Guard against a missing or never-initialized context. */
41 if (!ulp_ctx || !ulp_ctx->cfg_data)
/* Zero reference count => this device owns the teardown. */
44 if (!ulp_ctx->cfg_data->ref_cnt) {
45 BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
/*
 * Resolve the ULP device id for this adapter into *ulp_dev_id.
 * Defaults to Whitney+; Stingray when BNXT_STINGRAY() matches.
 * NOTE(review): the second "Assuming Whitney" assignment suggests an
 * elided else-branch in the original source — confirm against full file.
 */
53 bnxt_ulp_devid_get(struct bnxt *bp,
54 enum bnxt_ulp_device_id *ulp_dev_id)
58 /* Assuming Whitney */
59 *ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
61 if (BNXT_STINGRAY(bp))
62 *ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
64 /* Assuming Whitney */
65 *ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
/*
 * Fill the TruFlow session resource request (identifiers, index tables,
 * TCAM entries, EM records) with static per-device counts, keyed by the
 * device id read from the ULP context (Whitney+ vs Stingray).
 */
70 bnxt_ulp_tf_session_resources_get(struct bnxt *bp,
71 struct tf_session_resources *res)
76 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
78 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
/* Whitney+ resource counts. */
83 case BNXT_ULP_DEVICE_ID_WH_PLUS:
/* RX identifier resources */
86 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
87 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
88 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
89 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
90 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* RX index-table resources */
93 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
94 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
95 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
98 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
99 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
/* RX TCAM resources (RHS values of the first two elided in fragment) */
102 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
104 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
106 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
107 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;
/* RX exact-match resources */
110 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13168;
113 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
116 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 255;
/* TX identifier resources */
120 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
121 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 148;
122 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
123 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
124 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* TX index-table resources */
127 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
128 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
129 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
132 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
133 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
134 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
/* TX TCAM resources */
137 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
139 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
141 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
142 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
/* TX exact-match resources */
145 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
148 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
151 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
152 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 511;
/* Stingray resource counts. */
154 case BNXT_ULP_DEVICE_ID_STINGRAY:
/* RX identifier resources */
157 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 315;
158 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
159 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
160 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
161 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* RX index-table resources */
164 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
165 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
166 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
169 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
170 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
/* RX TCAM resources */
173 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
175 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
177 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
178 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 112;
/* RX exact-match resources */
181 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13200;
184 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
187 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 256;
/* TX identifier resources */
191 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
192 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 127;
193 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
194 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
195 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* TX index-table resources */
198 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
199 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
200 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
203 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 367;
204 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
205 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
/* TX TCAM resources */
208 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
210 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
212 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
213 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
/* TX exact-match resources */
216 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
219 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
222 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
223 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 512;
233 * Initialize an ULP session.
234 * An ULP session will contain all the resources needed to support rte flow
235 * offloads. A session is initialized as part of rte_eth_device start.
236 * A single vswitch instance can have multiple uplinks which means
237 * rte_eth_device start will be called for each of these devices.
238 * ULP session manager will make sure that a single ULP session is only
239 * initialized once. Apart from this, it also initializes MARK database,
240 * EEM table & flow database. ULP session manager also manages a list of
241 * all opened ULP sessions.
244 ulp_ctx_session_open(struct bnxt *bp,
245 struct bnxt_ulp_session_state *session)
247 struct rte_eth_dev *ethdev = bp->eth_dev;
249 struct tf_open_session_parms params;
250 struct tf_session_resources *resources;
/* Fixed mojibake: "&params" had been corrupted to "&para;ms". */
253 memset(&params, 0, sizeof(params));
/* Control channel name is derived from the port's device name. */
255 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
256 params.ctrl_chan_name);
258 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
259 ethdev->data->port_id, rc);
263 params.shadow_copy = true;
265 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
267 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
/* Map ULP device id onto the TruFlow device type. */
271 switch (ulp_dev_id) {
272 case BNXT_ULP_DEVICE_ID_WH_PLUS:
273 params.device_type = TF_DEVICE_TYPE_WH;
275 case BNXT_ULP_DEVICE_ID_STINGRAY:
276 params.device_type = TF_DEVICE_TYPE_SR;
279 BNXT_TF_DBG(ERR, "Unable to determine device for "
280 "opening session.\n");
/* Fixed mojibake: "&params" had been corrupted to "&para;ms". */
284 resources = &params.resources;
285 rc = bnxt_ulp_tf_session_resources_get(bp, resources);
287 BNXT_TF_DBG(ERR, "Unable to determine tf resources for "
/* Fixed mojibake: "&params" had been corrupted to "&para;ms". */
292 rc = tf_open_session(&bp->tfp, &params);
294 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
295 params.ctrl_chan_name, rc);
/* First opener records the shared session handle for later attachers. */
298 if (!session->session_opened) {
299 session->session_opened = 1;
300 session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
301 sizeof(struct tf), 0);
/* NOTE(review): g_tfp is dereferenced without a NULL check after
 * rte_zmalloc — confirm against full source whether a check was elided. */
302 session->g_tfp->session = bp->tfp.session;
308 * Close the ULP session.
309 * It takes the ulp context pointer.
/* Closes the TF session (if open), clears the opened flag and frees the
 * shared per-session tf handle. */
312 ulp_ctx_session_close(struct bnxt *bp,
313 struct bnxt_ulp_session_state *session)
315 /* close the session in the hardware */
316 if (session->session_opened)
317 tf_close_session(&bp->tfp);
318 session->session_opened = 0;
/* rte_free(NULL) is a no-op, so no guard is needed here. */
319 rte_free(session->g_tfp);
320 session->g_tfp = NULL;
/*
 * Build the EEM table-scope allocation parameters. When device params
 * are unavailable the BNXT_ULP_DFLT_* defaults are used; otherwise the
 * flow counts come from dparms->ext_flow_db_num_entries (in units of 1K).
 * NOTE(review): the branch structure around the two parameter sets is
 * partially elided in this fragment.
 */
324 bnxt_init_tbl_scope_parms(struct bnxt *bp,
325 struct tf_alloc_tbl_scope_parms *params)
327 struct bnxt_ulp_device_params *dparms;
331 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
333 /* TBD: For now, just use default. */
336 dparms = bnxt_ulp_device_params_get(dev_id);
339 * Set the flush timer for EEM entries. The value is in 100ms intervals,
/* 100 * 100ms = 10s cache-flush interval. */
342 params->hw_flow_cache_flush_timer = 100;
/* Default RX parameters. */
345 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
346 params->rx_max_action_entry_sz_in_bits =
347 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
348 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
349 params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
350 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
/* Default TX parameters. */
352 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
353 params->tx_max_action_entry_sz_in_bits =
354 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
355 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
356 params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
357 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
/* Device-parameter-driven RX values (flow count from devargs). */
359 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
360 params->rx_max_action_entry_sz_in_bits =
361 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
362 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
363 params->rx_num_flows_in_k =
364 dparms->ext_flow_db_num_entries / 1024;
365 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
/* Device-parameter-driven TX values. */
367 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
368 params->tx_max_action_entry_sz_in_bits =
369 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
370 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
371 params->tx_num_flows_in_k =
372 dparms->ext_flow_db_num_entries / 1024;
373 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
375 BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
376 params->rx_num_flows_in_k);
379 /* Initialize Extended Exact Match host memory. */
/* Allocates the EEM table scope when the context's flow memory type is
 * external, and records the resulting table-scope id in the context. */
381 ulp_eem_tbl_scope_init(struct bnxt *bp)
383 struct tf_alloc_tbl_scope_parms params = {0};
384 struct bnxt_ulp_device_params *dparms;
385 enum bnxt_ulp_flow_mem_type mtype;
389 /* Get the dev specific number of flows that needed to be supported. */
390 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
391 BNXT_TF_DBG(ERR, "Invalid device id\n");
395 dparms = bnxt_ulp_device_params_get(dev_id);
397 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
401 if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
/* Internal EM memory needs no table scope allocation. */
404 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
405 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
/* Fixed mojibake: "&params" had been corrupted to "&para;ms". */
409 bnxt_init_tbl_scope_parms(bp, &params);
411 rc = tf_alloc_tbl_scope(&bp->tfp, &params);
413 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
418 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
420 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
427 /* Free Extended Exact Match host memory */
/* Releases the EEM table scope previously allocated by
 * ulp_eem_tbl_scope_init(); no-op for internal flow memory. */
429 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
431 struct tf_free_tbl_scope_parms params = {0};
434 struct bnxt_ulp_device_params *dparms;
435 enum bnxt_ulp_flow_mem_type mtype;
438 if (!ulp_ctx || !ulp_ctx->cfg_data)
441 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
443 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
447 /* Get the dev specific number of flows that needed to be supported. */
448 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
449 BNXT_TF_DBG(ERR, "Invalid device id\n");
453 dparms = bnxt_ulp_device_params_get(dev_id);
455 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
459 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
461 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
462 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
/* Fixed mojibake: "&params" had been corrupted to "&para;ms". */
466 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
468 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
472 rc = tf_free_tbl_scope(tfp, &params);
474 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
480 /* The function to free and deinit the ulp context data. */
/* Closes the TF session then frees the shared cfg_data, clearing both the
 * per-port and per-session pointers to it. */
482 ulp_ctx_deinit(struct bnxt *bp,
483 struct bnxt_ulp_session_state *session)
485 /* close the tf session */
486 ulp_ctx_session_close(bp, session);
488 /* Free the contents */
489 if (session->cfg_data) {
490 rte_free(session->cfg_data);
/* Null both aliases so neither side dangles on the freed block. */
491 bp->ulp_ctx->cfg_data = NULL;
492 session->cfg_data = NULL;
497 /* The function to allocate and initialize the ulp context data. */
/* Allocates cfg_data, resolves/records the device id, opens the TF session
 * and wires the tfp into the context. On failure falls through to deinit. */
499 ulp_ctx_init(struct bnxt *bp,
500 struct bnxt_ulp_session_state *session)
502 struct bnxt_ulp_data *ulp_data;
504 enum bnxt_ulp_device_id devid;
506 /* Allocate memory to hold ulp context data. */
507 ulp_data = rte_zmalloc("bnxt_ulp_data",
508 sizeof(struct bnxt_ulp_data), 0);
510 BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
514 /* Increment the ulp context data reference count usage. */
515 bp->ulp_ctx->cfg_data = ulp_data;
516 session->cfg_data = ulp_data;
/* VF representors are enabled by default for a fresh context. */
518 ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
520 rc = bnxt_ulp_devid_get(bp, &devid);
522 BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
526 rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
528 BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
532 /* Open the ulp session. */
533 rc = ulp_ctx_session_open(bp, session);
537 ulp_tun_tbl_init(ulp_data->tun_tbl);
539 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Error path: mark session opened so deinit closes it, then tear down. */
543 session->session_opened = 1;
544 (void)ulp_ctx_deinit(bp, session);
548 /* The function to initialize ulp dparms with devargs */
/* Applies the max_num_kflows devarg: absent => internal EM memory;
 * present => external EEM with flow/GFID counts derived from it. */
550 ulp_dparms_init(struct bnxt *bp,
551 struct bnxt_ulp_context *ulp_ctx)
553 struct bnxt_ulp_device_params *dparms;
556 if (!bp->max_num_kflows) {
557 /* Defaults to Internal */
558 bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
559 BNXT_ULP_FLOW_MEM_TYPE_INT);
563 /* The max_num_kflows were set, so move to external */
564 if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
567 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
568 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
572 dparms = bnxt_ulp_device_params_get(dev_id);
574 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
578 /* num_flows = max_num_kflows * 1024 */
579 dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
580 /* GFID = 2 * num_flows */
581 dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
582 BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
583 dparms->ext_flow_db_num_entries);
588 /* The function to initialize bp flags with truflow features */
/* Sets BNXT_FLAG_GFID_ENABLE on the adapter when the context uses
 * external (EEM) flow memory. */
590 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
591 struct bnxt_ulp_context *ulp_ctx)
593 enum bnxt_ulp_flow_mem_type mtype;
595 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
597 /* Update the bp flag with gfid flag */
598 if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
599 bp->flags |= BNXT_FLAG_GFID_ENABLE;
/*
 * Attach this port to an already-initialized ULP session: share the
 * existing cfg_data (bumping its refcount), adopt the shared TF session
 * handle, and open a per-port TF client on it.
 */
605 ulp_ctx_attach(struct bnxt *bp,
606 struct bnxt_ulp_session_state *session)
610 /* Increment the ulp context data reference count usage. */
611 bp->ulp_ctx->cfg_data = session->cfg_data;
612 bp->ulp_ctx->cfg_data->ref_cnt++;
614 /* update the session details in bnxt tfp */
615 bp->tfp.session = session->g_tfp->session;
617 /* Create a TF Client */
618 rc = ulp_ctx_session_open(bp, session);
620 PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
/* Undo the handle adoption on failure so close paths stay consistent. */
621 bp->tfp.session = NULL;
625 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Detach this port from the shared session: close its TF client (if any)
 * and drop the local session handle. */
630 ulp_ctx_detach(struct bnxt *bp)
632 if (bp->tfp.session) {
633 tf_close_session(&bp->tfp);
634 bp->tfp.session = NULL;
639 * Initialize the state of an ULP session.
640 * If the state of an ULP session is not initialized, set its state to
641 * initialized. If the state is already initialized, do nothing.
/* Atomically (under the session mutex) test-and-set the session's init
 * flag; *init reports whether it was already initialized. */
644 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
646 pthread_mutex_lock(&session->bnxt_ulp_mutex);
648 if (!session->bnxt_ulp_init) {
649 session->bnxt_ulp_init = true;
655 pthread_mutex_unlock(&session->bnxt_ulp_mutex);
659 * Check if an ULP session is already allocated for a specific PCI
660 * domain & bus. If it is already allocated simply return the session
661 * pointer, otherwise allocate a new session.
/* Caller must hold bnxt_ulp_global_mutex while walking the list. */
663 static struct bnxt_ulp_session_state *
664 ulp_get_session(struct rte_pci_addr *pci_addr)
666 struct bnxt_ulp_session_state *session;
/* Sessions are keyed by PCI domain + bus only (not devfn). */
668 STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
669 if (session->pci_info.domain == pci_addr->domain &&
670 session->pci_info.bus == pci_addr->bus) {
678 * Allocate and initialize an ULP session and set its state to INITIALIZED.
679 * If it's already initialized simply return the already existing session.
/*
 * Look up (or allocate and enqueue) the per-PCI-bus session under the
 * global mutex, then mark it initialized via ulp_context_initialized();
 * *init reports whether another port initialized it first.
 */
681 static struct bnxt_ulp_session_state *
682 ulp_session_init(struct bnxt *bp,
685 struct rte_pci_device *pci_dev;
686 struct rte_pci_addr *pci_addr;
687 struct bnxt_ulp_session_state *session;
693 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
694 pci_addr = &pci_dev->addr;
696 pthread_mutex_lock(&bnxt_ulp_global_mutex);
698 session = ulp_get_session(pci_addr);
700 /* Not Found the session Allocate a new one */
701 session = rte_zmalloc("bnxt_ulp_session",
702 sizeof(struct bnxt_ulp_session_state),
706 "Allocation failed for bnxt_ulp_session\n");
707 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
711 /* Add it to the queue */
712 session->pci_info.domain = pci_addr->domain;
713 session->pci_info.bus = pci_addr->bus;
714 rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
716 BNXT_TF_DBG(ERR, "mutex create failed\n");
717 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
720 STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
724 ulp_context_initialized(session, init);
725 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
730 * When a device is closed, remove its associated session from the global
/* Removes the session from the global list once its cfg_data has been
 * freed (i.e. the last user tore the context down). */
734 ulp_session_deinit(struct bnxt_ulp_session_state *session)
/* Only unlink after cfg_data is gone; otherwise other ports still use it. */
739 if (!session->cfg_data) {
740 pthread_mutex_lock(&bnxt_ulp_global_mutex);
741 STAILQ_REMOVE(&bnxt_ulp_session_list, session,
742 bnxt_ulp_session_state, next);
743 pthread_mutex_destroy(&session->bnxt_ulp_mutex);
745 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
750 * Internal api to enable NAT feature.
751 * Set set_flag to 1 to set the value or zero to reset the value.
752 * returns 0 on success.
/* Read-modify-write of a TF global config register: fetch current value,
 * set or clear the requested bits, write it back. */
755 bnxt_ulp_global_cfg_update(struct bnxt *bp,
757 enum tf_global_config_type type,
762 uint32_t global_cfg = 0;
764 struct tf_global_cfg_parms parms = { 0 };
766 /* Initialize the params */
/* Comma-separated assignments below are original upstream style. */
769 parms.offset = offset,
770 parms.config = (uint8_t *)&global_cfg,
771 parms.config_sz_in_bytes = sizeof(global_cfg);
773 rc = tf_get_global_cfg(&bp->tfp, &parms);
775 BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
/* Clear path: drop the requested bits (set path elided in fragment). */
783 global_cfg &= ~value;
785 /* SET the register RE_CFA_REG_ACT_TECT */
786 rc = tf_set_global_cfg(&bp->tfp, &parms);
788 BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
795 /* Internal function to delete all the flows belonging to the given port */
/* Resolves the port's function id and flushes every flow the flow DB has
 * recorded against it. */
797 bnxt_ulp_flush_port_flows(struct bnxt *bp)
801 /* it is assumed that port is either TVF or PF */
802 if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
803 bp->eth_dev->data->port_id,
805 BNXT_TF_DBG(ERR, "Invalid argument\n");
808 (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
811 /* Internal function to delete the VFR default flows */
/* Walks all ethdev ports' VFR rule slots; for each matching entry (all of
 * them when 'global', else only those parented to this port) destroys the
 * rep2vf/vf2rep default flows, clears the representor's tx action and
 * zeroes the slot. */
813 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
815 struct bnxt_ulp_vfr_rule_info *info;
817 struct rte_eth_dev *vfr_eth_dev;
818 struct bnxt_representor *vfr_bp;
/* Nothing to do when TruFlow is off or we are a representor ourselves. */
820 if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
823 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
826 /* Delete default rules for all ports */
827 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
828 info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
/* Non-global teardown only touches VFRs parented to this port. */
832 if (!global && info->parent_port_id !=
833 bp->eth_dev->data->port_id)
836 /* Destroy the flows */
837 ulp_default_flow_destroy(bp->eth_dev, info->rep2vf_flow_id);
838 ulp_default_flow_destroy(bp->eth_dev, info->vf2rep_flow_id);
839 /* Clean up the tx action pointer */
840 vfr_eth_dev = &rte_eth_devices[port_id];
842 vfr_bp = vfr_eth_dev->data->dev_private;
843 vfr_bp->vfr_tx_cfa_action = 0;
845 memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
850 * When a port is deinit'ed by dpdk. This function is called
851 * and this function clears the ULP context and rest of the
852 * infrastructure associated with it.
/* Tears down, in order: default/VFR/regular flows, EEM table scope, flow
 * DB, mark DB, mapper, FC manager, port DB, NAT global config, the flow DB
 * lock, and finally the context + TF session. */
855 bnxt_ulp_deinit(struct bnxt *bp,
856 struct bnxt_ulp_session_state *session)
858 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
861 /* clean up default flows */
862 bnxt_ulp_destroy_df_rules(bp, true);
864 /* clean up default VFR flows */
865 bnxt_ulp_destroy_vfr_default_rules(bp, true);
867 /* clean up regular flows */
868 ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
870 /* cleanup the eem table scope */
871 ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
873 /* cleanup the flow database */
874 ulp_flow_db_deinit(bp->ulp_ctx);
876 /* Delete the Mark database */
877 ulp_mark_db_deinit(bp->ulp_ctx);
879 /* cleanup the ulp mapper */
880 ulp_mapper_deinit(bp->ulp_ctx);
882 /* Delete the Flow Counter Manager */
883 ulp_fc_mgr_deinit(bp->ulp_ctx);
885 /* Delete the Port database */
886 ulp_port_db_deinit(bp->ulp_ctx);
888 /* Disable NAT feature */
889 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
891 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
893 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
895 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
897 /* free the flow db lock */
898 pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
900 /* Delete the ulp context and tf session and free the ulp context */
901 ulp_ctx_deinit(bp, session);
902 BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
906 * When a port is initialized by dpdk. This function is called
907 * and this function initializes the ULP context and rest of the
908 * infrastructure associated with it.
/* Builds the full ULP stack for the first port of a session: context + TF
 * session, flow-db lock, dparms, port DB, mark DB, flow DB, EEM scope,
 * mapper, FC manager, then enables the NAT global config. On any failure
 * it falls through to bnxt_ulp_deinit(). */
911 bnxt_ulp_init(struct bnxt *bp,
912 struct bnxt_ulp_session_state *session)
916 /* Allocate and Initialize the ulp context. */
917 rc = ulp_ctx_init(bp, session);
919 BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
923 rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
925 BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
929 /* Initialize ulp dparms with values devargs passed */
930 rc = ulp_dparms_init(bp, bp->ulp_ctx);
932 BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
936 /* create the port database */
937 rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
939 BNXT_TF_DBG(ERR, "Failed to create the port database\n");
943 /* Create the Mark database. */
944 rc = ulp_mark_db_init(bp->ulp_ctx);
946 BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
950 /* Create the flow database. */
951 rc = ulp_flow_db_init(bp->ulp_ctx);
953 BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
957 /* Create the eem table scope. */
958 rc = ulp_eem_tbl_scope_init(bp);
960 BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
964 rc = ulp_mapper_init(bp->ulp_ctx);
966 BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
970 rc = ulp_fc_mgr_init(bp->ulp_ctx);
972 BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
977 * Enable NAT feature. Set the global configuration register
978 * Tunnel encap to enable NAT with the reuse of existing inner
979 * L2 header smac and dmac
981 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
983 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
985 BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
989 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
991 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
993 BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
996 BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
/* Shared error-cleanup target. */
1000 bnxt_ulp_deinit(bp, session);
1005 * When a port is initialized by dpdk. This function sets up
1006 * the port specific details.
/* Per-port entry point: allocates the port's ulp_ctx, finds/creates the
 * per-bus session, then either attaches to an initialized session or runs
 * the full bnxt_ulp_init(), and finally updates driver flags, the port DB
 * and the default rules. Cleans up via bnxt_ulp_port_deinit() on error. */
1009 bnxt_ulp_port_init(struct bnxt *bp)
1011 struct bnxt_ulp_session_state *session;
/* TruFlow must be enabled and the port must be a PF or trusted VF. */
1015 if (!bp || !BNXT_TRUFLOW_EN(bp))
1018 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1020 "Skip ulp init for port: %d, not a TVF or PF\n",
1021 bp->eth_dev->data->port_id);
1026 BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1030 bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1031 sizeof(struct bnxt_ulp_context), 0);
1033 BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1038 * Multiple uplink ports can be associated with a single vswitch.
1039 * Make sure only the port that is started first will initialize
1042 session = ulp_session_init(bp, &initialized);
1044 BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1051 * If ULP is already initialized for a specific domain then
1052 * simply assign the ulp context to this rte_eth_dev.
1054 rc = ulp_ctx_attach(bp, session);
1056 BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
1060 rc = bnxt_ulp_init(bp, session);
1062 BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
1067 /* Update bnxt driver flags */
1068 rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1070 BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1074 /* update the port database for the given interface */
1075 rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1077 BNXT_TF_DBG(ERR, "Failed to update port database\n");
1080 /* create the default rules */
1081 bnxt_ulp_create_df_rules(bp);
1082 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
1083 bp->eth_dev->data->port_id);
/* Error path: unwind whatever this function set up. */
1087 bnxt_ulp_port_deinit(bp);
1092 * When a port is de-initialized by dpdk. This function clears up
1093 * the port specific details.
/* Per-port teardown: finds the session for this PCI bus, drops this port's
 * reference on cfg_data; intermediate holders flush only their own default
 * rules and flows, the last holder runs the full bnxt_ulp_deinit() and
 * removes the session. Always frees bp->ulp_ctx at the end. */
1096 bnxt_ulp_port_deinit(struct bnxt *bp)
1098 struct bnxt_ulp_session_state *session;
1099 struct rte_pci_device *pci_dev;
1100 struct rte_pci_addr *pci_addr;
1102 if (!BNXT_TRUFLOW_EN(bp))
1105 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1107 "Skip ULP deinit port:%d, not a TVF or PF\n",
1108 bp->eth_dev->data->port_id);
1113 BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1117 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1118 bp->eth_dev->data->port_id);
1120 /* Get the session details */
1121 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1122 pci_addr = &pci_dev->addr;
1123 pthread_mutex_lock(&bnxt_ulp_global_mutex);
1124 session = ulp_get_session(pci_addr);
1125 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1127 /* session not found then just exit */
1129 /* Free the ulp context */
1130 rte_free(bp->ulp_ctx);
1135 /* Check the reference count to deinit or deattach*/
1136 if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1137 bp->ulp_ctx->cfg_data->ref_cnt--;
/* Still referenced elsewhere: only detach this port's resources. */
1138 if (bp->ulp_ctx->cfg_data->ref_cnt) {
1139 /* free the port details */
1140 /* Free the default flow rule associated to this port */
1141 bnxt_ulp_destroy_df_rules(bp, false);
1142 bnxt_ulp_destroy_vfr_default_rules(bp, false);
1144 /* free flows associated with this port */
1145 bnxt_ulp_flush_port_flows(bp);
1147 /* close the session associated with this port */
/* Last reference: tear the whole ULP context down. */
1150 /* Perform ulp ctx deinit */
1151 bnxt_ulp_deinit(bp, session);
1155 /* clean up the session */
1156 ulp_session_deinit(session);
1158 /* Free the ulp context */
1159 rte_free(bp->ulp_ctx);
1163 /* Below are the access functions to access internal data of ulp context. */
1164 /* Function to set the Mark DB into the context */
1166 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1167 struct bnxt_ulp_mark_tbl *mark_tbl)
1169 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1170 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1174 ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1179 /* Function to retrieve the Mark DB from the context. */
1180 struct bnxt_ulp_mark_tbl *
1181 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1183 if (!ulp_ctx || !ulp_ctx->cfg_data)
1186 return ulp_ctx->cfg_data->mark_tbl;
1189 /* Function to set the device id of the hardware. */
1191 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1194 if (ulp_ctx && ulp_ctx->cfg_data) {
1195 ulp_ctx->cfg_data->dev_id = dev_id;
1202 /* Function to get the device id of the hardware. */
1204 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1207 if (ulp_ctx && ulp_ctx->cfg_data) {
1208 *dev_id = ulp_ctx->cfg_data->dev_id;
1212 BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
/* Function to set the flow memory type in the ulp context. */
1217 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1218 enum bnxt_ulp_flow_mem_type mem_type)
1220 if (ulp_ctx && ulp_ctx->cfg_data) {
1221 ulp_ctx->cfg_data->mem_type = mem_type;
1224 BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
/* Function to get the flow memory type from the ulp context. */
1229 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1230 enum bnxt_ulp_flow_mem_type *mem_type)
1232 if (ulp_ctx && ulp_ctx->cfg_data) {
1233 *mem_type = ulp_ctx->cfg_data->mem_type;
1236 BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1240 /* Function to get the table scope id of the EEM table. */
1242 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1243 uint32_t *tbl_scope_id)
1245 if (ulp_ctx && ulp_ctx->cfg_data) {
1246 *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1253 /* Function to set the table scope id of the EEM table. */
1255 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1256 uint32_t tbl_scope_id)
1258 if (ulp_ctx && ulp_ctx->cfg_data) {
1259 ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1266 /* Function to set the tfp session details from the ulp context. */
1268 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1271 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1279 /* Function to get the tfp session details from the ulp context. */
1281 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
1284 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1291 * Get the device table entry based on the device id.
1293 * dev_id [in] The device id of the hardware
1295 * Returns the pointer to the device parameters.
1297 struct bnxt_ulp_device_params *
1298 bnxt_ulp_device_params_get(uint32_t dev_id)
/* Bounds-checked lookup into the static ulp_device_params table. */
1300 if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1301 return &ulp_device_params[dev_id];
1305 /* Function to set the flow database to the ulp context. */
1307 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1308 struct bnxt_ulp_flow_db *flow_db)
1310 if (!ulp_ctx || !ulp_ctx->cfg_data)
1313 ulp_ctx->cfg_data->flow_db = flow_db;
1317 /* Function to get the flow database from the ulp context. */
1318 struct bnxt_ulp_flow_db *
1319 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1321 if (!ulp_ctx || !ulp_ctx->cfg_data)
1324 return ulp_ctx->cfg_data->flow_db;
1327 /* Function to get the tunnel cache table info from the ulp context. */
1328 struct bnxt_tun_cache_entry *
1329 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1331 if (!ulp_ctx || !ulp_ctx->cfg_data)
1334 return ulp_ctx->cfg_data->tun_tbl;
1337 /* Function to get the ulp context from eth device. */
1338 struct bnxt_ulp_context *
1339 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1341 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
/* For a representor, the context lives on the parent device. */
1343 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1344 struct bnxt_representor *vfr = dev->data->dev_private;
1346 bp = vfr->parent_dev->data->dev_private;
1350 BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
/* Function to set the mapper data into the ulp context. */
1357 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1360 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1361 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1365 ulp_ctx->cfg_data->mapper_data = mapper_data;
/* Function to get the mapper data from the ulp context. */
1370 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1372 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1373 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1377 return ulp_ctx->cfg_data->mapper_data;
1380 /* Function to set the port database to the ulp context. */
1382 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1383 struct bnxt_ulp_port_db *port_db)
1385 if (!ulp_ctx || !ulp_ctx->cfg_data)
1388 ulp_ctx->cfg_data->port_db = port_db;
1392 /* Function to get the port database from the ulp context. */
1393 struct bnxt_ulp_port_db *
1394 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1396 if (!ulp_ctx || !ulp_ctx->cfg_data)
1399 return ulp_ctx->cfg_data->port_db;
1402 /* Function to set the flow counter info into the context */
1404 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1405 struct bnxt_ulp_fc_info *ulp_fc_info)
1407 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1408 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1412 ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1417 /* Function to retrieve the flow counter info from the context. */
1418 struct bnxt_ulp_fc_info *
1419 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1421 if (!ulp_ctx || !ulp_ctx->cfg_data)
1424 return ulp_ctx->cfg_data->fc_info;
1427 /* Function to get the ulp flags from the ulp context. */
1429 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1432 if (!ulp_ctx || !ulp_ctx->cfg_data)
1435 *flags = ulp_ctx->cfg_data->ulp_flags;
1439 /* Function to get the ulp vfr info from the ulp context. */
1440 struct bnxt_ulp_vfr_rule_info*
1441 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
/* port_id is bounds-checked against RTE_MAX_ETHPORTS. */
1444 if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1447 return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1450 /* Function to acquire the flow database lock from the ulp context. */
1452 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1454 if (!ulp_ctx || !ulp_ctx->cfg_data)
1457 if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1458 BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1464 /* Function to release the flow database lock from the ulp context. */
1466 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1468 if (!ulp_ctx || !ulp_ctx->cfg_data)
1471 pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);