1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
14 #include "bnxt_tf_common.h"
16 #include "tf_ext_flow_handle.h"
18 #include "ulp_template_db_enum.h"
19 #include "ulp_template_struct.h"
20 #include "ulp_mark_mgr.h"
21 #include "ulp_fc_mgr.h"
22 #include "ulp_flow_db.h"
23 #include "ulp_mapper.h"
24 #include "ulp_port_db.h"
27 /* Linked list of all TF sessions. */
28 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
29 STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
31 /* Mutex to synchronize bnxt_ulp_session_list operations. */
/* All list traversal, insertion and removal must hold this mutex. */
32 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
35 * Allow the deletion of context only for the bnxt device that
36 * created the session.
39 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
/* A NULL context or missing cfg_data means there is nothing to deinit. */
41 if (!ulp_ctx || !ulp_ctx->cfg_data)
/* Zero reference count: this device owns the session and may deinit it. */
44 if (!ulp_ctx->cfg_data->ref_cnt) {
45 BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
/* Map the bnxt device to a ULP device id (Thor, Stingray or Whitney+). */
53 bnxt_ulp_devid_get(struct bnxt *bp,
54 enum bnxt_ulp_device_id *ulp_dev_id)
56 if (BNXT_CHIP_P5(bp)) {
57 /* TBD: needs to accommodate even SR2 */
58 *ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR;
62 if (BNXT_STINGRAY(bp))
63 *ulp_dev_id = BNXT_ULP_DEVICE_ID_STINGRAY;
65 /* Assuming Whitney */
66 *ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS;
/*
 * Fill in the per-direction (RX/TX) TF session resource requests for the
 * detected device family.  The counts below are fixed provisioning values
 * per device id.
 */
72 bnxt_ulp_tf_session_resources_get(struct bnxt *bp,
73 struct tf_session_resources *res)
79 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
81 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
86 case BNXT_ULP_DEVICE_ID_WH_PLUS:
/* Whitney+ RX identifiers */
89 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
90 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
91 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
92 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
93 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* Whitney+ RX index tables */
96 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
97 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
98 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
101 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
102 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
/* Whitney+ RX TCAM entries */
105 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
107 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
109 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
110 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;
/* Whitney+ RX exact-match records and table scope */
113 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13168;
116 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
119 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 255;
121 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 1;
/* Whitney+ TX identifiers */
125 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
126 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 148;
127 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
128 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
129 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* Whitney+ TX index tables */
132 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
133 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
134 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
137 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
138 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
139 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
/* Whitney+ TX TCAM entries */
142 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
144 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
146 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
147 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
/* Whitney+ TX exact-match records and table scope */
150 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
153 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
156 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
157 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 511;
159 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 1;
162 case BNXT_ULP_DEVICE_ID_STINGRAY:
/* Stingray RX identifiers */
165 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 315;
166 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
167 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
168 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
169 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* Stingray RX index tables */
172 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
173 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
174 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
177 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
178 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
/* Stingray RX TCAM entries */
181 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
183 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
185 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
186 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 112;
/* Stingray RX exact-match records and table scope */
189 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13200;
192 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
195 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 256;
/* Stingray TX identifiers */
199 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
200 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 127;
201 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
202 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
203 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* Stingray TX index tables */
206 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
207 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
208 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
211 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 367;
212 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
213 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
/* Stingray TX TCAM entries */
216 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
218 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
220 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
221 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
/* Stingray TX exact-match records and table scope */
224 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
227 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
230 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
231 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 512;
233 case BNXT_ULP_DEVICE_ID_THOR:
/* Thor RX identifiers */
236 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 26;
237 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
238 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 32;
239 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 32;
240 res->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 32;
/* Thor RX index tables */
243 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 1024;
244 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 512;
245 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 14;
246 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_EM_FKB] = 32;
247 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_WC_FKB] = 32;
250 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 64;
/* Thor RX TCAM entries (written via a shorthand pointer) */
253 tmp_cnt = &res->tcam_cnt[TF_DIR_RX].cnt[0];
254 tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = 300;
255 tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = 6;
256 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 128;
257 res->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 112;
/* Thor RX exact-match records */
260 res->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13200;
263 res->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 64;
/* Thor TX identifiers */
267 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 26;
268 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 26;
269 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 32;
270 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 63;
271 res->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 32;
/* Thor TX index tables */
274 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 1024;
275 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 512;
276 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 14;
277 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_EM_FKB] = 32;
278 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_WC_FKB] = 32;
281 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 64;
/* Thor TX TCAM entries (written via a shorthand pointer) */
284 tmp_cnt = &res->tcam_cnt[TF_DIR_TX].cnt[0];
286 tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = 200;
287 tmp_cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = 110;
288 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 128;
289 res->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 128;
/* Thor TX exact-match records */
292 res->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
295 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 100;
297 res->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_MIRROR_CONFIG] = 1;
308 * Initialize an ULP session.
309 * An ULP session will contain all the resources needed to support rte flow
310 * offloads. A session is initialized as part of rte_eth_device start.
311 * A single vswitch instance can have multiple uplinks which means
312 * rte_eth_device start will be called for each of these devices.
313 * ULP session manager will make sure that a single ULP session is only
314 * initialized once. Apart from this, it also initializes MARK database,
315 * EEM table & flow database. ULP session manager also manages a list of
316 * all opened ULP sessions.
319 ulp_ctx_session_open(struct bnxt *bp,
320 struct bnxt_ulp_session_state *session)
322 struct rte_eth_dev *ethdev = bp->eth_dev;
324 struct tf_open_session_parms params;
325 struct tf_session_resources *resources;
/* Fix mojibake: "&params" had been corrupted to "&para;ms". */
328 memset(&params, 0, sizeof(params));
/* The TF control channel is named after the DPDK port. */
330 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
331 params.ctrl_chan_name);
333 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
334 ethdev->data->port_id, rc);
338 params.shadow_copy = true;
340 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);
342 BNXT_TF_DBG(ERR, "Unable to get device id from ulp.\n");
/* Translate the ULP device id into the TF device type. */
346 switch (ulp_dev_id) {
347 case BNXT_ULP_DEVICE_ID_WH_PLUS:
348 params.device_type = TF_DEVICE_TYPE_WH;
350 case BNXT_ULP_DEVICE_ID_STINGRAY:
351 params.device_type = TF_DEVICE_TYPE_SR;
353 case BNXT_ULP_DEVICE_ID_THOR:
354 params.device_type = TF_DEVICE_TYPE_THOR;
357 BNXT_TF_DBG(ERR, "Unable to determine device for "
358 "opening session.\n");
362 resources = &params.resources;
363 rc = bnxt_ulp_tf_session_resources_get(bp, resources);
365 BNXT_TF_DBG(ERR, "Unable to determine tf resources for "
371 rc = tf_open_session(&bp->tfp, &params);
373 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
374 params.ctrl_chan_name, rc);
/* First opener records the shared tfp for later attaches. */
377 if (!session->session_opened) {
378 session->session_opened = 1;
379 session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
380 sizeof(struct tf), 0);
/* NOTE(review): g_tfp is dereferenced without a NULL check after
 * rte_zmalloc — confirm an allocation-failure path exists upstream. */
381 session->g_tfp->session = bp->tfp.session;
387 * Close the ULP session.
388 * It takes the ulp context pointer.
391 ulp_ctx_session_close(struct bnxt *bp,
392 struct bnxt_ulp_session_state *session)
394 /* close the session in the hardware */
395 if (session->session_opened)
396 tf_close_session(&bp->tfp);
397 session->session_opened = 0;
/* Release the shared tfp recorded by the first opener. */
398 rte_free(session->g_tfp);
399 session->g_tfp = NULL;
/*
 * Populate the EEM table-scope allocation parameters.  Flow counts fall
 * back to compile-time defaults when device params are unavailable,
 * otherwise they are derived from ext_flow_db_num_entries.
 */
403 bnxt_init_tbl_scope_parms(struct bnxt *bp,
404 struct tf_alloc_tbl_scope_parms *params)
406 struct bnxt_ulp_device_params *dparms;
410 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
412 /* TBD: For now, just use default. */
415 dparms = bnxt_ulp_device_params_get(dev_id);
418 * Set the flush timer for EEM entries. The value is in 100ms intervals,
421 params->hw_flow_cache_flush_timer = 100;
/* Default sizing path (no device params). */
424 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
425 params->rx_max_action_entry_sz_in_bits =
426 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
427 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
428 params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
430 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
431 params->tx_max_action_entry_sz_in_bits =
432 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
433 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
434 params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
/* Device-params path: flow counts scaled from ext_flow_db_num_entries. */
436 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
437 params->rx_max_action_entry_sz_in_bits =
438 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
439 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
440 params->rx_num_flows_in_k =
441 dparms->ext_flow_db_num_entries / 1024;
443 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
444 params->tx_max_action_entry_sz_in_bits =
445 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
446 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
447 params->tx_num_flows_in_k =
448 dparms->ext_flow_db_num_entries / 1024;
450 BNXT_TF_DBG(INFO, "Table Scope initialized with %uK flows.\n",
451 params->rx_num_flows_in_k);
454 /* Initialize Extended Exact Match host memory. */
456 ulp_eem_tbl_scope_init(struct bnxt *bp)
458 struct tf_alloc_tbl_scope_parms params = {0};
459 struct bnxt_ulp_device_params *dparms;
460 enum bnxt_ulp_flow_mem_type mtype;
464 /* Get the dev specific number of flows that needed to be supported. */
465 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
466 BNXT_TF_DBG(ERR, "Invalid device id\n");
470 dparms = bnxt_ulp_device_params_get(dev_id);
472 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
476 if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype))
/* Table scopes are only needed for external (EEM) flow memory. */
478 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
479 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
/* Fix mojibake: "&params" had been corrupted to "&para;ms". */
483 bnxt_init_tbl_scope_parms(bp, &params);
484 rc = tf_alloc_tbl_scope(&bp->tfp, &params);
486 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
/* Remember the allocated scope id in the ULP context for later free. */
490 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
492 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
499 /* Free Extended Exact Match host memory */
501 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
503 struct tf_free_tbl_scope_parms params = {0};
506 struct bnxt_ulp_device_params *dparms;
507 enum bnxt_ulp_flow_mem_type mtype;
510 if (!ulp_ctx || !ulp_ctx->cfg_data)
513 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
515 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
519 /* Get the dev specific number of flows that needed to be supported. */
520 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
521 BNXT_TF_DBG(ERR, "Invalid device id\n");
525 dparms = bnxt_ulp_device_params_get(dev_id);
527 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
531 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
/* Only external (EEM) flow memory owns a table scope. */
533 if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
534 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
/* Fix mojibake: "&params" had been corrupted to "&para;ms". */
538 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
540 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
544 rc = tf_free_tbl_scope(tfp, &params);
546 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
552 /* The function to free and deinit the ulp context data. */
554 ulp_ctx_deinit(struct bnxt *bp,
555 struct bnxt_ulp_session_state *session)
557 /* close the tf session */
558 ulp_ctx_session_close(bp, session);
560 /* Free the contents */
/* cfg_data is shared between the session and bp->ulp_ctx; clear both. */
561 if (session->cfg_data) {
562 rte_free(session->cfg_data);
563 bp->ulp_ctx->cfg_data = NULL;
564 session->cfg_data = NULL;
569 /* The function to allocate and initialize the ulp context data. */
571 ulp_ctx_init(struct bnxt *bp,
572 struct bnxt_ulp_session_state *session)
574 struct bnxt_ulp_data *ulp_data;
576 enum bnxt_ulp_device_id devid;
578 /* Allocate memory to hold ulp context data. */
579 ulp_data = rte_zmalloc("bnxt_ulp_data",
580 sizeof(struct bnxt_ulp_data), 0);
582 BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
586 /* Increment the ulp context data reference count usage. */
/* The same cfg_data is shared by the per-port context and the session. */
587 bp->ulp_ctx->cfg_data = ulp_data;
588 session->cfg_data = ulp_data;
590 ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
592 rc = bnxt_ulp_devid_get(bp, &devid);
594 BNXT_TF_DBG(ERR, "Unable to determine device for ULP init.\n");
598 rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid);
600 BNXT_TF_DBG(ERR, "Unable to set device for ULP init.\n");
604 /* Open the ulp session. */
605 rc = ulp_ctx_session_open(bp, session);
609 ulp_tun_tbl_init(ulp_data->tun_tbl);
611 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Error path: mark opened so deinit will close, then unwind. */
615 session->session_opened = 1;
616 (void)ulp_ctx_deinit(bp, session);
620 /* The function to initialize ulp dparms with devargs */
622 ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
624 struct bnxt_ulp_device_params *dparms;
/* max_num_kflows devarg selects external (EEM) vs internal flow memory. */
627 if (!bp->max_num_kflows) {
628 /* Defaults to Internal */
629 bnxt_ulp_cntxt_mem_type_set(ulp_ctx,
630 BNXT_ULP_FLOW_MEM_TYPE_INT);
634 /* The max_num_kflows were set, so move to external */
635 if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT))
638 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
639 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
643 dparms = bnxt_ulp_device_params_get(dev_id);
645 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
649 /* num_flows = max_num_kflows * 1024 */
650 dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
651 /* GFID = 2 * num_flows */
652 dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
653 BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
654 dparms->ext_flow_db_num_entries);
659 /* The function to initialize bp flags with truflow features */
661 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
662 struct bnxt_ulp_context *ulp_ctx)
664 enum bnxt_ulp_flow_mem_type mtype;
666 if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype))
668 /* Update the bp flag with gfid flag */
/* External (EEM) flow memory implies GFID-based mark handling. */
669 if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT)
670 bp->flags |= BNXT_FLAG_GFID_ENABLE;
/*
 * Attach this port to an already-initialized ULP session: share the
 * existing cfg_data, bump its ref count and open a TF client on the
 * shared session.
 */
676 ulp_ctx_attach(struct bnxt *bp,
677 struct bnxt_ulp_session_state *session)
681 /* Increment the ulp context data reference count usage. */
682 bp->ulp_ctx->cfg_data = session->cfg_data;
683 bp->ulp_ctx->cfg_data->ref_cnt++;
685 /* update the session details in bnxt tfp */
686 bp->tfp.session = session->g_tfp->session;
688 /* Create a TF Client */
689 rc = ulp_ctx_session_open(bp, session);
691 PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
/* Undo the shared-session pointer on failure. */
692 bp->tfp.session = NULL;
696 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Detach this port from the shared ULP session by closing its TF client. */
701 ulp_ctx_detach(struct bnxt *bp)
703 if (bp->tfp.session) {
704 tf_close_session(&bp->tfp);
705 bp->tfp.session = NULL;
710 * Initialize the state of an ULP session.
711 * If the state of an ULP session is not initialized, set it's state to
712 * initialized. If the state is already initialized, do nothing.
715 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
/* Serialized on the per-session mutex, not the global one. */
717 pthread_mutex_lock(&session->bnxt_ulp_mutex);
719 if (!session->bnxt_ulp_init) {
720 session->bnxt_ulp_init = true;
726 pthread_mutex_unlock(&session->bnxt_ulp_mutex);
730 * Check if an ULP session is already allocated for a specific PCI
731 * domain & bus. If it is already allocated simply return the session
732 * pointer, otherwise allocate a new session.
734 static struct bnxt_ulp_session_state *
735 ulp_get_session(struct rte_pci_addr *pci_addr)
737 struct bnxt_ulp_session_state *session;
/* Caller must hold bnxt_ulp_global_mutex while walking the list. */
739 STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
/* Sessions are keyed by PCI domain + bus only (one per adapter). */
740 if (session->pci_info.domain == pci_addr->domain &&
741 session->pci_info.bus == pci_addr->bus) {
749 * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
750 * If it's already initialized simply return the already existing session.
752 static struct bnxt_ulp_session_state *
753 ulp_session_init(struct bnxt *bp,
756 struct rte_pci_device *pci_dev;
757 struct rte_pci_addr *pci_addr;
758 struct bnxt_ulp_session_state *session;
764 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
765 pci_addr = &pci_dev->addr;
/* Global mutex guards both the lookup and a possible insertion. */
767 pthread_mutex_lock(&bnxt_ulp_global_mutex);
769 session = ulp_get_session(pci_addr);
771 /* Not Found the session Allocate a new one */
772 session = rte_zmalloc("bnxt_ulp_session",
773 sizeof(struct bnxt_ulp_session_state),
777 "Allocation failed for bnxt_ulp_session\n");
778 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
782 /* Add it to the queue */
783 session->pci_info.domain = pci_addr->domain;
784 session->pci_info.bus = pci_addr->bus;
785 rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
787 BNXT_TF_DBG(ERR, "mutex create failed\n");
788 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
791 STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
/* Reports through 'init' whether this caller is the first initializer. */
795 ulp_context_initialized(session, init);
796 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
801 * When a device is closed, remove it's associated session from the global
805 ulp_session_deinit(struct bnxt_ulp_session_state *session)
/* Only tear the session down once its shared cfg_data has been freed. */
810 if (!session->cfg_data) {
811 pthread_mutex_lock(&bnxt_ulp_global_mutex);
812 STAILQ_REMOVE(&bnxt_ulp_session_list, session,
813 bnxt_ulp_session_state, next);
814 pthread_mutex_destroy(&session->bnxt_ulp_mutex);
816 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
821 * Internal api to enable NAT feature.
822 * Set set_flag to 1 to set the value or zero to reset the value.
823 * returns 0 on success.
826 bnxt_ulp_global_cfg_update(struct bnxt *bp,
828 enum tf_global_config_type type,
833 uint32_t global_cfg = 0;
835 struct tf_global_cfg_parms parms = { 0 };
837 /* Initialize the params */
/* NOTE(review): the trailing commas below are comma operators, not
 * separate statements — functionally equivalent but worth confirming. */
840 parms.offset = offset,
841 parms.config = (uint8_t *)&global_cfg,
842 parms.config_sz_in_bytes = sizeof(global_cfg);
/* Read-modify-write of the global config word. */
844 rc = tf_get_global_cfg(&bp->tfp, &parms);
846 BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
/* Clear path: drop the requested bits. */
854 global_cfg &= ~value;
856 /* SET the register RE_CFA_REG_ACT_TECT */
857 rc = tf_set_global_cfg(&bp->tfp, &parms);
859 BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
866 /* Internal function to delete all the flows belonging to the given port */
868 bnxt_ulp_flush_port_flows(struct bnxt *bp)
872 /* it is assumed that port is either TVF or PF */
873 if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
874 bp->eth_dev->data->port_id,
876 BNXT_TF_DBG(ERR, "Invalid argument\n");
/* Best-effort flush; failures are intentionally ignored. */
879 (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
882 /* Internal function to delete the VFR default flows */
884 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
886 struct bnxt_ulp_vfr_rule_info *info;
888 struct rte_eth_dev *vfr_eth_dev;
889 struct bnxt_representor *vfr_bp;
891 if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
894 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
897 /* Delete default rules for all ports */
898 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
899 info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
/* Non-global mode only touches VFRs parented to this port. */
903 if (!global && info->parent_port_id !=
904 bp->eth_dev->data->port_id)
907 /* Destroy the flows */
908 ulp_default_flow_destroy(bp->eth_dev, info->vfr_flow_id);
909 /* Clean up the tx action pointer */
910 vfr_eth_dev = &rte_eth_devices[port_id];
912 vfr_bp = vfr_eth_dev->data->dev_private;
913 vfr_bp->vfr_tx_cfa_action = 0;
/* Forget the rule bookkeeping for this port. */
915 memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
920 * When a port is deinit'ed by dpdk. This function is called
921 * and this function clears the ULP context and rest of the
922 * infrastructure associated with it.
925 bnxt_ulp_deinit(struct bnxt *bp,
926 struct bnxt_ulp_session_state *session)
928 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
931 /* clean up default flows */
932 bnxt_ulp_destroy_df_rules(bp, true);
934 /* clean up default VFR flows */
935 bnxt_ulp_destroy_vfr_default_rules(bp, true);
937 /* clean up regular flows */
938 ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
940 /* cleanup the eem table scope */
941 ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
943 /* cleanup the flow database */
944 ulp_flow_db_deinit(bp->ulp_ctx);
946 /* Delete the Mark database */
947 ulp_mark_db_deinit(bp->ulp_ctx);
949 /* cleanup the ulp mapper */
950 ulp_mapper_deinit(bp->ulp_ctx);
952 /* Delete the Flow Counter Manager */
953 ulp_fc_mgr_deinit(bp->ulp_ctx);
955 /* Delete the Port database */
956 ulp_port_db_deinit(bp->ulp_ctx);
958 /* Disable NAT feature */
/* Mirrors the enables done in bnxt_ulp_init; failures are ignored. */
959 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
961 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
963 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
965 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
967 /* free the flow db lock */
968 pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
970 /* Delete the ulp context and tf session and free the ulp context */
971 ulp_ctx_deinit(bp, session);
972 BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
976 * When a port is initialized by dpdk. This functions is called
977 * and this function initializes the ULP context and rest of the
978 * infrastructure associated with it.
981 bnxt_ulp_init(struct bnxt *bp,
982 struct bnxt_ulp_session_state *session)
986 /* Allocate and Initialize the ulp context. */
987 rc = ulp_ctx_init(bp, session);
989 BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
993 rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
995 BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
999 /* Initialize ulp dparms with values devargs passed */
1000 rc = ulp_dparms_init(bp, bp->ulp_ctx);
1002 BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
1006 /* create the port database */
1007 rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
1009 BNXT_TF_DBG(ERR, "Failed to create the port database\n");
1013 /* Create the Mark database. */
1014 rc = ulp_mark_db_init(bp->ulp_ctx);
1016 BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
1020 /* Create the flow database. */
1021 rc = ulp_flow_db_init(bp->ulp_ctx);
1023 BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
1027 /* Create the eem table scope. */
1028 rc = ulp_eem_tbl_scope_init(bp);
1030 BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
1034 rc = ulp_mapper_init(bp->ulp_ctx);
1036 BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
1040 rc = ulp_fc_mgr_init(bp->ulp_ctx);
1042 BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
1047 * Enable NAT feature. Set the global configuration register
1048 * Tunnel encap to enable NAT with the reuse of existing inner
1049 * L2 header smac and dmac
1051 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
1052 TF_TUNNEL_ENCAP_NAT,
1053 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1055 BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
1059 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
1060 TF_TUNNEL_ENCAP_NAT,
1061 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
1063 BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
1066 BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
/* Common error path: unwind everything set up above. */
1070 bnxt_ulp_deinit(bp, session);
1075 * When a port is initialized by dpdk. This functions sets up
1076 * the port specific details.
1079 bnxt_ulp_port_init(struct bnxt *bp)
1081 struct bnxt_ulp_session_state *session;
1085 if (!bp || !BNXT_TRUFLOW_EN(bp))
/* ULP is only set up for PFs and trusted VFs. */
1088 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1090 "Skip ulp init for port: %d, not a TVF or PF\n",
1091 bp->eth_dev->data->port_id);
1096 BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
1100 bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
1101 sizeof(struct bnxt_ulp_context), 0);
1103 BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
1108 * Multiple uplink ports can be associated with a single vswitch.
1109 * Make sure only the port that is started first will initialize
1112 session = ulp_session_init(bp, &initialized);
1114 BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
1121 * If ULP is already initialized for a specific domain then
1122 * simply assign the ulp context to this rte_eth_dev.
1124 rc = ulp_ctx_attach(bp, session);
1126 BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
/* First port on this adapter: perform the full ULP bring-up. */
1130 rc = bnxt_ulp_init(bp, session);
1132 BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
1137 /* Update bnxt driver flags */
1138 rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
1140 BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
1144 /* update the port database for the given interface */
1145 rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
1147 BNXT_TF_DBG(ERR, "Failed to update port database\n");
1150 /* create the default rules */
1151 rc = bnxt_ulp_create_df_rules(bp);
1153 BNXT_TF_DBG(ERR, "Failed to create default flow\n");
1157 if (BNXT_ACCUM_STATS_EN(bp))
1158 bp->ulp_ctx->cfg_data->accum_stats = true;
1160 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
1161 bp->eth_dev->data->port_id);
/* Error path: tear down whatever this port set up. */
1165 bnxt_ulp_port_deinit(bp);
1170 * When a port is de-initialized by dpdk. This functions clears up
1171 * the port specific details.
1174 bnxt_ulp_port_deinit(struct bnxt *bp)
1176 struct bnxt_ulp_session_state *session;
1177 struct rte_pci_device *pci_dev;
1178 struct rte_pci_addr *pci_addr;
1180 if (!BNXT_TRUFLOW_EN(bp))
1183 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1185 "Skip ULP deinit port:%d, not a TVF or PF\n",
1186 bp->eth_dev->data->port_id);
1191 BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
1195 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
1196 bp->eth_dev->data->port_id);
1198 /* Get the session details */
1199 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
1200 pci_addr = &pci_dev->addr;
1201 pthread_mutex_lock(&bnxt_ulp_global_mutex);
1202 session = ulp_get_session(pci_addr);
1203 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
1205 /* session not found then just exit */
1207 /* Free the ulp context */
1208 rte_free(bp->ulp_ctx);
1213 /* Check the reference count to deinit or deattach*/
/* Last reference performs the full deinit; otherwise just detach. */
1214 if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
1215 bp->ulp_ctx->cfg_data->ref_cnt--;
1216 if (bp->ulp_ctx->cfg_data->ref_cnt) {
1217 /* free the port details */
1218 /* Free the default flow rule associated to this port */
1219 bnxt_ulp_destroy_df_rules(bp, false);
1220 bnxt_ulp_destroy_vfr_default_rules(bp, false);
1222 /* free flows associated with this port */
1223 bnxt_ulp_flush_port_flows(bp);
1225 /* close the session associated with this port */
1228 /* Perform ulp ctx deinit */
1229 bnxt_ulp_deinit(bp, session);
1233 /* clean up the session */
1234 ulp_session_deinit(session);
1236 /* Free the ulp context */
1237 rte_free(bp->ulp_ctx);
1241 /* Below are the access functions to access internal data of ulp context. */
1242 /* Function to set the Mark DB into the context */
1244 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1245 struct bnxt_ulp_mark_tbl *mark_tbl)
/* Reject a context without cfg_data backing. */
1247 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1248 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1252 ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1257 /* Function to retrieve the Mark DB from the context. */
1258 struct bnxt_ulp_mark_tbl *
1259 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
/* Returns NULL for an invalid or uninitialized context. */
1261 if (!ulp_ctx || !ulp_ctx->cfg_data)
1264 return ulp_ctx->cfg_data->mark_tbl;
1267 /* Function to set the device id of the hardware. */
1269 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
/* Writes only when the context and its cfg_data are valid. */
1272 if (ulp_ctx && ulp_ctx->cfg_data) {
1273 ulp_ctx->cfg_data->dev_id = dev_id;
1280 /* Function to get the device id of the hardware. */
1282 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
/* Outputs through dev_id when the context is valid. */
1285 if (ulp_ctx && ulp_ctx->cfg_data) {
1286 *dev_id = ulp_ctx->cfg_data->dev_id;
1290 BNXT_TF_DBG(ERR, "Failed to read dev_id from ulp ctxt\n");
/* Record the flow memory type (internal vs external/EEM) in the context. */
1295 bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx,
1296 enum bnxt_ulp_flow_mem_type mem_type)
1298 if (ulp_ctx && ulp_ctx->cfg_data) {
1299 ulp_ctx->cfg_data->mem_type = mem_type;
1302 BNXT_TF_DBG(ERR, "Failed to write mem_type in ulp ctxt\n");
/* Read back the flow memory type stored in the context. */
1307 bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx,
1308 enum bnxt_ulp_flow_mem_type *mem_type)
1310 if (ulp_ctx && ulp_ctx->cfg_data) {
1311 *mem_type = ulp_ctx->cfg_data->mem_type;
1314 BNXT_TF_DBG(ERR, "Failed to read mem_type in ulp ctxt\n");
1318 /* Function to get the table scope id of the EEM table. */
1320 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1321 uint32_t *tbl_scope_id)
/* Outputs the scope id when the context is valid. */
1323 if (ulp_ctx && ulp_ctx->cfg_data) {
1324 *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1331 /* Function to set the table scope id of the EEM table. */
1333 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1334 uint32_t tbl_scope_id)
/* Stores the scope id when the context is valid. */
1336 if (ulp_ctx && ulp_ctx->cfg_data) {
1337 ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1344 /* Function to set the tfp session details from the ulp context. */
1346 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1349 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1357 /* Function to get the tfp session details from the ulp context. */
1359 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
1362 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1369 * Get the device table entry based on the device id.
1371 * dev_id [in] The device id of the hardware
1373 * Returns the pointer to the device parameters.
1375 struct bnxt_ulp_device_params *
1376 bnxt_ulp_device_params_get(uint32_t dev_id)
1378 if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1379 return &ulp_device_params[dev_id];
1383 /* Function to set the flow database to the ulp context. */
1385 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1386 struct bnxt_ulp_flow_db *flow_db)
1388 if (!ulp_ctx || !ulp_ctx->cfg_data)
1391 ulp_ctx->cfg_data->flow_db = flow_db;
1395 /* Function to get the flow database from the ulp context. */
1396 struct bnxt_ulp_flow_db *
1397 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1399 if (!ulp_ctx || !ulp_ctx->cfg_data)
1402 return ulp_ctx->cfg_data->flow_db;
1405 /* Function to get the tunnel cache table info from the ulp context. */
1406 struct bnxt_tun_cache_entry *
1407 bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
1409 if (!ulp_ctx || !ulp_ctx->cfg_data)
1412 return ulp_ctx->cfg_data->tun_tbl;
1415 /* Function to get the ulp context from eth device. */
1416 struct bnxt_ulp_context *
1417 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1419 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1421 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1422 struct bnxt_representor *vfr = dev->data->dev_private;
1424 bp = vfr->parent_dev->data->dev_private;
1428 BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
1435 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1438 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1439 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1443 ulp_ctx->cfg_data->mapper_data = mapper_data;
1448 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1450 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1451 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1455 return ulp_ctx->cfg_data->mapper_data;
1458 /* Function to set the port database to the ulp context. */
1460 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1461 struct bnxt_ulp_port_db *port_db)
1463 if (!ulp_ctx || !ulp_ctx->cfg_data)
1466 ulp_ctx->cfg_data->port_db = port_db;
1470 /* Function to get the port database from the ulp context. */
1471 struct bnxt_ulp_port_db *
1472 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1474 if (!ulp_ctx || !ulp_ctx->cfg_data)
1477 return ulp_ctx->cfg_data->port_db;
1480 /* Function to set the flow counter info into the context */
1482 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1483 struct bnxt_ulp_fc_info *ulp_fc_info)
1485 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1486 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1490 ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1495 /* Function to retrieve the flow counter info from the context. */
1496 struct bnxt_ulp_fc_info *
1497 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1499 if (!ulp_ctx || !ulp_ctx->cfg_data)
1502 return ulp_ctx->cfg_data->fc_info;
1505 /* Function to get the ulp flags from the ulp context. */
1507 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1510 if (!ulp_ctx || !ulp_ctx->cfg_data)
1513 *flags = ulp_ctx->cfg_data->ulp_flags;
1517 /* Function to get the ulp vfr info from the ulp context. */
1518 struct bnxt_ulp_vfr_rule_info*
1519 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1522 if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1525 return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1528 /* Function to acquire the flow database lock from the ulp context. */
1530 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1532 if (!ulp_ctx || !ulp_ctx->cfg_data)
1535 if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1536 BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1542 /* Function to release the flow database lock from the ulp context. */
1544 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1546 if (!ulp_ctx || !ulp_ctx->cfg_data)
1549 pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);