1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
14 #include "bnxt_tf_common.h"
16 #include "tf_ext_flow_handle.h"
18 #include "ulp_template_db_enum.h"
19 #include "ulp_template_struct.h"
20 #include "ulp_mark_mgr.h"
21 #include "ulp_fc_mgr.h"
22 #include "ulp_flow_db.h"
23 #include "ulp_mapper.h"
24 #include "ulp_port_db.h"
26 /* Linked list of all TF sessions. */
27 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
28 STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
/* Process-wide mutex: guards every insert/remove/lookup on the list above. */
30 /* Mutex to synchronize bnxt_ulp_session_list operations. */
31 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
34 * Allow the deletion of context only for the bnxt device that
35 * created the session.
/*
 * NOTE(review): this excerpt elides lines (return type, braces and the
 * return statements are missing — gaps in the embedded line numbers).
 * Visible logic: NULL-guard on ctx/cfg_data, then a zero-ref_cnt check
 * that logs "shall initiate deinit". Verify the elided returns against
 * the full source before relying on this fragment.
 */
38 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
40 if (!ulp_ctx || !ulp_ctx->cfg_data)
/* Only when no other port still references the shared cfg_data. */
43 if (!ulp_ctx->cfg_data->ref_cnt) {
44 BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
52 * Initialize an ULP session.
53 * An ULP session will contain all the resources needed to support rte flow
54 * offloads. A session is initialized as part of rte_eth_device start.
55 * A single vswitch instance can have multiple uplinks which means
56 * rte_eth_device start will be called for each of these devices.
57 * ULP session manager will make sure that a single ULP session is only
58 * initialized once. Apart from this, it also initializes MARK database,
59 * EEM table & flow database. ULP session manager also manages a list of
60 * all opened ULP sessions.
/*
 * NOTE(review): excerpt — return type, `int rc`, error-path returns and
 * closing braces are elided. Also `¶ms` below is mojibake for
 * `&params` (HTML-entity corruption); do not compile as-is.
 */
63 ulp_ctx_session_open(struct bnxt *bp,
64 struct bnxt_ulp_session_state *session)
66 struct rte_eth_dev *ethdev = bp->eth_dev;
68 struct tf_open_session_parms params;
69 struct tf_session_resources *resources;
71 memset(¶ms, 0, sizeof(params));
/* TF control channel is named after the DPDK port. */
73 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
74 params.ctrl_chan_name);
76 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
77 ethdev->data->port_id, rc);
81 params.shadow_copy = true;
/* Whitney+ device; resource counts below are per-direction quotas. */
82 params.device_type = TF_DEVICE_TYPE_WH;
83 resources = ¶ms.resources;
/* RX-side identifier counts. */
86 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
87 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
88 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
89 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
90 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* RX-side table counts. */
93 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
94 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
95 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
98 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
99 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
/* RX-side TCAM counts (RHS values of the HIGH/LOW entries are elided). */
102 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
104 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
106 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
107 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;
/* RX-side exact-match record / table-scope counts. */
110 resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13168;
113 resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
116 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 255;
/* TX-side identifier counts. */
120 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
121 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 148;
122 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
123 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
124 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
/* TX-side table counts. */
127 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
128 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
129 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
132 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
133 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
134 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
/* TX-side TCAM counts (HIGH/LOW RHS values elided in this excerpt). */
137 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
139 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
141 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
142 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
/* TX-side exact-match record / table-scope / source-property counts. */
145 resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
148 resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
151 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
152 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = 511;
154 rc = tf_open_session(&bp->tfp, ¶ms);
156 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
157 params.ctrl_chan_name, rc);
/* First opener records itself as the session owner (g_tfp). */
160 if (!session->session_opened) {
161 session->session_opened = 1;
162 session->g_tfp = &bp->tfp;
168 * Close the ULP session.
169 * It takes the ulp context pointer.
/*
 * NOTE(review): excerpt — return type and braces elided. As shown,
 * session_opened/g_tfp are cleared unconditionally after the optional
 * tf_close_session(); confirm brace placement in the full source.
 */
172 ulp_ctx_session_close(struct bnxt *bp,
173 struct bnxt_ulp_session_state *session)
175 /* close the session in the hardware */
176 if (session->session_opened)
177 tf_close_session(&bp->tfp);
178 session->session_opened = 0;
179 session->g_tfp = NULL;
/*
 * Populate EEM table-scope allocation parameters from device params.
 * NOTE(review): excerpt — the branch structure is elided; two parallel
 * rx/tx assignment sets appear (one using BNXT_ULP_*_NUM_FLOWS, one
 * using dparms->flow_db_num_entries / 1024), presumably an if/else on
 * whether dparms was found — confirm against the full source.
 */
183 bnxt_init_tbl_scope_parms(struct bnxt *bp,
184 struct tf_alloc_tbl_scope_parms *params)
186 struct bnxt_ulp_device_params *dparms;
190 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
192 /* TBD: For now, just use default. */
195 dparms = bnxt_ulp_device_params_get(dev_id);
198 * Set the flush timer for EEM entries. The value is in 100ms intervals,
/* 100 * 100ms = 10s cache flush interval. */
201 params->hw_flow_cache_flush_timer = 100;
204 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
205 params->rx_max_action_entry_sz_in_bits =
206 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
207 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
208 params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
209 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
211 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
212 params->tx_max_action_entry_sz_in_bits =
213 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
214 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
215 params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
216 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
/* Second set: flow counts derived from the device-param flow DB size. */
218 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
219 params->rx_max_action_entry_sz_in_bits =
220 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
221 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
222 params->rx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
223 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
225 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
226 params->tx_max_action_entry_sz_in_bits =
227 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
228 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
229 params->tx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
230 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
234 /* Initialize Extended Exact Match host memory. */
/*
 * NOTE(review): excerpt — return type, `rc`/`dev_id` declarations,
 * error returns and closing braces are elided. `¶ms` below is
 * mojibake for `&params`.
 */
236 ulp_eem_tbl_scope_init(struct bnxt *bp)
238 struct tf_alloc_tbl_scope_parms params = {0};
240 struct bnxt_ulp_device_params *dparms;
243 /* Get the dev specific number of flows that needed to be supported. */
244 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
245 BNXT_TF_DBG(ERR, "Invalid device id\n");
249 dparms = bnxt_ulp_device_params_get(dev_id);
251 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
/* Table scope (EEM host memory) is only needed for external flow mem. */
255 if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
256 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
260 bnxt_init_tbl_scope_parms(bp, ¶ms);
262 rc = tf_alloc_tbl_scope(&bp->tfp, ¶ms);
264 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
/* Remember the allocated scope id in the ulp context for later free. */
269 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
271 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
278 /* Free Extended Exact Match host memory */
/*
 * NOTE(review): excerpt — return type, `struct tf *tfp`, `rc`/`dev_id`
 * declarations and error returns are elided. `¶ms.tbl_scope_id`
 * below is mojibake for `&params.tbl_scope_id`.
 */
280 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
282 struct tf_free_tbl_scope_parms params = {0};
285 struct bnxt_ulp_device_params *dparms;
288 if (!ulp_ctx || !ulp_ctx->cfg_data)
291 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
293 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
297 /* Get the dev specific number of flows that needed to be supported. */
298 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
299 BNXT_TF_DBG(ERR, "Invalid device id\n");
303 dparms = bnxt_ulp_device_params_get(dev_id);
305 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
/* Mirror of the alloc path: only external flow memory has a scope. */
309 if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
310 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
314 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, ¶ms.tbl_scope_id);
316 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
320 rc = tf_free_tbl_scope(tfp, ¶ms);
322 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
328 /* The function to free and deinit the ulp context data. */
/*
 * NOTE(review): excerpt — return type/returns elided. Frees the shared
 * cfg_data through the session pointer and NULLs both aliases
 * (bp->ulp_ctx->cfg_data and session->cfg_data) to avoid dangling use.
 */
330 ulp_ctx_deinit(struct bnxt *bp,
331 struct bnxt_ulp_session_state *session)
333 /* close the tf session */
334 ulp_ctx_session_close(bp, session);
336 /* Free the contents */
337 if (session->cfg_data) {
338 rte_free(session->cfg_data);
339 bp->ulp_ctx->cfg_data = NULL;
340 session->cfg_data = NULL;
345 /* The function to allocate and initialize the ulp context data. */
/*
 * NOTE(review): excerpt — return type, `rc` declaration, NULL-check
 * branch after rte_zmalloc and the error/success returns are elided.
 */
347 ulp_ctx_init(struct bnxt *bp,
348 struct bnxt_ulp_session_state *session)
350 struct bnxt_ulp_data *ulp_data;
353 /* Allocate memory to hold ulp context data. */
354 ulp_data = rte_zmalloc("bnxt_ulp_data",
355 sizeof(struct bnxt_ulp_data), 0);
357 BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
361 /* Increment the ulp context data reference count usage. */
/* cfg_data is shared: both the per-port ctx and the session point at it. */
362 bp->ulp_ctx->cfg_data = ulp_data;
363 session->cfg_data = ulp_data;
365 ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
367 /* Open the ulp session. */
368 rc = ulp_ctx_session_open(bp, session);
/* On open failure: mark opened so deinit closes it, then tear down. */
370 session->session_opened = 1;
371 (void)ulp_ctx_deinit(bp, session);
375 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
379 /* The function to initialize ulp dparms with devargs */
/*
 * NOTE(review): excerpt — return type, `dev_id` declaration and the
 * early returns after each guard are elided. Applies the devargs
 * max_num_kflows override to the device parameter table.
 */
381 ulp_dparms_init(struct bnxt *bp,
382 struct bnxt_ulp_context *ulp_ctx)
384 struct bnxt_ulp_device_params *dparms;
/* No devargs override supplied: keep compiled-in defaults. */
387 if (!bp->max_num_kflows)
390 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
391 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
395 dparms = bnxt_ulp_device_params_get(dev_id);
397 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
401 /* num_flows = max_num_kflows * 1024 */
402 dparms->flow_db_num_entries = bp->max_num_kflows * 1024;
403 /* GFID = 2 * num_flows */
404 dparms->mark_db_gfid_entries = dparms->flow_db_num_entries * 2;
405 BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
406 dparms->flow_db_num_entries);
411 /* The function to initialize bp flags with truflow features */
/* NOTE(review): excerpt — return type/returns elided for this helper too. */
413 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
414 struct bnxt_ulp_context *ulp_ctx)
416 struct bnxt_ulp_device_params *dparms;
419 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
420 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
424 dparms = bnxt_ulp_device_params_get(dev_id);
426 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
430 /* Update the bp flag with gfid flag */
431 if (dparms->flow_mem_type == BNXT_ULP_FLOW_MEM_TYPE_EXT)
432 bp->flags |= BNXT_FLAG_GFID_ENABLE;
/*
 * Attach this port to an already-initialized ULP session: share the
 * session's cfg_data (bumping ref_cnt) and open a TF client on the
 * existing session. NOTE(review): excerpt — return type, `rc`
 * declaration and returns are elided.
 */
438 ulp_ctx_attach(struct bnxt *bp,
439 struct bnxt_ulp_session_state *session)
443 /* Increment the ulp context data reference count usage. */
444 bp->ulp_ctx->cfg_data = session->cfg_data;
445 bp->ulp_ctx->cfg_data->ref_cnt++;
447 /* update the session details in bnxt tfp */
448 bp->tfp.session = session->g_tfp->session;
450 /* Create a TF Client */
451 rc = ulp_ctx_session_open(bp, session);
453 PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
/* Undo the borrowed session pointer on failure. */
454 bp->tfp.session = NULL;
458 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Detach counterpart: close this port's TF client if one is open. */
463 ulp_ctx_detach(struct bnxt *bp)
465 if (bp->tfp.session) {
466 tf_close_session(&bp->tfp);
467 bp->tfp.session = NULL;
472 * Initialize the state of an ULP session.
473 * If the state of an ULP session is not initialized, set it's state to
474 * initialized. If the state is already initialized, do nothing.
/* NOTE(review): excerpt — the branch that sets *init and the else path
 * are partially elided; only the first-init arm is visible. */
477 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
479 pthread_mutex_lock(&session->bnxt_ulp_mutex);
481 if (!session->bnxt_ulp_init) {
482 session->bnxt_ulp_init = true;
488 pthread_mutex_unlock(&session->bnxt_ulp_mutex);
492 * Check if an ULP session is already allocated for a specific PCI
493 * domain & bus. If it is already allocated simply return the session
494 * pointer, otherwise allocate a new session.
/* NOTE(review): excerpt — match-return and NULL-return lines are elided.
 * Caller must hold bnxt_ulp_global_mutex while walking the list. */
496 static struct bnxt_ulp_session_state *
497 ulp_get_session(struct rte_pci_addr *pci_addr)
499 struct bnxt_ulp_session_state *session;
/* Sessions are keyed by PCI domain+bus, i.e. one per adapter. */
501 STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
502 if (session->pci_info.domain == pci_addr->domain &&
503 session->pci_info.bus == pci_addr->bus) {
511 * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
512 * If it's already initialized simply return the already existing session.
/* NOTE(review): excerpt — the `bool *init` parameter line, NULL guards,
 * `rc` declaration and several returns are elided. */
514 static struct bnxt_ulp_session_state *
515 ulp_session_init(struct bnxt *bp,
518 struct rte_pci_device *pci_dev;
519 struct rte_pci_addr *pci_addr;
520 struct bnxt_ulp_session_state *session;
526 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
527 pci_addr = &pci_dev->addr;
/* Global lock serializes lookup + insert so only one session per bus. */
529 pthread_mutex_lock(&bnxt_ulp_global_mutex);
531 session = ulp_get_session(pci_addr);
533 /* Not Found the session Allocate a new one */
534 session = rte_zmalloc("bnxt_ulp_session",
535 sizeof(struct bnxt_ulp_session_state),
539 "Allocation failed for bnxt_ulp_session\n");
540 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
544 /* Add it to the queue */
545 session->pci_info.domain = pci_addr->domain;
546 session->pci_info.bus = pci_addr->bus;
547 rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
549 BNXT_TF_DBG(ERR, "mutex create failed\n");
550 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
553 STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
557 ulp_context_initialized(session, init);
558 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
563 * When a device is closed, remove it's associated session from the global
/* NOTE(review): excerpt — NULL guard on session and rte_free(session)
 * are elided; only sessions with freed cfg_data are unlinked. */
567 ulp_session_deinit(struct bnxt_ulp_session_state *session)
572 if (!session->cfg_data) {
573 pthread_mutex_lock(&bnxt_ulp_global_mutex);
574 STAILQ_REMOVE(&bnxt_ulp_session_list, session,
575 bnxt_ulp_session_state, next);
576 pthread_mutex_destroy(&session->bnxt_ulp_mutex);
578 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
583 * Internal api to enable NAT feature.
584 * Set set_flag to 1 to set the value or zero to reset the value.
585 * returns 0 on success.
/* NOTE(review): excerpt — the parameter list is partially elided (dir,
 * offset, value, set_flag are referenced below), as are the set-branch
 * (`global_cfg |= value`) and several returns. Comma operators on the
 * parms assignments are in the original — intentional, not a typo. */
588 bnxt_ulp_global_cfg_update(struct bnxt *bp,
590 enum tf_global_config_type type,
595 uint32_t global_cfg = 0;
597 struct tf_global_cfg_parms parms = { 0 };
599 /* Initialize the params */
602 parms.offset = offset,
603 parms.config = (uint8_t *)&global_cfg,
604 parms.config_sz_in_bytes = sizeof(global_cfg);
/* Read-modify-write: fetch current register value first. */
606 rc = tf_get_global_cfg(&bp->tfp, &parms);
608 BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
/* Visible arm clears the bits; the set arm is elided in this excerpt. */
616 global_cfg &= ~value;
618 /* SET the register RE_CFA_REG_ACT_TECT */
619 rc = tf_set_global_cfg(&bp->tfp, &parms);
621 BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
628 /* Internal function to delete all the flows belonging to the given port */
/* NOTE(review): excerpt — `uint16_t func_id` declaration and return are
 * elided. */
630 bnxt_ulp_flush_port_flows(struct bnxt *bp)
634 /* it is assumed that port is either TVF or PF */
635 if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
636 bp->eth_dev->data->port_id,
638 BNXT_TF_DBG(ERR, "Invalid argument\n");
641 (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
644 /* Internal function to delete the VFR default flows */
/* NOTE(review): excerpt — `port_id` declaration, the `info->valid`-style
 * guard and some braces are elided. `global` selects all ports vs only
 * VFRs parented to this port. */
646 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
648 struct bnxt_ulp_vfr_rule_info *info;
650 struct rte_eth_dev *vfr_eth_dev;
651 struct bnxt_representor *vfr_bp;
/* Only the parent (non-representor) TruFlow port removes VFR rules. */
653 if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
656 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
659 /* Delete default rules for all ports */
660 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
661 info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
665 if (!global && info->parent_port_id !=
666 bp->eth_dev->data->port_id)
669 /* Destroy the flows */
670 ulp_default_flow_destroy(bp->eth_dev, info->rep2vf_flow_id);
671 ulp_default_flow_destroy(bp->eth_dev, info->vf2rep_flow_id);
672 /* Clean up the tx action pointer */
673 vfr_eth_dev = &rte_eth_devices[port_id];
675 vfr_bp = vfr_eth_dev->data->dev_private;
676 vfr_bp->vfr_tx_cfa_action = 0;
/* Invalidate the rule-info slot once its flows are destroyed. */
678 memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
683 * When a port is deinit'ed by dpdk. This function is called
684 * and this function clears the ULP context and rest of the
685 * infrastructure associated with it.
/* NOTE(review): excerpt — return type and early-return after the guard
 * are elided. Teardown order mirrors bnxt_ulp_init in reverse. */
688 bnxt_ulp_deinit(struct bnxt *bp,
689 struct bnxt_ulp_session_state *session)
691 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
694 /* clean up default flows */
695 bnxt_ulp_destroy_df_rules(bp, true);
697 /* clean up default VFR flows */
698 bnxt_ulp_destroy_vfr_default_rules(bp, true);
700 /* clean up regular flows */
701 ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);
703 /* cleanup the eem table scope */
704 ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
706 /* cleanup the flow database */
707 ulp_flow_db_deinit(bp->ulp_ctx);
709 /* Delete the Mark database */
710 ulp_mark_db_deinit(bp->ulp_ctx);
712 /* cleanup the ulp mapper */
713 ulp_mapper_deinit(bp->ulp_ctx);
715 /* Delete the Flow Counter Manager */
716 ulp_fc_mgr_deinit(bp->ulp_ctx);
718 /* Delete the Port database */
719 ulp_port_db_deinit(bp->ulp_ctx);
721 /* Disable NAT feature */
722 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
724 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
726 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
728 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0);
730 /* free the flow db lock */
731 pthread_mutex_destroy(&bp->ulp_ctx->cfg_data->flow_db_lock);
733 /* Delete the ulp context and tf session and free the ulp context */
734 ulp_ctx_deinit(bp, session);
735 BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
739 * When a port is initialized by dpdk. This functions is called
740 * and this function initializes the ULP context and rest of the
741 * infrastructure associated with it.
/* NOTE(review): excerpt — `rc` declaration, `goto jump_to_error`-style
 * branches, the error label and final returns are elided. Each init
 * stage below is matched by a deinit stage in bnxt_ulp_deinit. */
744 bnxt_ulp_init(struct bnxt *bp,
745 struct bnxt_ulp_session_state *session)
749 /* Allocate and Initialize the ulp context. */
750 rc = ulp_ctx_init(bp, session);
752 BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
756 rc = pthread_mutex_init(&bp->ulp_ctx->cfg_data->flow_db_lock, NULL);
758 BNXT_TF_DBG(ERR, "Unable to initialize flow db lock\n");
762 /* Initialize ulp dparms with values devargs passed */
763 rc = ulp_dparms_init(bp, bp->ulp_ctx);
765 BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
769 /* create the port database */
770 rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
772 BNXT_TF_DBG(ERR, "Failed to create the port database\n");
776 /* Create the Mark database. */
777 rc = ulp_mark_db_init(bp->ulp_ctx);
779 BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
783 /* Create the flow database. */
784 rc = ulp_flow_db_init(bp->ulp_ctx);
786 BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
790 /* Create the eem table scope. */
791 rc = ulp_eem_tbl_scope_init(bp);
793 BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
797 rc = ulp_mapper_init(bp->ulp_ctx);
799 BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
803 rc = ulp_fc_mgr_init(bp->ulp_ctx);
805 BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
810 * Enable NAT feature. Set the global configuration register
811 * Tunnel encap to enable NAT with the reuse of existing inner
812 * L2 header smac and dmac
814 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
816 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
818 BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
822 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
824 BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1);
826 BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
829 BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
/* Shared error path: full teardown of whatever was initialized. */
833 bnxt_ulp_deinit(bp, session);
838 * When a port is initialized by dpdk. This functions sets up
839 * the port specific details.
/* NOTE(review): excerpt — `rc`/`initialized` declarations, the
 * `bp->ulp_ctx` already-allocated guard, the attach-vs-init branch
 * condition (`initialized`), and error/return lines are elided. */
842 bnxt_ulp_port_init(struct bnxt *bp)
844 struct bnxt_ulp_session_state *session;
848 if (!bp || !BNXT_TRUFLOW_EN(bp))
/* ULP is only set up on the PF or a trusted VF. */
851 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
853 "Skip ulp init for port: %d, not a TVF or PF\n",
854 bp->eth_dev->data->port_id);
859 BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
863 bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
864 sizeof(struct bnxt_ulp_context), 0);
866 BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
871 * Multiple uplink ports can be associated with a single vswitch.
872 * Make sure only the port that is started first will initialize
875 session = ulp_session_init(bp, &initialized);
877 BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
884 * If ULP is already initialized for a specific domain then
885 * simply assign the ulp context to this rte_eth_dev.
887 rc = ulp_ctx_attach(bp, session);
889 BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
893 rc = bnxt_ulp_init(bp, session);
895 BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
900 /* Update bnxt driver flags */
901 rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
903 BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
907 /* update the port database for the given interface */
908 rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
910 BNXT_TF_DBG(ERR, "Failed to update port database\n");
913 /* create the default rules */
914 bnxt_ulp_create_df_rules(bp);
915 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
916 bp->eth_dev->data->port_id);
/* Error label: unwind everything done so far for this port. */
920 bnxt_ulp_port_deinit(bp);
925 * When a port is de-initialized by dpdk. This functions clears up
926 * the port specific details.
/* NOTE(review): excerpt — several guards/returns and the not-found
 * session branch body are elided; the two rte_free(bp->ulp_ctx) lines
 * belong to distinct paths (session-not-found vs normal teardown). */
929 bnxt_ulp_port_deinit(struct bnxt *bp)
931 struct bnxt_ulp_session_state *session;
932 struct rte_pci_device *pci_dev;
933 struct rte_pci_addr *pci_addr;
935 if (!BNXT_TRUFLOW_EN(bp))
938 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
940 "Skip ULP deinit port:%d, not a TVF or PF\n",
941 bp->eth_dev->data->port_id);
946 BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
950 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
951 bp->eth_dev->data->port_id);
953 /* Get the session details */
954 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
955 pci_addr = &pci_dev->addr;
956 pthread_mutex_lock(&bnxt_ulp_global_mutex);
957 session = ulp_get_session(pci_addr);
958 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
960 /* session not found then just exit */
962 /* Free the ulp context */
963 rte_free(bp->ulp_ctx);
968 /* Check the reference count to deinit or deattach*/
969 if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
970 bp->ulp_ctx->cfg_data->ref_cnt--;
/* ref_cnt still nonzero: other ports share the session — detach only. */
971 if (bp->ulp_ctx->cfg_data->ref_cnt) {
972 /* free the port details */
973 /* Free the default flow rule associated to this port */
974 bnxt_ulp_destroy_df_rules(bp, false);
975 bnxt_ulp_destroy_vfr_default_rules(bp, false);
977 /* free flows associated with this port */
978 bnxt_ulp_flush_port_flows(bp);
980 /* close the session associated with this port */
/* Last reference: tear down the whole ULP context. */
983 /* Perform ulp ctx deinit */
984 bnxt_ulp_deinit(bp, session);
988 /* clean up the session */
989 ulp_session_deinit(session);
991 /* Free the ulp context */
992 rte_free(bp->ulp_ctx);
996 /* Below are the access functions to access internal data of ulp context. */
/* NOTE(review): excerpt — throughout these accessors the return types,
 * return statements and closing braces are elided (gaps in the embedded
 * original line numbers). The visible pattern is: NULL-guard on
 * ulp_ctx/cfg_data, then get/set one cfg_data field. */
997 /* Function to set the Mark DB into the context */
999 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
1000 struct bnxt_ulp_mark_tbl *mark_tbl)
1002 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1003 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1007 ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1012 /* Function to retrieve the Mark DB from the context. */
1013 struct bnxt_ulp_mark_tbl *
1014 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1016 if (!ulp_ctx || !ulp_ctx->cfg_data)
1019 return ulp_ctx->cfg_data->mark_tbl;
1022 /* Function to set the device id of the hardware. */
1024 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1027 if (ulp_ctx && ulp_ctx->cfg_data) {
1028 ulp_ctx->cfg_data->dev_id = dev_id;
1035 /* Function to get the device id of the hardware. */
1037 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1040 if (ulp_ctx && ulp_ctx->cfg_data) {
1041 *dev_id = ulp_ctx->cfg_data->dev_id;
1048 /* Function to get the table scope id of the EEM table. */
1050 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1051 uint32_t *tbl_scope_id)
1053 if (ulp_ctx && ulp_ctx->cfg_data) {
1054 *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1061 /* Function to set the table scope id of the EEM table. */
1063 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1064 uint32_t tbl_scope_id)
1066 if (ulp_ctx && ulp_ctx->cfg_data) {
1067 ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1074 /* Function to set the tfp session details from the ulp context. */
/* NOTE(review): bodies of the tfp set/get pair are almost fully elided;
 * only the argument guard and its log line are visible. */
1076 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1079 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1087 /* Function to get the tfp session details from the ulp context. */
1089 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
1092 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1099 * Get the device table entry based on the device id.
1101 * dev_id [in] The device id of the hardware
1103 * Returns the pointer to the device parameters.
1105 struct bnxt_ulp_device_params *
1106 bnxt_ulp_device_params_get(uint32_t dev_id)
/* Bounds-checked lookup into the static device-parameter table. */
1108 if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1109 return &ulp_device_params[dev_id];
1113 /* Function to set the flow database to the ulp context. */
1115 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1116 struct bnxt_ulp_flow_db *flow_db)
1118 if (!ulp_ctx || !ulp_ctx->cfg_data)
1121 ulp_ctx->cfg_data->flow_db = flow_db;
1125 /* Function to get the flow database from the ulp context. */
1126 struct bnxt_ulp_flow_db *
1127 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1129 if (!ulp_ctx || !ulp_ctx->cfg_data)
1132 return ulp_ctx->cfg_data->flow_db;
1135 /* Function to get the ulp context from eth device. */
1136 struct bnxt_ulp_context *
1137 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1139 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
/* Representor ports resolve through their parent device's private data. */
1141 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1142 struct bnxt_representor *vfr = dev->data->dev_private;
1144 bp = vfr->parent_dev->data->dev_private;
1148 BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
/* Set the mapper private data into the context. */
1155 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1158 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1159 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1163 ulp_ctx->cfg_data->mapper_data = mapper_data;
/* Retrieve the mapper private data from the context. */
1168 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1170 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1171 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1175 return ulp_ctx->cfg_data->mapper_data;
1178 /* Function to set the port database to the ulp context. */
1180 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1181 struct bnxt_ulp_port_db *port_db)
1183 if (!ulp_ctx || !ulp_ctx->cfg_data)
1186 ulp_ctx->cfg_data->port_db = port_db;
1190 /* Function to get the port database from the ulp context. */
1191 struct bnxt_ulp_port_db *
1192 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1194 if (!ulp_ctx || !ulp_ctx->cfg_data)
1197 return ulp_ctx->cfg_data->port_db;
1200 /* Function to set the flow counter info into the context */
1202 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1203 struct bnxt_ulp_fc_info *ulp_fc_info)
1205 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1206 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1210 ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1215 /* Function to retrieve the flow counter info from the context. */
1216 struct bnxt_ulp_fc_info *
1217 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1219 if (!ulp_ctx || !ulp_ctx->cfg_data)
1222 return ulp_ctx->cfg_data->fc_info;
1225 /* Function to get the ulp flags from the ulp context. */
1227 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1230 if (!ulp_ctx || !ulp_ctx->cfg_data)
1233 *flags = ulp_ctx->cfg_data->ulp_flags;
1237 /* Function to get the ulp vfr info from the ulp context. */
1238 struct bnxt_ulp_vfr_rule_info*
1239 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
/* Guard also bounds-checks port_id against the rule-info array size. */
1242 if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1245 return &ulp_ctx->cfg_data->vfr_rule_info[port_id];
1248 /* Function to acquire the flow database lock from the ulp context. */
1250 bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1252 if (!ulp_ctx || !ulp_ctx->cfg_data)
1255 if (pthread_mutex_lock(&ulp_ctx->cfg_data->flow_db_lock)) {
1256 BNXT_TF_DBG(ERR, "unable to acquire fdb lock\n");
1262 /* Function to release the flow database lock from the ulp context. */
1264 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx)
1266 if (!ulp_ctx || !ulp_ctx->cfg_data)
1269 pthread_mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);