1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
14 #include "bnxt_tf_common.h"
16 #include "tf_ext_flow_handle.h"
18 #include "ulp_template_db_enum.h"
19 #include "ulp_template_struct.h"
20 #include "ulp_mark_mgr.h"
21 #include "ulp_fc_mgr.h"
22 #include "ulp_flow_db.h"
23 #include "ulp_mapper.h"
24 #include "ulp_port_db.h"
26 /* Linked list of all TF sessions. */
27 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
28 STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
30 /* Mutex to synchronize bnxt_ulp_session_list operations. */
31 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
34 * Allow the deletion of context only for the bnxt device that
35 * created the session.
/*
 * Deinit is permitted only once cfg_data's reference count has dropped to
 * zero, i.e. for the device that created the session.
 * NOTE(review): the return-type line, return statements and closing brace
 * are missing from this excerpt — verify against the full source.
 */
38 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
40 if (!ulp_ctx || !ulp_ctx->cfg_data)
43 if (!ulp_ctx->cfg_data->ref_cnt) {
44 BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
52 * Initialize an ULP session.
53 * An ULP session will contain all the resources needed to support rte flow
54 * offloads. A session is initialized as part of rte_eth_device start.
55 * A single vswitch instance can have multiple uplinks which means
56 * rte_eth_device start will be called for each of these devices.
57 * ULP session manager will make sure that a single ULP session is only
58 * initialized once. Apart from this, it also initializes MARK database,
59 * EEM table & flow database. ULP session manager also manages a list of
60 * all opened ULP sessions.
/*
 * Open a TF session for this device: derive the control channel name from
 * the DPDK port, request per-direction (RX/TX) identifier, table, TCAM and
 * EM resource counts, then record the opened session in 'session'.
 * FIX(review): "&params" had been mojibake-corrupted to a pilcrow (¶) at
 * three sites (HTML entity "&para;"); restored. No other code changed.
 * NOTE(review): several lines (rc declaration/checks, braces, returns) are
 * missing from this excerpt — verify against the full source.
 */
63 ulp_ctx_session_open(struct bnxt *bp,
64 struct bnxt_ulp_session_state *session)
66 struct rte_eth_dev *ethdev = bp->eth_dev;
68 struct tf_open_session_parms params;
69 struct tf_session_resources *resources;
71 memset(&params, 0, sizeof(params));
73 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
74 params.ctrl_chan_name);
76 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
77 ethdev->data->port_id, rc);
81 params.shadow_copy = true;
82 params.device_type = TF_DEVICE_TYPE_WH;
83 resources = &params.resources;
/* RX-direction identifier resource counts. */
86 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
87 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
88 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 8;
89 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 8;
90 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 8;
/* RX-direction table resource counts. */
93 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
94 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
95 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
98 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
99 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
/* RX-direction TCAM resource counts (RHS values for the first two
 * assignments are on lines not visible in this excerpt). */
102 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
104 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
106 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 8;
107 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;
/* RX-direction exact-match resource counts. */
110 resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13176;
113 resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
/* TX-direction identifier resource counts. */
117 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
118 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 144;
119 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 8;
120 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 8;
121 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 8;
/* TX-direction table resource counts. */
124 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
125 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 8192;
126 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
129 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
130 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 200;
131 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
/* TX-direction TCAM resource counts. */
134 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
136 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
138 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 8;
139 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 8;
/* TX-direction exact-match resource counts. */
142 resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
145 resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
148 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
150 rc = tf_open_session(&bp->tfp, &params);
152 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
153 params.ctrl_chan_name, rc);
/* First opener of the shared session records the global tfp pointer. */
156 if (!session->session_opened) {
157 session->session_opened = 1;
158 session->g_tfp = &bp->tfp;
164 * Close the ULP session.
165 * It takes the ulp context pointer.
/*
 * Close the hardware TF session (if one was opened) and clear the
 * session-open bookkeeping unconditionally.
 */
168 ulp_ctx_session_close(struct bnxt *bp,
169 struct bnxt_ulp_session_state *session)
171 /* close the session in the hardware */
172 if (session->session_opened)
173 tf_close_session(&bp->tfp);
174 session->session_opened = 0;
175 session->g_tfp = NULL;
/*
 * Populate the EEM table-scope allocation parameters.
 * NOTE(review): the two RX/TX assignment groups below appear to be the two
 * arms of an if/else on whether dparms was found (defaults vs. values
 * derived from dparms->flow_db_num_entries); the branch lines are missing
 * from this excerpt — confirm against the full source.
 */
179 bnxt_init_tbl_scope_parms(struct bnxt *bp,
180 struct tf_alloc_tbl_scope_parms *params)
182 struct bnxt_ulp_device_params *dparms;
186 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
188 /* TBD: For now, just use default. */
191 dparms = bnxt_ulp_device_params_get(dev_id);
194 * Set the flush timer for EEM entries. The value is in 100ms intervals,
197 params->hw_flow_cache_flush_timer = 100;
/* Default (compile-time constant) sizing. */
200 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
201 params->rx_max_action_entry_sz_in_bits =
202 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
203 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
204 params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
205 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
207 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
208 params->tx_max_action_entry_sz_in_bits =
209 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
210 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
211 params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
212 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
/* Device-parameter driven sizing (flow counts scaled to units of 1K). */
214 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
215 params->rx_max_action_entry_sz_in_bits =
216 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
217 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
218 params->rx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
219 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
221 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
222 params->tx_max_action_entry_sz_in_bits =
223 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
224 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
225 params->tx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
226 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
230 /* Initialize Extended Exact Match host memory. */
/*
 * Allocate the EEM (Extended Exact Match) table scope for this device and
 * store the resulting scope id in the ulp context. Skipped entirely when
 * the device's flow memory type is not external (EXT).
 * FIX(review): "&params" had been mojibake-corrupted to a pilcrow (¶) at
 * two sites; restored. No other code changed.
 * NOTE(review): rc checks/returns are missing from this excerpt.
 */
232 ulp_eem_tbl_scope_init(struct bnxt *bp)
234 struct tf_alloc_tbl_scope_parms params = {0};
236 struct bnxt_ulp_device_params *dparms;
239 /* Get the dev specific number of flows that needed to be supported. */
240 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
241 BNXT_TF_DBG(ERR, "Invalid device id\n");
245 dparms = bnxt_ulp_device_params_get(dev_id);
247 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
251 if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
252 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
256 bnxt_init_tbl_scope_parms(bp, &params);
258 rc = tf_alloc_tbl_scope(&bp->tfp, &params);
260 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
265 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
267 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
274 /* Free Extended Exact Match host memory */
/*
 * Free the EEM table scope previously allocated for this context; a no-op
 * when the device's flow memory type is not external (EXT).
 * FIX(review): "&params" had been mojibake-corrupted to a pilcrow (¶) at
 * two sites; restored. No other code changed.
 * NOTE(review): rc checks/returns are missing from this excerpt.
 */
276 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
278 struct tf_free_tbl_scope_parms params = {0};
281 struct bnxt_ulp_device_params *dparms;
284 if (!ulp_ctx || !ulp_ctx->cfg_data)
287 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
289 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
293 /* Get the dev specific number of flows that needed to be supported. */
294 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
295 BNXT_TF_DBG(ERR, "Invalid device id\n");
299 dparms = bnxt_ulp_device_params_get(dev_id);
301 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
305 if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
306 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
310 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
312 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
316 rc = tf_free_tbl_scope(tfp, &params);
318 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
324 /* The function to free and deinit the ulp context data. */
/*
 * Tear down the ulp context: close the TF session, then free the shared
 * cfg_data and clear both the context's and the session's pointer to it.
 */
326 ulp_ctx_deinit(struct bnxt *bp,
327 struct bnxt_ulp_session_state *session)
329 /* close the tf session */
330 ulp_ctx_session_close(bp, session);
332 /* Free the contents */
333 if (session->cfg_data) {
334 rte_free(session->cfg_data);
335 bp->ulp_ctx->cfg_data = NULL;
336 session->cfg_data = NULL;
341 /* The function to allocate and initialize the ulp context data. */
/*
 * Allocate zero-initialized cfg_data, wire it into both the ulp context
 * and the session, enable VF-rep support, then open the TF session.
 * On open failure, ulp_ctx_deinit() undoes the allocation.
 * NOTE(review): rc declaration and the failure-branch lines around the
 * session open are missing from this excerpt.
 */
343 ulp_ctx_init(struct bnxt *bp,
344 struct bnxt_ulp_session_state *session)
346 struct bnxt_ulp_data *ulp_data;
349 /* Allocate memory to hold ulp context data. */
350 ulp_data = rte_zmalloc("bnxt_ulp_data",
351 sizeof(struct bnxt_ulp_data), 0);
353 BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
357 /* Increment the ulp context data reference count usage. */
358 bp->ulp_ctx->cfg_data = ulp_data;
359 session->cfg_data = ulp_data;
361 ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
363 /* Open the ulp session. */
364 rc = ulp_ctx_session_open(bp, session);
366 session->session_opened = 1;
367 (void)ulp_ctx_deinit(bp, session);
371 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
375 /* The function to initialize ulp dparms with devargs */
/*
 * Override device parameters from the "max_num_kflows" devarg:
 * flow count = max_num_kflows * 1024, GFID entries = 2 * flow count.
 * Returns early (no override) when the devarg was not supplied.
 */
377 ulp_dparms_init(struct bnxt *bp,
378 struct bnxt_ulp_context *ulp_ctx)
380 struct bnxt_ulp_device_params *dparms;
383 if (!bp->max_num_kflows)
386 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
387 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
391 dparms = bnxt_ulp_device_params_get(dev_id);
393 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
397 /* num_flows = max_num_kflows * 1024 */
398 dparms->flow_db_num_entries = bp->max_num_kflows * 1024;
399 /* GFID = 2 * num_flows */
400 dparms->mark_db_gfid_entries = dparms->flow_db_num_entries * 2;
401 BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
402 dparms->flow_db_num_entries);
407 /* The function to initialize bp flags with truflow features */
/*
 * Reflect TruFlow device parameters into bp->flags: set GFID_ENABLE when
 * the device uses external (EEM) flow memory.
 */
409 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
410 struct bnxt_ulp_context *ulp_ctx)
412 struct bnxt_ulp_device_params *dparms;
415 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
416 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
420 dparms = bnxt_ulp_device_params_get(dev_id);
422 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
426 /* Update the bp flag with gfid flag */
427 if (dparms->flow_mem_type == BNXT_ULP_FLOW_MEM_TYPE_EXT)
428 bp->flags |= BNXT_FLAG_GFID_ENABLE;
/*
 * Attach this device to an already-initialized shared session: bump the
 * cfg_data refcount, adopt the session's TF session pointer, and open a
 * TF client for this port. On failure the adopted session pointer is
 * cleared again.
 */
434 ulp_ctx_attach(struct bnxt *bp,
435 struct bnxt_ulp_session_state *session)
439 /* Increment the ulp context data reference count usage. */
440 bp->ulp_ctx->cfg_data = session->cfg_data;
441 bp->ulp_ctx->cfg_data->ref_cnt++;
443 /* update the session details in bnxt tfp */
444 bp->tfp.session = session->g_tfp->session;
446 /* Create a TF Client */
447 rc = ulp_ctx_session_open(bp, session);
449 PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
450 bp->tfp.session = NULL;
454 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/*
 * Detach this port from the shared session: close its TF client session
 * and drop the local session pointer.
 */
459 ulp_ctx_detach(struct bnxt *bp)
461 if (bp->tfp.session) {
462 tf_close_session(&bp->tfp);
463 bp->tfp.session = NULL;
468 * Initialize the state of an ULP session.
469 If the state of an ULP session is not initialized, set its state to
470 * initialized. If the state is already initialized, do nothing.
/*
 * Under the per-session mutex, mark the session initialized on first call;
 * presumably *init reports whether it was already initialized — the lines
 * setting *init are missing from this excerpt, verify in the full source.
 */
473 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
475 pthread_mutex_lock(&session->bnxt_ulp_mutex);
477 if (!session->bnxt_ulp_init) {
478 session->bnxt_ulp_init = true;
484 pthread_mutex_unlock(&session->bnxt_ulp_mutex);
488 * Check if an ULP session is already allocated for a specific PCI
489 * domain & bus. If it is already allocated simply return the session
490 * pointer, otherwise allocate a new session.
/*
 * Linear search of the global session list for a session matching the
 * given PCI domain and bus. Caller must hold bnxt_ulp_global_mutex.
 */
492 static struct bnxt_ulp_session_state *
493 ulp_get_session(struct rte_pci_addr *pci_addr)
495 struct bnxt_ulp_session_state *session;
497 STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
498 if (session->pci_info.domain == pci_addr->domain &&
499 session->pci_info.bus == pci_addr->bus) {
507 * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
508 * If it's already initialized simply return the already existing session.
/*
 * Look up or create the per-PCI-domain/bus session under the global mutex,
 * then mark it initialized (first caller wins) via ulp_context_initialized.
 * NOTE(review): several branch/return lines are missing from this excerpt.
 */
510 static struct bnxt_ulp_session_state *
511 ulp_session_init(struct bnxt *bp,
514 struct rte_pci_device *pci_dev;
515 struct rte_pci_addr *pci_addr;
516 struct bnxt_ulp_session_state *session;
522 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
523 pci_addr = &pci_dev->addr;
525 pthread_mutex_lock(&bnxt_ulp_global_mutex);
527 session = ulp_get_session(pci_addr);
529 /* Not Found the session Allocate a new one */
530 session = rte_zmalloc("bnxt_ulp_session",
531 sizeof(struct bnxt_ulp_session_state),
535 "Allocation failed for bnxt_ulp_session\n");
536 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
540 /* Add it to the queue */
541 session->pci_info.domain = pci_addr->domain;
542 session->pci_info.bus = pci_addr->bus;
543 rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
545 BNXT_TF_DBG(ERR, "mutex create failed\n");
546 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
549 STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
553 ulp_context_initialized(session, init);
554 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
559 When a device is closed, remove its associated session from the global
/*
 * Remove a session from the global list and destroy its mutex, but only
 * once its cfg_data has been released (no remaining users).
 */
563 ulp_session_deinit(struct bnxt_ulp_session_state *session)
568 if (!session->cfg_data) {
569 pthread_mutex_lock(&bnxt_ulp_global_mutex);
570 STAILQ_REMOVE(&bnxt_ulp_session_list, session,
571 bnxt_ulp_session_state, next);
572 pthread_mutex_destroy(&session->bnxt_ulp_mutex);
574 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
579 * Internal api to enable NAT feature.
580 * Set set_flag to 1 to set the value or zero to reset the value.
581 * returns 0 on success.
/*
 * Read-modify-write a TF global configuration register: fetch the current
 * value, set or clear 'value' bits (the set branch is on lines missing
 * from this excerpt; only the clear, &= ~value, is visible), and write it
 * back. Note lines 598-599 intentionally end in commas (comma operator
 * chaining the parms assignments).
 */
584 bnxt_ulp_global_cfg_update(struct bnxt *bp,
586 enum tf_global_config_type type,
591 uint32_t global_cfg = 0;
593 struct tf_global_cfg_parms parms;
595 /* Initialize the params */
598 parms.offset = offset,
599 parms.config = (uint8_t *)&global_cfg,
600 parms.config_sz_in_bytes = sizeof(global_cfg);
602 rc = tf_get_global_cfg(&bp->tfp, &parms);
604 BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
612 global_cfg &= ~value;
614 /* SET the register RE_CFA_REG_ACT_TECT */
615 rc = tf_set_global_cfg(&bp->tfp, &parms);
617 BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
624 /* Internal function to delete all the flows belonging to the given port */
/*
 * Flush all flows owned by this port's function: resolve the port's
 * function id from the port database, then flush that function's flows.
 */
626 bnxt_ulp_flush_port_flows(struct bnxt *bp)
630 /* it is assumed that port is either TVF or PF */
631 if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
632 bp->eth_dev->data->port_id,
634 BNXT_TF_DBG(ERR, "Invalid argument\n");
637 (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
640 /* Internal function to delete the VFR default flows */
/*
 * Destroy VF-representor default flows. Scans every possible port; when
 * 'global' is false only entries whose parent is this port are removed.
 * For each entry: destroy the rep2vf/vf2rep default flows, clear the
 * representor's cached TX CFA action, and zero the rule-info slot.
 */
642 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
644 struct bnxt_ulp_vfr_rule_info *info;
646 struct rte_eth_dev *vfr_eth_dev;
647 struct bnxt_vf_representor *vfr_bp;
649 if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
652 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
655 /* Delete default rules for all ports */
656 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
657 info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
661 if (!global && info->parent_port_id !=
662 bp->eth_dev->data->port_id)
665 /* Destroy the flows */
666 ulp_default_flow_destroy(bp->eth_dev, info->rep2vf_flow_id);
667 ulp_default_flow_destroy(bp->eth_dev, info->vf2rep_flow_id);
668 /* Clean up the tx action pointer */
669 vfr_eth_dev = &rte_eth_devices[port_id];
671 vfr_bp = vfr_eth_dev->data->dev_private;
672 vfr_bp->vfr_tx_cfa_action = 0;
674 memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
679 When a port is deinit'ed by dpdk, this function is called
680 * and this function clears the ULP context and rest of the
681 * infrastructure associated with it.
/*
 * Full ULP teardown, in dependency order: default and VFR rules, regular
 * flows, EEM table scope, flow/mark databases, mapper, flow-counter mgr,
 * port database, NAT global-config bits, and finally the context itself.
 */
684 bnxt_ulp_deinit(struct bnxt *bp,
685 struct bnxt_ulp_session_state *session)
687 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
690 /* clean up default flows */
691 bnxt_ulp_destroy_df_rules(bp, true);
693 /* clean up default VFR flows */
694 bnxt_ulp_destroy_vfr_default_rules(bp, true);
696 /* clean up regular flows */
697 ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);
699 /* cleanup the eem table scope */
700 ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
702 /* cleanup the flow database */
703 ulp_flow_db_deinit(bp->ulp_ctx);
705 /* Delete the Mark database */
706 ulp_mark_db_deinit(bp->ulp_ctx);
708 /* cleanup the ulp mapper */
709 ulp_mapper_deinit(bp->ulp_ctx);
711 /* Delete the Flow Counter Manager */
712 ulp_fc_mgr_deinit(bp->ulp_ctx);
714 /* Delete the Port database */
715 ulp_port_db_deinit(bp->ulp_ctx);
717 /* Disable NAT feature */
718 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
720 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
721 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
724 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
726 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
727 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
730 /* Delete the ulp context and tf session and free the ulp context */
731 ulp_ctx_deinit(bp, session);
732 BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
736 When a port is initialized by dpdk, this function is called
737 * and this function initializes the ULP context and rest of the
738 * infrastructure associated with it.
/*
 * Full ULP bring-up for the first port of a session: context + TF session,
 * devarg overrides, port/mark/flow databases, EEM table scope, mapper,
 * flow-counter manager, then enable the NAT feature bits in both
 * directions. The final bnxt_ulp_deinit() call is the shared error-path
 * cleanup (its goto label lines are missing from this excerpt).
 */
741 bnxt_ulp_init(struct bnxt *bp,
742 struct bnxt_ulp_session_state *session)
746 /* Allocate and Initialize the ulp context. */
747 rc = ulp_ctx_init(bp, session);
749 BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
753 /* Initialize ulp dparms with values devargs passed */
754 rc = ulp_dparms_init(bp, bp->ulp_ctx);
756 BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
760 /* create the port database */
761 rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
763 BNXT_TF_DBG(ERR, "Failed to create the port database\n");
767 /* Create the Mark database. */
768 rc = ulp_mark_db_init(bp->ulp_ctx);
770 BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
774 /* Create the flow database. */
775 rc = ulp_flow_db_init(bp->ulp_ctx);
777 BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
781 /* Create the eem table scope. */
782 rc = ulp_eem_tbl_scope_init(bp);
784 BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
788 rc = ulp_mapper_init(bp->ulp_ctx);
790 BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
794 rc = ulp_fc_mgr_init(bp->ulp_ctx);
796 BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
801 * Enable NAT feature. Set the global configuration register
802 * Tunnel encap to enable NAT with the reuse of existing inner
803 * L2 header smac and dmac
805 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
807 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
808 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
810 BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
814 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
816 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
817 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
819 BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
822 BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
826 bnxt_ulp_deinit(bp, session);
831 When a port is initialized by dpdk, this function sets up
832 * the port specific details.
/*
 * Per-port ULP bring-up. Only TruFlow-enabled PF/trusted-VF ports proceed.
 * Allocates the per-port ulp context, then either attaches to an existing
 * session (already-initialized PCI domain/bus) or performs the full
 * bnxt_ulp_init(). Afterwards updates driver flags and the port database
 * and installs the port's default rules. bnxt_ulp_port_deinit() is the
 * shared error-path cleanup (its goto label lines are missing from this
 * excerpt).
 */
835 bnxt_ulp_port_init(struct bnxt *bp)
837 struct bnxt_ulp_session_state *session;
841 if (!bp || !BNXT_TRUFLOW_EN(bp))
844 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
846 "Skip ulp init for port: %d, not a TVF or PF\n",
847 bp->eth_dev->data->port_id);
852 BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
856 bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
857 sizeof(struct bnxt_ulp_context), 0);
859 BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
864 * Multiple uplink ports can be associated with a single vswitch.
865 * Make sure only the port that is started first will initialize
868 session = ulp_session_init(bp, &initialized);
870 BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
877 * If ULP is already initialized for a specific domain then
878 * simply assign the ulp context to this rte_eth_dev.
880 rc = ulp_ctx_attach(bp, session);
882 BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
886 rc = bnxt_ulp_init(bp, session);
888 BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
893 /* Update bnxt driver flags */
894 rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
896 BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
900 /* update the port database for the given interface */
901 rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
903 BNXT_TF_DBG(ERR, "Failed to update port database\n");
906 /* create the default rules */
907 bnxt_ulp_create_df_rules(bp);
908 BNXT_TF_DBG(DEBUG, "ULP Port:%d created and initialized\n",
909 bp->eth_dev->data->port_id);
913 bnxt_ulp_port_deinit(bp);
918 When a port is de-initialized by dpdk, this function clears up
919 * the port specific details.
/*
 * Per-port ULP teardown. Locates the shared session by PCI address, then
 * drops this port's cfg_data reference: if other ports remain, only this
 * port's default rules/flows and its TF client session are released;
 * otherwise the whole ULP context is deinitialized. Finally the session
 * bookkeeping and per-port ulp context are freed.
 * NOTE(review): several branch/guard lines are missing from this excerpt.
 */
922 bnxt_ulp_port_deinit(struct bnxt *bp)
924 struct bnxt_ulp_session_state *session;
925 struct rte_pci_device *pci_dev;
926 struct rte_pci_addr *pci_addr;
928 if (!BNXT_TRUFLOW_EN(bp))
931 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
933 "Skip ULP deinit port:%d, not a TVF or PF\n",
934 bp->eth_dev->data->port_id);
939 BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
943 BNXT_TF_DBG(DEBUG, "ULP Port:%d destroyed\n",
944 bp->eth_dev->data->port_id);
946 /* Get the session details */
947 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
948 pci_addr = &pci_dev->addr;
949 pthread_mutex_lock(&bnxt_ulp_global_mutex);
950 session = ulp_get_session(pci_addr);
951 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
953 /* session not found then just exit */
955 /* Free the ulp context */
956 rte_free(bp->ulp_ctx);
961 /* Check the reference count to deinit or deattach*/
962 if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
963 bp->ulp_ctx->cfg_data->ref_cnt--;
964 if (bp->ulp_ctx->cfg_data->ref_cnt) {
965 /* free the port details */
966 /* Free the default flow rule associated to this port */
967 bnxt_ulp_destroy_df_rules(bp, false);
968 bnxt_ulp_destroy_vfr_default_rules(bp, false);
970 /* free flows associated with this port */
971 bnxt_ulp_flush_port_flows(bp);
973 /* close the session associated with this port */
976 /* Perform ulp ctx deinit */
977 bnxt_ulp_deinit(bp, session);
981 /* clean up the session */
982 ulp_session_deinit(session);
984 /* Free the ulp context */
985 rte_free(bp->ulp_ctx);
989 /* Below are the access functions to access internal data of ulp context. */
990 /* Function to set the Mark DB into the context */
/* Store the Mark DB pointer in cfg_data; errors out on a NULL context. */
992 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
993 struct bnxt_ulp_mark_tbl *mark_tbl)
995 if (!ulp_ctx || !ulp_ctx->cfg_data) {
996 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1000 ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1005 /* Function to retrieve the Mark DB from the context. */
/* Return the Mark DB pointer, or NULL (on missing lines) for a bad ctx. */
1006 struct bnxt_ulp_mark_tbl *
1007 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1009 if (!ulp_ctx || !ulp_ctx->cfg_data)
1012 return ulp_ctx->cfg_data->mark_tbl;
1015 /* Function to set the device id of the hardware. */
/* Record the hardware device id in cfg_data when the context is valid. */
1017 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1020 if (ulp_ctx && ulp_ctx->cfg_data) {
1021 ulp_ctx->cfg_data->dev_id = dev_id;
1028 /* Function to get the device id of the hardware. */
/* Copy the stored hardware device id out via *dev_id when ctx is valid. */
1030 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1033 if (ulp_ctx && ulp_ctx->cfg_data) {
1034 *dev_id = ulp_ctx->cfg_data->dev_id;
1041 /* Function to get the table scope id of the EEM table. */
/* Copy the EEM table scope id out via *tbl_scope_id when ctx is valid. */
1043 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1044 uint32_t *tbl_scope_id)
1046 if (ulp_ctx && ulp_ctx->cfg_data) {
1047 *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1054 /* Function to set the table scope id of the EEM table. */
/* Record the EEM table scope id in cfg_data when the context is valid. */
1056 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1057 uint32_t tbl_scope_id)
1059 if (ulp_ctx && ulp_ctx->cfg_data) {
1060 ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1067 /* Function to set the tfp session details from the ulp context. */
/* Store the TF handle in the context; the guard condition and assignment
 * lines are missing from this excerpt — verify against the full source. */
1069 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1072 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1080 /* Function to get the tfp session details from the ulp context. */
/* Return the stored TF handle; the guard condition and return lines are
 * missing from this excerpt — verify against the full source. */
1082 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
1085 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1092 * Get the device table entry based on the device id.
1094 * dev_id [in] The device id of the hardware
1096 * Returns the pointer to the device parameters.
/* Bounds-checked lookup into the ulp_device_params table by device id. */
1098 struct bnxt_ulp_device_params *
1099 bnxt_ulp_device_params_get(uint32_t dev_id)
1101 if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1102 return &ulp_device_params[dev_id];
1106 /* Function to set the flow database to the ulp context. */
/* Store the flow database pointer in cfg_data; guards a NULL context. */
1108 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1109 struct bnxt_ulp_flow_db *flow_db)
1111 if (!ulp_ctx || !ulp_ctx->cfg_data)
1114 ulp_ctx->cfg_data->flow_db = flow_db;
1118 /* Function to get the flow database from the ulp context. */
/* Return the flow database pointer stored in cfg_data. */
1119 struct bnxt_ulp_flow_db *
1120 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1122 if (!ulp_ctx || !ulp_ctx->cfg_data)
1125 return ulp_ctx->cfg_data->flow_db;
1128 /* Function to get the ulp context from eth device. */
/*
 * Resolve the ulp context for an ethdev. For a VF representor, follow the
 * parent device's private data instead of the representor's own.
 */
1129 struct bnxt_ulp_context *
1130 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1132 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1134 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1135 struct bnxt_vf_representor *vfr = dev->data->dev_private;
1137 bp = vfr->parent_dev->data->dev_private;
1141 BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
/* Store the mapper private data pointer; errors out on a NULL context. */
1148 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1151 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1152 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1156 ulp_ctx->cfg_data->mapper_data = mapper_data;
/* Return the mapper private data pointer stored in cfg_data. */
1161 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1163 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1164 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1168 return ulp_ctx->cfg_data->mapper_data;
1171 /* Function to set the port database to the ulp context. */
/* Store the port database pointer in cfg_data; guards a NULL context. */
1173 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1174 struct bnxt_ulp_port_db *port_db)
1176 if (!ulp_ctx || !ulp_ctx->cfg_data)
1179 ulp_ctx->cfg_data->port_db = port_db;
1183 /* Function to get the port database from the ulp context. */
/* Return the port database pointer stored in cfg_data. */
1184 struct bnxt_ulp_port_db *
1185 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1187 if (!ulp_ctx || !ulp_ctx->cfg_data)
1190 return ulp_ctx->cfg_data->port_db;
1193 /* Function to set the flow counter info into the context */
/* Store the flow-counter info pointer; errors out on a NULL context. */
1195 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1196 struct bnxt_ulp_fc_info *ulp_fc_info)
1198 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1199 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1203 ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1208 /* Function to retrieve the flow counter info from the context. */
/* Return the flow-counter info pointer stored in cfg_data. */
1209 struct bnxt_ulp_fc_info *
1210 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1212 if (!ulp_ctx || !ulp_ctx->cfg_data)
1215 return ulp_ctx->cfg_data->fc_info;
1218 /* Function to get the ulp flags from the ulp context. */
/* Copy the ulp feature flags out via *flags; guards a NULL context. */
1220 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1223 if (!ulp_ctx || !ulp_ctx->cfg_data)
1226 *flags = ulp_ctx->cfg_data->ulp_flags;
1230 /* Function to get the ulp vfr info from the ulp context. */
1231 struct bnxt_ulp_vfr_rule_info*
1232 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1235 if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1238 return &ulp_ctx->cfg_data->vfr_rule_info[port_id];