1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
10 #include <rte_tailq.h>
14 #include "bnxt_tf_common.h"
16 #include "tf_ext_flow_handle.h"
18 #include "ulp_template_db_enum.h"
19 #include "ulp_template_struct.h"
20 #include "ulp_mark_mgr.h"
21 #include "ulp_fc_mgr.h"
22 #include "ulp_flow_db.h"
23 #include "ulp_mapper.h"
24 #include "ulp_port_db.h"
26 /* Linked list of all TF sessions. */
/* NOTE(review): this list must only be traversed or modified while holding
 * bnxt_ulp_global_mutex below (see ulp_get_session/ulp_session_init). */
27 STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
28 STAILQ_HEAD_INITIALIZER(bnxt_ulp_session_list);
30 /* Mutex to synchronize bnxt_ulp_session_list operations. */
31 static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
34 * Allow the deletion of context only for the bnxt device that
35 * created the session.
/* NOTE(review): return type, braces and return statements are not visible in
 * this view of the file; presumably returns non-zero when the caller owns the
 * session and may deinit — confirm against the full source. */
38 ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx)
/* Guard: nothing to deinit without a context or its shared cfg data. */
40 if (!ulp_ctx || !ulp_ctx->cfg_data)
/* Only when no other port holds a reference may deinit be initiated. */
43 if (!ulp_ctx->cfg_data->ref_cnt) {
44 BNXT_TF_DBG(DEBUG, "ulp ctx shall initiate deinit\n");
52 * Initialize an ULP session.
53 * An ULP session will contain all the resources needed to support rte flow
54 * offloads. A session is initialized as part of rte_eth_device start.
55 * A single vswitch instance can have multiple uplinks which means
56 * rte_eth_device start will be called for each of these devices.
57 * ULP session manager will make sure that a single ULP session is only
58 * initialized once. Apart from this, it also initializes MARK database,
59 * EEM table & flow database. ULP session manager also manages a list of
60 * all opened ULP sessions.
/* NOTE(review): several interior lines (return type, braces, error returns)
 * are missing from this view; the visible logic builds the tf_open_session
 * parameters and records the opened session in 'session'. */
63 ulp_ctx_session_open(struct bnxt *bp,
64 struct bnxt_ulp_session_state *session)
66 struct rte_eth_dev *ethdev = bp->eth_dev;
68 struct tf_open_session_parms params;
69 struct tf_session_resources *resources;
/* FIX(review): '&params' was mojibake-corrupted to the pilcrow form
 * (HTML entity '&para' mis-decoded); restored here and below. */
71 memset(&params, 0, sizeof(params));
/* The TF control channel is named after the DPDK port. */
73 rc = rte_eth_dev_get_name_by_port(ethdev->data->port_id,
74 params.ctrl_chan_name);
76 BNXT_TF_DBG(ERR, "Invalid port %d, rc = %d\n",
77 ethdev->data->port_id, rc);
81 params.shadow_copy = true;
82 params.device_type = TF_DEVICE_TYPE_WH;
83 resources = &params.resources;
/* RX-direction resource reservations (identifiers, tables, TCAM, EM). */
86 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 422;
87 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 6;
88 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
89 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
90 resources->ident_cnt[TF_DIR_RX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
93 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
94 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
95 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
98 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 511;
99 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 63;
/* NOTE(review): the RHS values of the next two TCAM assignments are on
 * lines not visible in this view. */
102 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
104 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
106 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
107 resources->tcam_cnt[TF_DIR_RX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 88;
110 resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 13168;
113 resources->em_cnt[TF_DIR_RX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
116 resources->tbl_cnt[TF_DIR_RX].cnt[TF_TBL_TYPE_ACT_SP_SMAC] = 255;
/* TX-direction resource reservations — mirror of the RX block above. */
120 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_HIGH] = 292;
121 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_L2_CTXT_LOW] = 148;
122 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_WC_PROF] = 192;
123 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_PROF_FUNC] = 64;
124 resources->ident_cnt[TF_DIR_TX].cnt[TF_IDENT_TYPE_EM_PROF] = 192;
127 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_FULL_ACT_RECORD] = 8192;
128 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_STATS_64] = 16384;
129 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_MODIFY_IPV4] = 1023;
132 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_64B] = 511;
133 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_16B] = 223;
134 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_ENCAP_8B] = 255;
137 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] =
139 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] =
141 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_PROF_TCAM] = 960;
142 resources->tcam_cnt[TF_DIR_TX].cnt[TF_TCAM_TBL_TYPE_WC_TCAM] = 928;
145 resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_EM_RECORD] = 15232;
148 resources->em_cnt[TF_DIR_TX].cnt[TF_EM_TBL_TYPE_TBL_SCOPE] = 1;
151 resources->tbl_cnt[TF_DIR_TX].cnt[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = 488;
153 rc = tf_open_session(&bp->tfp, &params);
155 BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
156 params.ctrl_chan_name, rc);
/* First opener records itself as the session owner (g_tfp). */
159 if (!session->session_opened) {
160 session->session_opened = 1;
161 session->g_tfp = &bp->tfp;
167 * Close the ULP session.
168 * It takes the ulp context pointer.
/* NOTE(review): signature's return type line is not visible in this view. */
171 ulp_ctx_session_close(struct bnxt *bp,
172 struct bnxt_ulp_session_state *session)
174 /* close the session in the hardware */
175 if (session->session_opened)
176 tf_close_session(&bp->tfp);
/* Clear the bookkeeping regardless, so a later open starts clean. */
177 session->session_opened = 0;
178 session->g_tfp = NULL;
/* Populate the EEM table-scope allocation parameters for this device.
 * NOTE(review): two assignment variants are visible below — one using the
 * BNXT_ULP_*_NUM_FLOWS defaults and one deriving flow counts from the
 * device params (dparms). The surrounding if/else lines are missing from
 * this view; presumably the default branch runs when dparms lookup fails —
 * confirm in the full source. */
182 bnxt_init_tbl_scope_parms(struct bnxt *bp,
183 struct tf_alloc_tbl_scope_parms *params)
185 struct bnxt_ulp_device_params *dparms;
189 rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id);
191 /* TBD: For now, just use default. */
194 dparms = bnxt_ulp_device_params_get(dev_id);
197 * Set the flush timer for EEM entries. The value is in 100ms intervals,
200 params->hw_flow_cache_flush_timer = 100;
/* Variant 1: compile-time default flow counts. */
203 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
204 params->rx_max_action_entry_sz_in_bits =
205 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
206 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
207 params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS;
208 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
210 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
211 params->tx_max_action_entry_sz_in_bits =
212 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
213 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
214 params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS;
215 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
/* Variant 2: flow counts derived from dparms (entries are in units of 1K). */
217 params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY;
218 params->rx_max_action_entry_sz_in_bits =
219 BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY;
220 params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM;
221 params->rx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
222 params->rx_tbl_if_id = BNXT_ULP_RX_TBL_IF_ID;
224 params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY;
225 params->tx_max_action_entry_sz_in_bits =
226 BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY;
227 params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM;
228 params->tx_num_flows_in_k = dparms->flow_db_num_entries / 1024;
229 params->tx_tbl_if_id = BNXT_ULP_TX_TBL_IF_ID;
233 /* Initialize Extended Exact Match host memory. */
/* Allocates a TF table scope for EEM when the device uses external flow
 * memory, and records the scope id in the ulp context.
 * NOTE(review): return type/braces and some error-return lines are missing
 * from this view of the file. */
235 ulp_eem_tbl_scope_init(struct bnxt *bp)
237 struct tf_alloc_tbl_scope_parms params = {0};
239 struct bnxt_ulp_device_params *dparms;
242 /* Get the dev specific number of flows that needed to be supported. */
243 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
244 BNXT_TF_DBG(ERR, "Invalid device id\n");
248 dparms = bnxt_ulp_device_params_get(dev_id);
250 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
/* Internal flow memory needs no table scope — nothing to allocate. */
254 if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
255 BNXT_TF_DBG(INFO, "Table Scope alloc is not required\n");
/* FIX(review): '&params' was mojibake-corrupted ('&para' -> pilcrow);
 * restored on the two calls below. */
259 bnxt_init_tbl_scope_parms(bp, &params);
261 rc = tf_alloc_tbl_scope(&bp->tfp, &params);
263 BNXT_TF_DBG(ERR, "Unable to allocate eem table scope rc = %d\n",
/* Persist the allocated scope id so later flow inserts can reference it. */
268 rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
270 BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
277 /* Free Extended Exact Match host memory */
/* Releases the EEM table scope allocated by ulp_eem_tbl_scope_init(); a
 * no-op for devices using internal flow memory.
 * NOTE(review): return type/braces and some error-return lines are missing
 * from this view of the file. */
279 ulp_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx)
281 struct tf_free_tbl_scope_parms params = {0};
284 struct bnxt_ulp_device_params *dparms;
287 if (!ulp_ctx || !ulp_ctx->cfg_data)
290 tfp = bnxt_ulp_cntxt_tfp_get(ulp_ctx);
292 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
296 /* Get the dev specific number of flows that needed to be supported. */
297 if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) {
298 BNXT_TF_DBG(ERR, "Invalid device id\n");
302 dparms = bnxt_ulp_device_params_get(dev_id);
304 BNXT_TF_DBG(ERR, "could not fetch the device params\n");
308 if (dparms->flow_mem_type != BNXT_ULP_FLOW_MEM_TYPE_EXT) {
309 BNXT_TF_DBG(INFO, "Table Scope free is not required\n");
/* FIX(review): '&params' was mojibake-corrupted ('&para' -> pilcrow);
 * restored on the two uses below. */
313 rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id);
315 BNXT_TF_DBG(ERR, "Failed to get the table scope id\n");
319 rc = tf_free_tbl_scope(tfp, &params);
321 BNXT_TF_DBG(ERR, "Unable to free table scope\n");
327 /* The function to free and deinit the ulp context data. */
/* Closes the TF session then frees the shared cfg_data, clearing both the
 * per-port (bp->ulp_ctx) and per-session pointers to it. */
329 ulp_ctx_deinit(struct bnxt *bp,
330 struct bnxt_ulp_session_state *session)
332 /* close the tf session */
333 ulp_ctx_session_close(bp, session);
335 /* Free the contents */
336 if (session->cfg_data) {
337 rte_free(session->cfg_data);
/* Both pointers alias the same allocation — NULL them to avoid dangling. */
338 bp->ulp_ctx->cfg_data = NULL;
339 session->cfg_data = NULL;
344 /* The function to allocate and initialize the ulp context data. */
/* NOTE(review): return type and several lines (rc checks, closing braces)
 * are missing from this view of the file. */
346 ulp_ctx_init(struct bnxt *bp,
347 struct bnxt_ulp_session_state *session)
349 struct bnxt_ulp_data *ulp_data;
352 /* Allocate memory to hold ulp context data. */
353 ulp_data = rte_zmalloc("bnxt_ulp_data",
354 sizeof(struct bnxt_ulp_data), 0);
356 BNXT_TF_DBG(ERR, "Failed to allocate memory for ulp data\n");
360 /* Increment the ulp context data reference count usage. */
361 bp->ulp_ctx->cfg_data = ulp_data;
362 session->cfg_data = ulp_data;
364 ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED;
366 /* Open the ulp session. */
367 rc = ulp_ctx_session_open(bp, session);
/* On open failure: mark the session opened so deinit's close path runs,
 * then tear everything down (error path — presumably guarded by rc). */
369 session->session_opened = 1;
370 (void)ulp_ctx_deinit(bp, session);
374 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
378 /* The function to initialize ulp dparms with devargs */
/* Overrides the device-default flow/GFID table sizes with the value the
 * user passed through devargs (max_num_kflows). No-op when unset. */
380 ulp_dparms_init(struct bnxt *bp,
381 struct bnxt_ulp_context *ulp_ctx)
383 struct bnxt_ulp_device_params *dparms;
/* devargs not supplied — keep the compiled-in defaults. */
386 if (!bp->max_num_kflows)
389 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
390 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
394 dparms = bnxt_ulp_device_params_get(dev_id);
396 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
400 /* num_flows = max_num_kflows * 1024 */
401 dparms->flow_db_num_entries = bp->max_num_kflows * 1024;
402 /* GFID = 2 * num_flows */
403 dparms->mark_db_gfid_entries = dparms->flow_db_num_entries * 2;
404 BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
405 dparms->flow_db_num_entries);
410 /* The function to initialize bp flags with truflow features */
/* Sets BNXT_FLAG_GFID_ENABLE when the device uses external (EEM) flow
 * memory, so the rest of the driver uses GFID-based mark handling. */
412 ulp_dparms_dev_port_intf_update(struct bnxt *bp,
413 struct bnxt_ulp_context *ulp_ctx)
415 struct bnxt_ulp_device_params *dparms;
418 if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) {
419 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
423 dparms = bnxt_ulp_device_params_get(dev_id);
425 BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
429 /* Update the bp flag with gfid flag */
430 if (dparms->flow_mem_type == BNXT_ULP_FLOW_MEM_TYPE_EXT)
431 bp->flags |= BNXT_FLAG_GFID_ENABLE;
/* Attach this port to an already-initialized ULP session: share the
 * session's cfg_data, bump its refcount, and open a TF client on top of
 * the existing hardware session. */
437 ulp_ctx_attach(struct bnxt *bp,
438 struct bnxt_ulp_session_state *session)
442 /* Increment the ulp context data reference count usage. */
443 bp->ulp_ctx->cfg_data = session->cfg_data;
444 bp->ulp_ctx->cfg_data->ref_cnt++;
446 /* update the session details in bnxt tfp */
447 bp->tfp.session = session->g_tfp->session;
449 /* Create a TF Client */
450 rc = ulp_ctx_session_open(bp, session);
452 PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
/* On failure, detach from the shared session pointer before returning. */
453 bp->tfp.session = NULL;
457 bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);
/* Detach this port's TF client from the shared session (counterpart of
 * ulp_ctx_attach). */
462 ulp_ctx_detach(struct bnxt *bp)
464 if (bp->tfp.session) {
465 tf_close_session(&bp->tfp);
466 bp->tfp.session = NULL;
471 * Initialize the state of an ULP session.
472 * If the state of an ULP session is not initialized, set it's state to
473 * initialized. If the state is already initialized, do nothing.
/* NOTE(review): *init is presumably set to reflect whether the session was
 * already initialized — the assignment lines are not visible here. */
476 ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init)
478 pthread_mutex_lock(&session->bnxt_ulp_mutex);
480 if (!session->bnxt_ulp_init) {
481 session->bnxt_ulp_init = true;
487 pthread_mutex_unlock(&session->bnxt_ulp_mutex);
491 * Check if an ULP session is already allocated for a specific PCI
492 * domain & bus. If it is already allocated simply return the session
493 * pointer, otherwise allocate a new session.
/* NOTE(review): caller must hold bnxt_ulp_global_mutex while this walks
 * bnxt_ulp_session_list (both call sites in this file do). */
495 static struct bnxt_ulp_session_state *
496 ulp_get_session(struct rte_pci_addr *pci_addr)
498 struct bnxt_ulp_session_state *session;
/* Sessions are keyed by PCI domain + bus: one session per adapter. */
500 STAILQ_FOREACH(session, &bnxt_ulp_session_list, next) {
501 if (session->pci_info.domain == pci_addr->domain &&
502 session->pci_info.bus == pci_addr->bus) {
510 * Allocate and Initialize an ULP session and set it's state to INITIALIZED.
511 * If it's already initialized simply return the already existing session.
513 static struct bnxt_ulp_session_state *
514 ulp_session_init(struct bnxt *bp,
517 struct rte_pci_device *pci_dev;
518 struct rte_pci_addr *pci_addr;
519 struct bnxt_ulp_session_state *session;
/* Derive the PCI address that keys the per-adapter session. */
525 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
526 pci_addr = &pci_dev->addr;
/* Global lock protects both the list lookup and the insert below. */
528 pthread_mutex_lock(&bnxt_ulp_global_mutex);
530 session = ulp_get_session(pci_addr);
532 /* Not Found the session Allocate a new one */
533 session = rte_zmalloc("bnxt_ulp_session",
534 sizeof(struct bnxt_ulp_session_state),
538 "Allocation failed for bnxt_ulp_session\n");
539 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
543 /* Add it to the queue */
544 session->pci_info.domain = pci_addr->domain;
545 session->pci_info.bus = pci_addr->bus;
546 rc = pthread_mutex_init(&session->bnxt_ulp_mutex, NULL);
548 BNXT_TF_DBG(ERR, "mutex create failed\n");
549 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
552 STAILQ_INSERT_TAIL(&bnxt_ulp_session_list,
/* Mark (or observe) the session's init state before dropping the lock. */
556 ulp_context_initialized(session, init);
557 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
562 * When a device is closed, remove it's associated session from the global
/* Only fully removes the session once no cfg_data remains (i.e. the last
 * port referencing it has been deinitialized). */
566 ulp_session_deinit(struct bnxt_ulp_session_state *session)
571 if (!session->cfg_data) {
572 pthread_mutex_lock(&bnxt_ulp_global_mutex);
573 STAILQ_REMOVE(&bnxt_ulp_session_list, session,
574 bnxt_ulp_session_state, next);
575 pthread_mutex_destroy(&session->bnxt_ulp_mutex);
577 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
582 * Internal api to enable NAT feature.
583 * Set set_flag to 1 to set the value or zero to reset the value.
584 * returns 0 on success.
/* Read-modify-write of a TF global config register: fetch the current
 * value, set or clear 'value' bits, write it back. */
587 bnxt_ulp_global_cfg_update(struct bnxt *bp,
589 enum tf_global_config_type type,
594 uint32_t global_cfg = 0;
596 struct tf_global_cfg_parms parms;
598 /* Initialize the params */
/* NOTE(review): comma operators here chain the initializers — the earlier
 * parms.dir/type assignments are on lines not visible in this view. */
601 parms.offset = offset,
602 parms.config = (uint8_t *)&global_cfg,
603 parms.config_sz_in_bytes = sizeof(global_cfg);
605 rc = tf_get_global_cfg(&bp->tfp, &parms);
607 BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
/* Clear path (reset); the set path is on a line not visible here. */
615 global_cfg &= ~value;
617 /* SET the register RE_CFA_REG_ACT_TECT */
618 rc = tf_set_global_cfg(&bp->tfp, &parms);
620 BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
627 /* Internal function to delete all the flows belonging to the given port */
629 bnxt_ulp_flush_port_flows(struct bnxt *bp)
633 /* it is assumed that port is either TVF or PF */
/* Resolve the port's function id; flows are indexed per-function. */
634 if (ulp_port_db_port_func_id_get(bp->ulp_ctx,
635 bp->eth_dev->data->port_id,
637 BNXT_TF_DBG(ERR, "Invalid argument\n");
/* Best-effort flush — return value intentionally ignored. */
640 (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id);
643 /* Internal function to delete the VFR default flows */
/* When 'global' is false, only rules whose parent port is this bp's port
 * are destroyed; when true, all VFR rules are removed. */
645 bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
647 struct bnxt_ulp_vfr_rule_info *info;
649 struct rte_eth_dev *vfr_eth_dev;
650 struct bnxt_vf_representor *vfr_bp;
/* Representor ports never own these rules — nothing to do. */
652 if (!BNXT_TRUFLOW_EN(bp) || BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
655 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
658 /* Delete default rules for all ports */
659 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
660 info = &bp->ulp_ctx->cfg_data->vfr_rule_info[port_id];
664 if (!global && info->parent_port_id !=
665 bp->eth_dev->data->port_id)
668 /* Destroy the flows */
669 ulp_default_flow_destroy(bp->eth_dev, info->rep2vf_flow_id);
670 ulp_default_flow_destroy(bp->eth_dev, info->vf2rep_flow_id);
671 /* Clean up the tx action pointer */
672 vfr_eth_dev = &rte_eth_devices[port_id];
674 vfr_bp = vfr_eth_dev->data->dev_private;
675 vfr_bp->vfr_tx_cfa_action = 0;
/* Zero the slot so a future VFR on this port starts clean. */
677 memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info));
682 * When a port is deinit'ed by dpdk. This function is called
683 * and this function clears the ULP context and rest of the
684 * infrastructure associated with it.
/* Tear-down order matters: flows first, then the databases that index
 * them, then hardware-global config, and the TF session last. */
687 bnxt_ulp_deinit(struct bnxt *bp,
688 struct bnxt_ulp_session_state *session)
690 if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
693 /* clean up default flows */
694 bnxt_ulp_destroy_df_rules(bp, true);
696 /* clean up default VFR flows */
697 bnxt_ulp_destroy_vfr_default_rules(bp, true);
699 /* clean up regular flows */
700 ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);
702 /* cleanup the eem table scope */
703 ulp_eem_tbl_scope_deinit(bp, bp->ulp_ctx);
705 /* cleanup the flow database */
706 ulp_flow_db_deinit(bp->ulp_ctx);
708 /* Delete the Mark database */
709 ulp_mark_db_deinit(bp->ulp_ctx);
711 /* cleanup the ulp mapper */
712 ulp_mapper_deinit(bp->ulp_ctx);
714 /* Delete the Flow Counter Manager */
715 ulp_fc_mgr_deinit(bp->ulp_ctx);
717 /* Delete the Port database */
718 ulp_port_db_deinit(bp->ulp_ctx);
720 /* Disable NAT feature */
/* Mirrors the enable calls in bnxt_ulp_init(); failures are ignored on
 * the teardown path. */
721 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
723 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
724 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
727 (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
729 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
730 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
733 /* Delete the ulp context and tf session and free the ulp context */
734 ulp_ctx_deinit(bp, session);
735 BNXT_TF_DBG(DEBUG, "ulp ctx has been deinitialized\n");
739 * When a port is initialized by dpdk. This functions is called
740 * and this function initializes the ULP context and rest of the
741 * infrastructure associated with it.
/* NOTE(review): the rc checks and the goto/cleanup lines between stages
 * are missing from this view; the jump target appears to be the
 * bnxt_ulp_deinit() call at the bottom. */
744 bnxt_ulp_init(struct bnxt *bp,
745 struct bnxt_ulp_session_state *session)
749 /* Allocate and Initialize the ulp context. */
750 rc = ulp_ctx_init(bp, session);
752 BNXT_TF_DBG(ERR, "Failed to create the ulp context\n");
756 /* Initialize ulp dparms with values devargs passed */
757 rc = ulp_dparms_init(bp, bp->ulp_ctx);
759 BNXT_TF_DBG(ERR, "Failed to initialize the dparms\n");
763 /* create the port database */
764 rc = ulp_port_db_init(bp->ulp_ctx, bp->port_cnt);
766 BNXT_TF_DBG(ERR, "Failed to create the port database\n");
770 /* Create the Mark database. */
771 rc = ulp_mark_db_init(bp->ulp_ctx);
773 BNXT_TF_DBG(ERR, "Failed to create the mark database\n");
777 /* Create the flow database. */
778 rc = ulp_flow_db_init(bp->ulp_ctx);
780 BNXT_TF_DBG(ERR, "Failed to create the flow database\n");
784 /* Create the eem table scope. */
785 rc = ulp_eem_tbl_scope_init(bp);
787 BNXT_TF_DBG(ERR, "Failed to create the eem scope table\n");
791 rc = ulp_mapper_init(bp->ulp_ctx);
793 BNXT_TF_DBG(ERR, "Failed to initialize ulp mapper\n");
797 rc = ulp_fc_mgr_init(bp->ulp_ctx);
799 BNXT_TF_DBG(ERR, "Failed to initialize ulp flow counter mgr\n");
804 * Enable NAT feature. Set the global configuration register
805 * Tunnel encap to enable NAT with the reuse of existing inner
806 * L2 header smac and dmac
808 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
810 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
811 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
813 BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
817 rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
819 (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
820 BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
822 BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
825 BNXT_TF_DBG(DEBUG, "ulp ctx has been initialized\n");
/* Error path: unwind everything initialized above. */
829 bnxt_ulp_deinit(bp, session);
834 * When a port is initialized by dpdk. This functions sets up
835 * the port specific details.
/* Entry point called at eth_dev start: allocates the per-port ulp context,
 * then either attaches to an existing session or performs full init. */
838 bnxt_ulp_port_init(struct bnxt *bp)
840 struct bnxt_ulp_session_state *session;
844 if (!bp || !BNXT_TRUFLOW_EN(bp))
/* ULP runs only on the PF or a trusted VF. */
847 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
849 "Skip ulp init for port: %d, not a TVF or PF\n",
850 bp->eth_dev->data->port_id);
855 BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
859 bp->ulp_ctx = rte_zmalloc("bnxt_ulp_ctx",
860 sizeof(struct bnxt_ulp_context), 0);
862 BNXT_TF_DBG(ERR, "Failed to allocate ulp ctx\n");
867 * Multiple uplink ports can be associated with a single vswitch.
868 * Make sure only the port that is started first will initialize
871 session = ulp_session_init(bp, &initialized);
873 BNXT_TF_DBG(ERR, "Failed to initialize the tf session\n");
880 * If ULP is already initialized for a specific domain then
881 * simply assign the ulp context to this rte_eth_dev.
/* NOTE(review): the if/else selecting attach vs full init (based on
 * 'initialized') is on lines not visible in this view. */
883 rc = ulp_ctx_attach(bp, session);
885 BNXT_TF_DBG(ERR, "Failed to attach the ulp context\n");
889 rc = bnxt_ulp_init(bp, session);
891 BNXT_TF_DBG(ERR, "Failed to initialize the ulp init\n");
896 /* Update bnxt driver flags */
897 rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx);
899 BNXT_TF_DBG(ERR, "Failed to update driver flags\n");
903 /* update the port database for the given interface */
904 rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp->eth_dev);
906 BNXT_TF_DBG(ERR, "Failed to update port database\n");
909 /* create the default rules */
910 bnxt_ulp_create_df_rules(bp);
911 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
912 bp->eth_dev->data->port_id);
/* Error path: undo the port-level setup. */
916 bnxt_ulp_port_deinit(bp);
921 * When a port is de-initialized by dpdk. This functions clears up
922 * the port specific details.
/* Counterpart of bnxt_ulp_port_init: drops this port's reference on the
 * shared session and performs the full deinit when it was the last one. */
925 bnxt_ulp_port_deinit(struct bnxt *bp)
927 struct bnxt_ulp_session_state *session;
928 struct rte_pci_device *pci_dev;
929 struct rte_pci_addr *pci_addr;
931 if (!BNXT_TRUFLOW_EN(bp))
934 if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
936 "Skip ULP deinit port:%d, not a TVF or PF\n",
937 bp->eth_dev->data->port_id);
942 BNXT_TF_DBG(DEBUG, "ulp ctx already de-allocated\n");
946 BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port deinit\n",
947 bp->eth_dev->data->port_id);
949 /* Get the session details */
950 pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
951 pci_addr = &pci_dev->addr;
952 pthread_mutex_lock(&bnxt_ulp_global_mutex);
953 session = ulp_get_session(pci_addr);
954 pthread_mutex_unlock(&bnxt_ulp_global_mutex);
956 /* session not found then just exit */
958 /* Free the ulp context */
959 rte_free(bp->ulp_ctx);
964 /* Check the reference count to deinit or deattach*/
965 if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
966 bp->ulp_ctx->cfg_data->ref_cnt--;
/* Other ports still reference the session: only this port's rules,
 * flows and TF client are removed. */
967 if (bp->ulp_ctx->cfg_data->ref_cnt) {
968 /* free the port details */
969 /* Free the default flow rule associated to this port */
970 bnxt_ulp_destroy_df_rules(bp, false);
971 bnxt_ulp_destroy_vfr_default_rules(bp, false);
973 /* free flows associated with this port */
974 bnxt_ulp_flush_port_flows(bp);
976 /* close the session associated with this port */
/* Last reference: tear down the whole ULP infrastructure. */
979 /* Perform ulp ctx deinit */
980 bnxt_ulp_deinit(bp, session);
984 /* clean up the session */
985 ulp_session_deinit(session);
987 /* Free the ulp context */
988 rte_free(bp->ulp_ctx);
992 /* Below are the access functions to access internal data of ulp context. */
993 /* Function to set the Mark DB into the context */
995 bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx,
996 struct bnxt_ulp_mark_tbl *mark_tbl)
998 if (!ulp_ctx || !ulp_ctx->cfg_data) {
999 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
/* Stores the pointer only — ownership of mark_tbl stays with the caller. */
1003 ulp_ctx->cfg_data->mark_tbl = mark_tbl;
1008 /* Function to retrieve the Mark DB from the context. */
1009 struct bnxt_ulp_mark_tbl *
1010 bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx)
1012 if (!ulp_ctx || !ulp_ctx->cfg_data)
1015 return ulp_ctx->cfg_data->mark_tbl;
1018 /* Function to set the device id of the hardware. */
1020 bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx,
1023 if (ulp_ctx && ulp_ctx->cfg_data) {
1024 ulp_ctx->cfg_data->dev_id = dev_id;
1031 /* Function to get the device id of the hardware. */
1033 bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx,
1036 if (ulp_ctx && ulp_ctx->cfg_data) {
1037 *dev_id = ulp_ctx->cfg_data->dev_id;
1044 /* Function to get the table scope id of the EEM table. */
1046 bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx,
1047 uint32_t *tbl_scope_id)
1049 if (ulp_ctx && ulp_ctx->cfg_data) {
1050 *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id;
1057 /* Function to set the table scope id of the EEM table. */
1059 bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx,
1060 uint32_t tbl_scope_id)
1062 if (ulp_ctx && ulp_ctx->cfg_data) {
1063 ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id;
1070 /* Function to set the tfp session details from the ulp context. */
/* NOTE(review): the actual assignment/return statements of these two
 * accessors are on lines not visible in this view. */
1072 bnxt_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, struct tf *tfp)
1075 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1083 /* Function to get the tfp session details from the ulp context. */
1085 bnxt_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp)
1088 BNXT_TF_DBG(ERR, "Invalid arguments\n");
1095 * Get the device table entry based on the device id.
1097 * dev_id [in] The device id of the hardware
1099 * Returns the pointer to the device parameters.
1101 struct bnxt_ulp_device_params *
1102 bnxt_ulp_device_params_get(uint32_t dev_id)
/* Bounds-checked lookup into the static ulp_device_params table;
 * presumably returns NULL for out-of-range ids (return line not visible). */
1104 if (dev_id < BNXT_ULP_MAX_NUM_DEVICES)
1105 return &ulp_device_params[dev_id];
1109 /* Function to set the flow database to the ulp context. */
1111 bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx,
1112 struct bnxt_ulp_flow_db *flow_db)
1114 if (!ulp_ctx || !ulp_ctx->cfg_data)
/* Pointer store only; flow_db lifetime is managed by flow-db init/deinit. */
1117 ulp_ctx->cfg_data->flow_db = flow_db;
1121 /* Function to get the flow database from the ulp context. */
1122 struct bnxt_ulp_flow_db *
1123 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx)
1125 if (!ulp_ctx || !ulp_ctx->cfg_data)
1128 return ulp_ctx->cfg_data->flow_db;
1131 /* Function to get the ulp context from eth device. */
1132 struct bnxt_ulp_context *
1133 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
1135 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
/* Representor ports do not own a context — resolve via the parent PF. */
1137 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
1138 struct bnxt_vf_representor *vfr = dev->data->dev_private;
1140 bp = vfr->parent_dev->data->dev_private;
1144 BNXT_TF_DBG(ERR, "Bnxt private data is not initialized\n");
/* Set the opaque mapper private data pointer on the shared cfg data. */
1151 bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx,
1154 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1155 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1159 ulp_ctx->cfg_data->mapper_data = mapper_data;
/* Retrieve the mapper private data; NULL-safe on a bad context. */
1164 bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx)
1166 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1167 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
1171 return ulp_ctx->cfg_data->mapper_data;
1174 /* Function to set the port database to the ulp context. */
1176 bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx,
1177 struct bnxt_ulp_port_db *port_db)
1179 if (!ulp_ctx || !ulp_ctx->cfg_data)
1182 ulp_ctx->cfg_data->port_db = port_db;
1186 /* Function to get the port database from the ulp context. */
1187 struct bnxt_ulp_port_db *
1188 bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx)
1190 if (!ulp_ctx || !ulp_ctx->cfg_data)
1193 return ulp_ctx->cfg_data->port_db;
1196 /* Function to set the flow counter info into the context */
1198 bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx,
1199 struct bnxt_ulp_fc_info *ulp_fc_info)
1201 if (!ulp_ctx || !ulp_ctx->cfg_data) {
1202 BNXT_TF_DBG(ERR, "Invalid ulp context data\n");
/* Pointer store only; lifetime managed by the flow counter manager. */
1206 ulp_ctx->cfg_data->fc_info = ulp_fc_info;
1211 /* Function to retrieve the flow counter info from the context. */
1212 struct bnxt_ulp_fc_info *
1213 bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx)
1215 if (!ulp_ctx || !ulp_ctx->cfg_data)
1218 return ulp_ctx->cfg_data->fc_info;
1221 /* Function to get the ulp flags from the ulp context. */
1223 bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
1226 if (!ulp_ctx || !ulp_ctx->cfg_data)
1229 *flags = ulp_ctx->cfg_data->ulp_flags;
1233 /* Function to get the ulp vfr info from the ulp context. */
/* NOTE(review): this definition runs past the end of the visible file;
 * its closing lines are not shown. Bounds-checks port_id before indexing
 * the per-port VFR rule array. */
1234 struct bnxt_ulp_vfr_rule_info*
1235 bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx,
1238 if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= RTE_MAX_ETHPORTS)
1241 return &ulp_ctx->cfg_data->vfr_rule_info[port_id];