/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
10 #include <rte_alarm.h>
13 #include "bnxt_tf_common.h"
14 #include "ulp_fc_mgr.h"
15 #include "ulp_flow_db.h"
16 #include "ulp_template_db_enum.h"
17 #include "ulp_template_struct.h"
21 ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
27 parms->mem_va = rte_zmalloc("ulp_fc_info",
28 RTE_CACHE_LINE_ROUNDUP(size),
31 BNXT_TF_DBG(ERR, "Allocate failed mem_va\n");
35 rte_mem_lock_page(parms->mem_va);
37 parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
38 if (parms->mem_pa == (void *)RTE_BAD_IOVA) {
39 BNXT_TF_DBG(ERR, "Allocate failed mem_pa\n");
47 ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
49 rte_free(parms->mem_va);
53 * Allocate and Initialize all Flow Counter Manager resources for this ulp
56 * ctxt [in] The ulp context for the Flow Counter manager.
60 ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
62 struct bnxt_ulp_device_params *dparms;
63 uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
64 struct bnxt_ulp_fc_info *ulp_fc_info;
68 BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
72 if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
73 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
77 dparms = bnxt_ulp_device_params_get(dev_id);
79 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
83 ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
87 rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
89 PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
93 /* Add the FC info tbl to the ulp context. */
94 bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
96 ulp_fc_info->num_counters = dparms->flow_count_db_entries;
97 if (!ulp_fc_info->num_counters) {
98 /* No need for software counters, call fw directly */
99 BNXT_TF_DBG(DEBUG, "Sw flow counter support not enabled\n");
103 sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
104 dparms->flow_count_db_entries;
106 for (i = 0; i < TF_DIR_MAX; i++) {
107 ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
108 sw_acc_cntr_tbl_sz, 0);
109 if (!ulp_fc_info->sw_acc_tbl[i])
113 hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;
115 for (i = 0; i < TF_DIR_MAX; i++) {
116 rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
125 ulp_fc_mgr_deinit(ctxt);
127 "Failed to allocate memory for fc mgr\n");
133 * Release all resources in the Flow Counter Manager for this ulp context
135 * ctxt [in] The ulp context for the Flow Counter manager
139 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
141 struct bnxt_ulp_fc_info *ulp_fc_info;
142 struct hw_fc_mem_info *shd_info;
145 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
150 ulp_fc_mgr_thread_cancel(ctxt);
152 pthread_mutex_destroy(&ulp_fc_info->fc_lock);
154 if (ulp_fc_info->num_counters) {
155 for (i = 0; i < TF_DIR_MAX; i++)
156 rte_free(ulp_fc_info->sw_acc_tbl[i]);
158 for (i = 0; i < TF_DIR_MAX; i++) {
159 shd_info = &ulp_fc_info->shadow_hw_tbl[i];
160 ulp_fc_mgr_shadow_mem_free(shd_info);
164 rte_free(ulp_fc_info);
166 /* Safe to ignore on deinit */
167 (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
173 * Check if the alarm thread that walks through the flows is started
175 * ctxt [in] The ulp context for the flow counter manager
178 bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
180 struct bnxt_ulp_fc_info *ulp_fc_info;
182 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
185 return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
191 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
192 * data from the chip's internal flow counters
194 * ctxt [in] The ulp context for the flow counter manager
198 ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
200 struct bnxt_ulp_fc_info *ulp_fc_info;
202 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
204 if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
205 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
206 ulp_fc_mgr_alarm_cb, NULL);
207 ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
214 * Cancel the alarm handler
216 * ctxt [in] The ulp context for the flow counter manager
219 void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
221 struct bnxt_ulp_fc_info *ulp_fc_info;
223 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
227 ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
228 rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, NULL);
232 * DMA-in the raw counter data from the HW and accumulate in the
233 * local accumulator table using the TF-Core API
235 * tfp [in] The TF-Core context
237 * fc_info [in] The ULP Flow counter info ptr
239 * dir [in] The direction of the flow
241 * num_counters [in] The number of counters
244 __rte_unused static int32_t
245 ulp_bulk_get_flow_stats(struct tf *tfp,
246 struct bnxt_ulp_fc_info *fc_info,
248 struct bnxt_ulp_device_params *dparms)
249 /* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
252 struct tf_tbl_get_bulk_parms parms = { 0 };
253 enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD: Template? */
254 struct sw_acc_counter *sw_acc_tbl_entry = NULL;
255 uint64_t *stats = NULL;
260 parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
261 parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
264 * Size of an entry needs to obtained from template
266 parms.entry_sz_in_bytes = sizeof(uint64_t);
267 stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
268 parms.physical_mem_addr = (uint64_t)
269 ((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));
273 "BULK: Memory not initialized id:0x%x dir:%d\n",
274 parms.starting_idx, dir);
278 rc = tf_tbl_bulk_get(tfp, &parms);
281 "BULK: Get failed for id:0x%x rc:%d\n",
282 parms.starting_idx, rc);
286 for (i = 0; i < parms.num_entries; i++) {
287 /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
288 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
289 if (!sw_acc_tbl_entry->valid)
291 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
293 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
301 ulp_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt,
302 struct ulp_flow_db_res_params *res,
303 struct rte_flow_query_count *qcount)
306 struct bnxt_ulp_device_params *dparms;
307 struct tf_get_tbl_entry_parms parms = { 0 };
308 struct tf_set_tbl_entry_parms sparms = { 0 };
309 enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;
314 tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
316 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
320 if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
321 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
322 bnxt_ulp_cntxt_entry_release();
326 dparms = bnxt_ulp_device_params_get(dev_id);
328 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
329 bnxt_ulp_cntxt_entry_release();
332 parms.dir = res->direction;
334 parms.idx = res->resource_hndl;
335 parms.data_sz_in_bytes = sizeof(uint64_t);
336 parms.data = (uint8_t *)&stats;
337 rc = tf_get_tbl_entry(tfp, &parms);
340 "Get failed for id:0x%x rc:%d\n",
344 qcount->hits = FLOW_CNTR_PKTS(stats, dparms);
346 qcount->hits_set = 1;
347 qcount->bytes = FLOW_CNTR_BYTES(stats, dparms);
349 qcount->bytes_set = 1;
353 sparms.dir = res->direction;
355 sparms.idx = res->resource_hndl;
356 sparms.data = (uint8_t *)&stats;
357 sparms.data_sz_in_bytes = sizeof(uint64_t);
358 rc = tf_set_tbl_entry(tfp, &sparms);
360 PMD_DRV_LOG(ERR, "Set failed for id:0x%x rc:%d\n",
368 static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
370 struct bnxt_ulp_fc_info *fc_info,
373 struct bnxt_ulp_device_params *dparms)
376 struct tf_get_tbl_entry_parms parms = { 0 };
377 enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD:Template? */
378 struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
380 uint32_t sw_cntr_indx = 0;
384 parms.idx = hw_cntr_id;
387 * Size of an entry needs to obtained from template
389 parms.data_sz_in_bytes = sizeof(uint64_t);
390 parms.data = (uint8_t *)&stats;
391 rc = tf_get_tbl_entry(tfp, &parms);
394 "Get failed for id:0x%x rc:%d\n",
399 /* PKT/BYTE COUNT SHIFT/MASK are device specific */
400 sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
401 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
403 /* Some dpdk applications may accumulate the flow counters while some
404 * may not. In cases where the application is accumulating the counters
405 * the PMD need not do the accumulation itself and viceversa to report
406 * the correct flow counters.
408 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
409 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
411 /* Update the parent counters if it is child flow */
412 if (sw_acc_tbl_entry->pc_flow_idx & FLOW_CNTR_PC_FLOW_VALID) {
415 /* Update the parent counters */
416 t_sw = sw_acc_tbl_entry;
417 pc_idx = t_sw->pc_flow_idx & ~FLOW_CNTR_PC_FLOW_VALID;
418 if (ulp_flow_db_parent_flow_count_update(ctxt, pc_idx,
421 PMD_DRV_LOG(ERR, "Error updating parent counters\n");
429 * Alarm handler that will issue the TF-Core API to fetch
430 * data from the chip's internal flow counters
432 * ctxt [in] The ulp context for the flow counter manager
437 ulp_fc_mgr_alarm_cb(void *arg __rte_unused)
442 struct bnxt_ulp_context *ctxt;
443 struct bnxt_ulp_fc_info *ulp_fc_info;
444 struct bnxt_ulp_device_params *dparms;
446 uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;
448 ctxt = bnxt_ulp_cntxt_entry_acquire();
450 BNXT_TF_DBG(INFO, "could not get the ulp context lock\n");
451 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
452 ulp_fc_mgr_alarm_cb, NULL);
456 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
458 bnxt_ulp_cntxt_entry_release();
462 if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
463 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
464 bnxt_ulp_cntxt_entry_release();
468 dparms = bnxt_ulp_device_params_get(dev_id);
470 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
471 bnxt_ulp_cntxt_entry_release();
475 tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
477 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
478 bnxt_ulp_cntxt_entry_release();
483 * Take the fc_lock to ensure no flow is destroyed
484 * during the bulk get
486 if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
489 if (!ulp_fc_info->num_entries) {
490 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
491 ulp_fc_mgr_thread_cancel(ctxt);
492 bnxt_ulp_cntxt_entry_release();
496 * Commented for now till GET_BULK is resolved, just get the first flow
498 for (i = 0; i < TF_DIR_MAX; i++) {
499 rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
500 dparms->flow_count_db_entries);
506 /* reset the parent accumulation counters before accumulation if any */
507 ulp_flow_db_parent_flow_count_reset(ctxt);
509 num_entries = dparms->flow_count_db_entries / 2;
510 for (i = 0; i < TF_DIR_MAX; i++) {
511 for (j = 0; j < num_entries; j++) {
512 if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
514 hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
515 rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
522 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
525 * If cmd fails once, no need of
526 * invoking again every second
530 ulp_fc_mgr_thread_cancel(ctxt);
531 bnxt_ulp_cntxt_entry_release();
535 bnxt_ulp_cntxt_entry_release();
536 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
537 ulp_fc_mgr_alarm_cb, NULL);
541 * Set the starting index that indicates the first HW flow
544 * ctxt [in] The ulp context for the flow counter manager
546 * dir [in] The direction of the flow
548 * start_idx [in] The HW flow counter ID
551 bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
553 struct bnxt_ulp_fc_info *ulp_fc_info;
555 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
558 return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
564 * Set the starting index that indicates the first HW flow
567 * ctxt [in] The ulp context for the flow counter manager
569 * dir [in] The direction of the flow
571 * start_idx [in] The HW flow counter ID
574 int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
577 struct bnxt_ulp_fc_info *ulp_fc_info;
579 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
584 if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
585 ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
586 ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
593 * Set the corresponding SW accumulator table entry based on
594 * the difference between this counter ID and the starting
595 * counter ID. Also, keep track of num of active counter enabled
598 * ctxt [in] The ulp context for the flow counter manager
600 * dir [in] The direction of the flow
602 * hw_cntr_id [in] The HW flow counter ID
605 int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
608 struct bnxt_ulp_fc_info *ulp_fc_info;
609 uint32_t sw_cntr_idx;
611 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
615 if (!ulp_fc_info->num_counters)
618 pthread_mutex_lock(&ulp_fc_info->fc_lock);
619 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
620 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
621 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
622 ulp_fc_info->num_entries++;
623 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
629 * Reset the corresponding SW accumulator table entry based on
630 * the difference between this counter ID and the starting
633 * ctxt [in] The ulp context for the flow counter manager
635 * dir [in] The direction of the flow
637 * hw_cntr_id [in] The HW flow counter ID
640 int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
643 struct bnxt_ulp_fc_info *ulp_fc_info;
644 uint32_t sw_cntr_idx;
646 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
650 if (!ulp_fc_info->num_counters)
653 pthread_mutex_lock(&ulp_fc_info->fc_lock);
654 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
655 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
656 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
657 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
658 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
659 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0;
660 ulp_fc_info->num_entries--;
661 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
667 * Fill the rte_flow_query_count 'data' argument passed
668 * in the rte_flow_query() with the values obtained and
669 * accumulated locally.
671 * ctxt [in] The ulp context for the flow counter manager
673 * flow_id [in] The HW flow ID
675 * count [out] The rte_flow_query_count 'data' that is set
678 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
680 struct rte_flow_query_count *count)
683 uint32_t nxt_resource_index = 0;
684 struct bnxt_ulp_fc_info *ulp_fc_info;
685 struct ulp_flow_db_res_params params;
687 uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
688 struct sw_acc_counter *sw_acc_tbl_entry;
689 bool found_cntr_resource = false;
690 bool found_parent_flow = false;
693 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
697 if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
701 rc = ulp_flow_db_resource_get(ctxt,
702 BNXT_ULP_FDB_TYPE_REGULAR,
706 if (params.resource_func ==
707 BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
708 (params.resource_sub_type ==
709 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
710 params.resource_sub_type ==
711 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT)) {
712 found_cntr_resource = true;
715 if (params.resource_func ==
716 BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
717 found_parent_flow = true;
718 pc_idx = params.resource_hndl;
721 } while (!rc && nxt_resource_index);
723 bnxt_ulp_cntxt_release_fdb_lock(ctxt);
725 if (rc || !found_cntr_resource)
728 dir = params.direction;
729 hw_cntr_id = params.resource_hndl;
730 if (!found_parent_flow &&
731 params.resource_sub_type ==
732 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
733 if (!ulp_fc_info->num_counters)
734 return ulp_fc_tf_flow_stat_get(ctxt, ¶ms, count);
737 * Think about optimizing with try_lock later
739 pthread_mutex_lock(&ulp_fc_info->fc_lock);
740 sw_cntr_idx = hw_cntr_id -
741 ulp_fc_info->shadow_hw_tbl[dir].start_idx;
742 sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
743 if (sw_acc_tbl_entry->pkt_count) {
745 count->bytes_set = 1;
746 count->hits = sw_acc_tbl_entry->pkt_count;
747 count->bytes = sw_acc_tbl_entry->byte_count;
750 sw_acc_tbl_entry->pkt_count = 0;
751 sw_acc_tbl_entry->byte_count = 0;
753 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
754 } else if (found_parent_flow &&
755 params.resource_sub_type ==
756 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
757 /* Get stats from the parent child table */
758 ulp_flow_db_parent_flow_count_get(ctxt, pc_idx,
759 &count->hits, &count->bytes,
764 count->bytes_set = 1;
766 /* TBD: Handle External counters */
774 * Set the parent flow if it is SW accumulation counter entry.
776 * ctxt [in] The ulp context for the flow counter manager
778 * dir [in] The direction of the flow
780 * hw_cntr_id [in] The HW flow counter ID
782 * pc_idx [in] parent child db index
785 int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
790 struct bnxt_ulp_fc_info *ulp_fc_info;
791 uint32_t sw_cntr_idx;
794 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
798 pthread_mutex_lock(&ulp_fc_info->fc_lock);
799 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
800 if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
801 pc_idx |= FLOW_CNTR_PC_FLOW_VALID;
802 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = pc_idx;
804 BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
808 pthread_mutex_unlock(&ulp_fc_info->fc_lock);