/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
10 #include <rte_alarm.h>
13 #include "bnxt_tf_common.h"
14 #include "ulp_fc_mgr.h"
15 #include "ulp_flow_db.h"
16 #include "ulp_template_db_enum.h"
17 #include "ulp_template_struct.h"
21 ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
27 parms->mem_va = rte_zmalloc("ulp_fc_info",
28 RTE_CACHE_LINE_ROUNDUP(size),
31 BNXT_TF_DBG(ERR, "Allocate failed mem_va\n");
35 rte_mem_lock_page(parms->mem_va);
37 parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
38 if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
39 BNXT_TF_DBG(ERR, "Allocate failed mem_pa\n");
47 ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
49 rte_free(parms->mem_va);
53 * Allocate and Initialize all Flow Counter Manager resources for this ulp
56 * ctxt [in] The ulp context for the Flow Counter manager.
60 ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
62 struct bnxt_ulp_device_params *dparms;
63 uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
64 struct bnxt_ulp_fc_info *ulp_fc_info;
68 BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
72 if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
73 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
77 dparms = bnxt_ulp_device_params_get(dev_id);
79 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
83 if (!dparms->flow_count_db_entries) {
84 BNXT_TF_DBG(DEBUG, "flow counter support is not enabled\n");
85 bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
89 ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
93 rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
95 PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
99 /* Add the FC info tbl to the ulp context. */
100 bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
102 sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
103 dparms->flow_count_db_entries;
105 for (i = 0; i < TF_DIR_MAX; i++) {
106 ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
107 sw_acc_cntr_tbl_sz, 0);
108 if (!ulp_fc_info->sw_acc_tbl[i])
112 hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;
114 for (i = 0; i < TF_DIR_MAX; i++) {
115 rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
124 ulp_fc_mgr_deinit(ctxt);
126 "Failed to allocate memory for fc mgr\n");
132 * Release all resources in the Flow Counter Manager for this ulp context
134 * ctxt [in] The ulp context for the Flow Counter manager
138 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
140 struct bnxt_ulp_fc_info *ulp_fc_info;
143 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
148 ulp_fc_mgr_thread_cancel(ctxt);
150 pthread_mutex_destroy(&ulp_fc_info->fc_lock);
152 for (i = 0; i < TF_DIR_MAX; i++)
153 rte_free(ulp_fc_info->sw_acc_tbl[i]);
155 for (i = 0; i < TF_DIR_MAX; i++)
156 ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);
158 rte_free(ulp_fc_info);
160 /* Safe to ignore on deinit */
161 (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
167 * Check if the alarm thread that walks through the flows is started
169 * ctxt [in] The ulp context for the flow counter manager
172 bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
174 struct bnxt_ulp_fc_info *ulp_fc_info;
176 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
179 return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
185 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
186 * data from the chip's internal flow counters
188 * ctxt [in] The ulp context for the flow counter manager
192 ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
194 struct bnxt_ulp_fc_info *ulp_fc_info;
196 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
198 if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
199 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
202 ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
209 * Cancel the alarm handler
211 * ctxt [in] The ulp context for the flow counter manager
214 void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
216 struct bnxt_ulp_fc_info *ulp_fc_info;
218 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
222 ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
223 rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
227 * DMA-in the raw counter data from the HW and accumulate in the
228 * local accumulator table using the TF-Core API
230 * tfp [in] The TF-Core context
232 * fc_info [in] The ULP Flow counter info ptr
234 * dir [in] The direction of the flow
236 * num_counters [in] The number of counters
239 __rte_unused static int32_t
240 ulp_bulk_get_flow_stats(struct tf *tfp,
241 struct bnxt_ulp_fc_info *fc_info,
243 struct bnxt_ulp_device_params *dparms)
244 /* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
247 struct tf_tbl_get_bulk_parms parms = { 0 };
248 enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD: Template? */
249 struct sw_acc_counter *sw_acc_tbl_entry = NULL;
250 uint64_t *stats = NULL;
255 parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
256 parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
259 * Size of an entry needs to obtained from template
261 parms.entry_sz_in_bytes = sizeof(uint64_t);
262 stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
263 parms.physical_mem_addr = (uint64_t)
264 ((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));
268 "BULK: Memory not initialized id:0x%x dir:%d\n",
269 parms.starting_idx, dir);
273 rc = tf_tbl_bulk_get(tfp, &parms);
276 "BULK: Get failed for id:0x%x rc:%d\n",
277 parms.starting_idx, rc);
281 for (i = 0; i < parms.num_entries; i++) {
282 /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
283 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
284 if (!sw_acc_tbl_entry->valid)
286 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
288 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
295 static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
297 struct bnxt_ulp_fc_info *fc_info,
300 struct bnxt_ulp_device_params *dparms)
303 struct tf_get_tbl_entry_parms parms = { 0 };
304 enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD:Template? */
305 struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
307 uint32_t sw_cntr_indx = 0;
311 parms.idx = hw_cntr_id;
314 * Size of an entry needs to obtained from template
316 parms.data_sz_in_bytes = sizeof(uint64_t);
317 parms.data = (uint8_t *)&stats;
318 rc = tf_get_tbl_entry(tfp, &parms);
321 "Get failed for id:0x%x rc:%d\n",
326 /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
327 sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
328 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
329 /* Some dpdk applications may accumulate the flow counters while some
330 * may not. In cases where the application is accumulating the counters
331 * the PMD need not do the accumulation itself and viceversa to report
332 * the correct flow counters.
334 if (ctxt->cfg_data->accum_stats) {
335 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
336 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
338 sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
339 sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
342 /* Update the parent counters if it is child flow */
343 if (sw_acc_tbl_entry->parent_flow_id) {
344 /* Update the parent counters */
345 t_sw = sw_acc_tbl_entry;
346 if (ulp_flow_db_parent_flow_count_update(ctxt,
347 t_sw->parent_flow_id,
350 PMD_DRV_LOG(ERR, "Error updating parent counters\n");
358 * Alarm handler that will issue the TF-Core API to fetch
359 * data from the chip's internal flow counters
361 * ctxt [in] The ulp context for the flow counter manager
366 ulp_fc_mgr_alarm_cb(void *arg)
371 struct bnxt_ulp_context *ctxt = arg;
372 struct bnxt_ulp_fc_info *ulp_fc_info;
373 struct bnxt_ulp_device_params *dparms;
375 uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;
377 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
381 if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
382 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
386 dparms = bnxt_ulp_device_params_get(dev_id);
388 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
392 tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
394 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
399 * Take the fc_lock to ensure no flow is destroyed
400 * during the bulk get
402 if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
405 if (!ulp_fc_info->num_entries) {
406 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
407 ulp_fc_mgr_thread_cancel(ctxt);
411 * Commented for now till GET_BULK is resolved, just get the first flow
413 for (i = 0; i < TF_DIR_MAX; i++) {
414 rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
415 dparms->flow_count_db_entries);
421 /* reset the parent accumulation counters before accumulation if any */
422 ulp_flow_db_parent_flow_count_reset(ctxt);
424 num_entries = dparms->flow_count_db_entries / 2;
425 for (i = 0; i < TF_DIR_MAX; i++) {
426 for (j = 0; j < num_entries; j++) {
427 if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
429 hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
430 rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
437 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
440 * If cmd fails once, no need of
441 * invoking again every second
445 ulp_fc_mgr_thread_cancel(ctxt);
449 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
455 * Set the starting index that indicates the first HW flow
458 * ctxt [in] The ulp context for the flow counter manager
460 * dir [in] The direction of the flow
462 * start_idx [in] The HW flow counter ID
465 bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
467 struct bnxt_ulp_fc_info *ulp_fc_info;
469 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
472 return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
478 * Set the starting index that indicates the first HW flow
481 * ctxt [in] The ulp context for the flow counter manager
483 * dir [in] The direction of the flow
485 * start_idx [in] The HW flow counter ID
488 int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
491 struct bnxt_ulp_fc_info *ulp_fc_info;
493 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
498 if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
499 ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
500 ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
507 * Set the corresponding SW accumulator table entry based on
508 * the difference between this counter ID and the starting
509 * counter ID. Also, keep track of num of active counter enabled
512 * ctxt [in] The ulp context for the flow counter manager
514 * dir [in] The direction of the flow
516 * hw_cntr_id [in] The HW flow counter ID
519 int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
522 struct bnxt_ulp_fc_info *ulp_fc_info;
523 uint32_t sw_cntr_idx;
525 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
529 pthread_mutex_lock(&ulp_fc_info->fc_lock);
530 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
531 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
532 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
533 ulp_fc_info->num_entries++;
534 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
540 * Reset the corresponding SW accumulator table entry based on
541 * the difference between this counter ID and the starting
544 * ctxt [in] The ulp context for the flow counter manager
546 * dir [in] The direction of the flow
548 * hw_cntr_id [in] The HW flow counter ID
551 int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
554 struct bnxt_ulp_fc_info *ulp_fc_info;
555 uint32_t sw_cntr_idx;
557 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
561 pthread_mutex_lock(&ulp_fc_info->fc_lock);
562 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
563 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
564 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
565 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
566 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
567 ulp_fc_info->num_entries--;
568 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
574 * Fill the rte_flow_query_count 'data' argument passed
575 * in the rte_flow_query() with the values obtained and
576 * accumulated locally.
578 * ctxt [in] The ulp context for the flow counter manager
580 * flow_id [in] The HW flow ID
582 * count [out] The rte_flow_query_count 'data' that is set
585 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
587 struct rte_flow_query_count *count)
590 uint32_t nxt_resource_index = 0;
591 struct bnxt_ulp_fc_info *ulp_fc_info;
592 struct ulp_flow_db_res_params params;
594 uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
595 struct sw_acc_counter *sw_acc_tbl_entry;
596 bool found_cntr_resource = false;
598 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
602 if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
606 rc = ulp_flow_db_resource_get(ctxt,
607 BNXT_ULP_FDB_TYPE_REGULAR,
611 if (params.resource_func ==
612 BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
613 (params.resource_sub_type ==
614 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
615 params.resource_sub_type ==
616 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT ||
617 params.resource_sub_type ==
618 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC)) {
619 found_cntr_resource = true;
622 } while (!rc && nxt_resource_index);
624 bnxt_ulp_cntxt_release_fdb_lock(ctxt);
626 if (rc || !found_cntr_resource)
629 dir = params.direction;
630 hw_cntr_id = params.resource_hndl;
631 if (params.resource_sub_type ==
632 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
634 * Think about optimizing with try_lock later
636 pthread_mutex_lock(&ulp_fc_info->fc_lock);
637 sw_cntr_idx = hw_cntr_id -
638 ulp_fc_info->shadow_hw_tbl[dir].start_idx;
639 sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
640 if (sw_acc_tbl_entry->pkt_count) {
642 count->bytes_set = 1;
643 count->hits = sw_acc_tbl_entry->pkt_count;
644 count->bytes = sw_acc_tbl_entry->byte_count;
647 sw_acc_tbl_entry->pkt_count = 0;
648 sw_acc_tbl_entry->byte_count = 0;
650 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
651 } else if (params.resource_sub_type ==
652 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
653 /* Get stats from the parent child table */
654 ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
655 &count->hits, &count->bytes,
658 count->bytes_set = 1;
660 /* TBD: Handle External counters */
668 * Set the parent flow if it is SW accumulation counter entry.
670 * ctxt [in] The ulp context for the flow counter manager
672 * dir [in] The direction of the flow
674 * hw_cntr_id [in] The HW flow counter ID
676 * fid [in] parent flow id
679 int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
684 struct bnxt_ulp_fc_info *ulp_fc_info;
685 uint32_t sw_cntr_idx;
688 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
692 pthread_mutex_lock(&ulp_fc_info->fc_lock);
693 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
694 if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
695 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
697 BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
701 pthread_mutex_unlock(&ulp_fc_info->fc_lock);