1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2020 Broadcom
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
10 #include <rte_alarm.h>
13 #include "bnxt_tf_common.h"
14 #include "ulp_fc_mgr.h"
15 #include "ulp_flow_db.h"
16 #include "ulp_template_db_enum.h"
17 #include "ulp_template_struct.h"
21 ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
27 parms->mem_va = rte_zmalloc("ulp_fc_info",
28 RTE_CACHE_LINE_ROUNDUP(size),
30 if (parms->mem_va == NULL) {
31 BNXT_TF_DBG(ERR, "Allocate failed mem_va\n");
35 rte_mem_lock_page(parms->mem_va);
37 parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
38 if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
39 BNXT_TF_DBG(ERR, "Allocate failed mem_pa\n");
47 ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
49 rte_free(parms->mem_va);
53 * Allocate and initialize all Flow Counter Manager resources for this ulp context.
56 * ctxt [in] The ulp context for the Flow Counter manager.
60 ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
62 struct bnxt_ulp_device_params *dparms;
63 uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
64 struct bnxt_ulp_fc_info *ulp_fc_info;
68 BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
72 if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
73 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
77 dparms = bnxt_ulp_device_params_get(dev_id);
79 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
83 ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
87 rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
89 PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
93 /* Add the FC info tbl to the ulp context. */
94 bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
96 sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
97 dparms->flow_count_db_entries;
99 for (i = 0; i < TF_DIR_MAX; i++) {
100 ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
101 sw_acc_cntr_tbl_sz, 0);
102 if (!ulp_fc_info->sw_acc_tbl[i])
106 hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;
108 for (i = 0; i < TF_DIR_MAX; i++) {
109 rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
118 ulp_fc_mgr_deinit(ctxt);
120 "Failed to allocate memory for fc mgr\n");
126 * Release all resources in the Flow Counter Manager for this ulp context
128 * ctxt [in] The ulp context for the Flow Counter manager
132 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
134 struct bnxt_ulp_fc_info *ulp_fc_info;
137 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
142 ulp_fc_mgr_thread_cancel(ctxt);
144 pthread_mutex_destroy(&ulp_fc_info->fc_lock);
146 for (i = 0; i < TF_DIR_MAX; i++)
147 rte_free(ulp_fc_info->sw_acc_tbl[i]);
149 for (i = 0; i < TF_DIR_MAX; i++)
150 ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);
153 rte_free(ulp_fc_info);
155 /* Safe to ignore on deinit */
156 (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
162 * Check if the alarm thread that walks through the flows is started
164 * ctxt [in] The ulp context for the flow counter manager
167 bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
169 struct bnxt_ulp_fc_info *ulp_fc_info;
171 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
173 return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
177 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
178 * data from the chip's internal flow counters
180 * ctxt [in] The ulp context for the flow counter manager
184 ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
186 struct bnxt_ulp_fc_info *ulp_fc_info;
188 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
190 if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
191 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
194 ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
201 * Cancel the alarm handler
203 * ctxt [in] The ulp context for the flow counter manager
206 void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
208 struct bnxt_ulp_fc_info *ulp_fc_info;
210 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
214 ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
215 rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
219 * DMA-in the raw counter data from the HW and accumulate in the
220 * local accumulator table using the TF-Core API
222 * tfp [in] The TF-Core context
224 * fc_info [in] The ULP Flow counter info ptr
226 * dir [in] The direction of the flow
228 * num_counters [in] The number of counters
231 __rte_unused static int32_t
232 ulp_bulk_get_flow_stats(struct tf *tfp,
233 struct bnxt_ulp_fc_info *fc_info,
235 struct bnxt_ulp_device_params *dparms)
236 /* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
239 struct tf_tbl_get_bulk_parms parms = { 0 };
240 enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD: Template? */
241 struct sw_acc_counter *sw_acc_tbl_entry = NULL;
242 uint64_t *stats = NULL;
247 parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
248 parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
251 * Size of an entry needs to obtained from template
253 parms.entry_sz_in_bytes = sizeof(uint64_t);
254 stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
255 parms.physical_mem_addr = (uintptr_t)fc_info->shadow_hw_tbl[dir].mem_pa;
259 "BULK: Memory not initialized id:0x%x dir:%d\n",
260 parms.starting_idx, dir);
264 rc = tf_tbl_bulk_get(tfp, &parms);
267 "BULK: Get failed for id:0x%x rc:%d\n",
268 parms.starting_idx, rc);
272 for (i = 0; i < parms.num_entries; i++) {
273 /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
274 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
275 if (!sw_acc_tbl_entry->valid)
277 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i], dparms);
278 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
285 static int ulp_get_single_flow_stat(struct tf *tfp,
286 struct bnxt_ulp_fc_info *fc_info,
289 struct bnxt_ulp_device_params *dparms)
292 struct tf_get_tbl_entry_parms parms = { 0 };
293 enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64; /* TBD:Template? */
294 struct sw_acc_counter *sw_acc_tbl_entry = NULL;
296 uint32_t sw_cntr_indx = 0;
300 parms.idx = hw_cntr_id;
303 * Size of an entry needs to obtained from template
305 parms.data_sz_in_bytes = sizeof(uint64_t);
306 parms.data = (uint8_t *)&stats;
307 rc = tf_get_tbl_entry(tfp, &parms);
310 "Get failed for id:0x%x rc:%d\n",
315 /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
316 sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
317 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
318 sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
319 sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
325 * Alarm handler that will issue the TF-Core API to fetch
326 * data from the chip's internal flow counters
328 * ctxt [in] The ulp context for the flow counter manager
333 ulp_fc_mgr_alarm_cb(void *arg)
338 struct bnxt_ulp_context *ctxt = arg;
339 struct bnxt_ulp_fc_info *ulp_fc_info;
340 struct bnxt_ulp_device_params *dparms;
342 uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;
344 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
348 if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
349 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
353 dparms = bnxt_ulp_device_params_get(dev_id);
355 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
359 tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
361 BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
366 * Take the fc_lock to ensure no flow is destroyed
367 * during the bulk get
369 if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
372 if (!ulp_fc_info->num_entries) {
373 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
374 ulp_fc_mgr_thread_cancel(ctxt);
378 * Commented for now till GET_BULK is resolved, just get the first flow
380 for (i = 0; i < TF_DIR_MAX; i++) {
381 rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
382 dparms->flow_count_db_entries);
387 num_entries = dparms->flow_count_db_entries / 2;
388 for (i = 0; i < TF_DIR_MAX; i++) {
389 for (j = 0; j < num_entries; j++) {
390 if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
392 hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
393 rc = ulp_get_single_flow_stat(tfp, ulp_fc_info, i,
400 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
403 * If cmd fails once, no need of
404 * invoking again every second
408 ulp_fc_mgr_thread_cancel(ctxt);
412 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
418 * Check whether the starting index that indicates the first HW flow
419 * counter ID has been set for the given direction.
421 * ctxt [in] The ulp context for the flow counter manager
423 * dir [in] The direction of the flow
428 bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
430 struct bnxt_ulp_fc_info *ulp_fc_info;
432 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
434 return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
438 * Set the starting index that indicates the first HW flow
441 * ctxt [in] The ulp context for the flow counter manager
443 * dir [in] The direction of the flow
445 * start_idx [in] The HW flow counter ID
448 int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
451 struct bnxt_ulp_fc_info *ulp_fc_info;
453 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
458 if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
459 ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
460 ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
467 * Set the corresponding SW accumulator table entry based on
468 * the difference between this counter ID and the starting
469 * counter ID. Also, keep track of num of active counter enabled
472 * ctxt [in] The ulp context for the flow counter manager
474 * dir [in] The direction of the flow
476 * hw_cntr_id [in] The HW flow counter ID
479 int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
482 struct bnxt_ulp_fc_info *ulp_fc_info;
483 uint32_t sw_cntr_idx;
485 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
489 pthread_mutex_lock(&ulp_fc_info->fc_lock);
490 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
491 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
492 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
493 ulp_fc_info->num_entries++;
494 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
500 * Reset the corresponding SW accumulator table entry based on
501 * the difference between this counter ID and the starting
504 * ctxt [in] The ulp context for the flow counter manager
506 * dir [in] The direction of the flow
508 * hw_cntr_id [in] The HW flow counter ID
511 int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
514 struct bnxt_ulp_fc_info *ulp_fc_info;
515 uint32_t sw_cntr_idx;
517 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
521 pthread_mutex_lock(&ulp_fc_info->fc_lock);
522 sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
523 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
524 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
525 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
526 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
527 ulp_fc_info->num_entries--;
528 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
534 * Fill the rte_flow_query_count 'data' argument passed
535 * in the rte_flow_query() with the values obtained and
536 * accumulated locally.
538 * ctxt [in] The ulp context for the flow counter manager
540 * flow_id [in] The HW flow ID
542 * count [out] The rte_flow_query_count 'data' that is set
545 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
547 struct rte_flow_query_count *count)
550 uint32_t nxt_resource_index = 0;
551 struct bnxt_ulp_fc_info *ulp_fc_info;
552 struct ulp_flow_db_res_params params;
554 uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
555 struct sw_acc_counter *sw_acc_tbl_entry;
556 bool found_cntr_resource = false;
558 ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
563 rc = ulp_flow_db_resource_get(ctxt,
564 BNXT_ULP_FDB_TYPE_REGULAR,
568 if (params.resource_func ==
569 BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
570 (params.resource_sub_type ==
571 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT ||
572 params.resource_sub_type ==
573 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_EXT_COUNT)) {
574 found_cntr_resource = true;
583 if (found_cntr_resource) {
584 dir = params.direction;
585 hw_cntr_id = params.resource_hndl;
586 sw_cntr_idx = hw_cntr_id -
587 ulp_fc_info->shadow_hw_tbl[dir].start_idx;
588 sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
589 if (params.resource_sub_type ==
590 BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) {
591 pthread_mutex_lock(&ulp_fc_info->fc_lock);
592 if (sw_acc_tbl_entry->pkt_count) {
594 count->bytes_set = 1;
595 count->hits = sw_acc_tbl_entry->pkt_count;
596 count->bytes = sw_acc_tbl_entry->byte_count;
599 sw_acc_tbl_entry->pkt_count = 0;
600 sw_acc_tbl_entry->byte_count = 0;
602 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
604 /* TBD: Handle External counters */