net/bnxt: add templates for shared sessions
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_fc_mgr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
9 #include <rte_log.h>
10 #include <rte_alarm.h>
11 #include "bnxt.h"
12 #include "bnxt_ulp.h"
13 #include "bnxt_tf_common.h"
14 #include "ulp_fc_mgr.h"
15 #include "ulp_flow_db.h"
16 #include "ulp_template_db_enum.h"
17 #include "ulp_template_struct.h"
18 #include "tf_tbl.h"
19
20 static int
21 ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
22 {
23         /* Allocate memory*/
24         if (!parms)
25                 return -EINVAL;
26
27         parms->mem_va = rte_zmalloc("ulp_fc_info",
28                                     RTE_CACHE_LINE_ROUNDUP(size),
29                                     4096);
30         if (!parms->mem_va) {
31                 BNXT_TF_DBG(ERR, "Allocate failed mem_va\n");
32                 return -ENOMEM;
33         }
34
35         rte_mem_lock_page(parms->mem_va);
36
37         parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
38         if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
39                 BNXT_TF_DBG(ERR, "Allocate failed mem_pa\n");
40                 return -ENOMEM;
41         }
42
43         return 0;
44 }
45
46 static void
47 ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
48 {
49         rte_free(parms->mem_va);
50 }
51
52 /*
53  * Allocate and Initialize all Flow Counter Manager resources for this ulp
54  * context.
55  *
56  * ctxt [in] The ulp context for the Flow Counter manager.
57  *
58  */
59 int32_t
60 ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
61 {
62         struct bnxt_ulp_device_params *dparms;
63         uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
64         struct bnxt_ulp_fc_info *ulp_fc_info;
65         int i, rc;
66
67         if (!ctxt) {
68                 BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
69                 return -EINVAL;
70         }
71
72         if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
73                 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
74                 return -EINVAL;
75         }
76
77         dparms = bnxt_ulp_device_params_get(dev_id);
78         if (!dparms) {
79                 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
80                 return -EINVAL;
81         }
82
83         if (!dparms->flow_count_db_entries) {
84                 BNXT_TF_DBG(DEBUG, "flow counter support is not enabled\n");
85                 bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
86                 return 0;
87         }
88
89         ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
90         if (!ulp_fc_info)
91                 goto error;
92
93         rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
94         if (rc) {
95                 PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
96                 goto error;
97         }
98
99         /* Add the FC info tbl to the ulp context. */
100         bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
101
102         sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
103                                 dparms->flow_count_db_entries;
104
105         for (i = 0; i < TF_DIR_MAX; i++) {
106                 ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
107                                                          sw_acc_cntr_tbl_sz, 0);
108                 if (!ulp_fc_info->sw_acc_tbl[i])
109                         goto error;
110         }
111
112         hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;
113
114         for (i = 0; i < TF_DIR_MAX; i++) {
115                 rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
116                                                  hw_fc_mem_info_sz);
117                 if (rc)
118                         goto error;
119         }
120
121         return 0;
122
123 error:
124         ulp_fc_mgr_deinit(ctxt);
125         BNXT_TF_DBG(DEBUG,
126                     "Failed to allocate memory for fc mgr\n");
127
128         return -ENOMEM;
129 }
130
131 /*
132  * Release all resources in the Flow Counter Manager for this ulp context
133  *
134  * ctxt [in] The ulp context for the Flow Counter manager
135  *
136  */
137 int32_t
138 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
139 {
140         struct bnxt_ulp_fc_info *ulp_fc_info;
141         int i;
142
143         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
144
145         if (!ulp_fc_info)
146                 return -EINVAL;
147
148         ulp_fc_mgr_thread_cancel(ctxt);
149
150         pthread_mutex_destroy(&ulp_fc_info->fc_lock);
151
152         for (i = 0; i < TF_DIR_MAX; i++)
153                 rte_free(ulp_fc_info->sw_acc_tbl[i]);
154
155         for (i = 0; i < TF_DIR_MAX; i++)
156                 ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);
157
158         rte_free(ulp_fc_info);
159
160         /* Safe to ignore on deinit */
161         (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
162
163         return 0;
164 }
165
166 /*
167  * Check if the alarm thread that walks through the flows is started
168  *
169  * ctxt [in] The ulp context for the flow counter manager
170  *
171  */
172 bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
173 {
174         struct bnxt_ulp_fc_info *ulp_fc_info;
175
176         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
177
178         if (ulp_fc_info)
179                 return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
180
181         return false;
182 }
183
184 /*
185  * Setup the Flow counter timer thread that will fetch/accumulate raw counter
186  * data from the chip's internal flow counters
187  *
188  * ctxt [in] The ulp context for the flow counter manager
189  *
190  */
191 int32_t
192 ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
193 {
194         struct bnxt_ulp_fc_info *ulp_fc_info;
195
196         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
197
198         if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
199                 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
200                                   ulp_fc_mgr_alarm_cb,
201                                   (void *)ctxt);
202                 ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
203         }
204
205         return 0;
206 }
207
208 /*
209  * Cancel the alarm handler
210  *
211  * ctxt [in] The ulp context for the flow counter manager
212  *
213  */
214 void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
215 {
216         struct bnxt_ulp_fc_info *ulp_fc_info;
217
218         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
219         if (!ulp_fc_info)
220                 return;
221
222         ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
223         rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
224 }
225
226 /*
227  * DMA-in the raw counter data from the HW and accumulate in the
228  * local accumulator table using the TF-Core API
229  *
230  * tfp [in] The TF-Core context
231  *
232  * fc_info [in] The ULP Flow counter info ptr
233  *
234  * dir [in] The direction of the flow
235  *
236  * num_counters [in] The number of counters
237  *
238  */
239 __rte_unused static int32_t
240 ulp_bulk_get_flow_stats(struct tf *tfp,
241                         struct bnxt_ulp_fc_info *fc_info,
242                         enum tf_dir dir,
243                         struct bnxt_ulp_device_params *dparms)
244 /* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
245 {
246         int rc = 0;
247         struct tf_tbl_get_bulk_parms parms = { 0 };
248         enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
249         struct sw_acc_counter *sw_acc_tbl_entry = NULL;
250         uint64_t *stats = NULL;
251         uint16_t i = 0;
252
253         parms.dir = dir;
254         parms.type = stype;
255         parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
256         parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
257         /*
258          * TODO:
259          * Size of an entry needs to obtained from template
260          */
261         parms.entry_sz_in_bytes = sizeof(uint64_t);
262         stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
263         parms.physical_mem_addr = (uint64_t)
264                 ((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));
265
266         if (!stats) {
267                 PMD_DRV_LOG(ERR,
268                             "BULK: Memory not initialized id:0x%x dir:%d\n",
269                             parms.starting_idx, dir);
270                 return -EINVAL;
271         }
272
273         rc = tf_tbl_bulk_get(tfp, &parms);
274         if (rc) {
275                 PMD_DRV_LOG(ERR,
276                             "BULK: Get failed for id:0x%x rc:%d\n",
277                             parms.starting_idx, rc);
278                 return rc;
279         }
280
281         for (i = 0; i < parms.num_entries; i++) {
282                 /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
283                 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
284                 if (!sw_acc_tbl_entry->valid)
285                         continue;
286                 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
287                                                               dparms);
288                 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
289                                                                 dparms);
290         }
291
292         return rc;
293 }
294
295 static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
296                                     struct tf *tfp,
297                                     struct bnxt_ulp_fc_info *fc_info,
298                                     enum tf_dir dir,
299                                     uint32_t hw_cntr_id,
300                                     struct bnxt_ulp_device_params *dparms)
301 {
302         int rc = 0;
303         struct tf_get_tbl_entry_parms parms = { 0 };
304         enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD:Template? */
305         struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
306         uint64_t stats = 0;
307         uint32_t sw_cntr_indx = 0;
308
309         parms.dir = dir;
310         parms.type = stype;
311         parms.idx = hw_cntr_id;
312         /*
313          * TODO:
314          * Size of an entry needs to obtained from template
315          */
316         parms.data_sz_in_bytes = sizeof(uint64_t);
317         parms.data = (uint8_t *)&stats;
318         rc = tf_get_tbl_entry(tfp, &parms);
319         if (rc) {
320                 PMD_DRV_LOG(ERR,
321                             "Get failed for id:0x%x rc:%d\n",
322                             parms.idx, rc);
323                 return rc;
324         }
325
326         /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
327         sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
328         sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
329         /* Some dpdk applications may accumulate the flow counters while some
330          * may not. In cases where the application is accumulating the counters
331          * the PMD need not do the accumulation itself and viceversa to report
332          * the correct flow counters.
333          */
334         if (ctxt->cfg_data->accum_stats) {
335                 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
336                 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
337         } else {
338                 sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
339                 sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
340         }
341
342         /* Update the parent counters if it is child flow */
343         if (sw_acc_tbl_entry->parent_flow_id) {
344                 /* Update the parent counters */
345                 t_sw = sw_acc_tbl_entry;
346                 if (ulp_flow_db_parent_flow_count_update(ctxt,
347                                                          t_sw->parent_flow_id,
348                                                          t_sw->pkt_count,
349                                                          t_sw->byte_count)) {
350                         PMD_DRV_LOG(ERR, "Error updating parent counters\n");
351                 }
352         }
353
354         return rc;
355 }
356
357 /*
358  * Alarm handler that will issue the TF-Core API to fetch
359  * data from the chip's internal flow counters
360  *
361  * ctxt [in] The ulp context for the flow counter manager
362  *
363  */
364
void
ulp_fc_mgr_alarm_cb(void *arg)
{
	int rc = 0;
	unsigned int j;
	enum tf_dir i;
	struct bnxt_ulp_context *ctxt = arg;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_device_params *dparms;
	struct tf *tfp;
	uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;

	/* If FC manager is gone (deinit raced the alarm), just stop;
	 * nothing re-arms the alarm, so polling ends here.
	 */
	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
		return;
	}

	/* Counter reads always go through the non-shared TF session */
	tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return;
	}

	/*
	 * Take the fc_lock to ensure no flow is destroyed
	 * during the bulk get
	 */
	/* trylock, not lock: if the lock is busy, skip this tick and
	 * re-arm via the 'out' label rather than blocking the alarm thread.
	 */
	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
		goto out;

	/* No active counters left: stop polling entirely (cancel, no re-arm) */
	if (!ulp_fc_info->num_entries) {
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
	/*
	 * Commented for now till GET_BULK is resolved, just get the first flow
	 * stat for now
	 for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
					     dparms->flow_count_db_entries);
		if (rc)
			break;
	}
	*/

	/* reset the parent accumulation counters before accumulation if any */
	ulp_flow_db_parent_flow_count_reset(ctxt);

	/* Per direction: flow_count_db_entries is split evenly rx/tx */
	num_entries = dparms->flow_count_db_entries / 2;
	for (i = 0; i < TF_DIR_MAX; i++) {
		for (j = 0; j < num_entries; j++) {
			if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
				continue;
			hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
			/* NOTE(review): 'break' only exits this direction's
			 * loop; the other direction is still attempted, and
			 * the final rc decides whether polling continues.
			 */
			rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
						      hw_cntr_id, dparms);
			if (rc)
				break;
		}
	}

	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	/*
	 * If cmd fails once, no need of
	 * invoking again every second
	 */

	if (rc) {
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
out:
	/* Re-arm the one-shot alarm for the next polling interval */
	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
			  ulp_fc_mgr_alarm_cb,
			  (void *)ctxt);
}
453
454 /*
455  * Set the starting index that indicates the first HW flow
456  * counter ID
457  *
458  * ctxt [in] The ulp context for the flow counter manager
459  *
460  * dir [in] The direction of the flow
461  *
462  * start_idx [in] The HW flow counter ID
463  *
464  */
465 bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
466 {
467         struct bnxt_ulp_fc_info *ulp_fc_info;
468
469         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
470
471         if (ulp_fc_info)
472                 return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
473
474         return false;
475 }
476
477 /*
478  * Set the starting index that indicates the first HW flow
479  * counter ID
480  *
481  * ctxt [in] The ulp context for the flow counter manager
482  *
483  * dir [in] The direction of the flow
484  *
485  * start_idx [in] The HW flow counter ID
486  *
487  */
488 int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
489                                  uint32_t start_idx)
490 {
491         struct bnxt_ulp_fc_info *ulp_fc_info;
492
493         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
494
495         if (!ulp_fc_info)
496                 return -EIO;
497
498         if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
499                 ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
500                 ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
501         }
502
503         return 0;
504 }
505
506 /*
507  * Set the corresponding SW accumulator table entry based on
508  * the difference between this counter ID and the starting
509  * counter ID. Also, keep track of num of active counter enabled
510  * flows.
511  *
512  * ctxt [in] The ulp context for the flow counter manager
513  *
514  * dir [in] The direction of the flow
515  *
516  * hw_cntr_id [in] The HW flow counter ID
517  *
518  */
519 int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
520                             uint32_t hw_cntr_id)
521 {
522         struct bnxt_ulp_fc_info *ulp_fc_info;
523         uint32_t sw_cntr_idx;
524
525         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
526         if (!ulp_fc_info)
527                 return -EIO;
528
529         pthread_mutex_lock(&ulp_fc_info->fc_lock);
530         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
531         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
532         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
533         ulp_fc_info->num_entries++;
534         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
535
536         return 0;
537 }
538
539 /*
540  * Reset the corresponding SW accumulator table entry based on
541  * the difference between this counter ID and the starting
542  * counter ID.
543  *
544  * ctxt [in] The ulp context for the flow counter manager
545  *
546  * dir [in] The direction of the flow
547  *
548  * hw_cntr_id [in] The HW flow counter ID
549  *
550  */
551 int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
552                               uint32_t hw_cntr_id)
553 {
554         struct bnxt_ulp_fc_info *ulp_fc_info;
555         uint32_t sw_cntr_idx;
556
557         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
558         if (!ulp_fc_info)
559                 return -EIO;
560
561         pthread_mutex_lock(&ulp_fc_info->fc_lock);
562         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
563         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
564         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
565         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
566         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
567         ulp_fc_info->num_entries--;
568         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
569
570         return 0;
571 }
572
573 /*
574  * Fill the rte_flow_query_count 'data' argument passed
575  * in the rte_flow_query() with the values obtained and
576  * accumulated locally.
577  *
578  * ctxt [in] The ulp context for the flow counter manager
579  *
580  * flow_id [in] The HW flow ID
581  *
582  * count [out] The rte_flow_query_count 'data' that is set
583  *
584  */
585 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
586                                uint32_t flow_id,
587                                struct rte_flow_query_count *count)
588 {
589         int rc = 0;
590         uint32_t nxt_resource_index = 0;
591         struct bnxt_ulp_fc_info *ulp_fc_info;
592         struct ulp_flow_db_res_params params;
593         enum tf_dir dir;
594         uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
595         struct sw_acc_counter *sw_acc_tbl_entry;
596         bool found_cntr_resource = false;
597
598         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
599         if (!ulp_fc_info)
600                 return -ENODEV;
601
602         if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
603                 return -EIO;
604
605         do {
606                 rc = ulp_flow_db_resource_get(ctxt,
607                                               BNXT_ULP_FDB_TYPE_REGULAR,
608                                               flow_id,
609                                               &nxt_resource_index,
610                                               &params);
611                 if (params.resource_func ==
612                      BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
613                      (params.resource_sub_type ==
614                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
615                       params.resource_sub_type ==
616                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT ||
617                       params.resource_sub_type ==
618                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC)) {
619                         found_cntr_resource = true;
620                         break;
621                 }
622         } while (!rc && nxt_resource_index);
623
624         bnxt_ulp_cntxt_release_fdb_lock(ctxt);
625
626         if (rc || !found_cntr_resource)
627                 return rc;
628
629         dir = params.direction;
630         hw_cntr_id = params.resource_hndl;
631         if (params.resource_sub_type ==
632                         BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
633                 /* TODO:
634                  * Think about optimizing with try_lock later
635                  */
636                 pthread_mutex_lock(&ulp_fc_info->fc_lock);
637                 sw_cntr_idx = hw_cntr_id -
638                         ulp_fc_info->shadow_hw_tbl[dir].start_idx;
639                 sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
640                 if (sw_acc_tbl_entry->pkt_count) {
641                         count->hits_set = 1;
642                         count->bytes_set = 1;
643                         count->hits = sw_acc_tbl_entry->pkt_count;
644                         count->bytes = sw_acc_tbl_entry->byte_count;
645                 }
646                 if (count->reset) {
647                         sw_acc_tbl_entry->pkt_count = 0;
648                         sw_acc_tbl_entry->byte_count = 0;
649                 }
650                 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
651         } else if (params.resource_sub_type ==
652                         BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
653                 /* Get stats from the parent child table */
654                 ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
655                                                   &count->hits, &count->bytes,
656                                                   count->reset);
657                 count->hits_set = 1;
658                 count->bytes_set = 1;
659         } else {
660                 /* TBD: Handle External counters */
661                 rc = -EINVAL;
662         }
663
664         return rc;
665 }
666
667 /*
668  * Set the parent flow if it is SW accumulation counter entry.
669  *
670  * ctxt [in] The ulp context for the flow counter manager
671  *
672  * dir [in] The direction of the flow
673  *
674  * hw_cntr_id [in] The HW flow counter ID
675  *
676  * fid [in] parent flow id
677  *
678  */
679 int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
680                                         enum tf_dir dir,
681                                         uint32_t hw_cntr_id,
682                                         uint32_t fid)
683 {
684         struct bnxt_ulp_fc_info *ulp_fc_info;
685         uint32_t sw_cntr_idx;
686         int32_t rc = 0;
687
688         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
689         if (!ulp_fc_info)
690                 return -EIO;
691
692         pthread_mutex_lock(&ulp_fc_info->fc_lock);
693         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
694         if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
695                 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
696         } else {
697                 BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
698                             hw_cntr_id, fid);
699                 rc = -ENOENT;
700         }
701         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
702
703         return rc;
704 }