/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "tf_tbl.h"

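/*
 * Shadow HW counter table helpers. The buffer is allocated zeroed and
 * page aligned, locked into memory and translated to a physical address
 * so it can be used as a DMA target for the TF-Core bulk counter read,
 * while the driver parses the counters through mem_va.
 */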
static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
        /* Allocate memory */
        if (!parms)
                return -EINVAL;

        parms->mem_va = rte_zmalloc("ulp_fc_info",
                                    RTE_CACHE_LINE_ROUNDUP(size),
                                    4096);
        if (!parms->mem_va) {
                BNXT_TF_DBG(ERR, "Failed to allocate mem_va\n");
                return -ENOMEM;
        }

        rte_mem_lock_page(parms->mem_va);

        parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
        if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
                BNXT_TF_DBG(ERR, "Failed to allocate mem_pa\n");
                return -ENOMEM;
        }

        return 0;
}

static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
        rte_free(parms->mem_va);
}

/*
 * Allocate and Initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
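/*
 * Sizing note (illustrative example only): with flow_count_db_entries set
 * to 1024 in the device params, each direction gets a SW accumulator table
 * of 1024 * sizeof(struct sw_acc_counter) bytes plus a DMA-able shadow HW
 * table of 1024 * sizeof(uint64_t) = 8 KB, allocated once per direction
 * (TF_DIR_MAX).
 */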
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_device_params *dparms;
        uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        int i, rc;

        if (!ctxt) {
                BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
                return -EINVAL;
        }

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
                return -EINVAL;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
                return -EINVAL;
        }

        if (!dparms->flow_count_db_entries) {
                BNXT_TF_DBG(DEBUG, "flow counter support is not enabled\n");
                bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
                return 0;
        }

        ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
        if (!ulp_fc_info)
                goto error;

        rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
        if (rc) {
                PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
                goto error;
        }

        /* Add the FC info tbl to the ulp context. */
        bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

        sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
                                dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
                                                         sw_acc_cntr_tbl_sz, 0);
                if (!ulp_fc_info->sw_acc_tbl[i])
                        goto error;
        }

        hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
                                                 hw_fc_mem_info_sz);
                if (rc)
                        goto error;
        }

        return 0;

error:
        ulp_fc_mgr_deinit(ctxt);
        BNXT_TF_DBG(DEBUG,
                    "Failed to allocate memory for fc mgr\n");

        return -ENOMEM;
}

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        int i;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EINVAL;

        ulp_fc_mgr_thread_cancel(ctxt);

        pthread_mutex_destroy(&ulp_fc_info->fc_lock);

        for (i = 0; i < TF_DIR_MAX; i++)
                rte_free(ulp_fc_info->sw_acc_tbl[i]);

        for (i = 0; i < TF_DIR_MAX; i++)
                ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);

        rte_free(ulp_fc_info);

        /* Safe to ignore on deinit */
        (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

        return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (ulp_fc_info)
                return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);

        return false;
}

/*
 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
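/*
 * Note: despite the "thread" naming, no dedicated pthread is created; the
 * poller is an rte_eal_alarm callback (ulp_fc_mgr_alarm_cb) that re-arms
 * itself every ULP_FC_TIMER seconds. A typical caller (illustrative only)
 * would do:
 *
 *      if (!ulp_fc_mgr_thread_isstarted(ctxt))
 *              ulp_fc_mgr_thread_start(ctxt);
 */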
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
                rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                                  ulp_fc_mgr_alarm_cb, NULL);
                ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
        }

        return 0;
}

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return;

        ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
        rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, NULL);
}

/*
 * DMA-in the raw counter data from the HW and accumulate it in the
 * local accumulator table using the TF-Core API
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * dparms [in] The device parameters
 *
 */
__rte_unused static int32_t
ulp_bulk_get_flow_stats(struct tf *tfp,
                        struct bnxt_ulp_fc_info *fc_info,
                        enum tf_dir dir,
                        struct bnxt_ulp_device_params *dparms)
/* MARKED UNUSED FOR NOW TO AVOID COMPILATION ERRORS UNTIL THE API IS RESOLVED */
{
        int rc = 0;
        struct tf_tbl_get_bulk_parms parms = { 0 };
        enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
        struct sw_acc_counter *sw_acc_tbl_entry = NULL;
        uint64_t *stats = NULL;
        uint16_t i = 0;

        parms.dir = dir;
        parms.type = stype;
        parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
        parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
        /*
         * TODO:
         * Size of an entry needs to be obtained from the template
         */
        parms.entry_sz_in_bytes = sizeof(uint64_t);
        stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
        parms.physical_mem_addr = (uint64_t)
                ((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));

        if (!stats) {
                PMD_DRV_LOG(ERR,
                            "BULK: Memory not initialized id:0x%x dir:%d\n",
                            parms.starting_idx, dir);
                return -EINVAL;
        }

        rc = tf_tbl_bulk_get(tfp, &parms);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "BULK: Get failed for id:0x%x rc:%d\n",
                            parms.starting_idx, rc);
                return rc;
        }

        for (i = 0; i < parms.num_entries; i++) {
                /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
                sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
                if (!sw_acc_tbl_entry->valid)
                        continue;
                sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
                                                              dparms);
                sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
                                                                dparms);
        }

        return rc;
}

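/*
 * Read a single 64-bit stats entry from the HW counter table and fold it
 * into the matching SW accumulator entry. The SW index is derived from
 * the HW counter ID; e.g. (illustrative) with start_idx 0x200 and
 * hw_cntr_id 0x203 the stats land in sw_acc_tbl[dir][3]. Parent counters
 * are updated as well when the entry belongs to a child flow.
 */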
static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
                                    struct tf *tfp,
                                    struct bnxt_ulp_fc_info *fc_info,
                                    enum tf_dir dir,
                                    uint32_t hw_cntr_id,
                                    struct bnxt_ulp_device_params *dparms)
{
        int rc = 0;
        struct tf_get_tbl_entry_parms parms = { 0 };
        enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD:Template? */
        struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
        uint64_t stats = 0;
        uint32_t sw_cntr_indx = 0;

        parms.dir = dir;
        parms.type = stype;
        parms.idx = hw_cntr_id;
        /*
         * TODO:
         * Size of an entry needs to be obtained from the template
         */
        parms.data_sz_in_bytes = sizeof(uint64_t);
        parms.data = (uint8_t *)&stats;
        rc = tf_get_tbl_entry(tfp, &parms);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "Get failed for id:0x%x rc:%d\n",
                            parms.idx, rc);
                return rc;
        }

        /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
        sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
        sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
        /* Some DPDK applications may accumulate the flow counters themselves
         * while others may not. When the application does the accumulation,
         * the PMD need not accumulate as well, and vice versa, so that the
         * reported flow counters stay correct.
         */
        if (ctxt->cfg_data->accum_stats) {
                sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
                sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
        } else {
                sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
                sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
        }

        /* Update the parent counters if it is a child flow */
        if (sw_acc_tbl_entry->parent_flow_id) {
                /* Update the parent counters */
                t_sw = sw_acc_tbl_entry;
                if (ulp_flow_db_parent_flow_count_update(ctxt,
                                                         t_sw->parent_flow_id,
                                                         t_sw->pkt_count,
                                                         t_sw->byte_count)) {
                        PMD_DRV_LOG(ERR, "Error updating parent counters\n");
                }
        }

        return rc;
}

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
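/*
 * Lifecycle: the callback re-arms itself on success, and also when the
 * ulp context lock cannot be acquired; it cancels itself when no counter
 * entries are active or when a counter read fails, so a failing command
 * is not retried every ULP_FC_TIMER seconds.
 */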

void
ulp_fc_mgr_alarm_cb(void *arg __rte_unused)
{
        int rc = 0;
        unsigned int j;
        enum tf_dir i;
        struct bnxt_ulp_context *ctxt;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct bnxt_ulp_device_params *dparms;
        struct tf *tfp;
        uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;

        ctxt = bnxt_ulp_cntxt_entry_acquire();
        if (ctxt == NULL) {
                BNXT_TF_DBG(INFO, "could not get the ulp context lock\n");
                rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                                  ulp_fc_mgr_alarm_cb, NULL);
                return;
        }

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info) {
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        tfp = bnxt_ulp_cntxt_tfp_get(ctxt, BNXT_ULP_SHARED_SESSION_NO);
        if (!tfp) {
                BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
                bnxt_ulp_cntxt_entry_release();
                return;
        }

        /*
         * Take the fc_lock to ensure no flow is destroyed
         * during the bulk get
         */
        if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
                goto out;

        if (!ulp_fc_info->num_entries) {
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
                ulp_fc_mgr_thread_cancel(ctxt);
                bnxt_ulp_cntxt_entry_release();
                return;
        }
        /*
         * Commented out for now until GET_BULK is resolved; fetch each flow
         * stat individually instead.
         for (i = 0; i < TF_DIR_MAX; i++) {
                rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
                                             dparms);
                if (rc)
                        break;
        }
         */

        /* reset the parent accumulation counters before accumulation if any */
        ulp_flow_db_parent_flow_count_reset(ctxt);

        num_entries = dparms->flow_count_db_entries / 2;
        for (i = 0; i < TF_DIR_MAX; i++) {
                for (j = 0; j < num_entries; j++) {
                        if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
                                continue;
                        hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
                        rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
                                                      hw_cntr_id, dparms);
                        if (rc)
                                break;
                }
        }

        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        /*
         * If the cmd fails once, there is no need to invoke it
         * again every second
         */

        if (rc) {
                ulp_fc_mgr_thread_cancel(ctxt);
                bnxt_ulp_cntxt_entry_release();
                return;
        }
out:
        bnxt_ulp_cntxt_entry_release();
        rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                          ulp_fc_mgr_alarm_cb, NULL);
}

/*
 * Check whether the starting index that indicates the first HW flow
 * counter ID has been set
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (ulp_fc_info)
                return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;

        return false;
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                                 uint32_t start_idx)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EIO;

        if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
                ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
                ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
        }

        return 0;
}

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of num of active counter enabled
 * flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
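/*
 * Note (assumption, the call sites live outside this file): cntr_set() and
 * cntr_reset() are expected to be paired over the lifetime of a
 * counter-enabled flow, set when the flow is created and reset when it is
 * destroyed, which is what keeps num_entries in sync.
 */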
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                            uint32_t hw_cntr_id)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
        ulp_fc_info->num_entries++;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                              uint32_t hw_cntr_id)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
        ulp_fc_info->num_entries--;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The HW flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
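/*
 * Illustrative application-side usage (not part of this driver): a flow
 * created with an RTE_FLOW_ACTION_TYPE_COUNT action would typically reach
 * this function through rte_flow_query(), e.g.:
 *
 *      struct rte_flow_query_count cnt = { .reset = 0 };
 *      struct rte_flow_action action = {
 *              .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *      };
 *      struct rte_flow_error error;
 *
 *      rte_flow_query(port_id, flow, &action, &cnt, &error);
 */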
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
                               uint32_t flow_id,
                               struct rte_flow_query_count *count)
{
        int rc = 0;
        uint32_t nxt_resource_index = 0;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct ulp_flow_db_res_params params;
        enum tf_dir dir;
        uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
        struct sw_acc_counter *sw_acc_tbl_entry;
        bool found_cntr_resource = false;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -ENODEV;

        if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
                return -EIO;

        do {
                rc = ulp_flow_db_resource_get(ctxt,
                                              BNXT_ULP_FDB_TYPE_REGULAR,
                                              flow_id,
                                              &nxt_resource_index,
                                              &params);
                if (params.resource_func ==
                     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
                     (params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
                      params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT ||
                      params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC)) {
                        found_cntr_resource = true;
                        break;
                }
        } while (!rc && nxt_resource_index);

        bnxt_ulp_cntxt_release_fdb_lock(ctxt);

        if (rc || !found_cntr_resource)
                return rc;

        dir = params.direction;
        hw_cntr_id = params.resource_hndl;
        if (params.resource_sub_type ==
                        BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
                /* TODO:
                 * Think about optimizing with try_lock later
                 */
                pthread_mutex_lock(&ulp_fc_info->fc_lock);
                sw_cntr_idx = hw_cntr_id -
                        ulp_fc_info->shadow_hw_tbl[dir].start_idx;
                sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
                if (sw_acc_tbl_entry->pkt_count) {
                        count->hits_set = 1;
                        count->bytes_set = 1;
                        count->hits = sw_acc_tbl_entry->pkt_count;
                        count->bytes = sw_acc_tbl_entry->byte_count;
                }
                if (count->reset) {
                        sw_acc_tbl_entry->pkt_count = 0;
                        sw_acc_tbl_entry->byte_count = 0;
                }
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
        } else if (params.resource_sub_type ==
                        BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
                /* Get stats from the parent child table */
                ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
                                                  &count->hits, &count->bytes,
                                                  count->reset);
                count->hits_set = 1;
                count->bytes_set = 1;
        } else {
                /* TBD: Handle External counters */
                rc = -EINVAL;
        }

        return rc;
}

/*
 * Set the parent flow if it is a SW accumulation counter entry.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * fid [in] The parent flow ID
 *
 */
int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
                                        enum tf_dir dir,
                                        uint32_t hw_cntr_id,
                                        uint32_t fid)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;
        int32_t rc = 0;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
                ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
        } else {
                BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
                            hw_cntr_id, fid);
                rc = -ENOENT;
        }
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return rc;
}