65029139e6cb61a83e1769b6ddb1fb8ce1610e6e
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_fc_mgr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include <rte_common.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
9 #include <rte_log.h>
10 #include <rte_alarm.h>
11 #include "bnxt.h"
12 #include "bnxt_ulp.h"
13 #include "bnxt_tf_common.h"
14 #include "ulp_fc_mgr.h"
15 #include "ulp_flow_db.h"
16 #include "ulp_template_db_enum.h"
17 #include "ulp_template_struct.h"
18 #include "tf_tbl.h"
19
20 static int
21 ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
22 {
23         /* Allocate memory*/
24         if (!parms)
25                 return -EINVAL;
26
27         parms->mem_va = rte_zmalloc("ulp_fc_info",
28                                     RTE_CACHE_LINE_ROUNDUP(size),
29                                     4096);
30         if (!parms->mem_va) {
31                 BNXT_TF_DBG(ERR, "Allocate failed mem_va\n");
32                 return -ENOMEM;
33         }
34
35         rte_mem_lock_page(parms->mem_va);
36
37         parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
38         if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
39                 BNXT_TF_DBG(ERR, "Allocate failed mem_pa\n");
40                 return -ENOMEM;
41         }
42
43         return 0;
44 }
45
46 static void
47 ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
48 {
49         rte_free(parms->mem_va);
50 }
51
52 /*
53  * Allocate and Initialize all Flow Counter Manager resources for this ulp
54  * context.
55  *
56  * ctxt [in] The ulp context for the Flow Counter manager.
57  *
58  */
59 int32_t
60 ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
61 {
62         struct bnxt_ulp_device_params *dparms;
63         uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
64         struct bnxt_ulp_fc_info *ulp_fc_info;
65         int i, rc;
66
67         if (!ctxt) {
68                 BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
69                 return -EINVAL;
70         }
71
72         if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
73                 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
74                 return -EINVAL;
75         }
76
77         dparms = bnxt_ulp_device_params_get(dev_id);
78         if (!dparms) {
79                 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
80                 return -EINVAL;
81         }
82
83         ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
84         if (!ulp_fc_info)
85                 goto error;
86
87         rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
88         if (rc) {
89                 PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
90                 goto error;
91         }
92
93         /* Add the FC info tbl to the ulp context. */
94         bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
95
96         sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
97                                 dparms->flow_count_db_entries;
98
99         for (i = 0; i < TF_DIR_MAX; i++) {
100                 ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
101                                                          sw_acc_cntr_tbl_sz, 0);
102                 if (!ulp_fc_info->sw_acc_tbl[i])
103                         goto error;
104         }
105
106         hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;
107
108         for (i = 0; i < TF_DIR_MAX; i++) {
109                 rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
110                                                  hw_fc_mem_info_sz);
111                 if (rc)
112                         goto error;
113         }
114
115         return 0;
116
117 error:
118         ulp_fc_mgr_deinit(ctxt);
119         BNXT_TF_DBG(DEBUG,
120                     "Failed to allocate memory for fc mgr\n");
121
122         return -ENOMEM;
123 }
124
125 /*
126  * Release all resources in the Flow Counter Manager for this ulp context
127  *
128  * ctxt [in] The ulp context for the Flow Counter manager
129  *
130  */
131 int32_t
132 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
133 {
134         struct bnxt_ulp_fc_info *ulp_fc_info;
135         int i;
136
137         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
138
139         if (!ulp_fc_info)
140                 return -EINVAL;
141
142         ulp_fc_mgr_thread_cancel(ctxt);
143
144         pthread_mutex_destroy(&ulp_fc_info->fc_lock);
145
146         for (i = 0; i < TF_DIR_MAX; i++)
147                 rte_free(ulp_fc_info->sw_acc_tbl[i]);
148
149         for (i = 0; i < TF_DIR_MAX; i++)
150                 ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);
151
152         rte_free(ulp_fc_info);
153
154         /* Safe to ignore on deinit */
155         (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
156
157         return 0;
158 }
159
160 /*
161  * Check if the alarm thread that walks through the flows is started
162  *
163  * ctxt [in] The ulp context for the flow counter manager
164  *
165  */
166 bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
167 {
168         struct bnxt_ulp_fc_info *ulp_fc_info;
169
170         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
171
172         return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
173 }
174
175 /*
176  * Setup the Flow counter timer thread that will fetch/accumulate raw counter
177  * data from the chip's internal flow counters
178  *
179  * ctxt [in] The ulp context for the flow counter manager
180  *
181  */
182 int32_t
183 ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
184 {
185         struct bnxt_ulp_fc_info *ulp_fc_info;
186
187         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
188
189         if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
190                 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
191                                   ulp_fc_mgr_alarm_cb,
192                                   (void *)ctxt);
193                 ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
194         }
195
196         return 0;
197 }
198
199 /*
200  * Cancel the alarm handler
201  *
202  * ctxt [in] The ulp context for the flow counter manager
203  *
204  */
205 void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
206 {
207         struct bnxt_ulp_fc_info *ulp_fc_info;
208
209         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
210         if (!ulp_fc_info)
211                 return;
212
213         ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
214         rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
215 }
216
217 /*
218  * DMA-in the raw counter data from the HW and accumulate in the
219  * local accumulator table using the TF-Core API
220  *
221  * tfp [in] The TF-Core context
222  *
223  * fc_info [in] The ULP Flow counter info ptr
224  *
225  * dir [in] The direction of the flow
226  *
227  * num_counters [in] The number of counters
228  *
229  */
__rte_unused static int32_t
ulp_bulk_get_flow_stats(struct tf *tfp,
			struct bnxt_ulp_fc_info *fc_info,
			enum tf_dir dir,
			struct bnxt_ulp_device_params *dparms)
/* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
{
	int rc = 0;
	struct tf_tbl_get_bulk_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL;
	uint64_t *stats = NULL;
	uint16_t i = 0;

	/* Bulk-read raw 64-bit stats for one direction into the DMA shadow
	 * table, then fold them into the SW accumulator entries.
	 */
	parms.dir = dir;
	parms.type = stype;
	parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
	parms.num_entries = dparms->flow_count_db_entries / 2; /* direction */
	/*
	 * TODO:
	 * Size of an entry needs to obtained from template
	 */
	parms.entry_sz_in_bytes = sizeof(uint64_t);
	stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
	/* HW DMAs the stats into the physical address of the shadow table */
	parms.physical_mem_addr = (uint64_t)
		((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));

	if (!stats) {
		PMD_DRV_LOG(ERR,
			    "BULK: Memory not initialized id:0x%x dir:%d\n",
			    parms.starting_idx, dir);
		return -EINVAL;
	}

	rc = tf_tbl_bulk_get(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "BULK: Get failed for id:0x%x rc:%d\n",
			    parms.starting_idx, rc);
		return rc;
	}

	/* Accumulate only entries that correspond to active flows; stale
	 * (invalid) slots are skipped so old HW residue is not counted.
	 */
	for (i = 0; i < parms.num_entries; i++) {
		/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
		sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
		if (!sw_acc_tbl_entry->valid)
			continue;
		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
							      dparms);
		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
								dparms);
	}

	return rc;
}
285
/* Read one HW flow counter and update its SW accumulator entry.
 * Caller is expected to hold fc_lock (the alarm callback takes it before
 * iterating) -- NOTE(review): not enforced here, confirm at call sites.
 */
static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
				    struct tf *tfp,
				    struct bnxt_ulp_fc_info *fc_info,
				    enum tf_dir dir,
				    uint32_t hw_cntr_id,
				    struct bnxt_ulp_device_params *dparms)
{
	int rc = 0;
	struct tf_get_tbl_entry_parms parms = { 0 };
	enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD:Template? */
	struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
	uint64_t stats = 0;
	uint32_t sw_cntr_indx = 0;

	parms.dir = dir;
	parms.type = stype;
	parms.idx = hw_cntr_id;
	/*
	 * TODO:
	 * Size of an entry needs to obtained from template
	 */
	parms.data_sz_in_bytes = sizeof(uint64_t);
	parms.data = (uint8_t *)&stats;
	rc = tf_get_tbl_entry(tfp, &parms);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Get failed for id:0x%x rc:%d\n",
			    parms.idx, rc);
		return rc;
	}

	/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
	/* SW accumulator index is the offset from this direction's base
	 * HW counter ID; no bounds check here -- assumes hw_cntr_id >=
	 * start_idx and within table size (TODO confirm at call sites).
	 */
	sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
	sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
	/* Some dpdk applications may accumulate the flow counters while some
	 * may not. In cases where the application is accumulating the counters
	 * the PMD need not do the accumulation itself and viceversa to report
	 * the correct flow counters.
	 */
	if (ctxt->cfg_data->accum_stats) {
		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
	} else {
		sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
		sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
	}

	/* Update the parent counters if it is child flow */
	if (sw_acc_tbl_entry->parent_flow_id) {
		/* Update the parent counters; the running totals of this
		 * child entry are propagated to the parent flow record.
		 */
		t_sw = sw_acc_tbl_entry;
		if (ulp_flow_db_parent_flow_count_update(ctxt,
							 t_sw->parent_flow_id,
							 t_sw->pkt_count,
							 t_sw->byte_count)) {
			PMD_DRV_LOG(ERR, "Error updating parent counters\n");
		}
	}

	return rc;
}
347
348 /*
349  * Alarm handler that will issue the TF-Core API to fetch
350  * data from the chip's internal flow counters
351  *
352  * ctxt [in] The ulp context for the flow counter manager
353  *
354  */
355
void
ulp_fc_mgr_alarm_cb(void *arg)
{
	int rc = 0;
	unsigned int j;
	enum tf_dir i;
	struct bnxt_ulp_context *ctxt = arg;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_device_params *dparms;
	struct tf *tfp;
	uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;

	/* Periodic poll: walk every valid SW accumulator entry in both
	 * directions and refresh it from HW, then re-arm the alarm.
	 */
	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	/* NOTE(review): the early returns below bail out without re-arming
	 * the alarm or clearing ULP_FLAG_FC_THREAD -- polling silently
	 * stops if these transient lookups fail; confirm that is intended.
	 */
	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
		return;
	}

	tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return;
	}

	/*
	 * Take the fc_lock to ensure no flow is destroyed
	 * during the bulk get
	 */
	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
		goto out;	/* lock busy: skip this cycle, re-arm below */

	/* No active counters left: stop polling until a flow re-arms it */
	if (!ulp_fc_info->num_entries) {
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
	/*
	 * Commented for now till GET_BULK is resolved, just get the first flow
	 * stat for now
	 for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
					     dparms->flow_count_db_entries);
		if (rc)
			break;
	}
	*/

	/* reset the parent accumulation counters before accumulation if any */
	ulp_flow_db_parent_flow_count_reset(ctxt);

	/* Half the flow-counter DB entries per direction */
	num_entries = dparms->flow_count_db_entries / 2;
	for (i = 0; i < TF_DIR_MAX; i++) {
		for (j = 0; j < num_entries; j++) {
			if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
				continue;
			hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
			rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
						      hw_cntr_id, dparms);
			if (rc)
				break;
		}
	}

	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	/*
	 * If cmd fails once, no need of
	 * invoking again every second
	 */

	if (rc) {
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
out:
	/* Re-arm ourselves for the next polling interval */
	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
			  ulp_fc_mgr_alarm_cb,
			  (void *)ctxt);
}
444
/*
 * Check whether the starting index (the first HW flow counter ID)
 * has been set for the given direction
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
456 bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
457 {
458         struct bnxt_ulp_fc_info *ulp_fc_info;
459
460         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
461
462         return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
463 }
464
465 /*
466  * Set the starting index that indicates the first HW flow
467  * counter ID
468  *
469  * ctxt [in] The ulp context for the flow counter manager
470  *
471  * dir [in] The direction of the flow
472  *
473  * start_idx [in] The HW flow counter ID
474  *
475  */
476 int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
477                                  uint32_t start_idx)
478 {
479         struct bnxt_ulp_fc_info *ulp_fc_info;
480
481         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
482
483         if (!ulp_fc_info)
484                 return -EIO;
485
486         if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
487                 ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
488                 ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
489         }
490
491         return 0;
492 }
493
494 /*
495  * Set the corresponding SW accumulator table entry based on
496  * the difference between this counter ID and the starting
497  * counter ID. Also, keep track of num of active counter enabled
498  * flows.
499  *
500  * ctxt [in] The ulp context for the flow counter manager
501  *
502  * dir [in] The direction of the flow
503  *
504  * hw_cntr_id [in] The HW flow counter ID
505  *
506  */
507 int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
508                             uint32_t hw_cntr_id)
509 {
510         struct bnxt_ulp_fc_info *ulp_fc_info;
511         uint32_t sw_cntr_idx;
512
513         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
514         if (!ulp_fc_info)
515                 return -EIO;
516
517         pthread_mutex_lock(&ulp_fc_info->fc_lock);
518         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
519         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
520         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
521         ulp_fc_info->num_entries++;
522         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
523
524         return 0;
525 }
526
527 /*
528  * Reset the corresponding SW accumulator table entry based on
529  * the difference between this counter ID and the starting
530  * counter ID.
531  *
532  * ctxt [in] The ulp context for the flow counter manager
533  *
534  * dir [in] The direction of the flow
535  *
536  * hw_cntr_id [in] The HW flow counter ID
537  *
538  */
539 int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
540                               uint32_t hw_cntr_id)
541 {
542         struct bnxt_ulp_fc_info *ulp_fc_info;
543         uint32_t sw_cntr_idx;
544
545         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
546         if (!ulp_fc_info)
547                 return -EIO;
548
549         pthread_mutex_lock(&ulp_fc_info->fc_lock);
550         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
551         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
552         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
553         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
554         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
555         ulp_fc_info->num_entries--;
556         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
557
558         return 0;
559 }
560
561 /*
562  * Fill the rte_flow_query_count 'data' argument passed
563  * in the rte_flow_query() with the values obtained and
564  * accumulated locally.
565  *
566  * ctxt [in] The ulp context for the flow counter manager
567  *
568  * flow_id [in] The HW flow ID
569  *
570  * count [out] The rte_flow_query_count 'data' that is set
571  *
572  */
573 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
574                                uint32_t flow_id,
575                                struct rte_flow_query_count *count)
576 {
577         int rc = 0;
578         uint32_t nxt_resource_index = 0;
579         struct bnxt_ulp_fc_info *ulp_fc_info;
580         struct ulp_flow_db_res_params params;
581         enum tf_dir dir;
582         uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
583         struct sw_acc_counter *sw_acc_tbl_entry;
584         bool found_cntr_resource = false;
585
586         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
587         if (!ulp_fc_info)
588                 return -ENODEV;
589
590         if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
591                 return -EIO;
592
593         do {
594                 rc = ulp_flow_db_resource_get(ctxt,
595                                               BNXT_ULP_FDB_TYPE_REGULAR,
596                                               flow_id,
597                                               &nxt_resource_index,
598                                               &params);
599                 if (params.resource_func ==
600                      BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
601                      (params.resource_sub_type ==
602                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
603                       params.resource_sub_type ==
604                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT ||
605                       params.resource_sub_type ==
606                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC)) {
607                         found_cntr_resource = true;
608                         break;
609                 }
610         } while (!rc && nxt_resource_index);
611
612         bnxt_ulp_cntxt_release_fdb_lock(ctxt);
613
614         if (rc || !found_cntr_resource)
615                 return rc;
616
617         dir = params.direction;
618         hw_cntr_id = params.resource_hndl;
619         if (params.resource_sub_type ==
620                         BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
621                 /* TODO:
622                  * Think about optimizing with try_lock later
623                  */
624                 pthread_mutex_lock(&ulp_fc_info->fc_lock);
625                 sw_cntr_idx = hw_cntr_id -
626                         ulp_fc_info->shadow_hw_tbl[dir].start_idx;
627                 sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
628                 if (sw_acc_tbl_entry->pkt_count) {
629                         count->hits_set = 1;
630                         count->bytes_set = 1;
631                         count->hits = sw_acc_tbl_entry->pkt_count;
632                         count->bytes = sw_acc_tbl_entry->byte_count;
633                 }
634                 if (count->reset) {
635                         sw_acc_tbl_entry->pkt_count = 0;
636                         sw_acc_tbl_entry->byte_count = 0;
637                 }
638                 pthread_mutex_unlock(&ulp_fc_info->fc_lock);
639         } else if (params.resource_sub_type ==
640                         BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
641                 /* Get stats from the parent child table */
642                 ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
643                                                   &count->hits, &count->bytes,
644                                                   count->reset);
645                 count->hits_set = 1;
646                 count->bytes_set = 1;
647         } else {
648                 /* TBD: Handle External counters */
649                 rc = -EINVAL;
650         }
651
652         return rc;
653 }
654
655 /*
656  * Set the parent flow if it is SW accumulation counter entry.
657  *
658  * ctxt [in] The ulp context for the flow counter manager
659  *
660  * dir [in] The direction of the flow
661  *
662  * hw_cntr_id [in] The HW flow counter ID
663  *
664  * fid [in] parent flow id
665  *
666  */
667 int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
668                                         enum tf_dir dir,
669                                         uint32_t hw_cntr_id,
670                                         uint32_t fid)
671 {
672         struct bnxt_ulp_fc_info *ulp_fc_info;
673         uint32_t sw_cntr_idx;
674         int32_t rc = 0;
675
676         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
677         if (!ulp_fc_info)
678                 return -EIO;
679
680         pthread_mutex_lock(&ulp_fc_info->fc_lock);
681         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
682         if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
683                 ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
684         } else {
685                 BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
686                             hw_cntr_id, fid);
687                 rc = -ENOENT;
688         }
689         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
690
691         return rc;
692 }