net/bnxt: add ULP priority opcode processing
dpdk.git: drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "tf_tbl.h"

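/*
 * Allocate a page-aligned, DMA-able shadow table that the bulk counter
 * read below uses as its destination buffer.
 *
 * parms [out] The shadow memory info to populate
 *
 * size [in] Size of the table in bytes
 */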
static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
        /* Allocate memory */
        if (!parms)
                return -EINVAL;

        parms->mem_va = rte_zmalloc("ulp_fc_info",
                                    RTE_CACHE_LINE_ROUNDUP(size),
                                    4096);
        if (!parms->mem_va) {
                BNXT_TF_DBG(ERR, "Failed to allocate mem_va\n");
                return -ENOMEM;
        }

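        /* Lock the page so the HW DMA target stays resident in memory */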
        rte_mem_lock_page(parms->mem_va);

        parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
        if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
                BNXT_TF_DBG(ERR, "Failed to map mem_pa\n");
                return -ENOMEM;
        }

        return 0;
}

static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
        rte_free(parms->mem_va);
}

/*
 * Allocate and Initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_device_params *dparms;
        uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        int i, rc;

        if (!ctxt) {
                BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
                return -EINVAL;
        }

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
                return -EINVAL;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
                return -EINVAL;
        }

        ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
        if (!ulp_fc_info)
                goto error;

        rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
        if (rc) {
                PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
                goto error;
        }

        /* Add the FC info tbl to the ulp context. */
        bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

        sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
                                dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
                                                         sw_acc_cntr_tbl_sz, 0);
                if (!ulp_fc_info->sw_acc_tbl[i])
                        goto error;
        }

        hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
                                                 hw_fc_mem_info_sz);
                if (rc)
                        goto error;
        }

        return 0;

error:
        ulp_fc_mgr_deinit(ctxt);
        BNXT_TF_DBG(DEBUG,
                    "Failed to allocate memory for fc mgr\n");

        return -ENOMEM;
}
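
/*
 * Example pairing (a sketch; assumes ulp_ctx is a valid ULP context from
 * the port init/deinit path):
 *
 *	rc = ulp_fc_mgr_init(ulp_ctx);
 *	if (rc)
 *		return rc;
 *	...
 *	ulp_fc_mgr_deinit(ulp_ctx);
 */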

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        int i;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EINVAL;

        ulp_fc_mgr_thread_cancel(ctxt);

        pthread_mutex_destroy(&ulp_fc_info->fc_lock);

        for (i = 0; i < TF_DIR_MAX; i++)
                rte_free(ulp_fc_info->sw_acc_tbl[i]);

        for (i = 0; i < TF_DIR_MAX; i++)
                ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);

        rte_free(ulp_fc_info);

        /* Safe to ignore on deinit */
        (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

        return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return false;

        return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
}

/*
 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
                rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                                  ulp_fc_mgr_alarm_cb,
                                  (void *)ctxt);
                ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
        }

        return 0;
}
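
/*
 * Note: rte_eal_alarm_set() arms a one-shot alarm that fires after
 * ULP_FC_TIMER seconds, so ulp_fc_mgr_alarm_cb() must re-arm itself at
 * the end of every pass; cancelling clears the flag and any pending shot.
 */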

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return;

        ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
        rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
}

/*
 * DMA-in the raw counter data from the HW and accumulate in the
 * local accumulator table using the TF-Core API
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * dparms [in] The device parameters for the flow counter manager
 *
 */
__rte_unused static int32_t
ulp_bulk_get_flow_stats(struct tf *tfp,
                        struct bnxt_ulp_fc_info *fc_info,
                        enum tf_dir dir,
                        struct bnxt_ulp_device_params *dparms)
/* Marked unused for now to avoid compilation errors until the API is resolved */
{
        int rc = 0;
        struct tf_tbl_get_bulk_parms parms = { 0 };
        enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
        struct sw_acc_counter *sw_acc_tbl_entry = NULL;
        uint64_t *stats = NULL;
        uint16_t i = 0;

        parms.dir = dir;
        parms.type = stype;
        parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
        parms.num_entries = dparms->flow_count_db_entries / 2; /* per direction */
        /*
         * TODO:
         * Size of an entry needs to be obtained from template
         */
        parms.entry_sz_in_bytes = sizeof(uint64_t);
        stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
        parms.physical_mem_addr = (uint64_t)
                ((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));

        if (!stats) {
                PMD_DRV_LOG(ERR,
                            "BULK: Memory not initialized id:0x%x dir:%d\n",
                            parms.starting_idx, dir);
                return -EINVAL;
        }

        rc = tf_tbl_bulk_get(tfp, &parms);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "BULK: Get failed for id:0x%x rc:%d\n",
                            parms.starting_idx, rc);
                return rc;
        }

        for (i = 0; i < parms.num_entries; i++) {
                /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
                sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
                if (!sw_acc_tbl_entry->valid)
                        continue;
                sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i],
                                                              dparms);
                sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
                                                                dparms);
        }

        return rc;
}
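
/*
 * Note: FLOW_CNTR_PKTS() and FLOW_CNTR_BYTES() (see ulp_fc_mgr.h) unpack
 * the packet and byte counts that the HW packs into one 64-bit stats word.
 * A sketch of the assumed shape, with the shift/mask coming from the
 * per-device parameters (field names here are illustrative):
 *
 *	#define FLOW_CNTR_PKTS(v, d) \
 *		(((v) & (d)->packet_count_mask) >> (d)->packet_count_shift)
 *	#define FLOW_CNTR_BYTES(v, d) \
 *		(((v) & (d)->byte_count_mask) >> (d)->byte_count_shift)
 */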
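
/*
 * Fetch a single counter from the HW and refresh the corresponding SW
 * accumulator entry. Unlike the bulk path above, this assigns the
 * HW-accumulated values rather than adding to the SW counts.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * dparms [in] The device parameters for the flow counter manager
 *
 */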
static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
                                    struct tf *tfp,
                                    struct bnxt_ulp_fc_info *fc_info,
                                    enum tf_dir dir,
                                    uint32_t hw_cntr_id,
                                    struct bnxt_ulp_device_params *dparms)
{
        int rc = 0;
        struct tf_get_tbl_entry_parms parms = { 0 };
        enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
        struct sw_acc_counter *sw_acc_tbl_entry = NULL, *t_sw;
        uint64_t stats = 0;
        uint32_t sw_cntr_indx = 0;

        parms.dir = dir;
        parms.type = stype;
        parms.idx = hw_cntr_id;
        /*
         * TODO:
         * Size of an entry needs to be obtained from template
         */
        parms.data_sz_in_bytes = sizeof(uint64_t);
        parms.data = (uint8_t *)&stats;
        rc = tf_get_tbl_entry(tfp, &parms);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "Get failed for id:0x%x rc:%d\n",
                            parms.idx, rc);
                return rc;
        }

        /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
        sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
        sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
        sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
        sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);

        /* Update the parent counters if it is a child flow */
        if (sw_acc_tbl_entry->parent_flow_id) {
                t_sw = sw_acc_tbl_entry;
                if (ulp_flow_db_parent_flow_count_update(ctxt,
                                                         t_sw->parent_flow_id,
                                                         t_sw->pkt_count,
                                                         t_sw->byte_count)) {
                        PMD_DRV_LOG(ERR, "Error updating parent counters\n");
                }
        }

        return rc;
}

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void
ulp_fc_mgr_alarm_cb(void *arg)
{
        int rc = 0;
        unsigned int j;
        enum tf_dir i;
        struct bnxt_ulp_context *ctxt = arg;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct bnxt_ulp_device_params *dparms;
        struct tf *tfp;
        uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return;

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
                return;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
                return;
        }

        tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
        if (!tfp) {
                BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
                return;
        }

        /*
         * Take the fc_lock to ensure no flow is destroyed
         * during the bulk get
         */
        if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
                goto out;

        if (!ulp_fc_info->num_entries) {
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
                ulp_fc_mgr_thread_cancel(ctxt);
                return;
        }
        /*
         * Commented out until GET_BULK is resolved; fetch the stats one
         * counter at a time for now:
         *
         * for (i = 0; i < TF_DIR_MAX; i++) {
         *	rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i, dparms);
         *	if (rc)
         *		break;
         * }
         */

        /* Reset the parent accumulation counters before accumulating, if any */
        ulp_flow_db_parent_flow_count_reset(ctxt);

        num_entries = dparms->flow_count_db_entries / 2;
        for (i = 0; i < TF_DIR_MAX; i++) {
                for (j = 0; j < num_entries; j++) {
                        if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
                                continue;
                        hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
                        rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, i,
                                                      hw_cntr_id, dparms);
                        if (rc)
                                break;
                }
        }

        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        /*
         * If the command fails once, there is no need to invoke it
         * again every second
         */
        if (rc) {
                ulp_fc_mgr_thread_cancel(ctxt);
                return;
        }
out:
        rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                          ulp_fc_mgr_alarm_cb,
                          (void *)ctxt);
}

/*
 * Check whether the starting index that indicates the first HW flow
 * counter ID has been set
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return false;

        return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set;
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                                 uint32_t start_idx)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EIO;

        if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) {
                ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
                ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true;
        }

        return 0;
}
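
/*
 * Note: the base index is latched on first use and later calls are no-ops;
 * the SW index math (hw_cntr_id - start_idx) therefore assumes the HW
 * counters for a direction come from one contiguous pool.
 */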

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of num of active counter enabled
 * flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                            uint32_t hw_cntr_id)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
        ulp_fc_info->num_entries++;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}
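
/*
 * A sketch of the expected sequence when a counter-enabled flow is created
 * (the real call sites live in the ULP mapper; this is illustrative only):
 *
 *	if (!ulp_fc_mgr_start_idx_isset(ulp_ctx, dir))
 *		ulp_fc_mgr_start_idx_set(ulp_ctx, dir, hw_cntr_id);
 *	ulp_fc_mgr_cntr_set(ulp_ctx, dir, hw_cntr_id);
 *	if (!ulp_fc_mgr_thread_isstarted(ulp_ctx))
 *		ulp_fc_mgr_thread_start(ulp_ctx);
 */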

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                              uint32_t hw_cntr_id)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
        ulp_fc_info->num_entries--;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The HW flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
                               uint32_t flow_id,
                               struct rte_flow_query_count *count)
{
        int rc = 0;
        uint32_t nxt_resource_index = 0;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct ulp_flow_db_res_params params;
        enum tf_dir dir;
        uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
        struct sw_acc_counter *sw_acc_tbl_entry;
        bool found_cntr_resource = false;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -ENODEV;

        if (bnxt_ulp_cntxt_acquire_fdb_lock(ctxt))
                return -EIO;

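        /* Walk the flow's resource list looking for a flow counter resource */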
        do {
                rc = ulp_flow_db_resource_get(ctxt,
                                              BNXT_ULP_FDB_TYPE_REGULAR,
                                              flow_id,
                                              &nxt_resource_index,
                                              &params);
                /* Stop on error; params is not valid in that case */
                if (rc)
                        break;
                if (params.resource_func ==
                     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
                     (params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT ||
                      params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT ||
                      params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC)) {
                        found_cntr_resource = true;
                        break;
                }
        } while (!rc && nxt_resource_index);

        bnxt_ulp_cntxt_release_fdb_lock(ctxt);

        if (rc || !found_cntr_resource)
                return rc;

        dir = params.direction;
        hw_cntr_id = params.resource_hndl;
        if (params.resource_sub_type ==
                        BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
                /* TODO:
                 * Think about optimizing with try_lock later
                 */
                pthread_mutex_lock(&ulp_fc_info->fc_lock);
                sw_cntr_idx = hw_cntr_id -
                        ulp_fc_info->shadow_hw_tbl[dir].start_idx;
                sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
                if (sw_acc_tbl_entry->pkt_count) {
                        count->hits_set = 1;
                        count->bytes_set = 1;
                        count->hits = sw_acc_tbl_entry->pkt_count;
                        count->bytes = sw_acc_tbl_entry->byte_count;
                }
                if (count->reset) {
                        sw_acc_tbl_entry->pkt_count = 0;
                        sw_acc_tbl_entry->byte_count = 0;
                }
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
        } else if (params.resource_sub_type ==
                        BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
                /* Get the stats from the parent child table */
                ulp_flow_db_parent_flow_count_get(ctxt,
                                                  flow_id,
                                                  &count->hits,
                                                  &count->bytes);
                count->hits_set = 1;
                count->bytes_set = 1;
        } else {
                /* TBD: Handle External counters */
                rc = -EINVAL;
        }

        return rc;
}
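
/*
 * A sketch of how the accumulated counts surface through rte_flow_query()
 * (illustrative; the PMD's rte_flow ops provide the actual glue):
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *
 *	rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, &count);
 *	if (!rc && count.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       count.hits, count.bytes);
 */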

/*
 * Set the parent flow if it is a SW accumulation counter entry.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * fid [in] parent flow id
 *
 */
int32_t ulp_fc_mgr_cntr_parent_flow_set(struct bnxt_ulp_context *ctxt,
                                        enum tf_dir dir,
                                        uint32_t hw_cntr_id,
                                        uint32_t fid)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;
        int32_t rc = 0;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        if (ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid) {
                ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].parent_flow_id = fid;
        } else {
                BNXT_TF_DBG(ERR, "Failed to set parent flow id %x:%x\n",
                            hw_cntr_id, fid);
                rc = -ENOENT;
        }
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return rc;
}