net/bnxt: avoid hard coded values when reading counters
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_fc_mgr.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_alarm.h>
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_fc_mgr.h"
#include "ulp_flow_db.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "tf_tbl.h"

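/*
 * Flow Counter (FC) manager lifecycle (a sketch of how the functions in
 * this file fit together; see each function's comment for details):
 *
 *   ulp_fc_mgr_init(ctxt);                          - allocate the tables
 *   ulp_fc_mgr_start_idx_set(ctxt, dir, start_idx); - first HW counter ID
 *   ulp_fc_mgr_cntr_set(ctxt, dir, hw_cntr_id);     - track a flow counter
 *   ulp_fc_mgr_thread_start(ctxt);                  - periodic accumulation
 *   ...
 *   ulp_fc_mgr_query_count_get(ctxt, flow_id, &count);
 *   ...
 *   ulp_fc_mgr_cntr_reset(ctxt, dir, hw_cntr_id);
 *   ulp_fc_mgr_deinit(ctxt);                        - also cancels the alarm
 */

/*
 * Allocate and lock a page-aligned buffer that shadows the HW flow
 * counter table so it can be DMA-ed in bulk.
 *
 * parms [in] The shadow memory descriptor to fill in
 *
 * size [in] The number of bytes to allocate
 *
 */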
static int
ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
{
        /* Allocate memory */
        if (parms == NULL)
                return -EINVAL;

        parms->mem_va = rte_zmalloc("ulp_fc_info",
                                    RTE_CACHE_LINE_ROUNDUP(size),
                                    4096);
        if (parms->mem_va == NULL) {
                BNXT_TF_DBG(ERR, "Failed to allocate mem_va\n");
                return -ENOMEM;
        }

        rte_mem_lock_page(parms->mem_va);

        parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
        if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
                BNXT_TF_DBG(ERR, "Failed to get mem_pa\n");
                return -ENOMEM;
        }

        return 0;
}

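/*
 * Free the shadow memory allocated by ulp_fc_mgr_shadow_mem_alloc().
 *
 * parms [in] The shadow memory descriptor to free
 *
 */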
static void
ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
{
        rte_free(parms->mem_va);
}

/*
 * Allocate and initialize all Flow Counter Manager resources for this ulp
 * context.
 *
 * ctxt [in] The ulp context for the Flow Counter manager.
 *
 */
int32_t
ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_device_params *dparms;
        uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        int i, rc;

        if (!ctxt) {
                BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
                return -EINVAL;
        }

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
                return -EINVAL;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
                return -EINVAL;
        }

        ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
        if (!ulp_fc_info)
                goto error;

        rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
        if (rc) {
                PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
                goto error;
        }

        /* Add the FC info tbl to the ulp context. */
        bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);

        sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
                                dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
                                                         sw_acc_cntr_tbl_sz, 0);
                if (!ulp_fc_info->sw_acc_tbl[i])
                        goto error;
        }

        hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;

        for (i = 0; i < TF_DIR_MAX; i++) {
                rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
                                                 hw_fc_mem_info_sz);
                if (rc)
                        goto error;
        }

        return 0;

error:
        ulp_fc_mgr_deinit(ctxt);
        BNXT_TF_DBG(DEBUG,
                    "Failed to allocate memory for fc mgr\n");

        return -ENOMEM;
}

/*
 * Release all resources in the Flow Counter Manager for this ulp context
 *
 * ctxt [in] The ulp context for the Flow Counter manager
 *
 */
int32_t
ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        int i;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EINVAL;

        ulp_fc_mgr_thread_cancel(ctxt);

        pthread_mutex_destroy(&ulp_fc_info->fc_lock);

        for (i = 0; i < TF_DIR_MAX; i++)
                rte_free(ulp_fc_info->sw_acc_tbl[i]);

        for (i = 0; i < TF_DIR_MAX; i++)
                ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);

        rte_free(ulp_fc_info);

        /* Safe to ignore on deinit */
        (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);

        return 0;
}

/*
 * Check if the alarm thread that walks through the flows is started
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return false;

        return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
}

/*
 * Setup the Flow counter timer thread that will fetch/accumulate raw counter
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
int32_t
ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
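                /*
                 * Arm the periodic alarm; the callback re-arms itself
                 * every ULP_FC_TIMER seconds while flows are active.
                 */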
                rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                                  ulp_fc_mgr_alarm_cb,
                                  (void *)ctxt);
                ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
        }

        return 0;
}

/*
 * Cancel the alarm handler
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return;

        ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
        rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
}

/*
 * DMA-in the raw counter data from the HW and accumulate in the
 * local accumulator table using the TF-Core API
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * dparms [in] The device parameters for the flow counter
 *
 */
/* Marked unused for now to avoid compilation errors till the API is resolved */
__rte_unused static int32_t
ulp_bulk_get_flow_stats(struct tf *tfp,
                        struct bnxt_ulp_fc_info *fc_info,
                        enum tf_dir dir,
                        struct bnxt_ulp_device_params *dparms)
{
        int rc = 0;
        struct tf_tbl_get_bulk_parms parms = { 0 };
        enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
        struct sw_acc_counter *sw_acc_tbl_entry = NULL;
        uint64_t *stats = NULL;
        uint16_t i = 0;

        parms.dir = dir;
        parms.type = stype;
        parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
        parms.num_entries = dparms->flow_count_db_entries / 2; /* per direction */
        /*
         * TODO:
         * Size of an entry needs to be obtained from the template
         */
        parms.entry_sz_in_bytes = sizeof(uint64_t);
        stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
        parms.physical_mem_addr = (uintptr_t)fc_info->shadow_hw_tbl[dir].mem_pa;

        if (stats == NULL) {
                PMD_DRV_LOG(ERR,
                            "BULK: Memory not initialized id:0x%x dir:%d\n",
                            parms.starting_idx, dir);
                return -EINVAL;
        }

        rc = tf_tbl_bulk_get(tfp, &parms);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "BULK: Get failed for id:0x%x rc:%d\n",
                            parms.starting_idx, rc);
                return rc;
        }

        for (i = 0; i < parms.num_entries; i++) {
                /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from the template */
                sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
                if (!sw_acc_tbl_entry->valid)
                        continue;
                sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i], dparms);
                sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i],
                                                                dparms);
        }

        return rc;
}

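/*
 * Read a single flow counter from the HW through the TF-Core API and
 * accumulate it into the corresponding SW accumulator table entry.
 *
 * tfp [in] The TF-Core context
 *
 * fc_info [in] The ULP Flow counter info ptr
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 * dparms [in] The device parameters for the flow counter
 *
 */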
static int ulp_get_single_flow_stat(struct tf *tfp,
                                    struct bnxt_ulp_fc_info *fc_info,
                                    enum tf_dir dir,
                                    uint32_t hw_cntr_id,
                                    struct bnxt_ulp_device_params *dparms)
{
        int rc = 0;
        struct tf_get_tbl_entry_parms parms = { 0 };
        enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
        struct sw_acc_counter *sw_acc_tbl_entry = NULL;
        uint64_t stats = 0;
        uint32_t sw_cntr_indx = 0;

        parms.dir = dir;
        parms.type = stype;
        parms.idx = hw_cntr_id;
        /*
         * TODO:
         * Size of an entry needs to be obtained from the template
         */
        parms.data_sz_in_bytes = sizeof(uint64_t);
        parms.data = (uint8_t *)&stats;
        rc = tf_get_tbl_entry(tfp, &parms);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "Get failed for id:0x%x rc:%d\n",
                            parms.idx, rc);
                return rc;
        }

        /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from the template */
        sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
        sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
        sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
        sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);

        return rc;
}

/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 */
void
ulp_fc_mgr_alarm_cb(void *arg)
{
        int rc = 0;
        unsigned int j;
        enum tf_dir i;
        struct bnxt_ulp_context *ctxt = arg;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct bnxt_ulp_device_params *dparms;
        struct tf *tfp;
        uint32_t dev_id, hw_cntr_id = 0;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return;

        if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
                BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
                return;
        }

        dparms = bnxt_ulp_device_params_get(dev_id);
        if (!dparms) {
                BNXT_TF_DBG(DEBUG, "Failed to get device parms\n");
                return;
        }

        tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
        if (!tfp) {
                BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
                return;
        }

        /*
         * Take the fc_lock to ensure no flow is destroyed
         * during the bulk get
         */
        if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
                goto out;

        if (!ulp_fc_info->num_entries) {
                pthread_mutex_unlock(&ulp_fc_info->fc_lock);
                ulp_fc_mgr_thread_cancel(ctxt);
                return;
        }
        /*
         * Commented out till GET_BULK is resolved; till then just get each
         * flow stat individually:
         *
         * for (i = 0; i < TF_DIR_MAX; i++) {
         *         rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i, dparms);
         *         if (rc)
         *                 break;
         * }
         */
        for (i = 0; i < TF_DIR_MAX; i++) {
                for (j = 0; j < ulp_fc_info->num_entries; j++) {
                        if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
                                continue;
                        hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
                        rc = ulp_get_single_flow_stat(tfp, ulp_fc_info, i,
                                                      hw_cntr_id, dparms);
                        if (rc)
                                break;
                }
        }

        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        /*
         * If the read fails once, there is no point in
         * invoking it again every second
         */
        if (rc) {
                ulp_fc_mgr_thread_cancel(ctxt);
                return;
        }
out:
        rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
                          ulp_fc_mgr_alarm_cb,
                          (void *)ctxt);
}

/*
 * Check if the starting index that indicates the first HW flow
 * counter ID has been set
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 */
bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return false;

        /* Assuming start_idx of 0 is invalid */
        return (ulp_fc_info->shadow_hw_tbl[dir].start_idx != 0);
}

/*
 * Set the starting index that indicates the first HW flow
 * counter ID
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * start_idx [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                                 uint32_t start_idx)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);

        if (!ulp_fc_info)
                return -EIO;

        /* Assuming that 0 is an invalid counter ID */
        if (ulp_fc_info->shadow_hw_tbl[dir].start_idx == 0)
                ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;

        return 0;
}

/*
 * Set the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID. Also, keep track of the number of active
 * counter-enabled flows.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                            uint32_t hw_cntr_id)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

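        /* Serialize with the alarm callback, which walks this table */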
        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
        ulp_fc_info->num_entries++;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}

/*
 * Reset the corresponding SW accumulator table entry based on
 * the difference between this counter ID and the starting
 * counter ID.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * dir [in] The direction of the flow
 *
 * hw_cntr_id [in] The HW flow counter ID
 *
 */
int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
                              uint32_t hw_cntr_id)
{
        struct bnxt_ulp_fc_info *ulp_fc_info;
        uint32_t sw_cntr_idx;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -EIO;

        pthread_mutex_lock(&ulp_fc_info->fc_lock);
        sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
        ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
        ulp_fc_info->num_entries--;
        pthread_mutex_unlock(&ulp_fc_info->fc_lock);

        return 0;
}

/*
 * Fill the rte_flow_query_count 'data' argument passed
 * in the rte_flow_query() with the values obtained and
 * accumulated locally.
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * flow_id [in] The HW flow ID
 *
 * count [out] The rte_flow_query_count 'data' that is set
 *
 */
int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
                               uint32_t flow_id,
                               struct rte_flow_query_count *count)
{
        int rc = 0;
        uint32_t nxt_resource_index = 0;
        struct bnxt_ulp_fc_info *ulp_fc_info;
        struct ulp_flow_db_res_params params;
        enum tf_dir dir;
        uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
        struct sw_acc_counter sw_acc_tbl_entry;
        bool found_cntr_resource = false;

        ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
        if (!ulp_fc_info)
                return -ENODEV;

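        /* Walk the flow's resources to find its flow counter, if any */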
        do {
                rc = ulp_flow_db_resource_get(ctxt,
                                              BNXT_ULP_REGULAR_FLOW_TABLE,
                                              flow_id,
                                              &nxt_resource_index,
                                              &params);
                if (rc)
                        break;

                if (params.resource_func ==
                     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
                     (params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT ||
                      params.resource_sub_type ==
                      BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_EXT_COUNT)) {
                        found_cntr_resource = true;
                        break;
                }

        } while (!rc);

        if (rc)
                return rc;

        if (found_cntr_resource) {
                dir = params.direction;
                hw_cntr_id = params.resource_hndl;
                sw_cntr_idx = hw_cntr_id -
                                ulp_fc_info->shadow_hw_tbl[dir].start_idx;
                sw_acc_tbl_entry = ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
                if (params.resource_sub_type ==
                        BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) {
                        count->hits_set = 1;
                        count->bytes_set = 1;
                        count->hits = sw_acc_tbl_entry.pkt_count;
                        count->bytes = sw_acc_tbl_entry.byte_count;
                } else {
                        /* TBD: Handle external counters */
                        rc = -EINVAL;
                }
        }

        return rc;
}