net/bnxt: support count action in flow query
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_fc_mgr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include <rte_common.h>
7 #include <rte_malloc.h>
8 #include <rte_log.h>
9 #include <rte_alarm.h>
10 #include "bnxt.h"
11 #include "bnxt_ulp.h"
12 #include "bnxt_tf_common.h"
13 #include "ulp_fc_mgr.h"
14 #include "ulp_flow_db.h"
15 #include "ulp_template_db_enum.h"
16 #include "ulp_template_struct.h"
17 #include "tf_tbl.h"
18
19 static int
20 ulp_fc_mgr_shadow_mem_alloc(struct hw_fc_mem_info *parms, int size)
21 {
22         /* Allocate memory*/
23         if (parms == NULL)
24                 return -EINVAL;
25
26         parms->mem_va = rte_zmalloc("ulp_fc_info",
27                                     RTE_CACHE_LINE_ROUNDUP(size),
28                                     4096);
29         if (parms->mem_va == NULL) {
30                 BNXT_TF_DBG(ERR, "Allocate failed mem_va\n");
31                 return -ENOMEM;
32         }
33
34         rte_mem_lock_page(parms->mem_va);
35
36         parms->mem_pa = (void *)(uintptr_t)rte_mem_virt2phy(parms->mem_va);
37         if (parms->mem_pa == (void *)(uintptr_t)RTE_BAD_IOVA) {
38                 BNXT_TF_DBG(ERR, "Allocate failed mem_pa\n");
39                 return -ENOMEM;
40         }
41
42         return 0;
43 }
44
45 static void
46 ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms)
47 {
48         rte_free(parms->mem_va);
49 }
50
51 /*
52  * Allocate and Initialize all Flow Counter Manager resources for this ulp
53  * context.
54  *
55  * ctxt [in] The ulp context for the Flow Counter manager.
56  *
57  */
58 int32_t
59 ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
60 {
61         struct bnxt_ulp_device_params *dparms;
62         uint32_t dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz;
63         struct bnxt_ulp_fc_info *ulp_fc_info;
64         int i, rc;
65
66         if (!ctxt) {
67                 BNXT_TF_DBG(DEBUG, "Invalid ULP CTXT\n");
68                 return -EINVAL;
69         }
70
71         if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
72                 BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
73                 return -EINVAL;
74         }
75
76         dparms = bnxt_ulp_device_params_get(dev_id);
77         if (!dparms) {
78                 BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
79                 return -EINVAL;
80         }
81
82         ulp_fc_info = rte_zmalloc("ulp_fc_info", sizeof(*ulp_fc_info), 0);
83         if (!ulp_fc_info)
84                 goto error;
85
86         rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
87         if (rc) {
88                 PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
89                 goto error;
90         }
91
92         /* Add the FC info tbl to the ulp context. */
93         bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info);
94
95         sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) *
96                                 dparms->flow_count_db_entries;
97
98         for (i = 0; i < TF_DIR_MAX; i++) {
99                 ulp_fc_info->sw_acc_tbl[i] = rte_zmalloc("ulp_sw_acc_cntr_tbl",
100                                                          sw_acc_cntr_tbl_sz, 0);
101                 if (!ulp_fc_info->sw_acc_tbl[i])
102                         goto error;
103         }
104
105         hw_fc_mem_info_sz = sizeof(uint64_t) * dparms->flow_count_db_entries;
106
107         for (i = 0; i < TF_DIR_MAX; i++) {
108                 rc = ulp_fc_mgr_shadow_mem_alloc(&ulp_fc_info->shadow_hw_tbl[i],
109                                                  hw_fc_mem_info_sz);
110                 if (rc)
111                         goto error;
112         }
113
114         return 0;
115
116 error:
117         ulp_fc_mgr_deinit(ctxt);
118         BNXT_TF_DBG(DEBUG,
119                     "Failed to allocate memory for fc mgr\n");
120
121         return -ENOMEM;
122 }
123
124 /*
125  * Release all resources in the Flow Counter Manager for this ulp context
126  *
127  * ctxt [in] The ulp context for the Flow Counter manager
128  *
129  */
130 int32_t
131 ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt)
132 {
133         struct bnxt_ulp_fc_info *ulp_fc_info;
134         int i;
135
136         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
137
138         if (!ulp_fc_info)
139                 return -EINVAL;
140
141         ulp_fc_mgr_thread_cancel(ctxt);
142
143         pthread_mutex_destroy(&ulp_fc_info->fc_lock);
144
145         for (i = 0; i < TF_DIR_MAX; i++)
146                 rte_free(ulp_fc_info->sw_acc_tbl[i]);
147
148         for (i = 0; i < TF_DIR_MAX; i++)
149                 ulp_fc_mgr_shadow_mem_free(&ulp_fc_info->shadow_hw_tbl[i]);
150
151
152         rte_free(ulp_fc_info);
153
154         /* Safe to ignore on deinit */
155         (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL);
156
157         return 0;
158 }
159
160 /*
161  * Check if the alarm thread that walks through the flows is started
162  *
163  * ctxt [in] The ulp context for the flow counter manager
164  *
165  */
166 bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt)
167 {
168         struct bnxt_ulp_fc_info *ulp_fc_info;
169
170         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
171
172         return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD);
173 }
174
175 /*
176  * Setup the Flow counter timer thread that will fetch/accumulate raw counter
177  * data from the chip's internal flow counters
178  *
179  * ctxt [in] The ulp context for the flow counter manager
180  *
181  */
182 int32_t
183 ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
184 {
185         struct bnxt_ulp_fc_info *ulp_fc_info;
186
187         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
188
189         if (!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
190                 rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
191                                   ulp_fc_mgr_alarm_cb,
192                                   (void *)ctxt);
193                 ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
194         }
195
196         return 0;
197 }
198
199 /*
200  * Cancel the alarm handler
201  *
202  * ctxt [in] The ulp context for the flow counter manager
203  *
204  */
205 void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
206 {
207         struct bnxt_ulp_fc_info *ulp_fc_info;
208
209         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
210         if (!ulp_fc_info)
211                 return;
212
213         ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
214         rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, (void *)ctxt);
215 }
216
217 /*
218  * DMA-in the raw counter data from the HW and accumulate in the
219  * local accumulator table using the TF-Core API
220  *
221  * tfp [in] The TF-Core context
222  *
223  * fc_info [in] The ULP Flow counter info ptr
224  *
225  * dir [in] The direction of the flow
226  *
227  * num_counters [in] The number of counters
228  *
229  */
230 __rte_unused static int32_t ulp_bulk_get_flow_stats(struct tf *tfp,
231                                        struct bnxt_ulp_fc_info *fc_info,
232                                        enum tf_dir dir, uint32_t num_counters)
233 /* MARK AS UNUSED FOR NOW TO AVOID COMPILATION ERRORS TILL API is RESOLVED */
234 {
235         int rc = 0;
236         struct tf_tbl_get_bulk_parms parms = { 0 };
237         enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD: Template? */
238         struct sw_acc_counter *sw_acc_tbl_entry = NULL;
239         uint64_t *stats = NULL;
240         uint16_t i = 0;
241
242         parms.dir = dir;
243         parms.type = stype;
244         parms.starting_idx = fc_info->shadow_hw_tbl[dir].start_idx;
245         parms.num_entries = num_counters;
246         /*
247          * TODO:
248          * Size of an entry needs to obtained from template
249          */
250         parms.entry_sz_in_bytes = sizeof(uint64_t);
251         stats = (uint64_t *)fc_info->shadow_hw_tbl[dir].mem_va;
252         parms.physical_mem_addr = (uintptr_t)fc_info->shadow_hw_tbl[dir].mem_pa;
253
254         if (stats == NULL) {
255                 PMD_DRV_LOG(ERR,
256                             "BULK: Memory not initialized id:0x%x dir:%d\n",
257                             parms.starting_idx, dir);
258                 return -EINVAL;
259         }
260
261         rc = tf_tbl_bulk_get(tfp, &parms);
262         if (rc) {
263                 PMD_DRV_LOG(ERR,
264                             "BULK: Get failed for id:0x%x rc:%d\n",
265                             parms.starting_idx, rc);
266                 return rc;
267         }
268
269         for (i = 0; i < num_counters; i++) {
270                 /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
271                 sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][i];
272                 if (!sw_acc_tbl_entry->valid)
273                         continue;
274                 sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats[i]);
275                 sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats[i]);
276         }
277
278         return rc;
279 }
280
281 static int ulp_get_single_flow_stat(struct tf *tfp,
282                                     struct bnxt_ulp_fc_info *fc_info,
283                                     enum tf_dir dir,
284                                     uint32_t hw_cntr_id)
285 {
286         int rc = 0;
287         struct tf_get_tbl_entry_parms parms = { 0 };
288         enum tf_tbl_type stype = TF_TBL_TYPE_ACT_STATS_64;  /* TBD:Template? */
289         struct sw_acc_counter *sw_acc_tbl_entry = NULL;
290         uint64_t stats = 0;
291         uint32_t sw_cntr_indx = 0;
292
293         parms.dir = dir;
294         parms.type = stype;
295         parms.idx = hw_cntr_id;
296         /*
297          * TODO:
298          * Size of an entry needs to obtained from template
299          */
300         parms.data_sz_in_bytes = sizeof(uint64_t);
301         parms.data = (uint8_t *)&stats;
302         rc = tf_get_tbl_entry(tfp, &parms);
303         if (rc) {
304                 PMD_DRV_LOG(ERR,
305                             "Get failed for id:0x%x rc:%d\n",
306                             parms.idx, rc);
307                 return rc;
308         }
309
310         /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
311         sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
312         sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
313         sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats);
314         sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats);
315
316         return rc;
317 }
318
/*
 * Alarm handler that will issue the TF-Core API to fetch
 * data from the chip's internal flow counters
 *
 * ctxt [in] The ulp context for the flow counter manager
 *
 * Re-arms itself every ULP_FC_TIMER seconds; any early return that does
 * not reach the rte_eal_alarm_set() at the bottom stops the polling.
 */

void
ulp_fc_mgr_alarm_cb(void *arg)
{
	int rc = 0;
	unsigned int j;
	enum tf_dir i;
	struct bnxt_ulp_context *ctxt = arg;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_device_params *dparms;
	struct tf *tfp;
	uint32_t dev_id, hw_cntr_id = 0;

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
	if (!ulp_fc_info)
		return;

	if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) {
		BNXT_TF_DBG(DEBUG, "Failed to get device id\n");
		return;
	}

	/* dparms is only consumed by the commented-out bulk-get path below. */
	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		BNXT_TF_DBG(DEBUG, "Failed to device parms\n");
		return;
	}

	tfp = bnxt_ulp_cntxt_tfp_get(ctxt);
	if (!tfp) {
		BNXT_TF_DBG(ERR, "Failed to get the truflow pointer\n");
		return;
	}

	/*
	 * Take the fc_lock to ensure no flow is destroyed
	 * during the bulk get
	 */
	/* If the lock is busy, skip this tick and re-arm at 'out'. */
	if (pthread_mutex_trylock(&ulp_fc_info->fc_lock))
		goto out;

	/* No counters left to poll: stop the alarm entirely. */
	if (!ulp_fc_info->num_entries) {
		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
	/*
	 * Commented for now till GET_BULK is resolved, just get the first flow
	 * stat for now
	 for (i = 0; i < TF_DIR_MAX; i++) {
		rc = ulp_bulk_get_flow_stats(tfp, ulp_fc_info, i,
					     dparms->flow_count_db_entries);
		if (rc)
			break;
	}
	*/
	/* Poll each valid counter individually in both directions. */
	for (i = 0; i < TF_DIR_MAX; i++) {
		for (j = 0; j < ulp_fc_info->num_entries; j++) {
			if (!ulp_fc_info->sw_acc_tbl[i][j].valid)
				continue;
			hw_cntr_id = ulp_fc_info->sw_acc_tbl[i][j].hw_cntr_id;
			rc = ulp_get_single_flow_stat(tfp, ulp_fc_info, i,
						      hw_cntr_id);
			if (rc)
				break;
		}
	}

	pthread_mutex_unlock(&ulp_fc_info->fc_lock);

	/*
	 * If cmd fails once, no need of
	 * invoking again every second
	 */

	if (rc) {
		ulp_fc_mgr_thread_cancel(ctxt);
		return;
	}
out:
	/* Re-arm the alarm for the next poll interval. */
	rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
			  ulp_fc_mgr_alarm_cb,
			  (void *)ctxt);
}
410
411 /*
412  * Set the starting index that indicates the first HW flow
413  * counter ID
414  *
415  * ctxt [in] The ulp context for the flow counter manager
416  *
417  * dir [in] The direction of the flow
418  *
419  * start_idx [in] The HW flow counter ID
420  *
421  */
422 bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir)
423 {
424         struct bnxt_ulp_fc_info *ulp_fc_info;
425
426         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
427
428         /* Assuming start_idx of 0 is invalid */
429         return (ulp_fc_info->shadow_hw_tbl[dir].start_idx != 0);
430 }
431
432 /*
433  * Set the starting index that indicates the first HW flow
434  * counter ID
435  *
436  * ctxt [in] The ulp context for the flow counter manager
437  *
438  * dir [in] The direction of the flow
439  *
440  * start_idx [in] The HW flow counter ID
441  *
442  */
443 int32_t ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
444                                  uint32_t start_idx)
445 {
446         struct bnxt_ulp_fc_info *ulp_fc_info;
447
448         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
449
450         if (!ulp_fc_info)
451                 return -EIO;
452
453         /* Assuming that 0 is an invalid counter ID ? */
454         if (ulp_fc_info->shadow_hw_tbl[dir].start_idx == 0)
455                 ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx;
456
457         return 0;
458 }
459
460 /*
461  * Set the corresponding SW accumulator table entry based on
462  * the difference between this counter ID and the starting
463  * counter ID. Also, keep track of num of active counter enabled
464  * flows.
465  *
466  * ctxt [in] The ulp context for the flow counter manager
467  *
468  * dir [in] The direction of the flow
469  *
470  * hw_cntr_id [in] The HW flow counter ID
471  *
472  */
473 int32_t ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
474                             uint32_t hw_cntr_id)
475 {
476         struct bnxt_ulp_fc_info *ulp_fc_info;
477         uint32_t sw_cntr_idx;
478
479         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
480         if (!ulp_fc_info)
481                 return -EIO;
482
483         pthread_mutex_lock(&ulp_fc_info->fc_lock);
484         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
485         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true;
486         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id;
487         ulp_fc_info->num_entries++;
488         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
489
490         return 0;
491 }
492
493 /*
494  * Reset the corresponding SW accumulator table entry based on
495  * the difference between this counter ID and the starting
496  * counter ID.
497  *
498  * ctxt [in] The ulp context for the flow counter manager
499  *
500  * dir [in] The direction of the flow
501  *
502  * hw_cntr_id [in] The HW flow counter ID
503  *
504  */
505 int32_t ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir,
506                               uint32_t hw_cntr_id)
507 {
508         struct bnxt_ulp_fc_info *ulp_fc_info;
509         uint32_t sw_cntr_idx;
510
511         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
512         if (!ulp_fc_info)
513                 return -EIO;
514
515         pthread_mutex_lock(&ulp_fc_info->fc_lock);
516         sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx;
517         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false;
518         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0;
519         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0;
520         ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0;
521         ulp_fc_info->num_entries--;
522         pthread_mutex_unlock(&ulp_fc_info->fc_lock);
523
524         return 0;
525 }
526
527 /*
528  * Fill the rte_flow_query_count 'data' argument passed
529  * in the rte_flow_query() with the values obtained and
530  * accumulated locally.
531  *
532  * ctxt [in] The ulp context for the flow counter manager
533  *
534  * flow_id [in] The HW flow ID
535  *
536  * count [out] The rte_flow_query_count 'data' that is set
537  *
538  */
539 int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
540                                uint32_t flow_id,
541                                struct rte_flow_query_count *count)
542 {
543         int rc = 0;
544         uint32_t nxt_resource_index = 0;
545         struct bnxt_ulp_fc_info *ulp_fc_info;
546         struct ulp_flow_db_res_params params;
547         enum tf_dir dir;
548         uint32_t hw_cntr_id = 0, sw_cntr_idx = 0;
549         struct sw_acc_counter sw_acc_tbl_entry;
550         bool found_cntr_resource = false;
551
552         ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt);
553         if (!ulp_fc_info)
554                 return -ENODEV;
555
556         do {
557                 rc = ulp_flow_db_resource_get(ctxt,
558                                               BNXT_ULP_REGULAR_FLOW_TABLE,
559                                               flow_id,
560                                               &nxt_resource_index,
561                                               &params);
562                 if (params.resource_func ==
563                      BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE &&
564                      (params.resource_sub_type ==
565                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT ||
566                       params.resource_sub_type ==
567                       BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_EXT_COUNT)) {
568                         found_cntr_resource = true;
569                         break;
570                 }
571
572         } while (!rc);
573
574         if (rc)
575                 return rc;
576
577         if (found_cntr_resource) {
578                 dir = params.direction;
579                 hw_cntr_id = params.resource_hndl;
580                 sw_cntr_idx = hw_cntr_id -
581                                 ulp_fc_info->shadow_hw_tbl[dir].start_idx;
582                 sw_acc_tbl_entry = ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx];
583                 if (params.resource_sub_type ==
584                         BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT) {
585                         count->hits_set = 1;
586                         count->bytes_set = 1;
587                         count->hits = sw_acc_tbl_entry.pkt_count;
588                         count->bytes = sw_acc_tbl_entry.byte_count;
589                 } else {
590                         /* TBD: Handle External counters */
591                         rc = -EINVAL;
592                 }
593         }
594
595         return rc;
596 }