dpdk.git: drivers/net/bnxt/tf_core/tf_rm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Broadcom
 * All rights reserved.
 */

#include <string.h>

#include <rte_common.h>

#include <cfa_resource_types.h>

#include "tf_rm.h"
#include "tf_common.h"
#include "tf_util.h"
#include "tf_session.h"
#include "tf_device.h"
#include "tfp.h"
#include "tf_msg.h"

/**
 * Generic RM Element data type that an RM DB is built upon.
 */
struct tf_rm_element {
	/**
	 * RM Element configuration type. If Private then the
	 * hcapi_type can be ignored. If Null then the element is not
	 * valid for the device.
	 */
	enum tf_rm_elem_cfg_type cfg_type;

	/**
	 * HCAPI RM Type for the element.
	 */
	uint16_t hcapi_type;

	/**
	 * HCAPI RM allocated range information for the element.
	 */
	struct tf_rm_alloc_info alloc;

	/**
	 * Bit allocator pool for the element. Pool size is controlled
	 * by the struct tf_session_resources at time of session creation.
	 * Null indicates that the element is not used for the device.
	 */
	struct bitalloc *pool;
};

/**
 * TF RM DB definition
 */
struct tf_rm_new_db {
	/**
	 * Number of elements in the DB
	 */
	uint16_t num_entries;

	/**
	 * Direction this DB controls.
	 */
	enum tf_dir dir;

	/**
	 * Module type, used for logging purposes.
	 */
	enum tf_device_module_type type;

	/**
	 * The DB consists of an array of elements
	 */
	struct tf_rm_element *db;
};

/**
 * Counts the number of HCAPI reservations requested for a module.
 *
 * Walks the module configuration and counts the elements that are
 * HCAPI controlled and have a reservation value greater than 0. Also
 * logs any reservation that is attempted for an element type the
 * device does not support.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] type
 *   Type of Device Module
 *
 * [in] cfg
 *   Pointer to the DB configuration
 *
 * [in] reservations
 *   Pointer to the allocation values associated with the module
 *
 * [in] count
 *   Number of DB configuration elements
 *
 * [out] valid_count
 *   Number of HCAPI entries with a reservation value greater than 0
 */
static void
tf_rm_count_hcapi_reservations(enum tf_dir dir,
			       enum tf_device_module_type type,
			       struct tf_rm_element_cfg *cfg,
			       uint16_t *reservations,
			       uint16_t count,
			       uint16_t *valid_count)
{
	int i;
	uint16_t cnt = 0;

	for (i = 0; i < count; i++) {
		if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
		     cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
		    reservations[i] > 0)
			cnt++;

		/* Only log a message if a reservation is attempted for
		 * a type that is not supported. The EM module is
		 * ignored as it uses a split configuration array and
		 * would otherwise fail this check.
		 */
		if (type != TF_DEVICE_MODULE_TYPE_EM &&
		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
		    reservations[i] > 0) {
			TFP_DRV_LOG(ERR,
				"%s, %s, %s allocation not supported\n",
				tf_device_module_type_2_str(type),
				tf_dir_2_str(dir),
				tf_device_module_type_subtype_2_str(type, i));
			printf("%s, %s, %s allocation of %d not supported\n",
			       tf_device_module_type_2_str(type),
			       tf_dir_2_str(dir),
			       tf_device_module_type_subtype_2_str(type, i),
			       reservations[i]);
		}
	}

	*valid_count = cnt;
}

/**
 * Resource Manager Adjust of base index definitions.
 */
enum tf_rm_adjust_type {
	TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
	TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
};

/**
 * Adjust an index according to the allocation information.
 *
 * All resources are controlled in a 0 based pool. Some resources, by
 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
 * need to be adjusted before they are handed out.
 *
 * [in] db
 *   Pointer to the db, used for the lookup
 *
 * [in] action
 *   Adjust action
 *
 * [in] db_index
 *   DB index for the element type
 *
 * [in] index
 *   Index to convert
 *
 * [out] adj_index
 *   Adjusted index
 *
 * Returns:
 *     0          - Success
 *   - EOPNOTSUPP - Operation not supported
 */
static int
tf_rm_adjust_index(struct tf_rm_element *db,
		   enum tf_rm_adjust_type action,
		   uint32_t db_index,
		   uint32_t index,
		   uint32_t *adj_index)
{
	int rc = 0;
	uint32_t base_index;

	base_index = db[db_index].alloc.entry.start;

	switch (action) {
	case TF_RM_ADJUST_RM_BASE:
		*adj_index = index - base_index;
		break;
	case TF_RM_ADJUST_ADD_BASE:
		*adj_index = index + base_index;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return rc;
}

/**
 * Logs an array of found residual entries to the console.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] type
 *   Type of Device Module
 *
 * [in] count
 *   Number of entries in the residual array
 *
 * [in] residuals
 *   Pointer to an array of residual entries. The array is indexed the
 *   same as the DB in which this function is used. Each entry holds
 *   the residual value for that entry.
 */
static void
tf_rm_log_residuals(enum tf_dir dir,
		    enum tf_device_module_type type,
		    uint16_t count,
		    uint16_t *residuals)
{
	int i;

	/* Walk the residual array and log to the console any types
	 * that were not cleaned up.
	 */
	for (i = 0; i < count; i++) {
		if (residuals[i] != 0)
			TFP_DRV_LOG(ERR,
				"%s, %s was not cleaned up, %d outstanding\n",
				tf_dir_2_str(dir),
				tf_device_module_type_subtype_2_str(type, i),
				residuals[i]);
	}
}

/**
 * Performs a check of the passed in DB for any lingering elements. If
 * a resource type was found to not have been cleaned up by the caller
 * then its residual values are recorded, logged and passed back in an
 * allocated reservation array that the caller can pass to the FW for
 * cleanup.
 *
 * [in] rm_db
 *   Pointer to the RM DB, used for the lookup
 *
 * [out] resv_size
 *   Pointer to the reservation size of the generated reservation
 *   array.
 *
 * [in/out] resv
 *   Pointer to a pointer to a reservation array. The reservation
 *   array is allocated after the residual scan and holds any found
 *   residual entries. Thus it can be smaller than the DB that the
 *   check was performed on. The array must be freed by the caller.
 *
 * [out] residuals_present
 *   Pointer to a bool flag indicating if residuals were present in
 *   the DB
 *
 * Returns:
 *     0          - Success
 *   - EOPNOTSUPP - Operation not supported
 */
static int
tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
		      uint16_t *resv_size,
		      struct tf_rm_resc_entry **resv,
		      bool *residuals_present)
{
	int rc;
	int i;
	int f;
	uint16_t count;
	uint16_t found;
	uint16_t *residuals = NULL;
	uint16_t hcapi_type;
	struct tf_rm_get_inuse_count_parms iparms;
	struct tf_rm_get_alloc_info_parms aparms;
	struct tf_rm_get_hcapi_parms hparms;
	struct tf_rm_alloc_info info;
	struct tfp_calloc_parms cparms;
	struct tf_rm_resc_entry *local_resv = NULL;

	/* Create array to hold the entries that have residuals */
	cparms.nitems = rm_db->num_entries;
	cparms.size = sizeof(uint16_t);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	residuals = (uint16_t *)cparms.mem_va;

	/* Traverse the DB and collect any residual elements */
	iparms.rm_db = rm_db;
	iparms.count = &count;
	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
		iparms.db_index = i;
		rc = tf_rm_get_inuse_count(&iparms);
		/* Not a device supported entry, just skip */
		if (rc == -ENOTSUP)
			continue;
		if (rc)
			goto cleanup_residuals;

		if (count) {
			found++;
			residuals[i] = count;
			*residuals_present = true;
		}
	}

	if (*residuals_present) {
		/* Populate a reduced resv array with only the entries
		 * that have residuals.
		 */
		cparms.nitems = found;
		cparms.size = sizeof(struct tf_rm_resc_entry);
		cparms.alignment = 0;
		rc = tfp_calloc(&cparms);
		if (rc)
			goto cleanup_residuals;

		local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;

		aparms.rm_db = rm_db;
		hparms.rm_db = rm_db;
		hparms.hcapi_type = &hcapi_type;
		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
			if (residuals[i] == 0)
				continue;
			aparms.db_index = i;
			aparms.info = &info;
			rc = tf_rm_get_info(&aparms);
			if (rc)
				goto cleanup_all;

			hparms.db_index = i;
			rc = tf_rm_get_hcapi_type(&hparms);
			if (rc)
				goto cleanup_all;

			local_resv[f].type = hcapi_type;
			local_resv[f].start = info.entry.start;
			local_resv[f].stride = info.entry.stride;
			f++;
		}
		*resv_size = found;
	}

	tf_rm_log_residuals(rm_db->dir,
			    rm_db->type,
			    rm_db->num_entries,
			    residuals);

	tfp_free((void *)residuals);
	*resv = local_resv;

	return 0;

 cleanup_all:
	tfp_free((void *)local_resv);
	*resv = NULL;
 cleanup_residuals:
	tfp_free((void *)residuals);

	return rc;
}

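/**
 * Creates and fills an RM DB with the requested elements for a module.
 *
 * Queries the FW resource capabilities (QCAPS), builds a reservation
 * request for every HCAPI controlled element with a non-zero
 * allocation count and, on a successful reservation, builds the DB
 * including bit allocator pools for the HCAPI_BA elements.
 *
 * [in] tfp
 *   Pointer to TF handle
 *
 * [in] parms
 *   Pointer to the create parameters
 *
 * Returns:
 *     0 - Success
 *   - negative errno value on failure
 */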
int
tf_rm_create_db(struct tf *tfp,
		struct tf_rm_create_db_parms *parms)
{
	int rc;
	int i;
	int j;
	struct tf_session *tfs;
	struct tf_dev_info *dev;
	uint16_t max_types;
	struct tfp_calloc_parms cparms;
	struct tf_rm_resc_req_entry *query;
	enum tf_rm_resc_resv_strategy resv_strategy;
	struct tf_rm_resc_req_entry *req;
	struct tf_rm_resc_entry *resv;
	struct tf_rm_new_db *rm_db;
	struct tf_rm_element *db;
	uint32_t pool_size;
	uint16_t hcapi_items;

	TF_CHECK_PARMS2(tfp, parms);

	/* Retrieve the session information */
	rc = tf_session_get_session(tfp, &tfs);
	if (rc)
		return rc;

	/* Retrieve device information */
	rc = tf_session_get_device(tfs, &dev);
	if (rc)
		return rc;

	/* Need device max number of elements for the RM QCAPS */
	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
	if (rc)
		return rc;

	cparms.nitems = max_types;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	cparms.alignment = 0;
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;

	query = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Get Firmware Capabilities */
	rc = tf_msg_session_resc_qcaps(tfp,
				       parms->dir,
				       max_types,
				       query,
				       &resv_strategy);
	if (rc)
		return rc;

	/* Process capabilities against the DB requirements. However,
	 * as a DB can hold elements that are not HCAPI we can reduce
	 * the request message content by leaving those out of the
	 * request, while the DB still holds them all so as to give a
	 * fast lookup. We also leave out entries for which no
	 * elements are requested.
	 */
	tf_rm_count_hcapi_reservations(parms->dir,
				       parms->type,
				       parms->cfg,
				       parms->alloc_cnt,
				       parms->num_elements,
				       &hcapi_items);

	/* Handle the case where a DB create request ends up being
	 * empty. An unsupported, if not rare, case, but it is possible
	 * that no resources are necessary for a given direction.
	 */
	if (hcapi_items == 0) {
		TFP_DRV_LOG(ERR,
			"%s: DB create request for Zero elements, DB Type:%s\n",
			tf_dir_2_str(parms->dir),
			tf_device_module_type_2_str(parms->type));

		*parms->rm_db = NULL;
		return -ENOMEM;
	}

	/* Alloc request, alignment already set */
	cparms.nitems = (size_t)hcapi_items;
	cparms.size = sizeof(struct tf_rm_resc_req_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	req = (struct tf_rm_resc_req_entry *)cparms.mem_va;

	/* Alloc reservation, alignment and nitems already set */
	cparms.size = sizeof(struct tf_rm_resc_entry);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	resv = (struct tf_rm_resc_entry *)cparms.mem_va;

	/* Build the request */
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		/* Skip any non HCAPI cfg elements */
		if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
		    parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
			/* Only perform reservation for entries that
			 * have been requested
			 */
			if (parms->alloc_cnt[i] == 0)
				continue;

			/* Verify that we can get the full amount
			 * allocated per the qcaps availability.
			 */
			if (parms->alloc_cnt[i] <=
			    query[parms->cfg[i].hcapi_type].max) {
				req[j].type = parms->cfg[i].hcapi_type;
				req[j].min = parms->alloc_cnt[i];
				req[j].max = parms->alloc_cnt[i];
				j++;
			} else {
				TFP_DRV_LOG(ERR,
					    "%s: Resource failure, type:%d\n",
					    tf_dir_2_str(parms->dir),
					    parms->cfg[i].hcapi_type);
				TFP_DRV_LOG(ERR,
					"req:%d, avail:%d\n",
					parms->alloc_cnt[i],
					query[parms->cfg[i].hcapi_type].max);
				return -EINVAL;
			}
		}
	}

	rc = tf_msg_session_resc_alloc(tfp,
				       parms->dir,
				       hcapi_items,
				       req,
				       resv);
	if (rc)
		return rc;

	/* Build the RM DB per the request */
	cparms.nitems = 1;
	cparms.size = sizeof(struct tf_rm_new_db);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db = (void *)cparms.mem_va;

	/* Build the DB within RM DB */
	cparms.nitems = parms->num_elements;
	cparms.size = sizeof(struct tf_rm_element);
	rc = tfp_calloc(&cparms);
	if (rc)
		return rc;
	rm_db->db = (struct tf_rm_element *)cparms.mem_va;

	db = rm_db->db;
	for (i = 0, j = 0; i < parms->num_elements; i++) {
		db[i].cfg_type = parms->cfg[i].cfg_type;
		db[i].hcapi_type = parms->cfg[i].hcapi_type;

		/* Skip any non HCAPI types as we didn't include them
		 * in the reservation request.
		 */
		if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI &&
		    parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
			continue;

		/* If the element didn't request an allocation no need
		 * to create a pool nor verify if we got a reservation.
		 */
		if (parms->alloc_cnt[i] == 0)
			continue;

		/* If the element had requested an allocation and that
		 * allocation was a success (full amount) then
		 * allocate the pool.
		 */
		if (parms->alloc_cnt[i] == resv[j].stride) {
			db[i].alloc.entry.start = resv[j].start;
			db[i].alloc.entry.stride = resv[j].stride;

			printf("Entry:%d Start:%d Stride:%d\n",
			       i,
			       resv[j].start,
			       resv[j].stride);

			/* Only allocate BA pool if so requested */
			if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
				/* Create pool */
				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
					     sizeof(struct bitalloc));
				/* Alloc request, alignment already set */
				cparms.nitems = pool_size;
				cparms.size = sizeof(struct bitalloc);
				rc = tfp_calloc(&cparms);
				if (rc) {
					TFP_DRV_LOG(ERR,
					     "%s: Pool alloc failed, type:%d\n",
					     tf_dir_2_str(parms->dir),
					     db[i].cfg_type);
					goto fail;
				}
				db[i].pool = (struct bitalloc *)cparms.mem_va;

				rc = ba_init(db[i].pool, resv[j].stride);
				if (rc) {
					TFP_DRV_LOG(ERR,
					     "%s: Pool init failed, type:%d\n",
					     tf_dir_2_str(parms->dir),
					     db[i].cfg_type);
					goto fail;
				}
			}
			j++;
		} else {
			/* Bail out as we want what we requested for
			 * all elements, not any less.
			 */
			TFP_DRV_LOG(ERR,
				    "%s: Alloc failed, type:%d\n",
				    tf_dir_2_str(parms->dir),
				    db[i].cfg_type);
			TFP_DRV_LOG(ERR,
				    "req:%d, alloc:%d\n",
				    parms->alloc_cnt[i],
				    resv[j].stride);
			goto fail;
		}
	}

	rm_db->num_entries = parms->num_elements;
	rm_db->dir = parms->dir;
	rm_db->type = parms->type;
	*parms->rm_db = (void *)rm_db;

	printf("%s: type:%d num_entries:%d\n",
	       tf_dir_2_str(parms->dir),
	       parms->type,
	       i);

	tfp_free((void *)query);
	tfp_free((void *)req);
	tfp_free((void *)resv);

	return 0;

 fail:
	tfp_free((void *)query);
	tfp_free((void *)req);
	tfp_free((void *)resv);
	/* Release any pools that were already created */
	for (i = 0; i < parms->num_elements; i++)
		tfp_free((void *)db[i].pool);
	tfp_free((void *)db);
	tfp_free((void *)rm_db);
	*parms->rm_db = NULL;

	return -EINVAL;
}

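/**
 * Closes an RM DB.
 *
 * Checks the DB for residual (not yet freed) entries, asks FW to
 * flush any residuals found, then releases the bit allocator pools
 * and the DB itself.
 *
 * [in] tfp
 *   Pointer to TF handle
 *
 * [in] parms
 *   Pointer to the free parameters
 *
 * Returns:
 *     0 - Success
 *   - negative errno value on failure
 */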
int
tf_rm_free_db(struct tf *tfp,
	      struct tf_rm_free_db_parms *parms)
{
	int rc;
	int i;
	uint16_t resv_size = 0;
	struct tf_rm_new_db *rm_db;
	struct tf_rm_resc_entry *resv;
	bool residuals_found = false;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	/* Device unbind happens when the TF Session is closed and the
	 * session ref count is 0. Device unbind will clean up each of
	 * its supporting modules, i.e. Identifier, which is how we
	 * end up here to close the DB.
	 *
	 * On TF Session close it is assumed that the session has already
	 * cleaned up all its resources, individually, while
	 * destroying its flows.
	 *
	 * To assist in the 'cleanup checking' the DB is checked for any
	 * remaining elements and logged if found to be the case.
	 *
	 * Any such elements will need to be 'cleared' ahead of
	 * returning the resources to the HCAPI RM.
	 *
	 * RM will signal FW to flush the DB resources. FW will
	 * perform the invalidation. TF Session close will return the
	 * previously allocated elements to the RM and then close the
	 * HCAPI RM registration. That then saves several 'free' msgs
	 * from being required.
	 */

	rm_db = (struct tf_rm_new_db *)parms->rm_db;

	/* Check for residuals that the client didn't clean up */
	rc = tf_rm_check_residuals(rm_db,
				   &resv_size,
				   &resv,
				   &residuals_found);
	if (rc)
		return rc;

	/* Invalidate any residuals followed by a DB traversal for
	 * pool cleanup.
	 */
	if (residuals_found) {
		rc = tf_msg_session_resc_flush(tfp,
					       parms->dir,
					       resv_size,
					       resv);
		tfp_free((void *)resv);
		/* On failure we still have to cleanup so we can only
		 * log that FW failed.
		 */
		if (rc)
			TFP_DRV_LOG(ERR,
				    "%s: Internal Flush error, module:%s\n",
				    tf_dir_2_str(parms->dir),
				    tf_device_module_type_2_str(rm_db->type));
	}

	/* No need to check the configuration type; even if we do not
	 * have a BA pool, freeing a NULL pointer is harmless.
	 */
	for (i = 0; i < rm_db->num_entries; i++)
		tfp_free((void *)rm_db->db[i].pool);

	tfp_free((void *)rm_db->db);
	tfp_free((void *)parms->rm_db);

	return rc;
}

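/**
 * Allocates a single element from the bit allocator pool of the
 * requested DB entry and returns the base adjusted index.
 *
 * [in] parms
 *   Pointer to the allocate parameters
 *
 * Returns:
 *     0       - Success, index returned in parms->index
 *   - ENOTSUP - Element not controlled by RM or pool invalid
 *   - ENOMEM  - Pool exhausted
 *   - EINVAL  - Index adjust failure
 */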
int
tf_rm_allocate(struct tf_rm_allocate_parms *parms)
{
	int rc;
	int id;
	uint32_t index;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	cfg_type = rm_db->db[parms->db_index].cfg_type;

	/* Bail out if not controlled by RM */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
		return -ENOTSUP;

	/* Bail out if the pool is not valid, should never happen */
	if (rm_db->db[parms->db_index].pool == NULL) {
		rc = -ENOTSUP;
		TFP_DRV_LOG(ERR,
			    "%s: Invalid pool for this type:%d, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    parms->db_index,
			    strerror(-rc));
		return rc;
	}

	/*
	 * priority 0: allocate from the top of the TCAM, i.e. highest priority
	 * priority != 0: allocate from the bottom, i.e. lowest priority
	 */
	if (parms->priority)
		id = ba_alloc_reverse(rm_db->db[parms->db_index].pool);
	else
		id = ba_alloc(rm_db->db[parms->db_index].pool);
	if (id == BA_FAIL) {
		rc = -ENOMEM;
		TFP_DRV_LOG(ERR,
			    "%s: Allocation failed, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    strerror(-rc));
		return rc;
	}

	/* Adjust for any non zero start value */
	rc = tf_rm_adjust_index(rm_db->db,
				TF_RM_ADJUST_ADD_BASE,
				parms->db_index,
				id,
				&index);
	if (rc) {
		TFP_DRV_LOG(ERR,
			    "%s: Alloc adjust of base index failed, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    strerror(-rc));
		return -EINVAL;
	}

	*parms->index = index;

	return rc;
}

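/**
 * Returns a previously allocated element to the bit allocator pool of
 * the requested DB entry.
 *
 * [in] parms
 *   Pointer to the free parameters
 *
 * Returns:
 *     0       - Success
 *   - ENOTSUP - Element not controlled by RM or pool invalid
 *   - negative value on any other failure
 */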
int
tf_rm_free(struct tf_rm_free_parms *parms)
{
	int rc;
	uint32_t adj_index;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	cfg_type = rm_db->db[parms->db_index].cfg_type;

	/* Bail out if not controlled by RM */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
		return -ENOTSUP;

	/* Bail out if the pool is not valid, should never happen */
	if (rm_db->db[parms->db_index].pool == NULL) {
		rc = -ENOTSUP;
		TFP_DRV_LOG(ERR,
			    "%s: Invalid pool for this type:%d, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    parms->db_index,
			    strerror(-rc));
		return rc;
	}

	/* Adjust for any non zero start value */
	rc = tf_rm_adjust_index(rm_db->db,
				TF_RM_ADJUST_RM_BASE,
				parms->db_index,
				parms->index,
				&adj_index);
	if (rc)
		return rc;

	rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
	/* No logging; direction matters and that is not available here */
	if (rc)
		return rc;

	return rc;
}

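/**
 * Checks if an element index is currently allocated in the bit
 * allocator pool of the requested DB entry. The result is returned
 * in parms->allocated.
 *
 * [in] parms
 *   Pointer to the is-allocated parameters
 *
 * Returns:
 *     0       - Success
 *   - ENOTSUP - Element not controlled by RM or pool invalid
 */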
int
tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
{
	int rc;
	uint32_t adj_index;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	cfg_type = rm_db->db[parms->db_index].cfg_type;

	/* Bail out if not controlled by RM */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
		return -ENOTSUP;

	/* Bail out if the pool is not valid, should never happen */
	if (rm_db->db[parms->db_index].pool == NULL) {
		rc = -ENOTSUP;
		TFP_DRV_LOG(ERR,
			    "%s: Invalid pool for this type:%d, rc:%s\n",
			    tf_dir_2_str(rm_db->dir),
			    parms->db_index,
			    strerror(-rc));
		return rc;
	}

	/* Adjust for any non zero start value */
	rc = tf_rm_adjust_index(rm_db->db,
				TF_RM_ADJUST_RM_BASE,
				parms->db_index,
				parms->index,
				&adj_index);
	if (rc)
		return rc;

	*parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
				     adj_index);

	return rc;
}

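/**
 * Copies the HCAPI allocation information (start/stride) for a DB
 * element into parms->info.
 *
 * [in] parms
 *   Pointer to the get-info parameters
 *
 * Returns:
 *     0       - Success
 *   - ENOTSUP - Element not controlled by HCAPI
 */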
int
tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
{
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	cfg_type = rm_db->db[parms->db_index].cfg_type;

	/* Bail out if not controlled by HCAPI */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
		return -ENOTSUP;

	memcpy(parms->info,
	       &rm_db->db[parms->db_index].alloc,
	       sizeof(struct tf_rm_alloc_info));

	return 0;
}

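/**
 * Looks up the HCAPI resource type of a DB element and returns it in
 * parms->hcapi_type.
 *
 * [in] parms
 *   Pointer to the get-hcapi-type parameters
 *
 * Returns:
 *     0       - Success
 *   - ENOTSUP - Element not controlled by HCAPI
 */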
int
tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
{
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	cfg_type = rm_db->db[parms->db_index].cfg_type;

	/* Bail out if not controlled by HCAPI */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
		return -ENOTSUP;

	*parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;

	return 0;
}

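/**
 * Reports in parms->count how many elements are currently allocated
 * from the bit allocator pool of a DB element. An element without a
 * pool reports a count of 0.
 *
 * [in] parms
 *   Pointer to the get-inuse-count parameters
 *
 * Returns:
 *     0       - Success
 *   - ENOTSUP - Element not controlled by RM
 */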
int
tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
{
	int rc = 0;
	struct tf_rm_new_db *rm_db;
	enum tf_rm_elem_cfg_type cfg_type;

	TF_CHECK_PARMS2(parms, parms->rm_db);

	rm_db = (struct tf_rm_new_db *)parms->rm_db;
	cfg_type = rm_db->db[parms->db_index].cfg_type;

	/* Bail out if not controlled by RM */
	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
		return -ENOTSUP;

	/* Bail silently (no logging); if the pool is not valid then
	 * no elements were allocated for it.
	 */
	if (rm_db->db[parms->db_index].pool == NULL) {
		*parms->count = 0;
		return 0;
	}

	*parms->count = ba_inuse_count(rm_db->db[parms->db_index].pool);

	return rc;
}