net/bnxt: support shared session
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include <string.h>
7
8 #include <rte_common.h>
9 #include <rte_debug.h>
10
11 #include <cfa_resource_types.h>
12
13 #include "tf_rm.h"
14 #include "tf_common.h"
15 #include "tf_util.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
18 #include "tfp.h"
19 #include "tf_msg.h"
20
21 /* Logging defines */
22 #define TF_RM_DEBUG  0
23
24 /**
25  * Generic RM Element data type that an RM DB is built upon.
26  */
27 struct tf_rm_element {
28         /**
29          * RM Element configuration type. If Private then the
30          * hcapi_type can be ignored. If Null then the element is not
31          * valid for the device.
32          */
33         enum tf_rm_elem_cfg_type cfg_type;
34
35         /**
36          * HCAPI RM Type for the element.
37          */
38         uint16_t hcapi_type;
39
40         /**
41          * HCAPI RM allocated range information for the element.
42          */
43         struct tf_rm_alloc_info alloc;
44
45         /**
46          * If cfg_type == HCAPI_BA_CHILD, this field indicates
47          * the parent module subtype for look up into the parent pool.
48          * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
49          * module subtype of TF_MODULE_TYPE_TABLE.
50          */
51         uint16_t parent_subtype;
52
53         /**
54          * Bit allocator pool for the element. Pool size is controlled
55          * by the struct tf_session_resources at time of session creation.
56          * Null indicates that the pool is not used for the element.
57          */
58         struct bitalloc *pool;
59 };
60
61 /**
62  * TF RM DB definition
63  */
64 struct tf_rm_new_db {
65         /**
66          * Number of elements in the DB
67          */
68         uint16_t num_entries;
69
70         /**
71          * Direction this DB controls.
72          */
73         enum tf_dir dir;
74
75         /**
76          * Module type, used for logging purposes.
77          */
78         enum tf_module_type module;
79
80         /**
81          * The DB consists of an array of elements
82          */
83         struct tf_rm_element *db;
84 };
85
86 /**
87  * Count the number of HCAPI reservations requested for a module.
88  *
89  * Reservation attempts against unsupported element types are logged.
90  *
91  * [in] dir
92  *   Receive or transmit direction
93  *
94  * [in] module
95  *   Type of Device Module
96  *
97  * [in] cfg
98  *   Pointer to the DB configuration
99  *
100  * [in] reservations
101  *   Pointer to the requested reservation values for the module
102  *
103  * [in] count
104  *   Number of DB configuration elements
105  *
106  * [out] valid_count
107  *   Number of HCAPI entries with a reservation value greater than 0
108  */
109 static void
110 tf_rm_count_hcapi_reservations(enum tf_dir dir,
111                                enum tf_module_type module,
112                                struct tf_rm_element_cfg *cfg,
113                                uint16_t *reservations,
114                                uint16_t count,
115                                uint16_t *valid_count)
116 {
117         int i;
118         uint16_t cnt = 0;
119
120         for (i = 0; i < count; i++) {
121                 if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
122                     reservations[i] > 0)
123                         cnt++;
124
125                 /* Only log a message if a reservation is attempted
126                  * for a type that is not supported. The EM module is
127                  * skipped as it uses a split configuration array and
128                  * would fail this type of check.
129                  */
130                 if (module != TF_MODULE_TYPE_EM &&
131                     cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
132                     reservations[i] > 0) {
133                         TFP_DRV_LOG(ERR,
134                                 "%s, %s, %s allocation of %d not supported\n",
135                                 tf_module_2_str(module),
136                                 tf_dir_2_str(dir),
137                                 tf_module_subtype_2_str(module, i),
138                                 reservations[i]);
139                 }
140         }
141
142         *valid_count = cnt;
143 }
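/* Illustrative example (hypothetical counts): for a module with four
 * configured elements and reservations of {0, 4, 0, 16}, valid_count
 * is returned as 2. A non-zero reservation against an element whose
 * cfg type is TF_RM_ELEM_CFG_NULL is only logged, never counted.
 */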
144
145 /**
146  * Resource Manager base index adjustment definitions.
147  */
148 enum tf_rm_adjust_type {
149         TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
150         TF_RM_ADJUST_RM_BASE   /**< Removes base from the index */
151 };
152
153 /**
154  * Adjust an index according to the allocation information.
155  *
156  * All resources are controlled in a 0 based pool. Some resources, by
157  * design, are not 0 based, e.g. Full Action Records (SRAM), thus they
158  * need to be adjusted before they are handed out.
159  *
160  * [in] db
161  *   Pointer to the db, used for the lookup
162  *
163  * [in] action
164  *   Adjust action
165  *
166  * [in] subtype
167  *   TF module subtype used as an index into the database.
168  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
169  *   module subtype of TF_MODULE_TYPE_TABLE.
170  *
171  * [in] index
172  *   Index to convert
173  *
174  * [out] adj_index
175  *   Adjusted index
176  *
177  * Returns:
178  *     0          - Success
179  *   - EOPNOTSUPP - Operation not supported
180  */
181 static int
182 tf_rm_adjust_index(struct tf_rm_element *db,
183                    enum tf_rm_adjust_type action,
184                    uint32_t subtype,
185                    uint32_t index,
186                    uint32_t *adj_index)
187 {
188         int rc = 0;
189         uint32_t base_index;
190
191         base_index = db[subtype].alloc.entry.start;
192
193         switch (action) {
194         case TF_RM_ADJUST_RM_BASE:
195                 *adj_index = index - base_index;
196                 break;
197         case TF_RM_ADJUST_ADD_BASE:
198                 *adj_index = index + base_index;
199                 break;
200         default:
201                 return -EOPNOTSUPP;
202         }
203
204         return rc;
205 }
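/* Illustrative example (hypothetical values): if an element was
 * reserved with alloc.entry.start = 0x1000, TF_RM_ADJUST_ADD_BASE
 * maps pool index 5 to HW index 0x1005 and TF_RM_ADJUST_RM_BASE maps
 * HW index 0x1005 back to pool index 5.
 */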
206
207 /**
208  * Logs an array of found residual entries to the console.
209  *
210  * [in] dir
211  *   Receive or transmit direction
212  *
213  * [in] module
214  *   Type of Device Module
215  *
216  * [in] count
217  *   Number of entries in the residual array
218  *
219  * [in] residuals
220  *   Pointer to an array of residual entries. The array is indexed the
221  *   same as the DB in which this function is used. Each entry holds the
222  *   residual value for that entry.
223  */
224 static void
225 tf_rm_log_residuals(enum tf_dir dir,
226                     enum tf_module_type module,
227                     uint16_t count,
228                     uint16_t *residuals)
229 {
230         int i;
231
232         /* Walk the residual array and log to the console the types
233          * that weren't cleaned up.
234          */
235         for (i = 0; i < count; i++) {
236                 if (residuals[i] != 0)
237                         TFP_DRV_LOG(ERR,
238                                 "%s, %s was not cleaned up, %d outstanding\n",
239                                 tf_dir_2_str(dir),
240                                 tf_module_subtype_2_str(module, i),
241                                 residuals[i]);
242         }
243 }
244
245 /**
246  * Performs a check of the passed in DB for any lingering elements. If
247  * a resource type was found to not have been cleaned up by the caller
248  * then its residual values are recorded, logged and passed back in an
249  * allocated reservation array that the caller can pass to the FW for
250  * cleanup.
251  *
252  * [in] db
253  *   Pointer to the db, used for the lookup
254  *
255  * [out] resv_size
256  *   Pointer to the reservation size of the generated reservation
257  *   array.
258  *
259  * [in/out] resv
260  *   Pointer to a pointer to a reservation array. The reservation
261  *   array is allocated after the residual scan and holds any found
262  *   residual entries. Thus it can be smaller than the DB that the
263  *   check was performed on. The array must be freed by the caller.
264  *
265  * [out] residuals_present
266  *   Pointer to a bool flag indicating if residuals were present in
267  *   the DB
268  *
269  * Returns:
270  *     0          - Success
271  *   - EOPNOTSUPP - Operation not supported
272  */
273 static int
274 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
275                       uint16_t *resv_size,
276                       struct tf_rm_resc_entry **resv,
277                       bool *residuals_present)
278 {
279         int rc;
280         int i;
281         int f;
282         uint16_t count;
283         uint16_t found;
284         uint16_t *residuals = NULL;
285         uint16_t hcapi_type;
286         struct tf_rm_get_inuse_count_parms iparms;
287         struct tf_rm_get_alloc_info_parms aparms;
288         struct tf_rm_get_hcapi_parms hparms;
289         struct tf_rm_alloc_info info;
290         struct tfp_calloc_parms cparms;
291         struct tf_rm_resc_entry *local_resv = NULL;
292
293         /* Create array to hold the entries that have residuals */
294         cparms.nitems = rm_db->num_entries;
295         cparms.size = sizeof(uint16_t);
296         cparms.alignment = 0;
297         rc = tfp_calloc(&cparms);
298         if (rc)
299                 return rc;
300
301         residuals = (uint16_t *)cparms.mem_va;
302
303         /* Traverse the DB and collect any residual elements */
304         iparms.rm_db = rm_db;
305         iparms.count = &count;
306         for (i = 0, found = 0; i < rm_db->num_entries; i++) {
307                 iparms.subtype = i;
308                 rc = tf_rm_get_inuse_count(&iparms);
309                 /* Not a device supported entry, just skip */
310                 if (rc == -ENOTSUP)
311                         continue;
312                 if (rc)
313                         goto cleanup_residuals;
314
315                 if (count) {
316                         found++;
317                         residuals[i] = count;
318                         *residuals_present = true;
319                 }
320         }
321
322         if (*residuals_present) {
323                 /* Populate a reduced resv array with only the entries
324                  * that have residuals.
325                  */
326                 cparms.nitems = found;
327                 cparms.size = sizeof(struct tf_rm_resc_entry);
328                 cparms.alignment = 0;
329                 rc = tfp_calloc(&cparms);
330                 if (rc)
331                         return rc;
332
333                 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
334
335                 aparms.rm_db = rm_db;
336                 hparms.rm_db = rm_db;
337                 hparms.hcapi_type = &hcapi_type;
338                 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
339                         if (residuals[i] == 0)
340                                 continue;
341                         aparms.subtype = i;
342                         aparms.info = &info;
343                         rc = tf_rm_get_info(&aparms);
344                         if (rc)
345                                 goto cleanup_all;
346
347                         hparms.subtype = i;
348                         rc = tf_rm_get_hcapi_type(&hparms);
349                         if (rc)
350                                 goto cleanup_all;
351
352                         local_resv[f].type = hcapi_type;
353                         local_resv[f].start = info.entry.start;
354                         local_resv[f].stride = info.entry.stride;
355                         f++;
356                 }
357                 *resv_size = found;
358         }
359
360         tf_rm_log_residuals(rm_db->dir,
361                             rm_db->module,
362                             rm_db->num_entries,
363                             residuals);
364
365         tfp_free((void *)residuals);
366         *resv = local_resv;
367
368         return 0;
369
370  cleanup_all:
371         tfp_free((void *)local_resv);
372         *resv = NULL;
373  cleanup_residuals:
374         tfp_free((void *)residuals);
375
376         return rc;
377 }
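/* Illustrative example (hypothetical counts): if at close time 7 full
 * action records and 2 encap records are still in use, residuals[]
 * holds those two counts, the returned resv array has two entries
 * (hcapi type, start and stride) and residuals_present is set so that
 * tf_rm_free_db() can hand the array to tf_msg_session_resc_flush().
 */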
378
379 /**
380  * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
381  * resource type (HCAPI RM).  These resources have multiple Truflow types which
382  * map to a single HCAPI RM type.  In order to support this, one Truflow type
383  * sharing the HCAPI resources is designated the parent.  All other Truflow
384  * types associated with that HCAPI RM type are designated the children.
385  *
386  * This function updates the resource counts of any HCAPI_BA_PARENT with the
387  * counts of the HCAPI_BA_CHILDREN.  These are read from the alloc_cnt and
388  * written back to the req_cnt.
389  *
390  * [in] cfg
391  *   Pointer to an array of module specific Truflow type indexed RM cfg items
392  *
393  * [in] alloc_cnt
394  *   Pointer to the tf_open_session() configured array of module specific
395  *   Truflow type indexed requested counts.
396  *
397  * [in/out] req_cnt
398  *   Pointer to the location to put the updated resource counts.
399  *
400  * Returns:
401  *     0          - Success
402  *     < 0        - Failure
403  */
404 static int
405 tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
406                                  uint16_t *alloc_cnt,
407                                  uint16_t num_elements,
408                                  uint16_t *req_cnt)
409 {
410         int parent, child;
411
412         /* Search through all the elements */
413         for (parent = 0; parent < num_elements; parent++) {
414                 uint16_t combined_cnt = 0;
415
416                 /* If I am a parent */
417                 if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
418                         /* start with my own count */
419                         RTE_ASSERT(cfg[parent].slices);
420                         combined_cnt =
421                                 alloc_cnt[parent] / cfg[parent].slices;
422
423                         if (alloc_cnt[parent] % cfg[parent].slices)
424                                 combined_cnt++;
425
426                         /* Search again through all the elements */
427                         for (child = 0; child < num_elements; child++) {
428                                 /* If this is one of my children */
429                                 if (cfg[child].cfg_type ==
430                                     TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
431                                     cfg[child].parent_subtype == parent) {
432                                         uint16_t cnt = 0;
433                                         RTE_ASSERT(cfg[child].slices);
434
435                                         /* Increment the parents combined count
436                                          * with each child's count adjusted for
437                                          * number of slices per RM allocated item.
438                                          */
439                                         cnt =
440                                          alloc_cnt[child] / cfg[child].slices;
441
442                                         if (alloc_cnt[child] % cfg[child].slices)
443                                                 cnt++;
444
445                                         combined_cnt += cnt;
446                                         /* Clear the requested child count */
447                                         req_cnt[child] = 0;
448                                 }
449                         }
450                         /* Save the parent count to be requested */
451                         req_cnt[parent] = combined_cnt;
452                 }
453         }
454         return 0;
455 }
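/* Worked example (hypothetical counts): a parent configured with
 * 8 slices and an alloc_cnt of 12 needs 2 RM entries (12 / 8 rounded
 * up). A child of that parent with 4 slices and an alloc_cnt of 6
 * contributes another 2 entries and has its req_cnt cleared, so only
 * the parent is requested from FW, with req_cnt[parent] = 4.
 */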
456
457 int
458 tf_rm_create_db(struct tf *tfp,
459                 struct tf_rm_create_db_parms *parms)
460 {
461         int rc;
462         struct tf_session *tfs;
463         struct tf_dev_info *dev;
464         int i, j;
465         uint16_t max_types, hcapi_items, *req_cnt;
466         struct tfp_calloc_parms cparms;
467         struct tf_rm_resc_req_entry *query;
468         enum tf_rm_resc_resv_strategy resv_strategy;
469         struct tf_rm_resc_req_entry *req;
470         struct tf_rm_resc_entry *resv;
471         struct tf_rm_new_db *rm_db;
472         struct tf_rm_element *db;
473         uint32_t pool_size;
474
475         TF_CHECK_PARMS2(tfp, parms);
476
477         /* Retrieve the session information */
478         rc = tf_session_get_session_internal(tfp, &tfs);
479         if (rc)
480                 return rc;
481
482         /* Retrieve device information */
483         rc = tf_session_get_device(tfs, &dev);
484         if (rc)
485                 return rc;
486
487         /* Need device max number of elements for the RM QCAPS */
488         rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
489         if (rc)
490                 return rc;
491         /* Allocate memory for RM QCAPS request */
492         cparms.nitems = max_types;
493         cparms.size = sizeof(struct tf_rm_resc_req_entry);
494         cparms.alignment = 0;
495         rc = tfp_calloc(&cparms);
496         if (rc)
497                 return rc;
498
499         query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
500
501         /* Get Firmware Capabilities */
502         rc = tf_msg_session_resc_qcaps(tfp,
503                                        dev,
504                                        parms->dir,
505                                        max_types,
506                                        query,
507                                        &resv_strategy);
508         if (rc)
509                 return rc;
510
511         /* Copy requested counts (alloc_cnt) from tf_open_session() to local
512          * copy (req_cnt) so that it can be updated if required.
513          */
514
515         cparms.nitems = parms->num_elements;
516         cparms.size = sizeof(uint16_t);
517         rc = tfp_calloc(&cparms);
518         if (rc)
519                 return rc;
520
521         req_cnt = (uint16_t *)cparms.mem_va;
522
523         tfp_memcpy(req_cnt, parms->alloc_cnt,
524                    parms->num_elements * sizeof(uint16_t));
525
526         /* Update the req_cnt based upon the element configuration
527          */
528         tf_rm_update_parent_reservations(parms->cfg,
529                                          parms->alloc_cnt,
530                                          parms->num_elements,
531                                          req_cnt);
532
533         /* Process capabilities against DB requirements. As a DB can
534          * hold elements that are not HCAPI we can reduce the request
535          * message content by leaving those out, while the DB still
536          * holds them all to provide a fast lookup. Entries with no
537          * requested elements are also removed from the request.
538          */
539         tf_rm_count_hcapi_reservations(parms->dir,
540                                        parms->module,
541                                        parms->cfg,
542                                        req_cnt,
543                                        parms->num_elements,
544                                        &hcapi_items);
545
546         if (hcapi_items == 0) {
547                 TFP_DRV_LOG(ERR,
548                             "%s: module:%s Empty RM DB create request\n",
549                             tf_dir_2_str(parms->dir),
550                             tf_module_2_str(parms->module));
551
552                 parms->rm_db = NULL;
553                 return -ENOMEM;
554         }
555
556         /* Alloc request, alignment already set */
557         cparms.nitems = (size_t)hcapi_items;
558         cparms.size = sizeof(struct tf_rm_resc_req_entry);
559         rc = tfp_calloc(&cparms);
560         if (rc)
561                 return rc;
562         req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
563
564         /* Alloc reservation, alignment and nitems already set */
565         cparms.size = sizeof(struct tf_rm_resc_entry);
566         rc = tfp_calloc(&cparms);
567         if (rc)
568                 return rc;
569         resv = (struct tf_rm_resc_entry *)cparms.mem_va;
570
571         /* Build the request */
572         for (i = 0, j = 0; i < parms->num_elements; i++) {
573                 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
574                 uint16_t hcapi_type = cfg->hcapi_type;
575
576                 /* Only perform reservation for requested entries
577                  */
578                 if (req_cnt[i] == 0)
579                         continue;
580
581                 /* Skip any children in the request */
582                 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
583                     cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
584                     cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
585
586                         /* Verify that we can get the full amount per qcaps.
587                          */
588                         if (req_cnt[i] <= query[hcapi_type].max) {
589                                 req[j].type = hcapi_type;
590                                 req[j].min = req_cnt[i];
591                                 req[j].max = req_cnt[i];
592                                 j++;
593                         } else {
594                                 const char *type_str;
595
596                                 dev->ops->tf_dev_get_resource_str(tfp,
597                                                               hcapi_type,
598                                                               &type_str);
599                                 TFP_DRV_LOG(ERR,
600                                             "Failure, %s:%d:%s req:%d avail:%d\n",
601                                             tf_dir_2_str(parms->dir),
602                                             hcapi_type, type_str,
603                                             req_cnt[i],
604                                             query[hcapi_type].max);
605                                 return -EINVAL;
606                         }
607                 }
608         }
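        /* Illustrative example (hypothetical counts): if the table module
         * only has non-zero req_cnt for full action records (64) and SP
         * SMAC entries (32), req[] ends up with just those two entries.
         * min and max are both set to the requested count because anything
         * less than the full amount is treated as a failure further below.
         */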
609
610         /* Allocate all resources for the module type
611          */
612         rc = tf_msg_session_resc_alloc(tfp,
613                                        dev,
614                                        parms->dir,
615                                        hcapi_items,
616                                        req,
617                                        resv);
618         if (rc)
619                 return rc;
620
621         /* Build the RM DB per the request */
622         cparms.nitems = 1;
623         cparms.size = sizeof(struct tf_rm_new_db);
624         rc = tfp_calloc(&cparms);
625         if (rc)
626                 return rc;
627         rm_db = (void *)cparms.mem_va;
628
629         /* Build the DB within RM DB */
630         cparms.nitems = parms->num_elements;
631         cparms.size = sizeof(struct tf_rm_element);
632         rc = tfp_calloc(&cparms);
633         if (rc)
634                 return rc;
635         rm_db->db = (struct tf_rm_element *)cparms.mem_va;
636
637         db = rm_db->db;
638         for (i = 0, j = 0; i < parms->num_elements; i++) {
639                 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
640                 const char *type_str;
641
642                 dev->ops->tf_dev_get_resource_str(tfp,
643                                                   cfg->hcapi_type,
644                                                   &type_str);
645
646                 db[i].cfg_type = cfg->cfg_type;
647                 db[i].hcapi_type = cfg->hcapi_type;
648
649                 /* Save the parent subtype for later use to find the pool
650                  */
651                 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
652                         db[i].parent_subtype = cfg->parent_subtype;
653
654                 /* If the element didn't request an allocation no need
655                  * to create a pool nor verify if we got a reservation.
656                  */
657                 if (req_cnt[i] == 0)
658                         continue;
659
660                 /* Skip any children or invalid types
661                  */
662                 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
663                     cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
664                     cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
665                         continue;
666
667                 /* If the element had requested an allocation and that
668                  * allocation was a success (full amount) then
669                  * allocate the pool.
670                  */
671                 if (req_cnt[i] == resv[j].stride) {
672                         db[i].alloc.entry.start = resv[j].start;
673                         db[i].alloc.entry.stride = resv[j].stride;
674
675                         /* Only allocate BA pool if a BA type not a child */
676                         if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
677                             cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
678                                 if (cfg->divider) {
679                                         resv[j].stride =
680                                                 resv[j].stride / cfg->divider;
681                                         if (resv[j].stride <= 0) {
682                                                 TFP_DRV_LOG(ERR,
683                                                      "%s:Divide fails:%d:%s\n",
684                                                      tf_dir_2_str(parms->dir),
685                                                      cfg->hcapi_type, type_str);
686                                                 goto fail;
687                                         }
688                                 }
689                                 /* Create pool */
690                                 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
691                                              sizeof(struct bitalloc));
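                                /* Convert the byte size reported by
                                 * BITALLOC_SIZEOF() into a count of
                                 * struct bitalloc sized items, since
                                 * tfp_calloc() below takes an item
                                 * count and an item size.
                                 */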
692                                 /* Alloc request, alignment already set */
693                                 cparms.nitems = pool_size;
694                                 cparms.size = sizeof(struct bitalloc);
695                                 rc = tfp_calloc(&cparms);
696                                 if (rc) {
697                                         TFP_DRV_LOG(ERR,
698                                          "%s: Pool alloc failed, type:%d:%s\n",
699                                          tf_dir_2_str(parms->dir),
700                                          cfg->hcapi_type, type_str);
701                                         goto fail;
702                                 }
703                                 db[i].pool = (struct bitalloc *)cparms.mem_va;
704
705                                 rc = ba_init(db[i].pool,
706                                              resv[j].stride,
707                                              !tf_session_is_shared_session(tfs));
708                                 if (rc) {
709                                         TFP_DRV_LOG(ERR,
710                                           "%s: Pool init failed, type:%d:%s\n",
711                                           tf_dir_2_str(parms->dir),
712                                           cfg->hcapi_type, type_str);
713                                         goto fail;
714                                 }
715                         }
716                         j++;
717                 } else {
718                         /* Bail out as we want what we requested for
719                          * all elements, not any less.
720                          */
721                         TFP_DRV_LOG(ERR,
722                                     "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
723                                     tf_dir_2_str(parms->dir), cfg->hcapi_type,
724                                     type_str, req_cnt[i], resv[j].stride);
725                         goto fail;
726                 }
727         }
728
729         rm_db->num_entries = parms->num_elements;
730         rm_db->dir = parms->dir;
731         rm_db->module = parms->module;
732         *parms->rm_db = (void *)rm_db;
733
734         tfp_free((void *)req);
735         tfp_free((void *)resv);
736         tfp_free((void *)req_cnt);
737         return 0;
738
739  fail:
740         tfp_free((void *)req);
741         tfp_free((void *)resv);
742         tfp_free((void *)db->pool);
743         tfp_free((void *)db);
744         tfp_free((void *)rm_db);
745         tfp_free((void *)req_cnt);
746         parms->rm_db = NULL;
747
748         return -EINVAL;
749 }
750
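/* Note: this mirrors tf_rm_create_db() but retrieves the existing
 * reservation information from FW via tf_msg_session_resc_info()
 * instead of requesting a fresh allocation via
 * tf_msg_session_resc_alloc(), and it does not validate the request
 * against the RM QCAPS maximums. It is presumably intended for
 * sessions, such as shared sessions, that attach to resources
 * already reserved elsewhere.
 */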
751 int
752 tf_rm_create_db_no_reservation(struct tf *tfp,
753                                struct tf_rm_create_db_parms *parms)
754 {
755         int rc;
756         struct tf_session *tfs;
757         struct tf_dev_info *dev;
758         int i, j;
759         uint16_t hcapi_items, *req_cnt;
760         struct tfp_calloc_parms cparms;
761         struct tf_rm_resc_req_entry *req;
762         struct tf_rm_resc_entry *resv;
763         struct tf_rm_new_db *rm_db;
764         struct tf_rm_element *db;
765         uint32_t pool_size;
766
767         TF_CHECK_PARMS2(tfp, parms);
768
769         /* Retrieve the session information */
770         rc = tf_session_get_session_internal(tfp, &tfs);
771         if (rc)
772                 return rc;
773
774         /* Retrieve device information */
775         rc = tf_session_get_device(tfs, &dev);
776         if (rc)
777                 return rc;
778
779         /* Copy requested counts (alloc_cnt) from tf_open_session() to local
780          * copy (req_cnt) so that it can be updated if required.
781          */
782
783         cparms.nitems = parms->num_elements;
784         cparms.size = sizeof(uint16_t);
785         cparms.alignment = 0;
786         rc = tfp_calloc(&cparms);
787         if (rc)
788                 return rc;
789
790         req_cnt = (uint16_t *)cparms.mem_va;
791
792         tfp_memcpy(req_cnt, parms->alloc_cnt,
793                    parms->num_elements * sizeof(uint16_t));
794
795         /* Process capabilities against DB requirements. As a DB can
796          * hold elements that are not HCAPI we can reduce the request
797          * message content by leaving those out, while the DB still
798          * holds them all to provide a fast lookup. Entries with no
799          * requested elements are also removed from the request.
800          */
801         tf_rm_count_hcapi_reservations(parms->dir,
802                                        parms->module,
803                                        parms->cfg,
804                                        req_cnt,
805                                        parms->num_elements,
806                                        &hcapi_items);
807
808         if (hcapi_items == 0) {
809                 TFP_DRV_LOG(ERR,
810                         "%s: module:%s Empty RM DB create request\n",
811                         tf_dir_2_str(parms->dir),
812                         tf_module_2_str(parms->module));
813
814                 parms->rm_db = NULL;
815                 return -ENOMEM;
816         }
817
818         /* Alloc request, alignment already set */
819         cparms.nitems = (size_t)hcapi_items;
820         cparms.size = sizeof(struct tf_rm_resc_req_entry);
821         rc = tfp_calloc(&cparms);
822         if (rc)
823                 return rc;
824         req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
825
826         /* Alloc reservation, alignment and nitems already set */
827         cparms.size = sizeof(struct tf_rm_resc_entry);
828         rc = tfp_calloc(&cparms);
829         if (rc)
830                 return rc;
831         resv = (struct tf_rm_resc_entry *)cparms.mem_va;
832
833         /* Build the request */
834         for (i = 0, j = 0; i < parms->num_elements; i++) {
835                 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
836                 uint16_t hcapi_type = cfg->hcapi_type;
837
838                 /* Only perform reservation for requested entries
839                  */
840                 if (req_cnt[i] == 0)
841                         continue;
842
843                 /* Skip any children in the request */
844                 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
845                     cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
846                     cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
847                         req[j].type = hcapi_type;
848                         req[j].min = req_cnt[i];
849                         req[j].max = req_cnt[i];
850                         j++;
851                 }
852         }
853
854         /* Get all resources info for the module type
855          */
856         rc = tf_msg_session_resc_info(tfp,
857                                       dev,
858                                       parms->dir,
859                                       hcapi_items,
860                                       req,
861                                       resv);
862         if (rc)
863                 return rc;
864
865         /* Build the RM DB per the request */
866         cparms.nitems = 1;
867         cparms.size = sizeof(struct tf_rm_new_db);
868         rc = tfp_calloc(&cparms);
869         if (rc)
870                 return rc;
871         rm_db = (void *)cparms.mem_va;
872
873         /* Build the DB within RM DB */
874         cparms.nitems = parms->num_elements;
875         cparms.size = sizeof(struct tf_rm_element);
876         rc = tfp_calloc(&cparms);
877         if (rc)
878                 return rc;
879         rm_db->db = (struct tf_rm_element *)cparms.mem_va;
880
881         db = rm_db->db;
882         for (i = 0, j = 0; i < parms->num_elements; i++) {
883                 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
884                 const char *type_str;
885
886                 dev->ops->tf_dev_get_resource_str(tfp,
887                                                   cfg->hcapi_type,
888                                                   &type_str);
889
890                 db[i].cfg_type = cfg->cfg_type;
891                 db[i].hcapi_type = cfg->hcapi_type;
892
893                 /* Save the parent subtype for later use to find the pool
894                  */
895                 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
896                         db[i].parent_subtype = cfg->parent_subtype;
897
898                 /* If the element didn't request an allocation no need
899                  * to create a pool nor verify if we got a reservation.
900                  */
901                 if (req_cnt[i] == 0)
902                         continue;
903
904                 /* Skip any children or invalid types
905                  */
906                 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
907                     cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
908                     cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
909                         continue;
910
911                 /* If the element had requested an allocation and that
912                  * allocation was a success (full amount) then
913                  * allocate the pool.
914                  */
915                 if (req_cnt[i] == resv[j].stride) {
916                         db[i].alloc.entry.start = resv[j].start;
917                         db[i].alloc.entry.stride = resv[j].stride;
918
919                         /* Only allocate BA pool if a BA type not a child */
920                         if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
921                             cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
922                                 if (cfg->divider) {
923                                         resv[j].stride =
924                                                 resv[j].stride / cfg->divider;
925                                         if (resv[j].stride <= 0) {
926                                                 TFP_DRV_LOG(ERR,
927                                                      "%s:Divide fails:%d:%s\n",
928                                                      tf_dir_2_str(parms->dir),
929                                                      cfg->hcapi_type, type_str);
930                                                 goto fail;
931                                         }
932                                 }
933                                 /* Create pool */
934                                 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
935                                              sizeof(struct bitalloc));
936                                 /* Alloc request, alignment already set */
937                                 cparms.nitems = pool_size;
938                                 cparms.size = sizeof(struct bitalloc);
939                                 rc = tfp_calloc(&cparms);
940                                 if (rc) {
941                                         TFP_DRV_LOG(ERR,
942                                          "%s: Pool alloc failed, type:%d:%s\n",
943                                          tf_dir_2_str(parms->dir),
944                                          cfg->hcapi_type, type_str);
945                                         goto fail;
946                                 }
947                                 db[i].pool = (struct bitalloc *)cparms.mem_va;
948
949                                 rc = ba_init(db[i].pool,
950                                              resv[j].stride,
951                                              !tf_session_is_shared_session(tfs));
952                                 if (rc) {
953                                         TFP_DRV_LOG(ERR,
954                                           "%s: Pool init failed, type:%d:%s\n",
955                                           tf_dir_2_str(parms->dir),
956                                           cfg->hcapi_type, type_str);
957                                         goto fail;
958                                 }
959                         }
960                         j++;
961                 } else {
962                         /* Bail out as we want what we requested for
963                          * all elements, not any less.
964                          */
965                         TFP_DRV_LOG(ERR,
966                                     "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
967                                     tf_dir_2_str(parms->dir), cfg->hcapi_type,
968                                     type_str, req_cnt[i], resv[j].stride);
969                         goto fail;
970                 }
971         }
972
973         rm_db->num_entries = parms->num_elements;
974         rm_db->dir = parms->dir;
975         rm_db->module = parms->module;
976         *parms->rm_db = (void *)rm_db;
977
978         tfp_free((void *)req);
979         tfp_free((void *)resv);
980         tfp_free((void *)req_cnt);
981         return 0;
982
983  fail:
984         tfp_free((void *)req);
985         tfp_free((void *)resv);
986         tfp_free((void *)db->pool);
987         tfp_free((void *)db);
988         tfp_free((void *)rm_db);
989         tfp_free((void *)req_cnt);
990         parms->rm_db = NULL;
991
992         return -EINVAL;
993 }
994 int
995 tf_rm_free_db(struct tf *tfp,
996               struct tf_rm_free_db_parms *parms)
997 {
998         int rc;
999         int i;
1000         uint16_t resv_size = 0;
1001         struct tf_rm_new_db *rm_db;
1002         struct tf_rm_resc_entry *resv;
1003         bool residuals_found = false;
1004
1005         TF_CHECK_PARMS2(parms, parms->rm_db);
1006
1007         /* Device unbind happens when the TF Session is closed and the
1008          * session ref count is 0. Device unbind will clean up each of
1009          * its supported modules, e.g. the Identifier module, and thus
1010          * we end up here to close the DB.
1011          *
1012          * On TF Session close it is assumed that the session has already
1013          * cleaned up all its resources, individually, while
1014          * destroying its flows.
1015          *
1016          * To assist in the 'cleanup checking' the DB is checked for any
1017          * remaining elements and logged if found to be the case.
1018          *
1019          * Any such elements will need to be 'cleared' ahead of
1020          * returning the resources to the HCAPI RM.
1021          *
1022          * RM will signal FW to flush the DB resources. FW will
1023          * perform the invalidation. TF Session close will return the
1024          * previous allocated elements to the RM and then close the
1025          * HCAPI RM registration. That then saves several 'free' msgs
1026          * from being required.
1027          */
1028
1029         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1030
1031         /* Check for residuals that the client didn't clean up */
1032         rc = tf_rm_check_residuals(rm_db,
1033                                    &resv_size,
1034                                    &resv,
1035                                    &residuals_found);
1036         if (rc)
1037                 return rc;
1038
1039         /* Invalidate any residuals followed by a DB traversal for
1040          * pool cleanup.
1041          */
1042         if (residuals_found) {
1043                 rc = tf_msg_session_resc_flush(tfp,
1044                                                parms->dir,
1045                                                resv_size,
1046                                                resv);
1047                 tfp_free((void *)resv);
1048                 /* On failure we still have to clean up, so we can only
1049                  * log that FW failed.
1050                  */
1051                 if (rc)
1052                         TFP_DRV_LOG(ERR,
1053                                     "%s: Internal Flush error, module:%s\n",
1054                                     tf_dir_2_str(parms->dir),
1055                                     tf_module_2_str(rm_db->module));
1056         }
1057
1058         /* No need to check the configuration type; even if an element
1059          * has no BA pool, freeing a NULL pointer is harmless.
1060          */
1061         for (i = 0; i < rm_db->num_entries; i++)
1062                 tfp_free((void *)rm_db->db[i].pool);
1063
1064         tfp_free((void *)parms->rm_db);
1065
1066         return rc;
1067 }
1068 /**
1069  * Get the bit allocator pool associated with the subtype and the db
1070  *
1071  * [in] rm_db
1072  *   Pointer to the DB
1073  *
1074  * [in] subtype
1075  *   Module subtype used to index into the module specific database.
1076  *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1077  *   module subtype of TF_MODULE_TYPE_TABLE.
1078  *
1079  * [out] pool
1080  *   Pointer to the bit allocator pool used
1081  *
1082  * [out] new_subtype
1083  *   Pointer to the subtype of the actual pool used
1084  * Returns:
1085  *     0          - Success
1086  *   - ENOTSUP    - Operation not supported
1087  */
1088 static int
1089 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1090                uint16_t subtype,
1091                struct bitalloc **pool,
1092                uint16_t *new_subtype)
1093 {
1094         int rc = 0;
1095         uint16_t tmp_subtype = subtype;
1096
1097         /* If we are a child, get the parent table index */
1098         if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1099                 tmp_subtype = rm_db->db[subtype].parent_subtype;
1100
1101         *pool = rm_db->db[tmp_subtype].pool;
1102
1103         /* Bail out if the pool is not valid, should never happen */
1104         if (rm_db->db[tmp_subtype].pool == NULL) {
1105                 rc = -ENOTSUP;
1106                 TFP_DRV_LOG(ERR,
1107                             "%s: Invalid pool for this type:%d, rc:%s\n",
1108                             tf_dir_2_str(rm_db->dir),
1109                             tmp_subtype,
1110                             strerror(-rc));
1111                 return rc;
1112         }
1113         *new_subtype = tmp_subtype;
1114         return rc;
1115 }
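/* Illustrative example: a TF_RM_ELEM_CFG_HCAPI_BA_CHILD subtype has no
 * pool of its own, so the lookup is redirected to its parent_subtype.
 * The parent's pool is returned along with new_subtype so that callers
 * adjust indices against the parent's allocation base.
 */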
1116
1117 int
1118 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1119 {
1120         int rc;
1121         int id;
1122         uint32_t index;
1123         struct tf_rm_new_db *rm_db;
1124         enum tf_rm_elem_cfg_type cfg_type;
1125         struct bitalloc *pool;
1126         uint16_t subtype;
1127
1128         TF_CHECK_PARMS2(parms, parms->rm_db);
1129
1130         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1131         TF_CHECK_PARMS1(rm_db->db);
1132
1133         cfg_type = rm_db->db[parms->subtype].cfg_type;
1134
1135         /* Bail out if not controlled by RM */
1136         if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1137             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1138             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1139                 return -ENOTSUP;
1140
1141         rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1142         if (rc)
1143                 return rc;
1144         /*
1145          * priority  0: allocate from the top of the TCAM, i.e. high
1146          * priority !0: allocate the index from the bottom, i.e. lowest
1147          */
1148         if (parms->priority)
1149                 id = ba_alloc_reverse(pool);
1150         else
1151                 id = ba_alloc(pool);
1152         if (id == BA_FAIL) {
1153                 rc = -ENOMEM;
1154                 TFP_DRV_LOG(ERR,
1155                             "%s: Allocation failed, rc:%s\n",
1156                             tf_dir_2_str(rm_db->dir),
1157                             strerror(-rc));
1158                 return rc;
1159         }
1160
1161         /* Adjust for any non zero start value */
1162         rc = tf_rm_adjust_index(rm_db->db,
1163                                 TF_RM_ADJUST_ADD_BASE,
1164                                 subtype,
1165                                 id,
1166                                 &index);
1167         if (rc) {
1168                 TFP_DRV_LOG(ERR,
1169                             "%s: Alloc adjust of base index failed, rc:%s\n",
1170                             tf_dir_2_str(rm_db->dir),
1171                             strerror(-rc));
1172                 return -EINVAL;
1173         }
1174
1175         *parms->index = index;
1176         if (parms->base_index)
1177                 *parms->base_index = id;
1178
1179         return rc;
1180 }
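/* Caller sketch (hypothetical module code, names are illustrative):
 *
 *   struct tf_rm_allocate_parms aparms = { 0 };
 *   uint32_t idx;
 *
 *   aparms.rm_db = tbl_db;     // DB built earlier by tf_rm_create_db()
 *   aparms.subtype = TF_TBL_TYPE_FULL_ACT_RECORD;
 *   aparms.priority = 0;       // see the priority comment above
 *   aparms.index = &idx;       // receives the base-adjusted index
 *   rc = tf_rm_allocate(&aparms);
 */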
1181
1182 int
1183 tf_rm_free(struct tf_rm_free_parms *parms)
1184 {
1185         int rc;
1186         uint32_t adj_index;
1187         struct tf_rm_new_db *rm_db;
1188         enum tf_rm_elem_cfg_type cfg_type;
1189         struct bitalloc *pool;
1190         uint16_t subtype;
1191
1192         TF_CHECK_PARMS2(parms, parms->rm_db);
1193         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1194         TF_CHECK_PARMS1(rm_db->db);
1195
1196         cfg_type = rm_db->db[parms->subtype].cfg_type;
1197
1198         /* Bail out if not controlled by RM */
1199         if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1200             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1201             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1202                 return -ENOTSUP;
1203
1204         rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1205         if (rc)
1206                 return rc;
1207
1208         /* Adjust for any non zero start value */
1209         rc = tf_rm_adjust_index(rm_db->db,
1210                                 TF_RM_ADJUST_RM_BASE,
1211                                 subtype,
1212                                 parms->index,
1213                                 &adj_index);
1214         if (rc)
1215                 return rc;
1216
1217         rc = ba_free(pool, adj_index);
1218         /* No logging; the direction needed for logging is not available here */
1219         if (rc)
1220                 return rc;
1221
1222         return rc;
1223 }
1224
1225 int
1226 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1227 {
1228         int rc;
1229         uint32_t adj_index;
1230         struct tf_rm_new_db *rm_db;
1231         enum tf_rm_elem_cfg_type cfg_type;
1232         struct bitalloc *pool;
1233         uint16_t subtype;
1234
1235         TF_CHECK_PARMS2(parms, parms->rm_db);
1236         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1237         TF_CHECK_PARMS1(rm_db->db);
1238
1239         cfg_type = rm_db->db[parms->subtype].cfg_type;
1240
1241
1242         /* Bail out if not controlled by RM */
1243         if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1244             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1245             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1246                 return -ENOTSUP;
1247
1248         rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1249         if (rc)
1250                 return rc;
1251
1252         /* Adjust for any non zero start value */
1253         rc = tf_rm_adjust_index(rm_db->db,
1254                                 TF_RM_ADJUST_RM_BASE,
1255                                 subtype,
1256                                 parms->index,
1257                                 &adj_index);
1258         if (rc)
1259                 return rc;
1260
1261         if (parms->base_index)
1262                 *parms->base_index = adj_index;
1263         *parms->allocated = ba_inuse(pool, adj_index);
1264
1265         return rc;
1266 }
1267
1268 int
1269 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1270 {
1271         struct tf_rm_new_db *rm_db;
1272         enum tf_rm_elem_cfg_type cfg_type;
1273
1274         TF_CHECK_PARMS2(parms, parms->rm_db);
1275         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1276         TF_CHECK_PARMS1(rm_db->db);
1277
1278         cfg_type = rm_db->db[parms->subtype].cfg_type;
1279
1280         /* Bail out if not controlled by HCAPI */
1281         if (cfg_type == TF_RM_ELEM_CFG_NULL)
1282                 return -ENOTSUP;
1283
1284         memcpy(parms->info,
1285                &rm_db->db[parms->subtype].alloc,
1286                sizeof(struct tf_rm_alloc_info));
1287
1288         return 0;
1289 }
1290
1291 int
1292 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1293 {
1294         struct tf_rm_new_db *rm_db;
1295         enum tf_rm_elem_cfg_type cfg_type;
1296         struct tf_rm_alloc_info *info = parms->info;
1297         int i;
1298
1299         TF_CHECK_PARMS2(parms, parms->rm_db);
1300         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1301         TF_CHECK_PARMS1(rm_db->db);
1302
1303         for (i = 0; i < size; i++) {
1304                 cfg_type = rm_db->db[i].cfg_type;
1305
1306                 /* Bail out if not controlled by HCAPI */
1307                 if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1308                         info++;
1309                         continue;
1310                 }
1311
1312                 memcpy(info,
1313                        &rm_db->db[i].alloc,
1314                        sizeof(struct tf_rm_alloc_info));
1315                 info++;
1316         }
1317
1318         return 0;
1319 }
1320
1321 int
1322 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1323 {
1324         struct tf_rm_new_db *rm_db;
1325         enum tf_rm_elem_cfg_type cfg_type;
1326
1327         TF_CHECK_PARMS2(parms, parms->rm_db);
1328         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1329         TF_CHECK_PARMS1(rm_db->db);
1330
1331         cfg_type = rm_db->db[parms->subtype].cfg_type;
1332
1333         /* Bail out if not controlled by HCAPI */
1334         if (cfg_type == TF_RM_ELEM_CFG_NULL)
1335                 return -ENOTSUP;
1336
1337         *parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
1338
1339         return 0;
1340 }
1341
1342 int
1343 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1344 {
1345         int rc = 0;
1346         struct tf_rm_new_db *rm_db;
1347         enum tf_rm_elem_cfg_type cfg_type;
1348
1349         TF_CHECK_PARMS2(parms, parms->rm_db);
1350         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1351         TF_CHECK_PARMS1(rm_db->db);
1352
1353         cfg_type = rm_db->db[parms->subtype].cfg_type;
1354
1355         /* Bail out if not a BA pool */
1356         if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1357             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1358             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1359                 return -ENOTSUP;
1360
1361         /* Bail silently (no logging); if the pool is not valid there
1362          * were no elements allocated for it.
1363          */
1364         if (rm_db->db[parms->subtype].pool == NULL) {
1365                 *parms->count = 0;
1366                 return 0;
1367         }
1368
1369         *parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1370
1371         return rc;
1372 }
1373 /* Only used for table bulk get at this time
1374  */
1375 int
1376 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1377 {
1378         struct tf_rm_new_db *rm_db;
1379         enum tf_rm_elem_cfg_type cfg_type;
1380         uint32_t base_index;
1381         uint32_t stride;
1382         int rc = 0;
1383         struct bitalloc *pool;
1384         uint16_t subtype;
1385
1386         TF_CHECK_PARMS2(parms, parms->rm_db);
1387         rm_db = (struct tf_rm_new_db *)parms->rm_db;
1388         TF_CHECK_PARMS1(rm_db->db);
1389
1390         cfg_type = rm_db->db[parms->subtype].cfg_type;
1391
1392         /* Bail out if not a BA pool */
1393         if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1394             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1395             cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1396                 return -ENOTSUP;
1397
1398         rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1399         if (rc)
1400                 return rc;
1401
1402         base_index = rm_db->db[subtype].alloc.entry.start;
1403         stride = rm_db->db[subtype].alloc.entry.stride;
1404
1405         if (parms->starting_index < base_index ||
1406             parms->starting_index + parms->num_entries > base_index + stride)
1407                 return -EINVAL;
1408
1409         return rc;
1410 }
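/* Illustrative example (hypothetical values): for an element reserved
 * with start = 1000 and stride = 512 the valid range is [1000, 1512).
 * A bulk get of 16 entries starting at index 1490 passes, while one
 * starting at index 1500 is rejected with -EINVAL (1500 + 16 > 1512).
 */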