1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
8 #include <rte_common.h>
11 #include <cfa_resource_types.h>
14 #include "tf_common.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
22 * Generic RM Element data type that an RM DB is built upon.
24 struct tf_rm_element {
26 * RM Element configuration type. If Private then the
27 * hcapi_type can be ignored. If Null then the element is not
28 * valid for the device.
30 enum tf_rm_elem_cfg_type cfg_type;
33 * HCAPI RM Type for the element.
38 * HCAPI RM allocated range information for the element.
40 struct tf_rm_alloc_info alloc;
43 * If cfg_type == HCAPI_BA_CHILD, this field indicates
44 * the parent module subtype for look up into the parent pool.
45 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
46 * module subtype of TF_MODULE_TYPE_TABLE.
48 uint16_t parent_subtype;
51 * Bit allocator pool for the element. Pool size is controlled
52 * by the struct tf_session_resources at time of session creation.
53 * Null indicates that the pool is not used for the element.
55 struct bitalloc *pool;
/* NOTE(review): the fields below are members of the RM DB descriptor
 * (struct tf_rm_new_db); they are accessed throughout this file as
 * rm_db->num_entries, rm_db->dir, rm_db->module and rm_db->db.
 */
63 * Number of elements in the DB
68 * Direction this DB controls.
73 * Module type, used for logging purposes.
75 enum tf_module_type module;
78 * The DB consists of an array of elements
80 struct tf_rm_element *db;
84 * Count the HCAPI reservations requested for a module.
86 * Scans the module's element configuration array and counts the
87 * entries that are valid for the device (cfg_type != NULL) and have
88 * a non-zero requested reservation count.
91 * Pointer to the DB configuration
94 * Pointer to the allocation values associated with the module
97 * Number of DB configuration elements
100 * Number of HCAPI entries with a reservation value greater than 0
104 * - EOPNOTSUPP - Operation not supported
107 tf_rm_count_hcapi_reservations(enum tf_dir dir,
108 enum tf_module_type module,
109 struct tf_rm_element_cfg *cfg,
110 uint16_t *reservations,
112 uint16_t *valid_count)
117 for (i = 0; i < count; i++) {
/* Count entries that are device-valid and actually requested. */
118 if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
122 /* Only log msg if a type is attempted reserved and
123 * not supported. We ignore EM module as its using a
124 * split configuration array thus it would fail for
125 * this type of check.
127 if (module != TF_MODULE_TYPE_EM &&
128 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
129 reservations[i] > 0) {
131 "%s, %s, %s allocation of %d not supported\n",
132 tf_module_2_str(module),
134 tf_module_subtype_2_str(module, i),
143 * Resource Manager Adjust of base index definitions.
/* Direction of the base adjustment applied by tf_rm_adjust_index(). */
145 enum tf_rm_adjust_type {
146 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
147 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
151 * Adjust an index according to the allocation information.
153 * All resources are controlled in a 0 based pool. Some resources, by
154 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
155 * need to be adjusted before they are handed out.
158 * Pointer to the db, used for the lookup
164 * TF module subtype used as an index into the database.
165 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
166 * module subtype of TF_MODULE_TYPE_TABLE.
176 * - EOPNOTSUPP - Operation not supported
179 tf_rm_adjust_index(struct tf_rm_element *db,
180 enum tf_rm_adjust_type action,
/* The HW start offset of this element's allocated range. */
188 base_index = db[subtype].alloc.entry.start;
191 case TF_RM_ADJUST_RM_BASE:
192 *adj_index = index - base_index;
194 case TF_RM_ADJUST_ADD_BASE:
195 *adj_index = index + base_index;
205 * Performs a check of the passed in DB for any lingering elements. If
206 * a resource type was found to not have been cleaned up by the caller
207 * then its residual values are recorded, logged and passed back in an
208 * allocate reservation array that the caller can pass to the FW for
212 * Pointer to the db, used for the lookup
215 * Pointer to the reservation size of the generated reservation
219 * Pointer Pointer to a reservation array. The reservation array is
220 * allocated after the residual scan and holds any found residual
221 * entries. Thus it can be smaller than the DB that the check was
222 * performed on. Array must be freed by the caller.
224 * [out] residuals_present
225 * Pointer to a bool flag indicating if residual was present in the
230 * - EOPNOTSUPP - Operation not supported
233 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
235 struct tf_rm_resc_entry **resv,
236 bool *residuals_present)
243 uint16_t *residuals = NULL;
245 struct tf_rm_get_inuse_count_parms iparms;
246 struct tf_rm_get_alloc_info_parms aparms;
247 struct tf_rm_get_hcapi_parms hparms;
248 struct tf_rm_alloc_info info;
249 struct tfp_calloc_parms cparms;
250 struct tf_rm_resc_entry *local_resv = NULL;
252 /* Create array to hold the entries that have residuals */
253 cparms.nitems = rm_db->num_entries;
254 cparms.size = sizeof(uint16_t);
255 cparms.alignment = 0;
256 rc = tfp_calloc(&cparms);
260 residuals = (uint16_t *)cparms.mem_va;
262 /* Traverse the DB and collect any residual elements */
263 iparms.rm_db = rm_db;
264 iparms.count = &count;
265 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
267 rc = tf_rm_get_inuse_count(&iparms);
268 /* Not a device supported entry, just skip */
/* Error path: frees the scratch residuals array before returning. */
272 goto cleanup_residuals;
276 residuals[i] = count;
277 *residuals_present = true;
281 if (*residuals_present) {
282 /* Populate a reduced resv array with only the entries
283 * that have residuals.
285 cparms.nitems = found;
286 cparms.size = sizeof(struct tf_rm_resc_entry);
287 cparms.alignment = 0;
288 rc = tfp_calloc(&cparms);
292 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
294 aparms.rm_db = rm_db;
295 hparms.rm_db = rm_db;
296 hparms.hcapi_type = &hcapi_type;
297 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
298 if (residuals[i] == 0)
302 rc = tf_rm_get_info(&aparms);
307 rc = tf_rm_get_hcapi_type(&hparms);
/* Record the residual entry in HCAPI terms for the FW flush. */
311 local_resv[f].type = hcapi_type;
312 local_resv[f].start = info.entry.start;
313 local_resv[f].stride = info.entry.stride;
/* Success path: scratch array freed; local_resv ownership passes to
 * the caller via *resv. Cleanup labels below free on failure only.
 */
319 tfp_free((void *)residuals);
325 tfp_free((void *)local_resv);
328 tfp_free((void *)residuals);
334 * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
335 * resource type (HCAPI RM). These resources have multiple Truflow types which
336 * map to a single HCAPI RM type. In order to support this, one Truflow type
337 * sharing the HCAPI resources is designated the parent. All other Truflow
338 * types associated with that HCAPI RM type are designated the children.
340 * This function updates the resource counts of any HCAPI_BA_PARENT with the
341 * counts of the HCAPI_BA_CHILDREN. These are read from the alloc_cnt and
342 * written back to the req_cnt.
345 * Pointer to an array of module specific Truflow type indexed RM cfg items
348 * Pointer to the tf_open_session() configured array of module specific
349 * Truflow type indexed requested counts.
352 * Pointer to the location to put the updated resource counts.
356 * - - Failure if negative
359 tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
361 uint16_t num_elements,
366 /* Search through all the elements */
367 for (parent = 0; parent < num_elements; parent++) {
368 uint16_t combined_cnt = 0;
370 /* If I am a parent */
371 if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
372 /* start with my own count */
373 RTE_ASSERT(cfg[parent].slices);
375 alloc_cnt[parent] / cfg[parent].slices;
/* Non-exact division: a remainder means one extra RM entry is
 * needed to hold the partial slice (round-up).
 */
377 if (alloc_cnt[parent] % cfg[parent].slices)
380 /* Search again through all the elements */
381 for (child = 0; child < num_elements; child++) {
382 /* If this is one of my children */
383 if (cfg[child].cfg_type ==
384 TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
385 cfg[child].parent_subtype == parent) {
387 RTE_ASSERT(cfg[child].slices);
389 /* Increment the parents combined count
390 * with each child's count adjusted for
391 * number of slices per RM allocated item.
394 alloc_cnt[child] / cfg[child].slices;
396 if (alloc_cnt[child] % cfg[child].slices)
400 /* Clear the requested child count */
404 /* Save the parent count to be requested */
405 req_cnt[parent] = combined_cnt;
/*
 * Create an RM DB for a module: queries FW capabilities (QCAPS), folds
 * child counts into parent reservations, requests the resources from FW
 * (tf_msg_session_resc_alloc) and builds the per-element DB including
 * bit-allocator pools for BA-typed elements.
 */
412 tf_rm_create_db(struct tf *tfp,
413 struct tf_rm_create_db_parms *parms)
416 struct tf_session *tfs;
417 struct tf_dev_info *dev;
419 uint16_t max_types, hcapi_items, *req_cnt;
420 struct tfp_calloc_parms cparms;
421 struct tf_rm_resc_req_entry *query;
422 enum tf_rm_resc_resv_strategy resv_strategy;
423 struct tf_rm_resc_req_entry *req;
424 struct tf_rm_resc_entry *resv;
425 struct tf_rm_new_db *rm_db;
426 struct tf_rm_element *db;
429 TF_CHECK_PARMS2(tfp, parms);
431 /* Retrieve the session information */
432 rc = tf_session_get_session_internal(tfp, &tfs);
436 /* Retrieve device information */
437 rc = tf_session_get_device(tfs, &dev);
441 /* Need device max number of elements for the RM QCAPS */
442 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
445 /* Allocate memory for RM QCAPS request */
446 cparms.nitems = max_types;
447 cparms.size = sizeof(struct tf_rm_resc_req_entry);
448 cparms.alignment = 0;
449 rc = tfp_calloc(&cparms);
453 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
455 /* Get Firmware Capabilities */
456 rc = tf_msg_session_resc_qcaps(tfp,
465 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
466 * copy (req_cnt) so that it can be updated if required.
469 cparms.nitems = parms->num_elements;
470 cparms.size = sizeof(uint16_t);
471 rc = tfp_calloc(&cparms);
475 req_cnt = (uint16_t *)cparms.mem_va;
477 tfp_memcpy(req_cnt, parms->alloc_cnt,
478 parms->num_elements * sizeof(uint16_t));
480 /* Update the req_cnt based upon the element configuration
482 tf_rm_update_parent_reservations(parms->cfg,
487 /* Process capabilities against DB requirements. However, as a
488 * DB can hold elements that are not HCAPI we can reduce the
489 * req msg content by removing those out of the request yet
490 * the DB holds them all as to give a fast lookup. We can also
491 * remove entries where there are no request for elements.
493 tf_rm_count_hcapi_reservations(parms->dir,
500 if (hcapi_items == 0) {
505 /* Alloc request, alignment already set */
506 cparms.nitems = (size_t)hcapi_items;
507 cparms.size = sizeof(struct tf_rm_resc_req_entry);
508 rc = tfp_calloc(&cparms);
511 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
513 /* Alloc reservation, alignment and nitems already set */
514 cparms.size = sizeof(struct tf_rm_resc_entry);
515 rc = tfp_calloc(&cparms);
518 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
520 /* Build the request */
521 for (i = 0, j = 0; i < parms->num_elements; i++) {
522 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
523 uint16_t hcapi_type = cfg->hcapi_type;
525 /* Only perform reservation for requested entries
530 /* Skip any children in the request */
531 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
532 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
533 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
535 /* Verify that we can get the full amount per qcaps.
537 if (req_cnt[i] <= query[hcapi_type].max) {
538 req[j].type = hcapi_type;
539 req[j].min = req_cnt[i];
540 req[j].max = req_cnt[i];
543 const char *type_str;
545 dev->ops->tf_dev_get_resource_str(tfp,
549 "Failure, %s:%d:%s req:%d avail:%d\n",
550 tf_dir_2_str(parms->dir),
551 hcapi_type, type_str,
553 query[hcapi_type].max);
559 /* Allocate all resources for the module type
561 rc = tf_msg_session_resc_alloc(tfp,
570 /* Build the RM DB per the request */
572 cparms.size = sizeof(struct tf_rm_new_db);
573 rc = tfp_calloc(&cparms);
576 rm_db = (void *)cparms.mem_va;
578 /* Build the DB within RM DB */
579 cparms.nitems = parms->num_elements;
580 cparms.size = sizeof(struct tf_rm_element);
581 rc = tfp_calloc(&cparms);
584 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
587 for (i = 0, j = 0; i < parms->num_elements; i++) {
588 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
589 const char *type_str;
591 dev->ops->tf_dev_get_resource_str(tfp,
595 db[i].cfg_type = cfg->cfg_type;
596 db[i].hcapi_type = cfg->hcapi_type;
598 /* Save the parent subtype for later use to find the pool
600 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
601 db[i].parent_subtype = cfg->parent_subtype;
603 /* If the element didn't request an allocation no need
604 * to create a pool nor verify if we got a reservation.
609 /* Skip any children or invalid
611 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
612 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
613 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
616 /* If the element had requested an allocation and that
617 * allocation was a success (full amount) then
620 if (req_cnt[i] == resv[j].stride) {
621 db[i].alloc.entry.start = resv[j].start;
622 db[i].alloc.entry.stride = resv[j].stride;
624 /* Only allocate BA pool if a BA type not a child */
625 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
626 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
628 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
629 sizeof(struct bitalloc));
630 /* Alloc request, alignment already set */
631 cparms.nitems = pool_size;
632 cparms.size = sizeof(struct bitalloc);
633 rc = tfp_calloc(&cparms);
636 "%s: Pool alloc failed, type:%d:%s\n",
637 tf_dir_2_str(parms->dir),
638 cfg->hcapi_type, type_str);
641 db[i].pool = (struct bitalloc *)cparms.mem_va;
643 rc = ba_init(db[i].pool,
645 !tf_session_is_shared_session(tfs));
648 "%s: Pool init failed, type:%d:%s\n",
649 tf_dir_2_str(parms->dir),
650 cfg->hcapi_type, type_str);
656 /* Bail out as we want what we requested for
657 * all elements, not any less.
660 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
661 tf_dir_2_str(parms->dir), cfg->hcapi_type,
662 type_str, req_cnt[i], resv[j].stride);
667 rm_db->num_entries = parms->num_elements;
668 rm_db->dir = parms->dir;
669 rm_db->module = parms->module;
670 *parms->rm_db = (void *)rm_db;
672 tfp_free((void *)req);
673 tfp_free((void *)resv);
674 tfp_free((void *)req_cnt);
/* NOTE(review): this failure path frees only db->pool, i.e. the pool of
 * element 0; pools allocated for later elements appear to leak here.
 * Also verify 'db' cannot be reached uninitialized via an early goto
 * taken before rm_db->db is allocated — TODO confirm.
 */
678 tfp_free((void *)req);
679 tfp_free((void *)resv);
680 tfp_free((void *)db->pool);
681 tfp_free((void *)db);
682 tfp_free((void *)rm_db);
683 tfp_free((void *)req_cnt);
/*
 * Create an RM DB for a module without a FW reservation negotiation:
 * unlike tf_rm_create_db() there is no QCAPS bound check; resource info
 * is obtained via tf_msg_session_resc_info() and the per-element DB and
 * BA pools are built from it.
 */
690 tf_rm_create_db_no_reservation(struct tf *tfp,
691 struct tf_rm_create_db_parms *parms)
694 struct tf_session *tfs;
695 struct tf_dev_info *dev;
697 uint16_t hcapi_items, *req_cnt;
698 struct tfp_calloc_parms cparms;
699 struct tf_rm_resc_req_entry *req;
700 struct tf_rm_resc_entry *resv;
701 struct tf_rm_new_db *rm_db;
702 struct tf_rm_element *db;
705 TF_CHECK_PARMS2(tfp, parms);
707 /* Retrieve the session information */
708 rc = tf_session_get_session_internal(tfp, &tfs);
712 /* Retrieve device information */
713 rc = tf_session_get_device(tfs, &dev);
717 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
718 * copy (req_cnt) so that it can be updated if required.
721 cparms.nitems = parms->num_elements;
722 cparms.size = sizeof(uint16_t);
723 cparms.alignment = 0;
724 rc = tfp_calloc(&cparms);
728 req_cnt = (uint16_t *)cparms.mem_va;
730 tfp_memcpy(req_cnt, parms->alloc_cnt,
731 parms->num_elements * sizeof(uint16_t));
733 /* Process capabilities against DB requirements. However, as a
734 * DB can hold elements that are not HCAPI we can reduce the
735 * req msg content by removing those out of the request yet
736 * the DB holds them all as to give a fast lookup. We can also
737 * remove entries where there are no request for elements.
739 tf_rm_count_hcapi_reservations(parms->dir,
746 if (hcapi_items == 0) {
748 "%s: module:%s Empty RM DB create request\n",
749 tf_dir_2_str(parms->dir),
750 tf_module_2_str(parms->module));
756 /* Alloc request, alignment already set */
757 cparms.nitems = (size_t)hcapi_items;
758 cparms.size = sizeof(struct tf_rm_resc_req_entry);
759 rc = tfp_calloc(&cparms);
762 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
764 /* Alloc reservation, alignment and nitems already set */
765 cparms.size = sizeof(struct tf_rm_resc_entry);
766 rc = tfp_calloc(&cparms);
769 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
771 /* Build the request */
772 for (i = 0, j = 0; i < parms->num_elements; i++) {
773 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
774 uint16_t hcapi_type = cfg->hcapi_type;
776 /* Only perform reservation for requested entries
781 /* Skip any children in the request */
782 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
783 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
784 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
785 req[j].type = hcapi_type;
786 req[j].min = req_cnt[i];
787 req[j].max = req_cnt[i];
792 /* Get all resources info for the module type
794 rc = tf_msg_session_resc_info(tfp,
803 /* Build the RM DB per the request */
805 cparms.size = sizeof(struct tf_rm_new_db);
806 rc = tfp_calloc(&cparms);
809 rm_db = (void *)cparms.mem_va;
811 /* Build the DB within RM DB */
812 cparms.nitems = parms->num_elements;
813 cparms.size = sizeof(struct tf_rm_element);
814 rc = tfp_calloc(&cparms);
817 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
820 for (i = 0, j = 0; i < parms->num_elements; i++) {
821 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
822 const char *type_str;
824 dev->ops->tf_dev_get_resource_str(tfp,
828 db[i].cfg_type = cfg->cfg_type;
829 db[i].hcapi_type = cfg->hcapi_type;
831 /* Save the parent subtype for later use to find the pool
833 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
834 db[i].parent_subtype = cfg->parent_subtype;
836 /* If the element didn't request an allocation no need
837 * to create a pool nor verify if we got a reservation.
842 /* Skip any children or invalid
844 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
845 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
846 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
849 /* If the element had requested an allocation and that
850 * allocation was a success (full amount) then
853 if (req_cnt[i] == resv[j].stride) {
854 db[i].alloc.entry.start = resv[j].start;
855 db[i].alloc.entry.stride = resv[j].stride;
857 /* Only allocate BA pool if a BA type not a child */
858 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
859 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
861 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
862 sizeof(struct bitalloc));
863 /* Alloc request, alignment already set */
864 cparms.nitems = pool_size;
865 cparms.size = sizeof(struct bitalloc);
866 rc = tfp_calloc(&cparms);
869 "%s: Pool alloc failed, type:%d:%s\n",
870 tf_dir_2_str(parms->dir),
871 cfg->hcapi_type, type_str);
874 db[i].pool = (struct bitalloc *)cparms.mem_va;
876 rc = ba_init(db[i].pool,
878 !tf_session_is_shared_session(tfs));
881 "%s: Pool init failed, type:%d:%s\n",
882 tf_dir_2_str(parms->dir),
883 cfg->hcapi_type, type_str);
889 /* Bail out as we want what we requested for
890 * all elements, not any less.
893 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
894 tf_dir_2_str(parms->dir), cfg->hcapi_type,
895 type_str, req_cnt[i], resv[j].stride);
900 rm_db->num_entries = parms->num_elements;
901 rm_db->dir = parms->dir;
902 rm_db->module = parms->module;
903 *parms->rm_db = (void *)rm_db;
905 tfp_free((void *)req);
906 tfp_free((void *)resv);
907 tfp_free((void *)req_cnt);
/* NOTE(review): same concern as tf_rm_create_db(): only db->pool
 * (element 0's pool) is freed on failure; later elements' pools appear
 * to leak, and 'db' may be reachable uninitialized — TODO confirm.
 */
911 tfp_free((void *)req);
912 tfp_free((void *)resv);
913 tfp_free((void *)db->pool);
914 tfp_free((void *)db);
915 tfp_free((void *)rm_db);
916 tfp_free((void *)req_cnt);
/*
 * Free an RM DB: checks for residual (un-freed) elements, asks FW to
 * flush any residuals found, then releases every element's BA pool and
 * the DB itself.
 */
922 tf_rm_free_db(struct tf *tfp,
923 struct tf_rm_free_db_parms *parms)
927 uint16_t resv_size = 0;
928 struct tf_rm_new_db *rm_db;
929 struct tf_rm_resc_entry *resv;
930 bool residuals_found = false;
932 TF_CHECK_PARMS2(parms, parms->rm_db);
934 /* Device unbind happens when the TF Session is closed and the
935 * session ref count is 0. Device unbind will cleanup each of
936 * its support modules, i.e. Identifier, thus we're ending up
937 * here to close the DB.
939 * On TF Session close it is assumed that the session has already
940 * cleaned up all its resources, individually, while
941 * destroying its flows.
943 * To assist in the 'cleanup checking' the DB is checked for any
944 * remaining elements and logged if found to be the case.
946 * Any such elements will need to be 'cleared' ahead of
947 * returning the resources to the HCAPI RM.
949 * RM will signal FW to flush the DB resources. FW will
950 * perform the invalidation. TF Session close will return the
951 * previous allocated elements to the RM and then close the
952 * HCAPI RM registration. That then saves several 'free' msgs
953 * from being required.
956 rm_db = (struct tf_rm_new_db *)parms->rm_db;
958 /* Check for residuals that the client didn't clean up */
959 rc = tf_rm_check_residuals(rm_db,
966 /* Invalidate any residuals followed by a DB traversal for
969 if (residuals_found) {
970 rc = tf_msg_session_resc_flush(tfp,
/* resv was allocated by tf_rm_check_residuals(); freed here after
 * the flush request regardless of its outcome.
 */
974 tfp_free((void *)resv);
975 /* On failure we still have to cleanup so we can only
976 * log that FW failed.
980 "%s: Internal Flush error, module:%s\n",
981 tf_dir_2_str(parms->dir),
982 tf_module_2_str(rm_db->module));
985 /* No need to check for configuration type, even if we do not
986 * have a BA pool we just delete on a null ptr, no harm
988 for (i = 0; i < rm_db->num_entries; i++)
989 tfp_free((void *)rm_db->db[i].pool);
991 tfp_free((void *)parms->rm_db);
996 * Get the bit allocator pool associated with the subtype and the db
1002 * Module subtype used to index into the module specific database.
1003 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1004 * module subtype of TF_MODULE_TYPE_TABLE.
1007 * Pointer to the bit allocator pool used
1009 * [in/out] new_subtype
1010 * Pointer to the subtype of the actual pool used
1013 * - ENOTSUP - Operation not supported
1016 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1018 struct bitalloc **pool,
1019 uint16_t *new_subtype)
1022 uint16_t tmp_subtype = subtype;
1024 /* If we are a child, get the parent table index */
1025 if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1026 tmp_subtype = rm_db->db[subtype].parent_subtype;
1028 *pool = rm_db->db[tmp_subtype].pool;
1030 /* Bail out if the pool is not valid, should never happen */
1031 if (rm_db->db[tmp_subtype].pool == NULL) {
1034 "%s: Invalid pool for this type:%d, rc:%s\n",
1035 tf_dir_2_str(rm_db->dir),
/* Report the subtype whose pool is actually used (parent for children). */
1040 *new_subtype = tmp_subtype;
/*
 * Allocate a single element from the subtype's BA pool (or its parent's
 * pool for child types) and return the base-adjusted index.
 */
1045 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1050 struct tf_rm_new_db *rm_db;
1051 enum tf_rm_elem_cfg_type cfg_type;
1052 struct bitalloc *pool;
1055 TF_CHECK_PARMS2(parms, parms->rm_db);
1057 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1058 TF_CHECK_PARMS1(rm_db->db);
1060 cfg_type = rm_db->db[parms->subtype].cfg_type;
1062 /* Bail out if not controlled by RM */
1063 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1064 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1065 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1068 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1072 * priority 0: allocate from top of the tcam i.e. high
1073 * priority !0: allocate index from bottom i.e lowest
1075 if (parms->priority)
1076 id = ba_alloc_reverse(pool);
1078 id = ba_alloc(pool);
1079 if (id == BA_FAIL) {
1082 "%s: Allocation failed, rc:%s\n",
1083 tf_dir_2_str(rm_db->dir),
1088 /* Adjust for any non zero start value */
1089 rc = tf_rm_adjust_index(rm_db->db,
1090 TF_RM_ADJUST_ADD_BASE,
1096 "%s: Alloc adjust of base index failed, rc:%s\n",
1097 tf_dir_2_str(rm_db->dir),
/* index is the base-adjusted HW value; base_index (optional output)
 * receives the raw 0-based pool id.
 */
1102 *parms->index = index;
1103 if (parms->base_index)
1104 *parms->base_index = id;
/*
 * Return a previously allocated element to its BA pool, removing the
 * base offset from the caller-supplied index first.
 */
1110 tf_rm_free(struct tf_rm_free_parms *parms)
1114 struct tf_rm_new_db *rm_db;
1115 enum tf_rm_elem_cfg_type cfg_type;
1116 struct bitalloc *pool;
1119 TF_CHECK_PARMS2(parms, parms->rm_db);
1120 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1121 TF_CHECK_PARMS1(rm_db->db);
1123 cfg_type = rm_db->db[parms->subtype].cfg_type;
1125 /* Bail out if not controlled by RM */
1126 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1127 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1128 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1131 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1135 /* Adjust for any non zero start value */
1136 rc = tf_rm_adjust_index(rm_db->db,
1137 TF_RM_ADJUST_RM_BASE,
1144 rc = ba_free(pool, adj_index);
1145 /* No logging direction matters and that is not available here */
/*
 * Query whether a given element index is currently allocated in the
 * subtype's BA pool; also optionally returns the 0-based pool index.
 */
1153 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1157 struct tf_rm_new_db *rm_db;
1158 enum tf_rm_elem_cfg_type cfg_type;
1159 struct bitalloc *pool;
1162 TF_CHECK_PARMS2(parms, parms->rm_db);
1163 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1164 TF_CHECK_PARMS1(rm_db->db);
1166 cfg_type = rm_db->db[parms->subtype].cfg_type;
1169 /* Bail out if not controlled by RM */
1170 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1171 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1172 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1175 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1179 /* Adjust for any non zero start value */
1180 rc = tf_rm_adjust_index(rm_db->db,
1181 TF_RM_ADJUST_RM_BASE,
1188 if (parms->base_index)
1189 *parms->base_index = adj_index;
1190 *parms->allocated = ba_inuse(pool, adj_index);
/*
 * Copy the allocation range (start/stride) for a single subtype out of
 * the RM DB into the caller's tf_rm_alloc_info.
 */
1196 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1198 struct tf_rm_new_db *rm_db;
1199 enum tf_rm_elem_cfg_type cfg_type;
1201 TF_CHECK_PARMS2(parms, parms->rm_db);
1202 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1203 TF_CHECK_PARMS1(rm_db->db);
1205 cfg_type = rm_db->db[parms->subtype].cfg_type;
1207 /* Bail out if not controlled by HCAPI */
1208 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1212 &rm_db->db[parms->subtype].alloc,
1213 sizeof(struct tf_rm_alloc_info));
/*
 * Copy the allocation info for the first 'size' elements of the DB into
 * the caller's info array, skipping device-invalid (NULL) entries.
 */
1219 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1221 struct tf_rm_new_db *rm_db;
1222 enum tf_rm_elem_cfg_type cfg_type;
1223 struct tf_rm_alloc_info *info = parms->info;
1226 TF_CHECK_PARMS1(parms);
1228 /* No rm info available for this module type
1233 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1234 TF_CHECK_PARMS1(rm_db->db);
1236 for (i = 0; i < size; i++) {
1237 cfg_type = rm_db->db[i].cfg_type;
1239 /* Bail out if not controlled by HCAPI */
1240 if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1246 &rm_db->db[i].alloc,
1247 sizeof(struct tf_rm_alloc_info));
/*
 * Look up the HCAPI RM resource type configured for a subtype.
 */
1255 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1257 struct tf_rm_new_db *rm_db;
1258 enum tf_rm_elem_cfg_type cfg_type;
1260 TF_CHECK_PARMS2(parms, parms->rm_db);
1261 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1262 TF_CHECK_PARMS1(rm_db->db);
1264 cfg_type = rm_db->db[parms->subtype].cfg_type;
1266 /* Bail out if not controlled by HCAPI */
1267 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1270 *parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
/*
 * Report how many entries of the subtype's BA pool are currently in
 * use. A missing pool is not an error: it means nothing was allocated.
 */
1276 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1279 struct tf_rm_new_db *rm_db;
1280 enum tf_rm_elem_cfg_type cfg_type;
1282 TF_CHECK_PARMS2(parms, parms->rm_db);
1283 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1284 TF_CHECK_PARMS1(rm_db->db);
1286 cfg_type = rm_db->db[parms->subtype].cfg_type;
1288 /* Bail out if not a BA pool */
1289 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1290 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1291 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1294 /* Bail silently (no logging), if the pool is not valid there
1295 * was no elements allocated for it.
1297 if (rm_db->db[parms->subtype].pool == NULL) {
1302 *parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1306 /* Only used for table bulk get at this time
1309 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1311 struct tf_rm_new_db *rm_db;
1312 enum tf_rm_elem_cfg_type cfg_type;
1313 uint32_t base_index;
1316 struct bitalloc *pool;
1319 TF_CHECK_PARMS2(parms, parms->rm_db);
1320 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1321 TF_CHECK_PARMS1(rm_db->db);
1323 cfg_type = rm_db->db[parms->subtype].cfg_type;
1325 /* Bail out if not a BA pool */
1326 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1327 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1328 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1331 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1335 base_index = rm_db->db[subtype].alloc.entry.start;
1336 stride = rm_db->db[subtype].alloc.entry.stride;
1338 if (parms->starting_index < base_index ||
1339 parms->starting_index + parms->num_entries > base_index + stride)