1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
8 #include <rte_common.h>
11 #include <cfa_resource_types.h>
14 #include "tf_common.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
22 * Generic RM Element data type that an RM DB is build upon.
24 struct tf_rm_element {
26 * RM Element configuration type. If Private then the
27 * hcapi_type can be ignored. If Null then the element is not
28 * valid for the device.
30 enum tf_rm_elem_cfg_type cfg_type;
33 * HCAPI RM Type for the element.
/* NOTE(review): the hcapi_type and slices member declarations are not
 * visible in this excerpt; later code assigns db[i].hcapi_type and
 * db[i].slices, so both exist in the full struct definition — confirm
 * against the complete source.
 */
38 * Resource slices. How many slices will fit in the
39 * resource pool chunk size.
44 * HCAPI RM allocated range information for the element.
46 struct tf_rm_alloc_info alloc;
49 * If cfg_type == HCAPI_BA_CHILD, this field indicates
50 * the parent module subtype for look up into the parent pool.
51 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
52 * module subtype of TF_MODULE_TYPE_TABLE.
54 uint16_t parent_subtype;
57 * Bit allocator pool for the element. Pool size is controlled
58 * by the struct tf_session_resources at time of session creation.
59 * Null indicates that the pool is not used for the element.
61 struct bitalloc *pool;
/* Fields of the RM DB descriptor (struct tf_rm_new_db); the struct
 * header is outside this excerpt. One such DB exists per module type
 * and direction, and holds the per-element RM state array below.
 */
69 * Number of elements in the DB
74 * Direction this DB controls.
79 * Module type, used for logging purposes.
81 enum tf_module_type module;
84 * The DB consists of an array of elements
86 struct tf_rm_element *db;
/* Count the number of HCAPI-backed reservations in a module's RM
 * configuration, i.e. entries whose cfg_type is not NULL and whose
 * requested reservation count is greater than 0.
 *
 * (The previous header text, "Adjust an index according to the
 * allocation information", was copy-pasted from tf_rm_adjust_index
 * and did not describe this function.)
97 * Pointer to the DB configuration
100 * Pointer to the allocation values associated with the module
103 * Number of DB configuration elements
106 * Number of HCAPI entries with a reservation value greater than 0
110 * - EOPNOTSUPP - Operation not supported
113 tf_rm_count_hcapi_reservations(enum tf_dir dir,
114 enum tf_module_type module,
115 struct tf_rm_element_cfg *cfg,
116 uint16_t *reservations,
118 uint16_t *valid_count)
123 for (i = 0; i < count; i++) {
124 if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
128 /* Only log msg if a type is attempted reserved and
129 * not supported. We ignore EM module as its using a
130 * split configuration array thus it would fail for
131 * this type of check.
133 if (module != TF_MODULE_TYPE_EM &&
134 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
135 reservations[i] > 0) {
137 "%s, %s, %s allocation of %d not supported\n",
138 tf_module_2_str(module),
140 tf_module_subtype_2_str(module, i),
149 * Resource Manager Adjust of base index definitions.
/* Direction selector for tf_rm_adjust_index(): convert between the
 * 0-based pool index space and the device's HCAPI index space.
 */
151 enum tf_rm_adjust_type {
152 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
153 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
157 * Adjust an index according to the allocation information.
159 * All resources are controlled in a 0 based pool. Some resources, by
160 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
161 * need to be adjusted before they are handed out.
164 * Pointer to the db, used for the lookup
170 * TF module subtype used as an index into the database.
171 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
172 * module subtype of TF_MODULE_TYPE_TABLE.
182 * - EOPNOTSUPP - Operation not supported
185 tf_rm_adjust_index(struct tf_rm_element *db,
186 enum tf_rm_adjust_type action,
/* The base is the start of this element's HCAPI-allocated range. */
194 base_index = db[subtype].alloc.entry.start;
197 case TF_RM_ADJUST_RM_BASE:
/* NOTE(review): no visible range check that index >= base_index
 * before subtracting — an out-of-range caller index would wrap if
 * the index type is unsigned. Confirm callers guarantee the range,
 * or that elided lines validate it.
 */
198 *adj_index = index - base_index;
200 case TF_RM_ADJUST_ADD_BASE:
201 *adj_index = index + base_index;
211 * Performs a check of the passed in DB for any lingering elements. If
212 * a resource type was found to not have been cleaned up by the caller
213 * then its residual values are recorded, logged and passed back in an
214 * allocate reservation array that the caller can pass to the FW for
218 * Pointer to the db, used for the lookup
221 * Pointer to the reservation size of the generated reservation
225 * Pointer to a reservation array. The reservation array is
226 * allocated after the residual scan and holds any found residual
227 * entries. Thus it can be smaller than the DB that the check was
228 * performed on. Array must be freed by the caller.
230 * [out] residuals_present
231 * Pointer to a bool flag indicating if residual was present in the
236 * - EOPNOTSUPP - Operation not supported
239 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
241 struct tf_rm_resc_entry **resv,
242 bool *residuals_present)
249 uint16_t *residuals = NULL;
251 struct tf_rm_get_inuse_count_parms iparms;
252 struct tf_rm_get_alloc_info_parms aparms;
253 struct tf_rm_get_hcapi_parms hparms;
254 struct tf_rm_alloc_info info;
255 struct tfp_calloc_parms cparms;
256 struct tf_rm_resc_entry *local_resv = NULL;
258 /* Create array to hold the entries that have residuals */
259 cparms.nitems = rm_db->num_entries;
260 cparms.size = sizeof(uint16_t);
261 cparms.alignment = 0;
262 rc = tfp_calloc(&cparms);
266 residuals = (uint16_t *)cparms.mem_va;
268 /* Traverse the DB and collect any residual elements */
269 iparms.rm_db = rm_db;
270 iparms.count = &count;
271 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
273 rc = tf_rm_get_inuse_count(&iparms);
274 /* Not a device supported entry, just skip */
278 goto cleanup_residuals;
/* Record the in-use count per subtype; 'found' tracks how many
 * subtypes have residuals so the resv array can be right-sized.
 */
282 residuals[i] = count;
283 *residuals_present = true;
287 if (*residuals_present) {
288 /* Populate a reduced resv array with only the entries
289 * that have residuals.
291 cparms.nitems = found;
292 cparms.size = sizeof(struct tf_rm_resc_entry);
293 cparms.alignment = 0;
294 rc = tfp_calloc(&cparms);
298 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
300 aparms.rm_db = rm_db;
301 hparms.rm_db = rm_db;
302 hparms.hcapi_type = &hcapi_type;
303 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
304 if (residuals[i] == 0)
308 rc = tf_rm_get_info(&aparms);
313 rc = tf_rm_get_hcapi_type(&hparms);
/* Translate the residual subtype into a FW reservation entry:
 * HCAPI type plus the element's allocated start/stride range.
 */
317 local_resv[f].type = hcapi_type;
318 local_resv[f].start = info.entry.start;
319 local_resv[f].stride = info.entry.stride;
/* Success path: the residuals scratch array is no longer needed;
 * ownership of local_resv transfers to the caller via *resv.
 */
325 tfp_free((void *)residuals);
/* Error path: release both local allocations. */
331 tfp_free((void *)local_resv);
334 tfp_free((void *)residuals);
340 * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
341 * resource type (HCAPI RM). These resources have multiple Truflow types which
342 * map to a single HCAPI RM type. In order to support this, one Truflow type
343 * sharing the HCAPI resources is designated the parent. All other Truflow
344 * types associated with that HCAPI RM type are designated the children.
346 * This function updates the resource counts of any HCAPI_BA_PARENT with the
347 * counts of the HCAPI_BA_CHILDREN. These are read from the alloc_cnt and
348 * written back to the req_cnt.
351 * Pointer to an array of module specific Truflow type indexed RM cfg items
354 * Pointer to the tf_open_session() configured array of module specific
355 * Truflow type indexed requested counts.
358 * Pointer to the location to put the updated resource counts.
362 * - - Failure if negative
365 tf_rm_update_parent_reservations(struct tf *tfp,
366 struct tf_dev_info *dev,
367 struct tf_rm_element_cfg *cfg,
369 uint16_t num_elements,
373 const char *type_str;
375 /* Search through all the elements */
376 for (parent = 0; parent < num_elements; parent++) {
377 uint16_t combined_cnt = 0;
379 /* If I am a parent */
380 if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
381 /* start with my own count */
/* slices must be non-zero: it is the divisor converting an
 * element count into RM pool-chunk units (round up on remainder).
 */
382 RTE_ASSERT(cfg[parent].slices);
384 alloc_cnt[parent] / cfg[parent].slices;
386 if (alloc_cnt[parent] % cfg[parent].slices)
389 if (alloc_cnt[parent]) {
390 dev->ops->tf_dev_get_resource_str(tfp,
391 cfg[parent].hcapi_type,
395 /* Search again through all the elements */
396 for (child = 0; child < num_elements; child++) {
397 /* If this is one of my children */
398 if (cfg[child].cfg_type ==
399 TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
400 cfg[child].parent_subtype == parent &&
403 RTE_ASSERT(cfg[child].slices);
405 dev->ops->tf_dev_get_resource_str(tfp,
406 cfg[child].hcapi_type,
408 /* Increment the parents combined count
409 * with each child's count adjusted for
410 * number of slices per RM allocated item.
413 alloc_cnt[child] / cfg[child].slices;
415 if (alloc_cnt[child] % cfg[child].slices)
419 /* Clear the requested child count */
423 /* Save the parent count to be requested */
424 req_cnt[parent] = combined_cnt;
/* Create an RM DB for one module type/direction: query FW capabilities
 * (QCAPS), roll child counts into parents, request a full reservation
 * from FW, then build the element array and per-element BA pools.
 * On any shortfall the whole create fails (all-or-nothing policy).
 */
431 tf_rm_create_db(struct tf *tfp,
432 struct tf_rm_create_db_parms *parms)
435 struct tf_session *tfs;
436 struct tf_dev_info *dev;
438 uint16_t max_types, hcapi_items, *req_cnt;
439 struct tfp_calloc_parms cparms;
440 struct tf_rm_resc_req_entry *query;
441 enum tf_rm_resc_resv_strategy resv_strategy;
442 struct tf_rm_resc_req_entry *req;
443 struct tf_rm_resc_entry *resv;
444 struct tf_rm_new_db *rm_db;
445 struct tf_rm_element *db;
448 TF_CHECK_PARMS2(tfp, parms);
450 /* Retrieve the session information */
451 rc = tf_session_get_session_internal(tfp, &tfs);
455 /* Retrieve device information */
456 rc = tf_session_get_device(tfs, &dev);
460 /* Need device max number of elements for the RM QCAPS */
461 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
464 /* Allocate memory for RM QCAPS request */
465 cparms.nitems = max_types;
466 cparms.size = sizeof(struct tf_rm_resc_req_entry);
467 cparms.alignment = 0;
468 rc = tfp_calloc(&cparms);
472 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
474 /* Get Firmware Capabilities */
475 rc = tf_msg_session_resc_qcaps(tfp,
484 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
485 * copy (req_cnt) so that it can be updated if required.
488 cparms.nitems = parms->num_elements;
489 cparms.size = sizeof(uint16_t);
490 rc = tfp_calloc(&cparms);
494 req_cnt = (uint16_t *)cparms.mem_va;
496 tfp_memcpy(req_cnt, parms->alloc_cnt,
497 parms->num_elements * sizeof(uint16_t));
499 /* Update the req_cnt based upon the element configuration
501 tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
506 /* Process capabilities against DB requirements. However, as a
507 * DB can hold elements that are not HCAPI we can reduce the
508 * req msg content by removing those out of the request yet
509 * the DB holds them all as to give a fast lookup. We can also
510 * remove entries where there are no request for elements.
512 tf_rm_count_hcapi_reservations(parms->dir,
519 if (hcapi_items == 0) {
524 /* Alloc request, alignment already set */
525 cparms.nitems = (size_t)hcapi_items;
526 cparms.size = sizeof(struct tf_rm_resc_req_entry);
527 rc = tfp_calloc(&cparms);
530 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
532 /* Alloc reservation, alignment and nitems already set */
533 cparms.size = sizeof(struct tf_rm_resc_entry);
534 rc = tfp_calloc(&cparms);
537 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
539 /* Build the request */
540 for (i = 0, j = 0; i < parms->num_elements; i++) {
541 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
542 uint16_t hcapi_type = cfg->hcapi_type;
544 /* Only perform reservation for requested entries
549 /* Skip any children in the request */
550 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
551 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
552 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
554 /* Verify that we can get the full amount per qcaps.
556 if (req_cnt[i] <= query[hcapi_type].max) {
/* min == max: request exactly the needed amount, no less. */
557 req[j].type = hcapi_type;
558 req[j].min = req_cnt[i];
559 req[j].max = req_cnt[i];
562 const char *type_str;
564 dev->ops->tf_dev_get_resource_str(tfp,
568 "Failure, %s:%d:%s req:%d avail:%d\n",
569 tf_dir_2_str(parms->dir),
570 hcapi_type, type_str,
572 query[hcapi_type].max);
578 /* Allocate all resources for the module type
580 rc = tf_msg_session_resc_alloc(tfp,
589 /* Build the RM DB per the request */
591 cparms.size = sizeof(struct tf_rm_new_db);
592 rc = tfp_calloc(&cparms);
595 rm_db = (void *)cparms.mem_va;
597 /* Build the DB within RM DB */
598 cparms.nitems = parms->num_elements;
599 cparms.size = sizeof(struct tf_rm_element);
600 rc = tfp_calloc(&cparms);
603 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
606 for (i = 0, j = 0; i < parms->num_elements; i++) {
607 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
608 const char *type_str;
610 dev->ops->tf_dev_get_resource_str(tfp,
614 db[i].cfg_type = cfg->cfg_type;
615 db[i].hcapi_type = cfg->hcapi_type;
616 db[i].slices = cfg->slices;
618 /* Save the parent subtype for later use to find the pool
620 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
621 db[i].parent_subtype = cfg->parent_subtype;
623 /* If the element didn't request an allocation no need
624 * to create a pool nor verify if we got a reservation.
629 /* Skip any children or invalid
631 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
632 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
633 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
636 /* If the element had requested an allocation and that
637 * allocation was a success (full amount) then
640 if (req_cnt[i] == resv[j].stride) {
641 db[i].alloc.entry.start = resv[j].start;
642 db[i].alloc.entry.stride = resv[j].stride;
644 /* Only allocate BA pool if a BA type not a child */
645 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
646 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
648 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
649 sizeof(struct bitalloc));
650 /* Alloc request, alignment already set */
651 cparms.nitems = pool_size;
652 cparms.size = sizeof(struct bitalloc);
653 rc = tfp_calloc(&cparms);
656 "%s: Pool alloc failed, type:%d:%s\n",
657 tf_dir_2_str(parms->dir),
658 cfg->hcapi_type, type_str);
661 db[i].pool = (struct bitalloc *)cparms.mem_va;
663 rc = ba_init(db[i].pool,
665 !tf_session_is_shared_session(tfs));
668 "%s: Pool init failed, type:%d:%s\n",
669 tf_dir_2_str(parms->dir),
670 cfg->hcapi_type, type_str);
676 /* Bail out as we want what we requested for
677 * all elements, not any less.
680 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
681 tf_dir_2_str(parms->dir), cfg->hcapi_type,
682 type_str, req_cnt[i], resv[j].stride);
687 rm_db->num_entries = parms->num_elements;
688 rm_db->dir = parms->dir;
689 rm_db->module = parms->module;
690 *parms->rm_db = (void *)rm_db;
692 tfp_free((void *)req);
693 tfp_free((void *)resv);
694 tfp_free((void *)req_cnt);
/* Error cleanup path.
 * NOTE(review): tfp_free((void *)db->pool) releases only db[0].pool,
 * yet the loop above allocates a pool per BA element (db[i].pool).
 * Pools of elements i > 0 appear to leak on this path — confirm
 * against the full source and, if real, free each db[i].pool here.
 */
698 tfp_free((void *)req);
699 tfp_free((void *)resv);
700 tfp_free((void *)db->pool);
701 tfp_free((void *)db);
702 tfp_free((void *)rm_db);
703 tfp_free((void *)req_cnt);
/* Create an RM DB without performing a FW reservation: instead of
 * QCAPS + resc_alloc, it queries the already-allocated resource info
 * (tf_msg_session_resc_info) and builds the element array and BA
 * pools from that. Mirrors tf_rm_create_db() otherwise.
 */
710 tf_rm_create_db_no_reservation(struct tf *tfp,
711 struct tf_rm_create_db_parms *parms)
714 struct tf_session *tfs;
715 struct tf_dev_info *dev;
717 uint16_t hcapi_items, *req_cnt;
718 struct tfp_calloc_parms cparms;
719 struct tf_rm_resc_req_entry *req;
720 struct tf_rm_resc_entry *resv;
721 struct tf_rm_new_db *rm_db;
722 struct tf_rm_element *db;
725 TF_CHECK_PARMS2(tfp, parms);
727 /* Retrieve the session information */
728 rc = tf_session_get_session_internal(tfp, &tfs);
732 /* Retrieve device information */
733 rc = tf_session_get_device(tfs, &dev);
737 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
738 * copy (req_cnt) so that it can be updated if required.
741 cparms.nitems = parms->num_elements;
742 cparms.size = sizeof(uint16_t);
743 cparms.alignment = 0;
744 rc = tfp_calloc(&cparms);
748 req_cnt = (uint16_t *)cparms.mem_va;
750 tfp_memcpy(req_cnt, parms->alloc_cnt,
751 parms->num_elements * sizeof(uint16_t));
753 /* Process capabilities against DB requirements. However, as a
754 * DB can hold elements that are not HCAPI we can reduce the
755 * req msg content by removing those out of the request yet
756 * the DB holds them all as to give a fast lookup. We can also
757 * remove entries where there are no request for elements.
759 tf_rm_count_hcapi_reservations(parms->dir,
766 if (hcapi_items == 0) {
768 "%s: module:%s Empty RM DB create request\n",
769 tf_dir_2_str(parms->dir),
770 tf_module_2_str(parms->module));
776 /* Alloc request, alignment already set */
777 cparms.nitems = (size_t)hcapi_items;
778 cparms.size = sizeof(struct tf_rm_resc_req_entry);
779 rc = tfp_calloc(&cparms);
782 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
784 /* Alloc reservation, alignment and nitems already set */
785 cparms.size = sizeof(struct tf_rm_resc_entry);
786 rc = tfp_calloc(&cparms);
789 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
791 /* Build the request */
792 for (i = 0, j = 0; i < parms->num_elements; i++) {
793 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
794 uint16_t hcapi_type = cfg->hcapi_type;
796 /* Only perform reservation for requested entries
801 /* Skip any children in the request */
802 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
803 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
804 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
/* No QCAPS gate here (unlike tf_rm_create_db): counts are
 * passed straight through since the resources already exist.
 */
805 req[j].type = hcapi_type;
806 req[j].min = req_cnt[i];
807 req[j].max = req_cnt[i];
812 /* Get all resources info for the module type
814 rc = tf_msg_session_resc_info(tfp,
823 /* Build the RM DB per the request */
825 cparms.size = sizeof(struct tf_rm_new_db);
826 rc = tfp_calloc(&cparms);
829 rm_db = (void *)cparms.mem_va;
831 /* Build the DB within RM DB */
832 cparms.nitems = parms->num_elements;
833 cparms.size = sizeof(struct tf_rm_element);
834 rc = tfp_calloc(&cparms);
837 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
840 for (i = 0, j = 0; i < parms->num_elements; i++) {
841 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
842 const char *type_str;
844 dev->ops->tf_dev_get_resource_str(tfp,
848 db[i].cfg_type = cfg->cfg_type;
849 db[i].hcapi_type = cfg->hcapi_type;
851 /* Save the parent subtype for later use to find the pool
853 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
854 db[i].parent_subtype = cfg->parent_subtype;
856 /* If the element didn't request an allocation no need
857 * to create a pool nor verify if we got a reservation.
862 /* Skip any children or invalid
864 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
865 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
866 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
869 /* If the element had requested an allocation and that
870 * allocation was a success (full amount) then
873 if (req_cnt[i] == resv[j].stride) {
874 db[i].alloc.entry.start = resv[j].start;
875 db[i].alloc.entry.stride = resv[j].stride;
877 /* Only allocate BA pool if a BA type not a child */
878 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
879 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
881 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
882 sizeof(struct bitalloc));
883 /* Alloc request, alignment already set */
884 cparms.nitems = pool_size;
885 cparms.size = sizeof(struct bitalloc);
886 rc = tfp_calloc(&cparms);
889 "%s: Pool alloc failed, type:%d:%s\n",
890 tf_dir_2_str(parms->dir),
891 cfg->hcapi_type, type_str);
894 db[i].pool = (struct bitalloc *)cparms.mem_va;
896 rc = ba_init(db[i].pool,
898 !tf_session_is_shared_session(tfs));
901 "%s: Pool init failed, type:%d:%s\n",
902 tf_dir_2_str(parms->dir),
903 cfg->hcapi_type, type_str);
909 /* Bail out as we want what we requested for
910 * all elements, not any less.
913 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
914 tf_dir_2_str(parms->dir), cfg->hcapi_type,
915 type_str, req_cnt[i], resv[j].stride);
920 rm_db->num_entries = parms->num_elements;
921 rm_db->dir = parms->dir;
922 rm_db->module = parms->module;
923 *parms->rm_db = (void *)rm_db;
925 tfp_free((void *)req);
926 tfp_free((void *)resv);
927 tfp_free((void *)req_cnt);
/* Error cleanup path.
 * NOTE(review): as in tf_rm_create_db, freeing db->pool releases only
 * db[0].pool; pools for elements i > 0 appear to leak here — confirm
 * against the full source.
 */
931 tfp_free((void *)req);
932 tfp_free((void *)resv);
933 tfp_free((void *)db->pool);
934 tfp_free((void *)db);
935 tfp_free((void *)rm_db);
936 tfp_free((void *)req_cnt);
/* Tear down an RM DB: scan for residual (not client-freed) elements,
 * ask FW to flush them if any were found, then release each element's
 * BA pool and the DB itself.
 */
942 tf_rm_free_db(struct tf *tfp,
943 struct tf_rm_free_db_parms *parms)
947 uint16_t resv_size = 0;
948 struct tf_rm_new_db *rm_db;
949 struct tf_rm_resc_entry *resv;
950 bool residuals_found = false;
952 TF_CHECK_PARMS2(parms, parms->rm_db);
954 /* Device unbind happens when the TF Session is closed and the
955 * session ref count is 0. Device unbind will cleanup each of
956 * its support modules, i.e. Identifier, thus we're ending up
957 * here to close the DB.
959 * On TF Session close it is assumed that the session has already
960 * cleaned up all its resources, individually, while
961 * destroying its flows.
963 * To assist in the 'cleanup checking' the DB is checked for any
964 * remaining elements and logged if found to be the case.
966 * Any such elements will need to be 'cleared' ahead of
967 * returning the resources to the HCAPI RM.
969 * RM will signal FW to flush the DB resources. FW will
970 * perform the invalidation. TF Session close will return the
971 * previous allocated elements to the RM and then close the
972 * HCAPI RM registration. That then saves several 'free' msgs
973 * from being required.
976 rm_db = (struct tf_rm_new_db *)parms->rm_db;
978 /* Check for residuals that the client didn't clean up */
979 rc = tf_rm_check_residuals(rm_db,
986 /* Invalidate any residuals followed by a DB traversal for
989 if (residuals_found) {
990 rc = tf_msg_session_resc_flush(tfp,
994 tfp_free((void *)resv);
995 /* On failure we still have to cleanup so we can only
996 * log that FW failed.
1000 "%s: Internal Flush error, module:%s\n",
1001 tf_dir_2_str(parms->dir),
1002 tf_module_2_str(rm_db->module));
1005 /* No need to check for configuration type, even if we do not
1006 * have a BA pool we just delete on a null ptr, no harm
1008 for (i = 0; i < rm_db->num_entries; i++)
1009 tfp_free((void *)rm_db->db[i].pool);
/* NOTE(review): rm_db->db (the element array, allocated separately in
 * tf_rm_create_db) is not freed in the visible lines — only the pools
 * and the rm_db container. Confirm against the full source whether
 * this is an actual leak.
 */
1011 tfp_free((void *)parms->rm_db);
1016 * Get the bit allocator pool associated with the subtype and the db
1022 * Module subtype used to index into the module specific database.
1023 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1024 * module subtype of TF_MODULE_TYPE_TABLE.
1027 * Pointer to the bit allocator pool used
1029 * [in/out] new_subtype
1030 * Pointer to the subtype of the actual pool used
1033 * - ENOTSUP - Operation not supported
1036 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1038 struct bitalloc **pool,
1039 uint16_t *new_subtype)
1042 uint16_t tmp_subtype = subtype;
1044 /* If we are a child, get the parent table index */
/* Children do not own a pool; they redirect to the parent's pool via
 * the parent_subtype saved at DB-create time.
 */
1045 if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1046 tmp_subtype = rm_db->db[subtype].parent_subtype;
1048 *pool = rm_db->db[tmp_subtype].pool;
1050 /* Bail out if the pool is not valid, should never happen */
1051 if (rm_db->db[tmp_subtype].pool == NULL) {
1054 "%s: Invalid pool for this type:%d, rc:%s\n",
1055 tf_dir_2_str(rm_db->dir),
1060 *new_subtype = tmp_subtype;
/* Allocate one element from the subtype's BA pool (or its parent's
 * pool for CHILD types) and return the base-adjusted index.
 */
1065 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1070 struct tf_rm_new_db *rm_db;
1071 enum tf_rm_elem_cfg_type cfg_type;
1072 struct bitalloc *pool;
1075 TF_CHECK_PARMS2(parms, parms->rm_db);
1077 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1078 TF_CHECK_PARMS1(rm_db->db);
1080 cfg_type = rm_db->db[parms->subtype].cfg_type;
1082 /* Bail out if not controlled by RM */
1083 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1084 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1085 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1088 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
/* NOTE(review): the rc check after tf_rm_get_pool() is not visible in
 * this excerpt — presumably elided lines return on failure before
 * 'pool' is used; confirm against the full source.
 */
1092 * priority 0: allocate from top of the tcam i.e. high
1093 * priority !0: allocate index from bottom i.e lowest
1095 if (parms->priority)
1096 id = ba_alloc_reverse(pool);
1098 id = ba_alloc(pool);
1099 if (id == BA_FAIL) {
1102 "%s: Allocation failed, rc:%s\n",
1103 tf_dir_2_str(rm_db->dir),
1108 /* Adjust for any non zero start value */
1109 rc = tf_rm_adjust_index(rm_db->db,
1110 TF_RM_ADJUST_ADD_BASE,
1116 "%s: Alloc adjust of base index failed, rc:%s\n",
1117 tf_dir_2_str(rm_db->dir),
/* index = HCAPI-space value; id = raw 0-based pool index. */
1122 *parms->index = index;
1123 if (parms->base_index)
1124 *parms->base_index = id;
/* Return a previously allocated element to its BA pool: remove the
 * HCAPI base to recover the 0-based pool index, then ba_free it.
 */
1130 tf_rm_free(struct tf_rm_free_parms *parms)
1134 struct tf_rm_new_db *rm_db;
1135 enum tf_rm_elem_cfg_type cfg_type;
1136 struct bitalloc *pool;
1139 TF_CHECK_PARMS2(parms, parms->rm_db);
1140 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1141 TF_CHECK_PARMS1(rm_db->db);
1143 cfg_type = rm_db->db[parms->subtype].cfg_type;
1145 /* Bail out if not controlled by RM */
1146 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1147 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1148 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1151 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1155 /* Adjust for any non zero start value */
1156 rc = tf_rm_adjust_index(rm_db->db,
1157 TF_RM_ADJUST_RM_BASE,
1164 rc = ba_free(pool, adj_index);
1165 /* No logging direction matters and that is not available here */
/* Query whether an element index is currently allocated in its BA
 * pool; optionally also reports the 0-based (base-removed) index.
 */
1173 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1177 struct tf_rm_new_db *rm_db;
1178 enum tf_rm_elem_cfg_type cfg_type;
1179 struct bitalloc *pool;
1182 TF_CHECK_PARMS2(parms, parms->rm_db);
1183 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1184 TF_CHECK_PARMS1(rm_db->db);
1186 cfg_type = rm_db->db[parms->subtype].cfg_type;
1189 /* Bail out if not controlled by RM */
1190 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1191 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1192 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1195 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1199 /* Adjust for any non zero start value */
1200 rc = tf_rm_adjust_index(rm_db->db,
1201 TF_RM_ADJUST_RM_BASE,
1208 if (parms->base_index)
1209 *parms->base_index = adj_index;
1210 *parms->allocated = ba_inuse(pool, adj_index);
/* Copy the element's allocation range info (start/stride) out to the
 * caller; rejects subtypes the device does not support (CFG_NULL).
 */
1216 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1218 struct tf_rm_new_db *rm_db;
1219 enum tf_rm_elem_cfg_type cfg_type;
1221 TF_CHECK_PARMS2(parms, parms->rm_db);
1222 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1223 TF_CHECK_PARMS1(rm_db->db);
1225 cfg_type = rm_db->db[parms->subtype].cfg_type;
1227 /* Bail out if not controlled by HCAPI */
1228 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1232 &rm_db->db[parms->subtype].alloc,
1233 sizeof(struct tf_rm_alloc_info));
/* Copy allocation info for the first 'size' elements of the DB into
 * the caller's info array, skipping unsupported (CFG_NULL) entries.
 * NOTE(review): 'size' is caller-provided and is not visibly clamped
 * to rm_db->num_entries — confirm callers never pass a larger value.
 */
1239 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1241 struct tf_rm_new_db *rm_db;
1242 enum tf_rm_elem_cfg_type cfg_type;
1243 struct tf_rm_alloc_info *info = parms->info;
1246 TF_CHECK_PARMS1(parms);
1248 /* No rm info available for this module type
1253 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1254 TF_CHECK_PARMS1(rm_db->db);
1256 for (i = 0; i < size; i++) {
1257 cfg_type = rm_db->db[i].cfg_type;
1259 /* Bail out if not controlled by HCAPI */
1260 if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1266 &rm_db->db[i].alloc,
1267 sizeof(struct tf_rm_alloc_info));
/* Look up the HCAPI RM type for a module subtype; rejects subtypes
 * the device does not support (CFG_NULL).
 */
1275 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1277 struct tf_rm_new_db *rm_db;
1278 enum tf_rm_elem_cfg_type cfg_type;
1280 TF_CHECK_PARMS2(parms, parms->rm_db);
1281 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1282 TF_CHECK_PARMS1(rm_db->db);
1284 cfg_type = rm_db->db[parms->subtype].cfg_type;
1286 /* Bail out if not controlled by HCAPI */
1287 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1290 *parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
/* Look up the slices-per-RM-chunk value for a module subtype; rejects
 * subtypes the device does not support (CFG_NULL).
 */
1295 tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
1297 struct tf_rm_new_db *rm_db;
1298 enum tf_rm_elem_cfg_type cfg_type;
1300 TF_CHECK_PARMS2(parms, parms->rm_db);
1301 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1302 TF_CHECK_PARMS1(rm_db->db);
1304 cfg_type = rm_db->db[parms->subtype].cfg_type;
1306 /* Bail out if not controlled by HCAPI */
1307 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1310 *parms->slices = rm_db->db[parms->subtype].slices;
/* Report how many elements of a subtype are currently allocated in
 * its BA pool. A NULL pool is not an error: it means nothing was
 * ever reserved for the element, so the count is simply 0 (the
 * visible code returns early without logging in that case).
 */
1316 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1319 struct tf_rm_new_db *rm_db;
1320 enum tf_rm_elem_cfg_type cfg_type;
1322 TF_CHECK_PARMS2(parms, parms->rm_db);
1323 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1324 TF_CHECK_PARMS1(rm_db->db);
1326 cfg_type = rm_db->db[parms->subtype].cfg_type;
1328 /* Bail out if not a BA pool */
1329 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1330 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1331 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1334 /* Bail silently (no logging), if the pool is not valid there
1335 * was no elements allocated for it.
1337 if (rm_db->db[parms->subtype].pool == NULL) {
1342 *parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1346 /* Only used for table bulk get at this time
1349 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1351 struct tf_rm_new_db *rm_db;
1352 enum tf_rm_elem_cfg_type cfg_type;
1353 uint32_t base_index;
1356 struct bitalloc *pool;
1359 TF_CHECK_PARMS2(parms, parms->rm_db);
1360 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1361 TF_CHECK_PARMS1(rm_db->db);
1363 cfg_type = rm_db->db[parms->subtype].cfg_type;
1365 /* Bail out if not a BA pool */
1366 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1367 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1368 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1371 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1375 base_index = rm_db->db[subtype].alloc.entry.start;
1376 stride = rm_db->db[subtype].alloc.entry.stride;
1378 if (parms->starting_index < base_index ||
1379 parms->starting_index + parms->num_entries > base_index + stride)