1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
8 #include <rte_common.h>
11 #include <cfa_resource_types.h>
14 #include "tf_common.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
25 * Generic RM Element data type that an RM DB is built upon.
27 struct tf_rm_element {
29 * RM Element configuration type. If Private then the
30 * hcapi_type can be ignored. If Null then the element is not
31 * valid for the device.
33 enum tf_rm_elem_cfg_type cfg_type;
36 * HCAPI RM Type for the element.
41 * HCAPI RM allocated range information for the element.
43 struct tf_rm_alloc_info alloc;
46 * If cfg_type == HCAPI_BA_CHILD, this field indicates
47 * the parent module subtype for look up into the parent pool.
48 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
49 * module subtype of TF_MODULE_TYPE_TABLE.
51 uint16_t parent_subtype;
54 * Bit allocator pool for the element. Pool size is controlled
55 * by the struct tf_session_resources at time of session creation.
56 * Null indicates that the pool is not used for the element.
58 struct bitalloc *pool;
66 * Number of elements in the DB
71 * Direction this DB controls.
76 * Module type, used for logging purposes.
78 enum tf_module_type module;
81 * The DB consists of an array of elements
83 struct tf_rm_element *db;
87 * Count the HCAPI reservations (non-zero requested counts) within a
89 * module configuration array. Elements not valid for the device
90 * (TF_RM_ELEM_CFG_NULL) are skipped; attempted reservations of such
91 * elements are logged, except for the EM module.
94 * Pointer to the DB configuration
97 * Pointer to the allocation values associated with the module
100 * Number of DB configuration elements
103 * Number of HCAPI entries with a reservation value greater than 0
107 * - EOPNOTSUPP - Operation not supported
110 tf_rm_count_hcapi_reservations(enum tf_dir dir,
111 enum tf_module_type module,
112 struct tf_rm_element_cfg *cfg,
113 uint16_t *reservations,
115 uint16_t *valid_count)
120 for (i = 0; i < count; i++) {
121 if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
125 /* Only log msg if a type is attempted reserved and
126 * not supported. We ignore EM module as its using a
127 * split configuration array thus it would fail for
128 * this type of check.
130 if (module != TF_MODULE_TYPE_EM &&
131 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
132 reservations[i] > 0) {
134 "%s, %s, %s allocation of %d not supported\n",
135 tf_module_2_str(module),
137 tf_module_subtype_2_str(module, i),
146 * Resource Manager Adjust of base index definitions.
148 enum tf_rm_adjust_type {
149 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
150 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
154 * Adjust an index according to the allocation information.
156 * All resources are controlled in a 0 based pool. Some resources, by
157 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
158 * need to be adjusted before they are handed out.
161 * Pointer to the db, used for the lookup
167 * TF module subtype used as an index into the database.
168 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
169 * module subtype of TF_MODULE_TYPE_TABLE.
179 * - EOPNOTSUPP - Operation not supported
182 tf_rm_adjust_index(struct tf_rm_element *db,
183 enum tf_rm_adjust_type action,
191 base_index = db[subtype].alloc.entry.start;
194 case TF_RM_ADJUST_RM_BASE:
195 *adj_index = index - base_index;
197 case TF_RM_ADJUST_ADD_BASE:
198 *adj_index = index + base_index;
208 * Logs an array of found residual entries to the console.
211 * Receive or transmit direction
214 * Type of Device Module
217 * Number of entries in the residual array
220 * Pointer to an array of residual entries. Array is index same as
221 * the DB in which this function is used. Each entry holds residual
222 * value for that entry.
225 tf_rm_log_residuals(enum tf_dir dir,
226 enum tf_module_type module,
232 /* Walk the residual array and log the types that wasn't
233 * cleaned up to the console.
235 for (i = 0; i < count; i++) {
236 if (residuals[i] != 0)
238 "%s, %s was not cleaned up, %d outstanding\n",
240 tf_module_subtype_2_str(module, i),
246 * Performs a check of the passed in DB for any lingering elements. If
247 * a resource type was found to not have been cleaned up by the caller
248 * then its residual values are recorded, logged and passed back in an
249 * allocate reservation array that the caller can pass to the FW for
253 * Pointer to the db, used for the lookup
256 * Pointer to the reservation size of the generated reservation
260 * Pointer to a pointer to a reservation array. The reservation array is
261 * allocated after the residual scan and holds any found residual
262 * entries. Thus it can be smaller than the DB that the check was
263 * performed on. Array must be freed by the caller.
265 * [out] residuals_present
266 * Pointer to a bool flag indicating if residual was present in the
271 * - EOPNOTSUPP - Operation not supported
274 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
276 struct tf_rm_resc_entry **resv,
277 bool *residuals_present)
284 uint16_t *residuals = NULL;
286 struct tf_rm_get_inuse_count_parms iparms;
287 struct tf_rm_get_alloc_info_parms aparms;
288 struct tf_rm_get_hcapi_parms hparms;
289 struct tf_rm_alloc_info info;
290 struct tfp_calloc_parms cparms;
291 struct tf_rm_resc_entry *local_resv = NULL;
293 /* Create array to hold the entries that have residuals */
294 cparms.nitems = rm_db->num_entries;
295 cparms.size = sizeof(uint16_t);
296 cparms.alignment = 0;
297 rc = tfp_calloc(&cparms);
301 residuals = (uint16_t *)cparms.mem_va;
303 /* Traverse the DB and collect any residual elements */
304 iparms.rm_db = rm_db;
305 iparms.count = &count;
306 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
308 rc = tf_rm_get_inuse_count(&iparms);
309 /* Not a device supported entry, just skip */
313 goto cleanup_residuals;
317 residuals[i] = count;
318 *residuals_present = true;
322 if (*residuals_present) {
323 /* Populate a reduced resv array with only the entries
324 * that have residuals.
326 cparms.nitems = found;
327 cparms.size = sizeof(struct tf_rm_resc_entry);
328 cparms.alignment = 0;
329 rc = tfp_calloc(&cparms);
333 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
335 aparms.rm_db = rm_db;
336 hparms.rm_db = rm_db;
337 hparms.hcapi_type = &hcapi_type;
338 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
339 if (residuals[i] == 0)
343 rc = tf_rm_get_info(&aparms);
348 rc = tf_rm_get_hcapi_type(&hparms);
352 local_resv[f].type = hcapi_type;
353 local_resv[f].start = info.entry.start;
354 local_resv[f].stride = info.entry.stride;
360 tf_rm_log_residuals(rm_db->dir,
365 tfp_free((void *)residuals);
371 tfp_free((void *)local_resv);
374 tfp_free((void *)residuals);
380 * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
381 * resource type (HCAPI RM). These resources have multiple Truflow types which
382 * map to a single HCAPI RM type. In order to support this, one Truflow type
383 * sharing the HCAPI resources is designated the parent. All other Truflow
384 * types associated with that HCAPI RM type are designated the children.
386 * This function updates the resource counts of any HCAPI_BA_PARENT with the
387 * counts of the HCAPI_BA_CHILDREN. These are read from the alloc_cnt and
388 * written back to the req_cnt.
391 * Pointer to an array of module specific Truflow type indexed RM cfg items
394 * Pointer to the tf_open_session() configured array of module specific
395 * Truflow type indexed requested counts.
398 * Pointer to the location to put the updated resource counts.
402 * - Failure if negative
405 tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
407 uint16_t num_elements,
412 /* Search through all the elements */
413 for (parent = 0; parent < num_elements; parent++) {
414 uint16_t combined_cnt = 0;
416 /* If I am a parent */
417 if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
418 /* start with my own count */
419 RTE_ASSERT(cfg[parent].slices);
421 alloc_cnt[parent] / cfg[parent].slices;
423 if (alloc_cnt[parent] % cfg[parent].slices)
426 /* Search again through all the elements */
427 for (child = 0; child < num_elements; child++) {
428 /* If this is one of my children */
429 if (cfg[child].cfg_type ==
430 TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
431 cfg[child].parent_subtype == parent) {
433 RTE_ASSERT(cfg[child].slices);
435 /* Increment the parents combined count
436 * with each child's count adjusted for
437 * number of slices per RM allocated item.
440 alloc_cnt[child] / cfg[child].slices;
442 if (alloc_cnt[child] % cfg[child].slices)
446 /* Clear the requested child count */
450 /* Save the parent count to be requested */
451 req_cnt[parent] = combined_cnt;
458 tf_rm_create_db(struct tf *tfp,
459 struct tf_rm_create_db_parms *parms)
462 struct tf_session *tfs;
463 struct tf_dev_info *dev;
465 uint16_t max_types, hcapi_items, *req_cnt;
466 struct tfp_calloc_parms cparms;
467 struct tf_rm_resc_req_entry *query;
468 enum tf_rm_resc_resv_strategy resv_strategy;
469 struct tf_rm_resc_req_entry *req;
470 struct tf_rm_resc_entry *resv;
471 struct tf_rm_new_db *rm_db;
472 struct tf_rm_element *db;
475 TF_CHECK_PARMS2(tfp, parms);
477 /* Retrieve the session information */
478 rc = tf_session_get_session_internal(tfp, &tfs);
482 /* Retrieve device information */
483 rc = tf_session_get_device(tfs, &dev);
487 /* Need device max number of elements for the RM QCAPS */
488 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
491 /* Allocate memory for RM QCAPS request */
492 cparms.nitems = max_types;
493 cparms.size = sizeof(struct tf_rm_resc_req_entry);
494 cparms.alignment = 0;
495 rc = tfp_calloc(&cparms);
499 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
501 /* Get Firmware Capabilities */
502 rc = tf_msg_session_resc_qcaps(tfp,
511 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
512 * copy (req_cnt) so that it can be updated if required.
515 cparms.nitems = parms->num_elements;
516 cparms.size = sizeof(uint16_t);
517 rc = tfp_calloc(&cparms);
521 req_cnt = (uint16_t *)cparms.mem_va;
523 tfp_memcpy(req_cnt, parms->alloc_cnt,
524 parms->num_elements * sizeof(uint16_t));
526 /* Update the req_cnt based upon the element configuration
528 tf_rm_update_parent_reservations(parms->cfg,
533 /* Process capabilities against DB requirements. However, as a
534 * DB can hold elements that are not HCAPI we can reduce the
535 * req msg content by removing those out of the request yet
536 * the DB holds them all as to give a fast lookup. We can also
537 * remove entries where there are no request for elements.
539 tf_rm_count_hcapi_reservations(parms->dir,
546 if (hcapi_items == 0) {
548 "%s: module:%s Empty RM DB create request\n",
549 tf_dir_2_str(parms->dir),
550 tf_module_2_str(parms->module));
556 /* Alloc request, alignment already set */
557 cparms.nitems = (size_t)hcapi_items;
558 cparms.size = sizeof(struct tf_rm_resc_req_entry);
559 rc = tfp_calloc(&cparms);
562 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
564 /* Alloc reservation, alignment and nitems already set */
565 cparms.size = sizeof(struct tf_rm_resc_entry);
566 rc = tfp_calloc(&cparms);
569 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
571 /* Build the request */
572 for (i = 0, j = 0; i < parms->num_elements; i++) {
573 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
574 uint16_t hcapi_type = cfg->hcapi_type;
576 /* Only perform reservation for requested entries
581 /* Skip any children in the request */
582 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
583 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
584 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
586 /* Verify that we can get the full amount per qcaps.
588 if (req_cnt[i] <= query[hcapi_type].max) {
589 req[j].type = hcapi_type;
590 req[j].min = req_cnt[i];
591 req[j].max = req_cnt[i];
594 const char *type_str;
596 dev->ops->tf_dev_get_resource_str(tfp,
600 "Failure, %s:%d:%s req:%d avail:%d\n",
601 tf_dir_2_str(parms->dir),
602 hcapi_type, type_str,
604 query[hcapi_type].max);
610 /* Allocate all resources for the module type
612 rc = tf_msg_session_resc_alloc(tfp,
621 /* Build the RM DB per the request */
623 cparms.size = sizeof(struct tf_rm_new_db);
624 rc = tfp_calloc(&cparms);
627 rm_db = (void *)cparms.mem_va;
629 /* Build the DB within RM DB */
630 cparms.nitems = parms->num_elements;
631 cparms.size = sizeof(struct tf_rm_element);
632 rc = tfp_calloc(&cparms);
635 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
638 for (i = 0, j = 0; i < parms->num_elements; i++) {
639 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
640 const char *type_str;
642 dev->ops->tf_dev_get_resource_str(tfp,
646 db[i].cfg_type = cfg->cfg_type;
647 db[i].hcapi_type = cfg->hcapi_type;
649 /* Save the parent subtype for later use to find the pool
651 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
652 db[i].parent_subtype = cfg->parent_subtype;
654 /* If the element didn't request an allocation no need
655 * to create a pool nor verify if we got a reservation.
660 /* Skip any children or invalid
662 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
663 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
664 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
667 /* If the element had requested an allocation and that
668 * allocation was a success (full amount) then
671 if (req_cnt[i] == resv[j].stride) {
672 db[i].alloc.entry.start = resv[j].start;
673 db[i].alloc.entry.stride = resv[j].stride;
675 /* Only allocate BA pool if a BA type not a child */
676 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
677 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
680 resv[j].stride / cfg->divider;
681 if (resv[j].stride <= 0) {
683 "%s:Divide fails:%d:%s\n",
684 tf_dir_2_str(parms->dir),
685 cfg->hcapi_type, type_str);
690 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
691 sizeof(struct bitalloc));
692 /* Alloc request, alignment already set */
693 cparms.nitems = pool_size;
694 cparms.size = sizeof(struct bitalloc);
695 rc = tfp_calloc(&cparms);
698 "%s: Pool alloc failed, type:%d:%s\n",
699 tf_dir_2_str(parms->dir),
700 cfg->hcapi_type, type_str);
703 db[i].pool = (struct bitalloc *)cparms.mem_va;
705 rc = ba_init(db[i].pool,
707 !tf_session_is_shared_session(tfs));
710 "%s: Pool init failed, type:%d:%s\n",
711 tf_dir_2_str(parms->dir),
712 cfg->hcapi_type, type_str);
718 /* Bail out as we want what we requested for
719 * all elements, not any less.
722 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
723 tf_dir_2_str(parms->dir), cfg->hcapi_type,
724 type_str, req_cnt[i], resv[j].stride);
729 rm_db->num_entries = parms->num_elements;
730 rm_db->dir = parms->dir;
731 rm_db->module = parms->module;
732 *parms->rm_db = (void *)rm_db;
734 tfp_free((void *)req);
735 tfp_free((void *)resv);
736 tfp_free((void *)req_cnt);
740 tfp_free((void *)req);
741 tfp_free((void *)resv);
742 tfp_free((void *)db->pool);
743 tfp_free((void *)db);
744 tfp_free((void *)rm_db);
745 tfp_free((void *)req_cnt);
752 tf_rm_create_db_no_reservation(struct tf *tfp,
753 struct tf_rm_create_db_parms *parms)
756 struct tf_session *tfs;
757 struct tf_dev_info *dev;
759 uint16_t hcapi_items, *req_cnt;
760 struct tfp_calloc_parms cparms;
761 struct tf_rm_resc_req_entry *req;
762 struct tf_rm_resc_entry *resv;
763 struct tf_rm_new_db *rm_db;
764 struct tf_rm_element *db;
767 TF_CHECK_PARMS2(tfp, parms);
769 /* Retrieve the session information */
770 rc = tf_session_get_session_internal(tfp, &tfs);
774 /* Retrieve device information */
775 rc = tf_session_get_device(tfs, &dev);
779 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
780 * copy (req_cnt) so that it can be updated if required.
783 cparms.nitems = parms->num_elements;
784 cparms.size = sizeof(uint16_t);
785 cparms.alignment = 0;
786 rc = tfp_calloc(&cparms);
790 req_cnt = (uint16_t *)cparms.mem_va;
792 tfp_memcpy(req_cnt, parms->alloc_cnt,
793 parms->num_elements * sizeof(uint16_t));
795 /* Process capabilities against DB requirements. However, as a
796 * DB can hold elements that are not HCAPI we can reduce the
797 * req msg content by removing those out of the request yet
798 * the DB holds them all as to give a fast lookup. We can also
799 * remove entries where there are no request for elements.
801 tf_rm_count_hcapi_reservations(parms->dir,
808 if (hcapi_items == 0) {
810 "%s: module:%s Empty RM DB create request\n",
811 tf_dir_2_str(parms->dir),
812 tf_module_2_str(parms->module));
818 /* Alloc request, alignment already set */
819 cparms.nitems = (size_t)hcapi_items;
820 cparms.size = sizeof(struct tf_rm_resc_req_entry);
821 rc = tfp_calloc(&cparms);
824 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
826 /* Alloc reservation, alignment and nitems already set */
827 cparms.size = sizeof(struct tf_rm_resc_entry);
828 rc = tfp_calloc(&cparms);
831 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
833 /* Build the request */
834 for (i = 0, j = 0; i < parms->num_elements; i++) {
835 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
836 uint16_t hcapi_type = cfg->hcapi_type;
838 /* Only perform reservation for requested entries
843 /* Skip any children in the request */
844 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
845 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
846 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
847 req[j].type = hcapi_type;
848 req[j].min = req_cnt[i];
849 req[j].max = req_cnt[i];
854 /* Get all resources info for the module type
856 rc = tf_msg_session_resc_info(tfp,
865 /* Build the RM DB per the request */
867 cparms.size = sizeof(struct tf_rm_new_db);
868 rc = tfp_calloc(&cparms);
871 rm_db = (void *)cparms.mem_va;
873 /* Build the DB within RM DB */
874 cparms.nitems = parms->num_elements;
875 cparms.size = sizeof(struct tf_rm_element);
876 rc = tfp_calloc(&cparms);
879 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
882 for (i = 0, j = 0; i < parms->num_elements; i++) {
883 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
884 const char *type_str;
886 dev->ops->tf_dev_get_resource_str(tfp,
890 db[i].cfg_type = cfg->cfg_type;
891 db[i].hcapi_type = cfg->hcapi_type;
893 /* Save the parent subtype for later use to find the pool
895 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
896 db[i].parent_subtype = cfg->parent_subtype;
898 /* If the element didn't request an allocation no need
899 * to create a pool nor verify if we got a reservation.
904 /* Skip any children or invalid
906 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
907 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
908 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
911 /* If the element had requested an allocation and that
912 * allocation was a success (full amount) then
915 if (req_cnt[i] == resv[j].stride) {
916 db[i].alloc.entry.start = resv[j].start;
917 db[i].alloc.entry.stride = resv[j].stride;
919 /* Only allocate BA pool if a BA type not a child */
920 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
921 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
924 resv[j].stride / cfg->divider;
925 if (resv[j].stride <= 0) {
927 "%s:Divide fails:%d:%s\n",
928 tf_dir_2_str(parms->dir),
929 cfg->hcapi_type, type_str);
934 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
935 sizeof(struct bitalloc));
936 /* Alloc request, alignment already set */
937 cparms.nitems = pool_size;
938 cparms.size = sizeof(struct bitalloc);
939 rc = tfp_calloc(&cparms);
942 "%s: Pool alloc failed, type:%d:%s\n",
943 tf_dir_2_str(parms->dir),
944 cfg->hcapi_type, type_str);
947 db[i].pool = (struct bitalloc *)cparms.mem_va;
949 rc = ba_init(db[i].pool,
951 !tf_session_is_shared_session(tfs));
954 "%s: Pool init failed, type:%d:%s\n",
955 tf_dir_2_str(parms->dir),
956 cfg->hcapi_type, type_str);
962 /* Bail out as we want what we requested for
963 * all elements, not any less.
966 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
967 tf_dir_2_str(parms->dir), cfg->hcapi_type,
968 type_str, req_cnt[i], resv[j].stride);
973 rm_db->num_entries = parms->num_elements;
974 rm_db->dir = parms->dir;
975 rm_db->module = parms->module;
976 *parms->rm_db = (void *)rm_db;
978 tfp_free((void *)req);
979 tfp_free((void *)resv);
980 tfp_free((void *)req_cnt);
984 tfp_free((void *)req);
985 tfp_free((void *)resv);
986 tfp_free((void *)db->pool);
987 tfp_free((void *)db);
988 tfp_free((void *)rm_db);
989 tfp_free((void *)req_cnt);
995 tf_rm_free_db(struct tf *tfp,
996 struct tf_rm_free_db_parms *parms)
1000 uint16_t resv_size = 0;
1001 struct tf_rm_new_db *rm_db;
1002 struct tf_rm_resc_entry *resv;
1003 bool residuals_found = false;
1005 TF_CHECK_PARMS2(parms, parms->rm_db);
1007 /* Device unbind happens when the TF Session is closed and the
1008 * session ref count is 0. Device unbind will cleanup each of
1009 * its support modules, i.e. Identifier, thus we're ending up
1010 * here to close the DB.
1012 * On TF Session close it is assumed that the session has already
1013 * cleaned up all its resources, individually, while
1014 * destroying its flows.
1016 * To assist in the 'cleanup checking' the DB is checked for any
1017 * remaining elements and logged if found to be the case.
1019 * Any such elements will need to be 'cleared' ahead of
1020 * returning the resources to the HCAPI RM.
1022 * RM will signal FW to flush the DB resources. FW will
1023 * perform the invalidation. TF Session close will return the
1024 * previous allocated elements to the RM and then close the
1025 * HCAPI RM registration. That then saves several 'free' msgs
1026 * from being required.
1029 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1031 /* Check for residuals that the client didn't clean up */
1032 rc = tf_rm_check_residuals(rm_db,
1039 /* Invalidate any residuals followed by a DB traversal for
1042 if (residuals_found) {
1043 rc = tf_msg_session_resc_flush(tfp,
1047 tfp_free((void *)resv);
1048 /* On failure we still have to cleanup so we can only
1049 * log that FW failed.
1053 "%s: Internal Flush error, module:%s\n",
1054 tf_dir_2_str(parms->dir),
1055 tf_module_2_str(rm_db->module));
1058 /* No need to check for configuration type, even if we do not
1059 * have a BA pool we just delete on a null ptr, no harm
1061 for (i = 0; i < rm_db->num_entries; i++)
1062 tfp_free((void *)rm_db->db[i].pool);
1064 tfp_free((void *)parms->rm_db);
1069 * Get the bit allocator pool associated with the subtype and the db
1075 * Module subtype used to index into the module specific database.
1076 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1077 * module subtype of TF_MODULE_TYPE_TABLE.
1080 * Pointer to the bit allocator pool used
1082 * [in/out] new_subtype
1083 * Pointer to the subtype of the actual pool used
1086 * - ENOTSUP - Operation not supported
1089 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1091 struct bitalloc **pool,
1092 uint16_t *new_subtype)
1095 uint16_t tmp_subtype = subtype;
1097 /* If we are a child, get the parent table index */
1098 if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1099 tmp_subtype = rm_db->db[subtype].parent_subtype;
1101 *pool = rm_db->db[tmp_subtype].pool;
1103 /* Bail out if the pool is not valid, should never happen */
1104 if (rm_db->db[tmp_subtype].pool == NULL) {
1107 "%s: Invalid pool for this type:%d, rc:%s\n",
1108 tf_dir_2_str(rm_db->dir),
1113 *new_subtype = tmp_subtype;
1118 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1123 struct tf_rm_new_db *rm_db;
1124 enum tf_rm_elem_cfg_type cfg_type;
1125 struct bitalloc *pool;
1128 TF_CHECK_PARMS2(parms, parms->rm_db);
1130 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1131 TF_CHECK_PARMS1(rm_db->db);
1133 cfg_type = rm_db->db[parms->subtype].cfg_type;
1135 /* Bail out if not controlled by RM */
1136 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1137 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1138 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1141 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1145 * priority 0: allocate from top of the tcam i.e. high
1146 * priority !0: allocate index from bottom i.e lowest
1148 if (parms->priority)
1149 id = ba_alloc_reverse(pool);
1151 id = ba_alloc(pool);
1152 if (id == BA_FAIL) {
1155 "%s: Allocation failed, rc:%s\n",
1156 tf_dir_2_str(rm_db->dir),
1161 /* Adjust for any non zero start value */
1162 rc = tf_rm_adjust_index(rm_db->db,
1163 TF_RM_ADJUST_ADD_BASE,
1169 "%s: Alloc adjust of base index failed, rc:%s\n",
1170 tf_dir_2_str(rm_db->dir),
1175 *parms->index = index;
1176 if (parms->base_index)
1177 *parms->base_index = id;
1183 tf_rm_free(struct tf_rm_free_parms *parms)
1187 struct tf_rm_new_db *rm_db;
1188 enum tf_rm_elem_cfg_type cfg_type;
1189 struct bitalloc *pool;
1192 TF_CHECK_PARMS2(parms, parms->rm_db);
1193 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1194 TF_CHECK_PARMS1(rm_db->db);
1196 cfg_type = rm_db->db[parms->subtype].cfg_type;
1198 /* Bail out if not controlled by RM */
1199 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1200 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1201 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1204 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1208 /* Adjust for any non zero start value */
1209 rc = tf_rm_adjust_index(rm_db->db,
1210 TF_RM_ADJUST_RM_BASE,
1217 rc = ba_free(pool, adj_index);
1218 /* No logging direction matters and that is not available here */
1226 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1230 struct tf_rm_new_db *rm_db;
1231 enum tf_rm_elem_cfg_type cfg_type;
1232 struct bitalloc *pool;
1235 TF_CHECK_PARMS2(parms, parms->rm_db);
1236 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1237 TF_CHECK_PARMS1(rm_db->db);
1239 cfg_type = rm_db->db[parms->subtype].cfg_type;
1242 /* Bail out if not controlled by RM */
1243 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1244 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1245 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1248 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1252 /* Adjust for any non zero start value */
1253 rc = tf_rm_adjust_index(rm_db->db,
1254 TF_RM_ADJUST_RM_BASE,
1261 if (parms->base_index)
1262 *parms->base_index = adj_index;
1263 *parms->allocated = ba_inuse(pool, adj_index);
1269 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1271 struct tf_rm_new_db *rm_db;
1272 enum tf_rm_elem_cfg_type cfg_type;
1274 TF_CHECK_PARMS2(parms, parms->rm_db);
1275 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1276 TF_CHECK_PARMS1(rm_db->db);
1278 cfg_type = rm_db->db[parms->subtype].cfg_type;
1280 /* Bail out if not controlled by HCAPI */
1281 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1285 &rm_db->db[parms->subtype].alloc,
1286 sizeof(struct tf_rm_alloc_info));
1292 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1294 struct tf_rm_new_db *rm_db;
1295 enum tf_rm_elem_cfg_type cfg_type;
1296 struct tf_rm_alloc_info *info = parms->info;
1299 TF_CHECK_PARMS2(parms, parms->rm_db);
1300 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1301 TF_CHECK_PARMS1(rm_db->db);
1303 for (i = 0; i < size; i++) {
1304 cfg_type = rm_db->db[i].cfg_type;
1306 /* Bail out if not controlled by HCAPI */
1307 if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1313 &rm_db->db[i].alloc,
1314 sizeof(struct tf_rm_alloc_info));
1322 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1324 struct tf_rm_new_db *rm_db;
1325 enum tf_rm_elem_cfg_type cfg_type;
1327 TF_CHECK_PARMS2(parms, parms->rm_db);
1328 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1329 TF_CHECK_PARMS1(rm_db->db);
1331 cfg_type = rm_db->db[parms->subtype].cfg_type;
1333 /* Bail out if not controlled by HCAPI */
1334 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1337 *parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
1343 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1346 struct tf_rm_new_db *rm_db;
1347 enum tf_rm_elem_cfg_type cfg_type;
1349 TF_CHECK_PARMS2(parms, parms->rm_db);
1350 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1351 TF_CHECK_PARMS1(rm_db->db);
1353 cfg_type = rm_db->db[parms->subtype].cfg_type;
1355 /* Bail out if not a BA pool */
1356 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1357 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1358 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1361 /* Bail silently (no logging), if the pool is not valid there
1362 * was no elements allocated for it.
1364 if (rm_db->db[parms->subtype].pool == NULL) {
1369 *parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1373 /* Only used for table bulk get at this time
1376 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1378 struct tf_rm_new_db *rm_db;
1379 enum tf_rm_elem_cfg_type cfg_type;
1380 uint32_t base_index;
1383 struct bitalloc *pool;
1386 TF_CHECK_PARMS2(parms, parms->rm_db);
1387 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1388 TF_CHECK_PARMS1(rm_db->db);
1390 cfg_type = rm_db->db[parms->subtype].cfg_type;
1392 /* Bail out if not a BA pool */
1393 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1394 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1395 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1398 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1402 base_index = rm_db->db[subtype].alloc.entry.start;
1403 stride = rm_db->db[subtype].alloc.entry.stride;
1405 if (parms->starting_index < base_index ||
1406 parms->starting_index + parms->num_entries > base_index + stride)