1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
8 #include <rte_common.h>
11 #include <cfa_resource_types.h>
14 #include "tf_common.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
/* NOTE(review): this chunk embeds original file line numbers in each line and
 * is non-contiguous — struct braces and several member declarations fall on
 * elided lines. Code below is preserved byte-for-byte; only comments added.
 */
22 * Generic RM Element data type that an RM DB is built upon.
24 struct tf_rm_element {
26 * RM Element configuration type. If Private then the
27 * hcapi_type can be ignored. If Null then the element is not
28 * valid for the device.
30 enum tf_rm_elem_cfg_type cfg_type;
33 * HCAPI RM Type for the element.
/* NOTE(review): the hcapi_type member declaration itself is on an elided
 * line (referenced later as db[i].hcapi_type) — confirm against full file. */
38 * HCAPI RM allocated range information for the element.
40 struct tf_rm_alloc_info alloc;
43 * If cfg_type == HCAPI_BA_CHILD, this field indicates
44 * the parent module subtype for look up into the parent pool.
45 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
46 * module subtype of TF_MODULE_TYPE_TABLE.
48 uint16_t parent_subtype;
51 * Bit allocator pool for the element. Pool size is controlled
52 * by the struct tf_session_resources at time of session creation.
53 * Null indicates that the pool is not used for the element.
55 struct bitalloc *pool;
/* NOTE(review): the members below belong to struct tf_rm_new_db (the RM DB
 * wrapper used throughout this file); its opening line is elided. */
63 * Number of elements in the DB
68 * Direction this DB controls.
73 * Module type, used for logging purposes.
75 enum tf_module_type module;
78 * The DB consists of an array of elements
80 struct tf_rm_element *db;
84 * Count the HCAPI reservations requested in a module configuration.
86 * Walks the module's element configuration array and counts elements that
87 * are device-valid (cfg_type != TF_RM_ELEM_CFG_NULL); logs any attempt to
88 * reserve an element type the device does not support (EM excluded).
91 * Pointer to the DB configuration
94 * Pointer to the allocation values associated with the module
97 * Number of DB configuration elements
100 * Number of HCAPI entries with a reservation value greater than 0
104 * - EOPNOTSUPP - Operation not supported
107 tf_rm_count_hcapi_reservations(enum tf_dir dir,
108 enum tf_module_type module,
109 struct tf_rm_element_cfg *cfg,
110 uint16_t *reservations,
112 uint16_t *valid_count)
/* NOTE(review): a `count` parameter and local declarations (e.g. i and the
 * running counter) are on elided lines. */
117 for (i = 0; i < count; i++) {
118 if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
/* NOTE(review): second clause of the condition and the counter increment
 * are elided — presumably reservations[i] > 0; confirm against full file. */
122 /* Only log msg if a type is attempted reserved and
123 * not supported. We ignore EM module as it is using a
124 * split configuration array thus it would fail for
125 * this type of check.
127 if (module != TF_MODULE_TYPE_EM &&
128 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
129 reservations[i] > 0) {
131 "%s, %s, %s allocation of %d not supported\n",
132 tf_module_2_str(module),
134 tf_module_subtype_2_str(module, i),
143 * Resource Manager Adjust of base index definitions.
145 enum tf_rm_adjust_type {
146 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
147 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
/* NOTE(review): closing "};" of this enum is on an elided line. */
151 * Adjust an index according to the allocation information.
153 * All resources are controlled in a 0 based pool. Some resources, by
154 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
155 * need to be adjusted before they are handed out.
158 * Pointer to the db, used for the lookup
164 * TF module subtype used as an index into the database.
165 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
166 * module subtype of TF_MODULE_TYPE_TABLE.
176 * - EOPNOTSUPP - Operation not supported
179 tf_rm_adjust_index(struct tf_rm_element *db,
180 enum tf_rm_adjust_type action,
/* NOTE(review): subtype, index and adj_index parameters are on elided lines,
 * as are the switch(action) opener, break statements, default case and
 * return — confirm against full file. */
188 base_index = db[subtype].alloc.entry.start;
191 case TF_RM_ADJUST_RM_BASE:
192 *adj_index = index - base_index;
194 case TF_RM_ADJUST_ADD_BASE:
195 *adj_index = index + base_index;
205 * Performs a check of the passed in DB for any lingering elements. If
206 * a resource type was found to not have been cleaned up by the caller
207 * then its residual values are recorded, logged and passed back in an
208 * allocate reservation array that the caller can pass to the FW for
212 * Pointer to the db, used for the lookup
215 * Pointer to the reservation size of the generated reservation
219 * Pointer to a pointer to a reservation array. The reservation array is
220 * allocated after the residual scan and holds any found residual
221 * entries. Thus it can be smaller than the DB that the check was
222 * performed on. Array must be freed by the caller.
224 * [out] residuals_present
225 * Pointer to a bool flag indicating if residual was present in the
230 * - EOPNOTSUPP - Operation not supported
233 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
235 struct tf_rm_resc_entry **resv,
236 bool *residuals_present)
/* NOTE(review): locals rc, i, f, count, found, hcapi_type and the resv_size
 * out-parameter are on elided lines. */
243 uint16_t *residuals = NULL;
245 struct tf_rm_get_inuse_count_parms iparms;
246 struct tf_rm_get_alloc_info_parms aparms;
247 struct tf_rm_get_hcapi_parms hparms;
248 struct tf_rm_alloc_info info;
249 struct tfp_calloc_parms cparms;
250 struct tf_rm_resc_entry *local_resv = NULL;
252 /* Create array to hold the entries that have residuals */
253 cparms.nitems = rm_db->num_entries;
254 cparms.size = sizeof(uint16_t);
255 cparms.alignment = 0;
256 rc = tfp_calloc(&cparms);
/* NOTE(review): the rc check / early return after this calloc is elided. */
260 residuals = (uint16_t *)cparms.mem_va;
262 /* Traverse the DB and collect any residual elements */
263 iparms.rm_db = rm_db;
264 iparms.count = &count;
265 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
267 rc = tf_rm_get_inuse_count(&iparms);
268 /* Not a device supported entry, just skip */
272 goto cleanup_residuals;
276 residuals[i] = count;
277 *residuals_present = true;
/* NOTE(review): the found++ increment and surrounding conditions are on
 * elided lines. */
281 if (*residuals_present) {
282 /* Populate a reduced resv array with only the entries
283 * that have residuals.
285 cparms.nitems = found;
286 cparms.size = sizeof(struct tf_rm_resc_entry);
287 cparms.alignment = 0;
288 rc = tfp_calloc(&cparms);
292 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
294 aparms.rm_db = rm_db;
295 hparms.rm_db = rm_db;
296 hparms.hcapi_type = &hcapi_type;
297 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
298 if (residuals[i] == 0)
302 rc = tf_rm_get_info(&aparms);
307 rc = tf_rm_get_hcapi_type(&hparms);
311 local_resv[f].type = hcapi_type;
312 local_resv[f].start = info.entry.start;
313 local_resv[f].stride = info.entry.stride;
/* NOTE(review): logging of each residual, assignment of *resv_size/*resv
 * and the cleanup labels (cleanup_residuals etc.) fall on elided lines;
 * both free paths below release the temporary arrays. */
319 tfp_free((void *)residuals);
325 tfp_free((void *)local_resv);
328 tfp_free((void *)residuals);
334 * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
335 * resource type (HCAPI RM). These resources have multiple Truflow types which
336 * map to a single HCAPI RM type. In order to support this, one Truflow type
337 * sharing the HCAPI resources is designated the parent. All other Truflow
338 * types associated with that HCAPI RM type are designated the children.
340 * This function updates the resource counts of any HCAPI_BA_PARENT with the
341 * counts of the HCAPI_BA_CHILDREN. These are read from the alloc_cnt and
342 * written back to the req_cnt.
345 * Pointer to an array of module specific Truflow type indexed RM cfg items
348 * Pointer to the tf_open_session() configured array of module specific
349 * Truflow type indexed requested counts.
352 * Pointer to the location to put the updated resource counts.
356 * - Failure if negative
359 tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
361 uint16_t num_elements,
/* NOTE(review): the alloc_cnt parameter, req_cnt parameter and the
 * parent/child loop variables are declared on elided lines. */
366 /* Search through all the elements */
367 for (parent = 0; parent < num_elements; parent++) {
368 uint16_t combined_cnt = 0;
370 /* If I am a parent */
371 if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
372 /* start with my own count */
373 RTE_ASSERT(cfg[parent].slices);
375 alloc_cnt[parent] / cfg[parent].slices;
/* Round up if the count does not divide evenly by slices. */
377 if (alloc_cnt[parent] % cfg[parent].slices)
380 /* Search again through all the elements */
381 for (child = 0; child < num_elements; child++) {
382 /* If this is one of my children */
383 if (cfg[child].cfg_type ==
384 TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
385 cfg[child].parent_subtype == parent) {
387 RTE_ASSERT(cfg[child].slices);
389 /* Increment the parent's combined count
390 * with each child's count adjusted for
391 * number of slices per RM allocated item.
394 alloc_cnt[child] / cfg[child].slices;
396 if (alloc_cnt[child] % cfg[child].slices)
400 /* Clear the requested child count */
404 /* Save the parent count to be requested */
405 req_cnt[parent] = combined_cnt;
/*
 * Creates an RM DB for a module: queries FW capabilities (qcaps), folds
 * child counts into parents, builds a reduced HCAPI reservation request,
 * allocates the resources via FW, then constructs the per-element DB with
 * bit-allocator pools for BA-managed types.
 * NOTE(review): error checks, goto labels and several statements are on
 * elided lines; code preserved byte-for-byte.
 */
412 tf_rm_create_db(struct tf *tfp,
413 struct tf_rm_create_db_parms *parms)
416 struct tf_session *tfs;
417 struct tf_dev_info *dev;
419 uint16_t max_types, hcapi_items, *req_cnt;
420 struct tfp_calloc_parms cparms;
421 struct tf_rm_resc_req_entry *query;
422 enum tf_rm_resc_resv_strategy resv_strategy;
423 struct tf_rm_resc_req_entry *req;
424 struct tf_rm_resc_entry *resv;
425 struct tf_rm_new_db *rm_db;
426 struct tf_rm_element *db;
429 TF_CHECK_PARMS2(tfp, parms);
431 /* Retrieve the session information */
432 rc = tf_session_get_session_internal(tfp, &tfs);
436 /* Retrieve device information */
437 rc = tf_session_get_device(tfs, &dev);
441 /* Need device max number of elements for the RM QCAPS */
442 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
445 /* Allocate memory for RM QCAPS request */
446 cparms.nitems = max_types;
447 cparms.size = sizeof(struct tf_rm_resc_req_entry);
448 cparms.alignment = 0;
449 rc = tfp_calloc(&cparms);
453 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
455 /* Get Firmware Capabilities */
456 rc = tf_msg_session_resc_qcaps(tfp,
465 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
466 * copy (req_cnt) so that it can be updated if required.
469 cparms.nitems = parms->num_elements;
470 cparms.size = sizeof(uint16_t);
471 rc = tfp_calloc(&cparms);
475 req_cnt = (uint16_t *)cparms.mem_va;
477 tfp_memcpy(req_cnt, parms->alloc_cnt,
478 parms->num_elements * sizeof(uint16_t));
480 /* Update the req_cnt based upon the element configuration
482 tf_rm_update_parent_reservations(parms->cfg,
487 /* Process capabilities against DB requirements. However, as a
488 * DB can hold elements that are not HCAPI we can reduce the
489 * req msg content by removing those out of the request yet
490 * the DB holds them all as to give a fast lookup. We can also
491 * remove entries where there are no request for elements.
493 tf_rm_count_hcapi_reservations(parms->dir,
500 if (hcapi_items == 0) {
505 /* Alloc request, alignment already set */
506 cparms.nitems = (size_t)hcapi_items;
507 cparms.size = sizeof(struct tf_rm_resc_req_entry);
508 rc = tfp_calloc(&cparms);
511 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
513 /* Alloc reservation, alignment and nitems already set */
514 cparms.size = sizeof(struct tf_rm_resc_entry);
515 rc = tfp_calloc(&cparms);
518 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
520 /* Build the request */
521 for (i = 0, j = 0; i < parms->num_elements; i++) {
522 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
523 uint16_t hcapi_type = cfg->hcapi_type;
525 /* Only perform reservation for requested entries
530 /* Skip any children in the request */
531 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
532 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
533 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
535 /* Verify that we can get the full amount per qcaps.
537 if (req_cnt[i] <= query[hcapi_type].max) {
538 req[j].type = hcapi_type;
539 req[j].min = req_cnt[i];
540 req[j].max = req_cnt[i];
/* NOTE(review): j++ and the else branch opener are on elided lines. */
543 const char *type_str;
545 dev->ops->tf_dev_get_resource_str(tfp,
549 "Failure, %s:%d:%s req:%d avail:%d\n",
550 tf_dir_2_str(parms->dir),
551 hcapi_type, type_str,
553 query[hcapi_type].max);
559 /* Allocate all resources for the module type
561 rc = tf_msg_session_resc_alloc(tfp,
570 /* Build the RM DB per the request */
572 cparms.size = sizeof(struct tf_rm_new_db);
573 rc = tfp_calloc(&cparms);
576 rm_db = (void *)cparms.mem_va;
578 /* Build the DB within RM DB */
579 cparms.nitems = parms->num_elements;
580 cparms.size = sizeof(struct tf_rm_element);
581 rc = tfp_calloc(&cparms);
584 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
/* NOTE(review): the local `db = rm_db->db;` assignment appears to be on an
 * elided line — db[i] is used below. */
587 for (i = 0, j = 0; i < parms->num_elements; i++) {
588 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
589 const char *type_str;
591 dev->ops->tf_dev_get_resource_str(tfp,
595 db[i].cfg_type = cfg->cfg_type;
596 db[i].hcapi_type = cfg->hcapi_type;
598 /* Save the parent subtype for later use to find the pool
600 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
601 db[i].parent_subtype = cfg->parent_subtype;
603 /* If the element didn't request an allocation no need
604 * to create a pool nor verify if we got a reservation.
609 /* Skip any children or invalid
611 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
612 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
613 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
616 /* If the element had requested an allocation and that
617 * allocation was a success (full amount) then
620 if (req_cnt[i] == resv[j].stride) {
621 db[i].alloc.entry.start = resv[j].start;
622 db[i].alloc.entry.stride = resv[j].stride;
624 /* Only allocate BA pool if a BA type not a child */
625 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
626 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
629 resv[j].stride / cfg->divider;
630 if (resv[j].stride <= 0) {
632 "%s:Divide fails:%d:%s\n",
633 tf_dir_2_str(parms->dir),
634 cfg->hcapi_type, type_str);
639 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
640 sizeof(struct bitalloc));
641 /* Alloc request, alignment already set */
642 cparms.nitems = pool_size;
643 cparms.size = sizeof(struct bitalloc);
644 rc = tfp_calloc(&cparms);
647 "%s: Pool alloc failed, type:%d:%s\n",
648 tf_dir_2_str(parms->dir),
649 cfg->hcapi_type, type_str);
652 db[i].pool = (struct bitalloc *)cparms.mem_va;
654 rc = ba_init(db[i].pool,
656 !tf_session_is_shared_session(tfs));
659 "%s: Pool init failed, type:%d:%s\n",
660 tf_dir_2_str(parms->dir),
661 cfg->hcapi_type, type_str);
667 /* Bail out as we want what we requested for
668 * all elements, not any less.
671 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
672 tf_dir_2_str(parms->dir), cfg->hcapi_type,
673 type_str, req_cnt[i], resv[j].stride);
678 rm_db->num_entries = parms->num_elements;
679 rm_db->dir = parms->dir;
680 rm_db->module = parms->module;
681 *parms->rm_db = (void *)rm_db;
/* Success path: request/reservation scratch and count copy are released;
 * the rm_db itself is returned to the caller via *parms->rm_db. */
683 tfp_free((void *)req);
684 tfp_free((void *)resv);
685 tfp_free((void *)req_cnt);
/* Failure path (label elided): release everything, including any pools. */
689 tfp_free((void *)req);
690 tfp_free((void *)resv);
691 tfp_free((void *)db->pool);
692 tfp_free((void *)db);
693 tfp_free((void *)rm_db);
694 tfp_free((void *)req_cnt);
/*
 * Variant of tf_rm_create_db that retrieves existing resource info from FW
 * (tf_msg_session_resc_info) instead of performing qcaps + allocation; it
 * therefore builds the request without checking against a qcaps max.
 * NOTE(review): error checks, goto labels and several statements are on
 * elided lines; code preserved byte-for-byte.
 */
701 tf_rm_create_db_no_reservation(struct tf *tfp,
702 struct tf_rm_create_db_parms *parms)
705 struct tf_session *tfs;
706 struct tf_dev_info *dev;
708 uint16_t hcapi_items, *req_cnt;
709 struct tfp_calloc_parms cparms;
710 struct tf_rm_resc_req_entry *req;
711 struct tf_rm_resc_entry *resv;
712 struct tf_rm_new_db *rm_db;
713 struct tf_rm_element *db;
716 TF_CHECK_PARMS2(tfp, parms);
718 /* Retrieve the session information */
719 rc = tf_session_get_session_internal(tfp, &tfs);
723 /* Retrieve device information */
724 rc = tf_session_get_device(tfs, &dev);
728 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
729 * copy (req_cnt) so that it can be updated if required.
732 cparms.nitems = parms->num_elements;
733 cparms.size = sizeof(uint16_t);
734 cparms.alignment = 0;
735 rc = tfp_calloc(&cparms);
739 req_cnt = (uint16_t *)cparms.mem_va;
741 tfp_memcpy(req_cnt, parms->alloc_cnt,
742 parms->num_elements * sizeof(uint16_t));
744 /* Process capabilities against DB requirements. However, as a
745 * DB can hold elements that are not HCAPI we can reduce the
746 * req msg content by removing those out of the request yet
747 * the DB holds them all as to give a fast lookup. We can also
748 * remove entries where there are no request for elements.
750 tf_rm_count_hcapi_reservations(parms->dir,
757 if (hcapi_items == 0) {
759 "%s: module:%s Empty RM DB create request\n",
760 tf_dir_2_str(parms->dir),
761 tf_module_2_str(parms->module));
767 /* Alloc request, alignment already set */
768 cparms.nitems = (size_t)hcapi_items;
769 cparms.size = sizeof(struct tf_rm_resc_req_entry);
770 rc = tfp_calloc(&cparms);
773 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
775 /* Alloc reservation, alignment and nitems already set */
776 cparms.size = sizeof(struct tf_rm_resc_entry);
777 rc = tfp_calloc(&cparms);
780 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
782 /* Build the request */
783 for (i = 0, j = 0; i < parms->num_elements; i++) {
784 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
785 uint16_t hcapi_type = cfg->hcapi_type;
787 /* Only perform reservation for requested entries
792 /* Skip any children in the request */
793 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
794 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
795 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
796 req[j].type = hcapi_type;
797 req[j].min = req_cnt[i];
798 req[j].max = req_cnt[i];
/* NOTE(review): j++ is on an elided line. */
803 /* Get all resources info for the module type
805 rc = tf_msg_session_resc_info(tfp,
814 /* Build the RM DB per the request */
816 cparms.size = sizeof(struct tf_rm_new_db);
817 rc = tfp_calloc(&cparms);
820 rm_db = (void *)cparms.mem_va;
822 /* Build the DB within RM DB */
823 cparms.nitems = parms->num_elements;
824 cparms.size = sizeof(struct tf_rm_element);
825 rc = tfp_calloc(&cparms);
828 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
/* NOTE(review): the local `db = rm_db->db;` assignment appears to be on an
 * elided line — db[i] is used below. */
831 for (i = 0, j = 0; i < parms->num_elements; i++) {
832 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
833 const char *type_str;
835 dev->ops->tf_dev_get_resource_str(tfp,
839 db[i].cfg_type = cfg->cfg_type;
840 db[i].hcapi_type = cfg->hcapi_type;
842 /* Save the parent subtype for later use to find the pool
844 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
845 db[i].parent_subtype = cfg->parent_subtype;
847 /* If the element didn't request an allocation no need
848 * to create a pool nor verify if we got a reservation.
853 /* Skip any children or invalid
855 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
856 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
857 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
860 /* If the element had requested an allocation and that
861 * allocation was a success (full amount) then
864 if (req_cnt[i] == resv[j].stride) {
865 db[i].alloc.entry.start = resv[j].start;
866 db[i].alloc.entry.stride = resv[j].stride;
868 /* Only allocate BA pool if a BA type not a child */
869 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
870 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
873 resv[j].stride / cfg->divider;
874 if (resv[j].stride <= 0) {
876 "%s:Divide fails:%d:%s\n",
877 tf_dir_2_str(parms->dir),
878 cfg->hcapi_type, type_str);
883 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
884 sizeof(struct bitalloc));
885 /* Alloc request, alignment already set */
886 cparms.nitems = pool_size;
887 cparms.size = sizeof(struct bitalloc);
888 rc = tfp_calloc(&cparms);
891 "%s: Pool alloc failed, type:%d:%s\n",
892 tf_dir_2_str(parms->dir),
893 cfg->hcapi_type, type_str);
896 db[i].pool = (struct bitalloc *)cparms.mem_va;
898 rc = ba_init(db[i].pool,
900 !tf_session_is_shared_session(tfs));
903 "%s: Pool init failed, type:%d:%s\n",
904 tf_dir_2_str(parms->dir),
905 cfg->hcapi_type, type_str);
911 /* Bail out as we want what we requested for
912 * all elements, not any less.
915 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
916 tf_dir_2_str(parms->dir), cfg->hcapi_type,
917 type_str, req_cnt[i], resv[j].stride);
922 rm_db->num_entries = parms->num_elements;
923 rm_db->dir = parms->dir;
924 rm_db->module = parms->module;
925 *parms->rm_db = (void *)rm_db;
/* Success path: scratch arrays are released; rm_db is handed to caller. */
927 tfp_free((void *)req);
928 tfp_free((void *)resv);
929 tfp_free((void *)req_cnt);
/* Failure path (label elided): release everything, including any pools. */
933 tfp_free((void *)req);
934 tfp_free((void *)resv);
935 tfp_free((void *)db->pool);
936 tfp_free((void *)db);
937 tfp_free((void *)rm_db);
938 tfp_free((void *)req_cnt);
/*
 * Tears down an RM DB: scans for residual (un-freed) elements, asks FW to
 * flush them if any were found, then frees every per-element BA pool and
 * the DB itself.
 */
944 tf_rm_free_db(struct tf *tfp,
945 struct tf_rm_free_db_parms *parms)
949 uint16_t resv_size = 0;
950 struct tf_rm_new_db *rm_db;
951 struct tf_rm_resc_entry *resv;
952 bool residuals_found = false;
954 TF_CHECK_PARMS2(parms, parms->rm_db);
956 /* Device unbind happens when the TF Session is closed and the
957 * session ref count is 0. Device unbind will cleanup each of
958 * its support modules, i.e. Identifier, thus we're ending up
959 * here to close the DB.
961 * On TF Session close it is assumed that the session has already
962 * cleaned up all its resources, individually, while
963 * destroying its flows.
965 * To assist in the 'cleanup checking' the DB is checked for any
966 * remaining elements and logged if found to be the case.
968 * Any such elements will need to be 'cleared' ahead of
969 * returning the resources to the HCAPI RM.
971 * RM will signal FW to flush the DB resources. FW will
972 * perform the invalidation. TF Session close will return the
973 * previous allocated elements to the RM and then close the
974 * HCAPI RM registration. That then saves several 'free' msgs
975 * from being required.
978 rm_db = (struct tf_rm_new_db *)parms->rm_db;
980 /* Check for residuals that the client didn't clean up */
981 rc = tf_rm_check_residuals(rm_db,
988 /* Invalidate any residuals followed by a DB traversal for
991 if (residuals_found) {
992 rc = tf_msg_session_resc_flush(tfp,
996 tfp_free((void *)resv);
997 /* On failure we still have to cleanup so we can only
998 * log that FW failed.
1002 "%s: Internal Flush error, module:%s\n",
1003 tf_dir_2_str(parms->dir),
1004 tf_module_2_str(rm_db->module));
1007 /* No need to check for configuration type, even if we do not
1008 * have a BA pool we just delete on a null ptr, no harm
1010 for (i = 0; i < rm_db->num_entries; i++)
1011 tfp_free((void *)rm_db->db[i].pool);
/* NOTE(review): freeing rm_db->db itself is not visible here — presumably
 * on an elided line; confirm against the full file. */
1013 tfp_free((void *)parms->rm_db);
1018 * Get the bit allocator pool associated with the subtype and the db
1024 * Module subtype used to index into the module specific database.
1025 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1026 * module subtype of TF_MODULE_TYPE_TABLE.
1029 * Pointer to the bit allocator pool used
1031 * [in/out] new_subtype
1032 * Pointer to the subtype of the actual pool used
1035 * - ENOTSUP - Operation not supported
1038 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1040 struct bitalloc **pool,
1041 uint16_t *new_subtype)
1044 uint16_t tmp_subtype = subtype;
1046 /* If we are a child, get the parent table index */
1047 if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1048 tmp_subtype = rm_db->db[subtype].parent_subtype;
1050 *pool = rm_db->db[tmp_subtype].pool;
1052 /* Bail out if the pool is not valid, should never happen */
1053 if (rm_db->db[tmp_subtype].pool == NULL) {
1056 "%s: Invalid pool for this type:%d, rc:%s\n",
1057 tf_dir_2_str(rm_db->dir),
/* NOTE(review): the error return for the NULL-pool case is elided. */
1062 *new_subtype = tmp_subtype;
/*
 * Allocates one element from the subtype's bit-allocator pool (parent pool
 * for child subtypes) and returns the base-adjusted index to the caller.
 */
1067 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1072 struct tf_rm_new_db *rm_db;
1073 enum tf_rm_elem_cfg_type cfg_type;
1074 struct bitalloc *pool;
1077 TF_CHECK_PARMS2(parms, parms->rm_db);
1079 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1080 TF_CHECK_PARMS1(rm_db->db);
1082 cfg_type = rm_db->db[parms->subtype].cfg_type;
1084 /* Bail out if not controlled by RM */
1085 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1086 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1087 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1090 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1094 * priority 0: allocate from top of the tcam i.e. high
1095 * priority !0: allocate index from bottom i.e lowest
1097 if (parms->priority)
1098 id = ba_alloc_reverse(pool);
1100 id = ba_alloc(pool);
1101 if (id == BA_FAIL) {
1104 "%s: Allocation failed, rc:%s\n",
1105 tf_dir_2_str(rm_db->dir),
1110 /* Adjust for any non zero start value */
1111 rc = tf_rm_adjust_index(rm_db->db,
1112 TF_RM_ADJUST_ADD_BASE,
1118 "%s: Alloc adjust of base index failed, rc:%s\n",
1119 tf_dir_2_str(rm_db->dir),
/* base_index (when requested) receives the raw pool id; index receives the
 * base-adjusted value. */
1124 *parms->index = index;
1125 if (parms->base_index)
1126 *parms->base_index = id;
/*
 * Returns a previously allocated element to its bit-allocator pool,
 * removing the base offset from the caller-supplied index first.
 */
1132 tf_rm_free(struct tf_rm_free_parms *parms)
1136 struct tf_rm_new_db *rm_db;
1137 enum tf_rm_elem_cfg_type cfg_type;
1138 struct bitalloc *pool;
1141 TF_CHECK_PARMS2(parms, parms->rm_db);
1142 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1143 TF_CHECK_PARMS1(rm_db->db);
1145 cfg_type = rm_db->db[parms->subtype].cfg_type;
1147 /* Bail out if not controlled by RM */
1148 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1149 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1150 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1153 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1157 /* Adjust for any non zero start value */
1158 rc = tf_rm_adjust_index(rm_db->db,
1159 TF_RM_ADJUST_RM_BASE,
1166 rc = ba_free(pool, adj_index);
1167 /* No logging direction matters and that is not available here */
/*
 * Reports whether an element index is currently allocated in the subtype's
 * pool; optionally returns the 0-based (base-removed) index.
 */
1175 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1179 struct tf_rm_new_db *rm_db;
1180 enum tf_rm_elem_cfg_type cfg_type;
1181 struct bitalloc *pool;
1184 TF_CHECK_PARMS2(parms, parms->rm_db);
1185 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1186 TF_CHECK_PARMS1(rm_db->db);
1188 cfg_type = rm_db->db[parms->subtype].cfg_type;
1191 /* Bail out if not controlled by RM */
1192 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1193 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1194 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1197 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1201 /* Adjust for any non zero start value */
1202 rc = tf_rm_adjust_index(rm_db->db,
1203 TF_RM_ADJUST_RM_BASE,
1210 if (parms->base_index)
1211 *parms->base_index = adj_index;
1212 *parms->allocated = ba_inuse(pool, adj_index);
/*
 * Copies the allocation range (start/stride) for a single subtype out of
 * the RM DB into the caller-supplied alloc-info structure.
 */
1218 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1220 struct tf_rm_new_db *rm_db;
1221 enum tf_rm_elem_cfg_type cfg_type;
1223 TF_CHECK_PARMS2(parms, parms->rm_db);
1224 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1225 TF_CHECK_PARMS1(rm_db->db);
1227 cfg_type = rm_db->db[parms->subtype].cfg_type;
1229 /* Bail out if not controlled by HCAPI */
1230 if (cfg_type == TF_RM_ELEM_CFG_NULL)
/* NOTE(review): the tfp_memcpy opener for the copy below is elided. */
1234 &rm_db->db[parms->subtype].alloc,
1235 sizeof(struct tf_rm_alloc_info));
/*
 * Copies allocation info for the first `size` DB entries into the caller's
 * info array, skipping entries not controlled by HCAPI (CFG_NULL).
 */
1241 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1243 struct tf_rm_new_db *rm_db;
1244 enum tf_rm_elem_cfg_type cfg_type;
1245 struct tf_rm_alloc_info *info = parms->info;
1248 TF_CHECK_PARMS1(parms);
1250 /* No rm info available for this module type
/* NOTE(review): the early-return guard for a NULL rm_db is elided. */
1255 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1256 TF_CHECK_PARMS1(rm_db->db);
1258 for (i = 0; i < size; i++) {
1259 cfg_type = rm_db->db[i].cfg_type;
1261 /* Bail out if not controlled by HCAPI */
1262 if (cfg_type == TF_RM_ELEM_CFG_NULL) {
/* NOTE(review): the info++ advance and copy opener are on elided lines. */
1268 &rm_db->db[i].alloc,
1269 sizeof(struct tf_rm_alloc_info));
/*
 * Returns the HCAPI RM resource type recorded for a subtype, provided the
 * element is device-valid (cfg_type != CFG_NULL).
 */
1277 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1279 struct tf_rm_new_db *rm_db;
1280 enum tf_rm_elem_cfg_type cfg_type;
1282 TF_CHECK_PARMS2(parms, parms->rm_db);
1283 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1284 TF_CHECK_PARMS1(rm_db->db);
1286 cfg_type = rm_db->db[parms->subtype].cfg_type;
1288 /* Bail out if not controlled by HCAPI */
1289 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1292 *parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
/*
 * Returns the number of elements currently allocated (in use) in the
 * subtype's BA pool; a NULL pool means nothing was ever allocated for it.
 */
1298 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1301 struct tf_rm_new_db *rm_db;
1302 enum tf_rm_elem_cfg_type cfg_type;
1304 TF_CHECK_PARMS2(parms, parms->rm_db);
1305 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1306 TF_CHECK_PARMS1(rm_db->db);
1308 cfg_type = rm_db->db[parms->subtype].cfg_type;
1310 /* Bail out if not a BA pool */
1311 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1312 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1313 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1316 /* Bail silently (no logging), if the pool is not valid there
1317 * was no elements allocated for it.
1319 if (rm_db->db[parms->subtype].pool == NULL) {
/* NOTE(review): the *parms->count = 0 and return for the NULL-pool branch
 * are on elided lines. */
1324 *parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1328 /* Only used for table bulk get at this time
1331 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1333 struct tf_rm_new_db *rm_db;
1334 enum tf_rm_elem_cfg_type cfg_type;
1335 uint32_t base_index;
1338 struct bitalloc *pool;
1341 TF_CHECK_PARMS2(parms, parms->rm_db);
1342 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1343 TF_CHECK_PARMS1(rm_db->db);
1345 cfg_type = rm_db->db[parms->subtype].cfg_type;
1347 /* Bail out if not a BA pool */
1348 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1349 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1350 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1353 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1357 base_index = rm_db->db[subtype].alloc.entry.start;
1358 stride = rm_db->db[subtype].alloc.entry.stride;
1360 if (parms->starting_index < base_index ||
1361 parms->starting_index + parms->num_entries > base_index + stride)