1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
8 #include <rte_common.h>
11 #include <cfa_resource_types.h>
14 #include "tf_common.h"
16 #include "tf_session.h"
17 #include "tf_device.h"
25 * Generic RM Element data type that an RM DB is built upon.
27 struct tf_rm_element {
29 * RM Element configuration type. If Private then the
30 * hcapi_type can be ignored. If Null then the element is not
31 * valid for the device.
33 enum tf_rm_elem_cfg_type cfg_type;
36 * HCAPI RM Type for the element.
41 * Resource slices. How many slices will fit in the
42 * resource pool chunk size.
47 * HCAPI RM allocated range information for the element.
49 struct tf_rm_alloc_info alloc;
52 * If cfg_type == HCAPI_BA_CHILD, this field indicates
53 * the parent module subtype for look up into the parent pool.
54 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
55 * module subtype of TF_MODULE_TYPE_TABLE.
57 uint16_t parent_subtype;
60 * Bit allocator pool for the element. Pool size is controlled
61 * by the struct tf_session_resources at time of session creation.
62 * Null indicates that the pool is not used for the element.
64 struct bitalloc *pool;
72 * Number of elements in the DB
77 * Direction this DB controls.
82 * Module type, used for logging purposes.
84 enum tf_module_type module;
87 * The DB consists of an array of elements, one per module subtype.
89 struct tf_rm_element *db;
93 * Count the DB configuration elements that are valid for the device
95 * and carry a non-zero reservation request. The resulting count is
96 * used to size the HCAPI RM request message; elements configured as
97 * NULL (unsupported on this device) are skipped.
100 * Pointer to the DB configuration
103 * Pointer to the allocation values associated with the module
106 * Number of DB configuration elements
109 * Number of HCAPI entries with a reservation value greater than 0
113 * - EOPNOTSUPP - Operation not supported
116 tf_rm_count_hcapi_reservations(enum tf_dir dir,
117 enum tf_module_type module,
118 struct tf_rm_element_cfg *cfg,
119 uint16_t *reservations,
121 uint16_t *valid_count)
126 for (i = 0; i < count; i++) {
127 if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
131 /* Only log msg if a type is attempted reserved and
132 * not supported. We ignore EM module as it's using a
133 * split configuration array thus it would fail for
134 * this type of check.
136 if (module != TF_MODULE_TYPE_EM &&
137 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
138 reservations[i] > 0) {
140 "%s, %s, %s allocation of %d not supported\n",
141 tf_module_2_str(module),
143 tf_module_subtype_2_str(module, i),
152 * Resource Manager index base-adjustment directions.
154 enum tf_rm_adjust_type {
155 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
156 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
160 * Adjust an index according to the allocation information.
162 * All resources are controlled in a 0 based pool. Some resources, by
163 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
164 * need to be adjusted before they are handed out.
167 * Pointer to the db, used for the lookup
173 * TF module subtype used as an index into the database.
174 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
175 * module subtype of TF_MODULE_TYPE_TABLE.
185 * - EOPNOTSUPP - Operation not supported
188 tf_rm_adjust_index(struct tf_rm_element *db,
189 enum tf_rm_adjust_type action,
197 base_index = db[subtype].alloc.entry.start;
200 case TF_RM_ADJUST_RM_BASE:
/* Map a device index into the 0-based pool space */
201 *adj_index = index - base_index;
203 case TF_RM_ADJUST_ADD_BASE:
/* Map a 0-based pool index back into the device space */
204 *adj_index = index + base_index;
214 * Logs an array of found residual entries to the console.
217 * Receive or transmit direction
220 * Type of Device Module
223 * Number of entries in the residual array
226 * Pointer to an array of residual entries. Array is indexed the same
227 * as the DB in which this function is used. Each entry holds the
228 * residual value for that entry.
230 #if (TF_RM_DEBUG == 1)
232 tf_rm_log_residuals(enum tf_dir dir,
233 enum tf_module_type module,
239 /* Walk the residual array and log the entries that weren't
240 * cleaned up to the console.
242 for (i = 0; i < count; i++) {
243 if (residuals[i] != 0)
245 "%s, %s was not cleaned up, %d outstanding\n",
247 tf_module_subtype_2_str(module, i),
253 * Performs a check of the passed in DB for any lingering elements. If
254 * a resource type was found to not have been cleaned up by the caller
255 * then its residual values are recorded, logged and passed back in an
256 * allocate reservation array that the caller can pass to the FW for
260 * Pointer to the db, used for the lookup
263 * Pointer to the reservation size of the generated reservation
267 * Pointer to a pointer to a reservation array. The reservation array
268 * is allocated after the residual scan and holds any found residual
269 * entries. Thus it can be smaller than the DB that the check was
270 * performed on. Array must be freed by the caller.
272 * [out] residuals_present
273 * Pointer to a bool flag indicating if residual was present in the
278 * - EOPNOTSUPP - Operation not supported
281 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
283 struct tf_rm_resc_entry **resv,
284 bool *residuals_present)
291 uint16_t *residuals = NULL;
293 struct tf_rm_get_inuse_count_parms iparms;
294 struct tf_rm_get_alloc_info_parms aparms;
295 struct tf_rm_get_hcapi_parms hparms;
296 struct tf_rm_alloc_info info;
297 struct tfp_calloc_parms cparms;
298 struct tf_rm_resc_entry *local_resv = NULL;
300 /* Create array to hold the entries that have residuals */
301 cparms.nitems = rm_db->num_entries;
302 cparms.size = sizeof(uint16_t);
303 cparms.alignment = 0;
304 rc = tfp_calloc(&cparms);
308 residuals = (uint16_t *)cparms.mem_va;
310 /* Traverse the DB and collect any residual elements */
311 iparms.rm_db = rm_db;
312 iparms.count = &count;
313 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
315 rc = tf_rm_get_inuse_count(&iparms);
316 /* Not a device supported entry, just skip */
320 goto cleanup_residuals;
324 residuals[i] = count;
325 *residuals_present = true;
329 if (*residuals_present) {
330 /* Populate a reduced resv array with only the entries
331 * that have residuals.
333 cparms.nitems = found;
334 cparms.size = sizeof(struct tf_rm_resc_entry);
335 cparms.alignment = 0;
336 rc = tfp_calloc(&cparms);
340 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
342 aparms.rm_db = rm_db;
343 hparms.rm_db = rm_db;
344 hparms.hcapi_type = &hcapi_type;
/* Second pass: fill the reduced reservation array */
345 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
346 if (residuals[i] == 0)
350 rc = tf_rm_get_info(&aparms);
355 rc = tf_rm_get_hcapi_type(&hparms);
359 local_resv[f].type = hcapi_type;
360 local_resv[f].start = info.entry.start;
361 local_resv[f].stride = info.entry.stride;
367 #if (TF_RM_DEBUG == 1)
368 tf_rm_log_residuals(rm_db->dir,
373 tfp_free((void *)residuals);
379 tfp_free((void *)local_resv);
382 tfp_free((void *)residuals);
388 * Some resources do not have a 1:1 mapping between the Truflow type and the cfa
389 * resource type (HCAPI RM). These resources have multiple Truflow types which
390 * map to a single HCAPI RM type. In order to support this, one Truflow type
391 * sharing the HCAPI resources is designated the parent. All other Truflow
392 * types associated with that HCAPI RM type are designated the children.
394 * This function updates the resource counts of any HCAPI_BA_PARENT with the
395 * counts of the HCAPI_BA_CHILDREN. These are read from the alloc_cnt and
396 * written back to the req_cnt.
399 * Pointer to an array of module specific Truflow type indexed RM cfg items
402 * Pointer to the tf_open_session() configured array of module specific
403 * Truflow type indexed requested counts.
406 * Pointer to the location to put the updated resource counts.
410 * - Failure if negative
413 tf_rm_update_parent_reservations(struct tf *tfp,
414 struct tf_dev_info *dev,
415 struct tf_rm_element_cfg *cfg,
417 uint16_t num_elements,
422 const char *type_str;
424 /* Search through all the elements */
425 for (parent = 0; parent < num_elements; parent++) {
426 uint16_t combined_cnt = 0;
428 /* If I am a parent */
429 if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
430 uint8_t p_slices = 1;
432 /* Shared session doesn't support slices */
434 p_slices = cfg[parent].slices;
436 RTE_ASSERT(p_slices);
/* Round the parent count up to whole RM alloc items */
438 combined_cnt = alloc_cnt[parent] / p_slices;
440 if (alloc_cnt[parent] % p_slices)
443 if (alloc_cnt[parent]) {
444 dev->ops->tf_dev_get_resource_str(tfp,
445 cfg[parent].hcapi_type,
447 #if (TF_RM_DEBUG == 1)
448 printf("%s:%s cnt(%d) slices(%d)\n",
449 type_str, tf_tbl_type_2_str(parent),
450 alloc_cnt[parent], p_slices);
451 #endif /* (TF_RM_DEBUG == 1) */
454 /* Search again through all the elements */
455 for (child = 0; child < num_elements; child++) {
456 /* If this is one of my children */
457 if (cfg[child].cfg_type ==
458 TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
459 cfg[child].parent_subtype == parent &&
461 uint8_t c_slices = 1;
465 c_slices = cfg[child].slices;
467 RTE_ASSERT(c_slices);
469 dev->ops->tf_dev_get_resource_str(tfp,
470 cfg[child].hcapi_type,
472 #if (TF_RM_DEBUG == 1)
473 printf("%s:%s cnt(%d) slices(%d)\n",
475 tf_tbl_type_2_str(child),
478 #endif /* (TF_RM_DEBUG == 1) */
479 /* Increment the parents combined count
480 * with each child's count adjusted for
481 * number of slices per RM alloc item.
483 cnt = alloc_cnt[child] / c_slices;
485 if (alloc_cnt[child] % c_slices)
489 /* Clear the requested child count */
493 /* Save the parent count to be requested */
494 req_cnt[parent] = combined_cnt;
495 #if (TF_RM_DEBUG == 1)
496 printf("%s calculated total:%d\n\n",
497 type_str, req_cnt[parent]);
498 #endif /* (TF_RM_DEBUG == 1) */
/**
 * Create an RM DB for a module: queries FW capabilities (QCAPS),
 * folds child counts into their HCAPI_BA_PARENT entries, builds and
 * sends the reservation request, then constructs the per-element DB
 * including a bit-allocator pool per BA-controlled element.
 */
505 tf_rm_create_db(struct tf *tfp,
506 struct tf_rm_create_db_parms *parms)
509 struct tf_session *tfs;
510 struct tf_dev_info *dev;
512 uint16_t max_types, hcapi_items, *req_cnt;
513 struct tfp_calloc_parms cparms;
514 struct tf_rm_resc_req_entry *query;
515 enum tf_rm_resc_resv_strategy resv_strategy;
516 struct tf_rm_resc_req_entry *req;
517 struct tf_rm_resc_entry *resv;
518 struct tf_rm_new_db *rm_db;
519 struct tf_rm_element *db;
521 bool shared_session = 0;
523 TF_CHECK_PARMS2(tfp, parms);
525 /* Retrieve the session information */
526 rc = tf_session_get_session_internal(tfp, &tfs);
530 /* Retrieve device information */
531 rc = tf_session_get_device(tfs, &dev);
535 /* Need device max number of elements for the RM QCAPS */
536 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
538 /* Allocate memory for RM QCAPS request */
539 cparms.nitems = max_types;
540 cparms.size = sizeof(struct tf_rm_resc_req_entry);
541 cparms.alignment = 0;
542 rc = tfp_calloc(&cparms);
546 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
548 /* Get Firmware Capabilities */
549 rc = tf_msg_session_resc_qcaps(tfp,
558 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
559 * copy (req_cnt) so that it can be updated if required.
562 cparms.nitems = parms->num_elements;
563 cparms.size = sizeof(uint16_t);
564 rc = tfp_calloc(&cparms);
568 req_cnt = (uint16_t *)cparms.mem_va;
570 tfp_memcpy(req_cnt, parms->alloc_cnt,
571 parms->num_elements * sizeof(uint16_t));
573 shared_session = tf_session_is_shared_session(tfs);
575 /* Update the req_cnt based upon the element configuration
577 tf_rm_update_parent_reservations(tfp, dev, parms->cfg,
583 /* Process capabilities against DB requirements. However, as a
584 * DB can hold elements that are not HCAPI we can reduce the
585 * req msg content by removing those out of the request yet
586 * the DB holds them all as to give a fast lookup. We can also
587 * remove entries where there are no request for elements.
589 tf_rm_count_hcapi_reservations(parms->dir,
596 if (hcapi_items == 0) {
597 #if (TF_RM_DEBUG == 1)
599 "%s: module: %s Empty RM DB create request\n",
600 tf_dir_2_str(parms->dir),
601 tf_module_2_str(parms->module));
607 /* Alloc request, alignment already set */
608 cparms.nitems = (size_t)hcapi_items;
609 cparms.size = sizeof(struct tf_rm_resc_req_entry);
610 rc = tfp_calloc(&cparms);
613 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
615 /* Alloc reservation, alignment and nitems already set */
616 cparms.size = sizeof(struct tf_rm_resc_entry);
617 rc = tfp_calloc(&cparms);
620 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
622 /* Build the request */
623 for (i = 0, j = 0; i < parms->num_elements; i++) {
624 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
625 uint16_t hcapi_type = cfg->hcapi_type;
627 /* Only perform reservation for requested entries
632 /* Skip any children in the request */
633 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
634 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
635 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
637 /* Verify that we can get the full amount per qcaps.
639 if (req_cnt[i] <= query[hcapi_type].max) {
640 req[j].type = hcapi_type;
641 req[j].min = req_cnt[i];
642 req[j].max = req_cnt[i];
645 const char *type_str;
647 dev->ops->tf_dev_get_resource_str(tfp,
651 "Failure, %s:%d:%s req:%d avail:%d\n",
652 tf_dir_2_str(parms->dir),
653 hcapi_type, type_str,
655 query[hcapi_type].max);
661 /* Allocate all resources for the module type
663 rc = tf_msg_session_resc_alloc(tfp,
672 /* Build the RM DB per the request */
674 cparms.size = sizeof(struct tf_rm_new_db);
675 rc = tfp_calloc(&cparms);
678 rm_db = (void *)cparms.mem_va;
680 /* Build the DB within RM DB */
681 cparms.nitems = parms->num_elements;
682 cparms.size = sizeof(struct tf_rm_element);
683 rc = tfp_calloc(&cparms);
686 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
689 for (i = 0, j = 0; i < parms->num_elements; i++) {
690 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
691 const char *type_str;
693 dev->ops->tf_dev_get_resource_str(tfp,
697 db[i].cfg_type = cfg->cfg_type;
698 db[i].hcapi_type = cfg->hcapi_type;
699 db[i].slices = cfg->slices;
701 /* Save the parent subtype for later use to find the pool
703 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
704 db[i].parent_subtype = cfg->parent_subtype;
706 /* If the element didn't request an allocation no need
707 * to create a pool nor verify if we got a reservation.
712 /* Skip any children or invalid
714 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
715 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
716 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
719 /* If the element had requested an allocation and that
720 * allocation was a success (full amount) then
723 if (req_cnt[i] == resv[j].stride) {
724 db[i].alloc.entry.start = resv[j].start;
725 db[i].alloc.entry.stride = resv[j].stride;
727 /* Only allocate BA pool if a BA type not a child */
728 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
729 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
731 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
732 sizeof(struct bitalloc));
733 /* Alloc request, alignment already set */
734 cparms.nitems = pool_size;
735 cparms.size = sizeof(struct bitalloc);
736 rc = tfp_calloc(&cparms);
739 "%s: Pool alloc failed, type:%d:%s\n",
740 tf_dir_2_str(parms->dir),
741 cfg->hcapi_type, type_str);
744 db[i].pool = (struct bitalloc *)cparms.mem_va;
746 rc = ba_init(db[i].pool,
748 !tf_session_is_shared_session(tfs));
751 "%s: Pool init failed, type:%d:%s\n",
752 tf_dir_2_str(parms->dir),
753 cfg->hcapi_type, type_str);
759 /* Bail out as we want what we requested for
760 * all elements, not any less.
763 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
764 tf_dir_2_str(parms->dir), cfg->hcapi_type,
765 type_str, req_cnt[i], resv[j].stride);
770 rm_db->num_entries = parms->num_elements;
771 rm_db->dir = parms->dir;
772 rm_db->module = parms->module;
773 *parms->rm_db = (void *)rm_db;
775 #if (TF_RM_DEBUG == 1)
777 printf("%s: module:%s\n",
778 tf_dir_2_str(parms->dir),
779 tf_module_2_str(parms->module));
780 #endif /* (TF_RM_DEBUG == 1) */
782 tfp_free((void *)req);
783 tfp_free((void *)resv);
784 tfp_free((void *)req_cnt);
/* Error path: release everything allocated so far */
788 tfp_free((void *)req);
789 tfp_free((void *)resv);
790 tfp_free((void *)db->pool);
791 tfp_free((void *)db);
792 tfp_free((void *)rm_db);
793 tfp_free((void *)req_cnt);
/**
 * Create an RM DB without issuing a FW reservation: the resource
 * ranges are retrieved via tf_msg_session_resc_info() rather than
 * allocated via tf_msg_session_resc_alloc(), otherwise mirrors
 * tf_rm_create_db().
 */
800 tf_rm_create_db_no_reservation(struct tf *tfp,
801 struct tf_rm_create_db_parms *parms)
804 struct tf_session *tfs;
805 struct tf_dev_info *dev;
807 uint16_t hcapi_items, *req_cnt;
808 struct tfp_calloc_parms cparms;
809 struct tf_rm_resc_req_entry *req;
810 struct tf_rm_resc_entry *resv;
811 struct tf_rm_new_db *rm_db;
812 struct tf_rm_element *db;
815 TF_CHECK_PARMS2(tfp, parms);
817 /* Retrieve the session information */
818 rc = tf_session_get_session_internal(tfp, &tfs);
822 /* Retrieve device information */
823 rc = tf_session_get_device(tfs, &dev);
827 /* Copy requested counts (alloc_cnt) from tf_open_session() to local
828 * copy (req_cnt) so that it can be updated if required.
831 cparms.nitems = parms->num_elements;
832 cparms.size = sizeof(uint16_t);
833 cparms.alignment = 0;
834 rc = tfp_calloc(&cparms);
838 req_cnt = (uint16_t *)cparms.mem_va;
840 tfp_memcpy(req_cnt, parms->alloc_cnt,
841 parms->num_elements * sizeof(uint16_t));
843 /* Process capabilities against DB requirements. However, as a
844 * DB can hold elements that are not HCAPI we can reduce the
845 * req msg content by removing those out of the request yet
846 * the DB holds them all as to give a fast lookup. We can also
847 * remove entries where there are no request for elements.
849 tf_rm_count_hcapi_reservations(parms->dir,
856 if (hcapi_items == 0) {
858 "%s: module:%s Empty RM DB create request\n",
859 tf_dir_2_str(parms->dir),
860 tf_module_2_str(parms->module));
866 /* Alloc request, alignment already set */
867 cparms.nitems = (size_t)hcapi_items;
868 cparms.size = sizeof(struct tf_rm_resc_req_entry);
869 rc = tfp_calloc(&cparms);
872 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
874 /* Alloc reservation, alignment and nitems already set */
875 cparms.size = sizeof(struct tf_rm_resc_entry);
876 rc = tfp_calloc(&cparms);
879 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
881 /* Build the request */
882 for (i = 0, j = 0; i < parms->num_elements; i++) {
883 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
884 uint16_t hcapi_type = cfg->hcapi_type;
886 /* Only perform reservation for requested entries
891 /* Skip any children in the request */
892 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
893 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
894 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
895 req[j].type = hcapi_type;
896 req[j].min = req_cnt[i];
897 req[j].max = req_cnt[i];
902 /* Get all resources info for the module type
904 rc = tf_msg_session_resc_info(tfp,
913 /* Build the RM DB per the request */
915 cparms.size = sizeof(struct tf_rm_new_db);
916 rc = tfp_calloc(&cparms);
919 rm_db = (void *)cparms.mem_va;
921 /* Build the DB within RM DB */
922 cparms.nitems = parms->num_elements;
923 cparms.size = sizeof(struct tf_rm_element);
924 rc = tfp_calloc(&cparms);
927 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
930 for (i = 0, j = 0; i < parms->num_elements; i++) {
931 struct tf_rm_element_cfg *cfg = &parms->cfg[i];
932 const char *type_str;
934 dev->ops->tf_dev_get_resource_str(tfp,
938 db[i].cfg_type = cfg->cfg_type;
939 db[i].hcapi_type = cfg->hcapi_type;
941 /* Save the parent subtype for later use to find the pool
943 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
944 db[i].parent_subtype = cfg->parent_subtype;
946 /* If the element didn't request an allocation no need
947 * to create a pool nor verify if we got a reservation.
952 /* Skip any children or invalid
954 if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
955 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
956 cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
959 /* If the element had requested an allocation and that
960 * allocation was a success (full amount) then
963 if (req_cnt[i] == resv[j].stride) {
964 db[i].alloc.entry.start = resv[j].start;
965 db[i].alloc.entry.stride = resv[j].stride;
967 /* Only allocate BA pool if a BA type not a child */
968 if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
969 cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
971 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
972 sizeof(struct bitalloc));
973 /* Alloc request, alignment already set */
974 cparms.nitems = pool_size;
975 cparms.size = sizeof(struct bitalloc);
976 rc = tfp_calloc(&cparms);
979 "%s: Pool alloc failed, type:%d:%s\n",
980 tf_dir_2_str(parms->dir),
981 cfg->hcapi_type, type_str);
984 db[i].pool = (struct bitalloc *)cparms.mem_va;
986 rc = ba_init(db[i].pool,
988 !tf_session_is_shared_session(tfs));
991 "%s: Pool init failed, type:%d:%s\n",
992 tf_dir_2_str(parms->dir),
993 cfg->hcapi_type, type_str);
999 /* Bail out as we want what we requested for
1000 * all elements, not any less.
1003 "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
1004 tf_dir_2_str(parms->dir), cfg->hcapi_type,
1005 type_str, req_cnt[i], resv[j].stride);
1010 rm_db->num_entries = parms->num_elements;
1011 rm_db->dir = parms->dir;
1012 rm_db->module = parms->module;
1013 *parms->rm_db = (void *)rm_db;
1015 #if (TF_RM_DEBUG == 1)
1017 printf("%s: module:%s\n",
1018 tf_dir_2_str(parms->dir),
1019 tf_module_2_str(parms->module));
1020 #endif /* (TF_RM_DEBUG == 1) */
1022 tfp_free((void *)req);
1023 tfp_free((void *)resv);
1024 tfp_free((void *)req_cnt);
/* Error path: release everything allocated so far */
1028 tfp_free((void *)req);
1029 tfp_free((void *)resv);
1030 tfp_free((void *)db->pool);
1031 tfp_free((void *)db);
1032 tfp_free((void *)rm_db);
1033 tfp_free((void *)req_cnt);
1034 parms->rm_db = NULL;
/**
 * Free an RM DB: check for residual (un-freed) elements, flush them
 * to FW if any were found, then free each element's BA pool and the
 * DB itself.
 */
1039 tf_rm_free_db(struct tf *tfp,
1040 struct tf_rm_free_db_parms *parms)
1044 uint16_t resv_size = 0;
1045 struct tf_rm_new_db *rm_db;
1046 struct tf_rm_resc_entry *resv;
1047 bool residuals_found = false;
1049 TF_CHECK_PARMS2(parms, parms->rm_db);
1051 /* Device unbind happens when the TF Session is closed and the
1052 * session ref count is 0. Device unbind will cleanup each of
1053 * its support modules, i.e. Identifier, thus we're ending up
1054 * here to close the DB.
1056 * On TF Session close it is assumed that the session has already
1057 * cleaned up all its resources, individually, while
1058 * destroying its flows.
1060 * To assist in the 'cleanup checking' the DB is checked for any
1061 * remaining elements and logged if found to be the case.
1063 * Any such elements will need to be 'cleared' ahead of
1064 * returning the resources to the HCAPI RM.
1066 * RM will signal FW to flush the DB resources. FW will
1067 * perform the invalidation. TF Session close will return the
1068 * previous allocated elements to the RM and then close the
1069 * HCAPI RM registration. That then saves several 'free' msgs
1070 * from being required.
1073 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1075 /* Check for residuals that the client didn't clean up */
1076 rc = tf_rm_check_residuals(rm_db,
1083 /* Invalidate any residuals followed by a DB traversal for
1086 if (residuals_found) {
1087 rc = tf_msg_session_resc_flush(tfp,
1091 tfp_free((void *)resv);
1092 /* On failure we still have to cleanup so we can only
1093 * log that FW failed.
1097 "%s: Internal Flush error, module:%s\n",
1098 tf_dir_2_str(parms->dir),
1099 tf_module_2_str(rm_db->module));
1102 /* No need to check for configuration type, even if we do not
1103 * have a BA pool we just delete on a null ptr, no harm
1105 for (i = 0; i < rm_db->num_entries; i++)
1106 tfp_free((void *)rm_db->db[i].pool);
1108 tfp_free((void *)parms->rm_db);
1113 * Get the bit allocator pool associated with the subtype and the db
1119 * Module subtype used to index into the module specific database.
1120 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
1121 * module subtype of TF_MODULE_TYPE_TABLE.
1124 * Pointer to the bit allocator pool used
1126 * [out] new_subtype
1127 * Pointer to the subtype of the actual pool used
1130 * - ENOTSUP - Operation not supported
1133 tf_rm_get_pool(struct tf_rm_new_db *rm_db,
1135 struct bitalloc **pool,
1136 uint16_t *new_subtype)
1139 uint16_t tmp_subtype = subtype;
1141 /* If we are a child, get the parent table index */
1142 if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1143 tmp_subtype = rm_db->db[subtype].parent_subtype;
1145 *pool = rm_db->db[tmp_subtype].pool;
1147 /* Bail out if the pool is not valid, should never happen */
1148 if (rm_db->db[tmp_subtype].pool == NULL) {
1151 "%s: Invalid pool for this type:%d, rc:%s\n",
1152 tf_dir_2_str(rm_db->dir),
1157 *new_subtype = tmp_subtype;
/**
 * Allocate a single element from the subtype's (or its parent's) BA
 * pool and return the base-adjusted device index to the caller.
 */
1162 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
1167 struct tf_rm_new_db *rm_db;
1168 enum tf_rm_elem_cfg_type cfg_type;
1169 struct bitalloc *pool;
1172 TF_CHECK_PARMS2(parms, parms->rm_db);
1174 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1175 TF_CHECK_PARMS1(rm_db->db);
1177 cfg_type = rm_db->db[parms->subtype].cfg_type;
1179 /* Bail out if not controlled by RM */
1180 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1181 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1182 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1185 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1189 * priority 0: allocate from top of the tcam i.e. high
1190 * priority !0: allocate index from bottom i.e lowest
1192 if (parms->priority)
1193 id = ba_alloc_reverse(pool);
1195 id = ba_alloc(pool);
1196 if (id == BA_FAIL) {
1199 "%s: Allocation failed, rc:%s\n",
1200 tf_dir_2_str(rm_db->dir),
1205 /* Adjust for any non zero start value */
1206 rc = tf_rm_adjust_index(rm_db->db,
1207 TF_RM_ADJUST_ADD_BASE,
1213 "%s: Alloc adjust of base index failed, rc:%s\n",
1214 tf_dir_2_str(rm_db->dir),
1219 *parms->index = index;
1220 if (parms->base_index)
1221 *parms->base_index = id;
/**
 * Return a previously allocated element to its (or its parent's) BA
 * pool after removing the base offset from the device index.
 */
1227 tf_rm_free(struct tf_rm_free_parms *parms)
1231 struct tf_rm_new_db *rm_db;
1232 enum tf_rm_elem_cfg_type cfg_type;
1233 struct bitalloc *pool;
1236 TF_CHECK_PARMS2(parms, parms->rm_db);
1237 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1238 TF_CHECK_PARMS1(rm_db->db);
1240 cfg_type = rm_db->db[parms->subtype].cfg_type;
1242 /* Bail out if not controlled by RM */
1243 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1244 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1245 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1248 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1252 /* Adjust for any non zero start value */
1253 rc = tf_rm_adjust_index(rm_db->db,
1254 TF_RM_ADJUST_RM_BASE,
1261 rc = ba_free(pool, adj_index);
1262 /* No logging direction matters and that is not available here */
/**
 * Query whether a device index is currently allocated in the
 * subtype's (or its parent's) BA pool.
 */
1270 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
1274 struct tf_rm_new_db *rm_db;
1275 enum tf_rm_elem_cfg_type cfg_type;
1276 struct bitalloc *pool;
1279 TF_CHECK_PARMS2(parms, parms->rm_db);
1280 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1281 TF_CHECK_PARMS1(rm_db->db);
1283 cfg_type = rm_db->db[parms->subtype].cfg_type;
1285 /* Bail out if not controlled by RM */
1286 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1287 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1288 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1291 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1295 /* Adjust for any non zero start value */
1296 rc = tf_rm_adjust_index(rm_db->db,
1297 TF_RM_ADJUST_RM_BASE,
1304 if (parms->base_index)
1305 *parms->base_index = adj_index;
1306 *parms->allocated = ba_inuse(pool, adj_index);
/**
 * Copy the allocation (start/stride) info for a subtype to the
 * caller-provided info structure.
 */
1312 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
1314 struct tf_rm_new_db *rm_db;
1315 enum tf_rm_elem_cfg_type cfg_type;
1317 TF_CHECK_PARMS2(parms, parms->rm_db);
1318 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1319 TF_CHECK_PARMS1(rm_db->db);
1321 cfg_type = rm_db->db[parms->subtype].cfg_type;
1323 /* Bail out if not controlled by HCAPI */
1324 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1328 &rm_db->db[parms->subtype].alloc,
1329 sizeof(struct tf_rm_alloc_info));
/**
 * Copy allocation info for the first 'size' subtypes of the DB into
 * the caller's info array, skipping NULL-configured elements.
 * NOTE(review): the memcpy destination per iteration is presumably
 * info[i] — confirm against the full source.
 */
1335 tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size)
1337 struct tf_rm_new_db *rm_db;
1338 enum tf_rm_elem_cfg_type cfg_type;
1339 struct tf_rm_alloc_info *info = parms->info;
1342 TF_CHECK_PARMS1(parms);
1344 /* No rm info available for this module type
1349 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1350 TF_CHECK_PARMS1(rm_db->db);
1352 for (i = 0; i < size; i++) {
1353 cfg_type = rm_db->db[i].cfg_type;
1355 /* Bail out if not controlled by HCAPI */
1356 if (cfg_type == TF_RM_ELEM_CFG_NULL) {
1362 &rm_db->db[i].alloc,
1363 sizeof(struct tf_rm_alloc_info));
/**
 * Look up the HCAPI RM type for a subtype.
 */
1371 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
1373 struct tf_rm_new_db *rm_db;
1374 enum tf_rm_elem_cfg_type cfg_type;
1376 TF_CHECK_PARMS2(parms, parms->rm_db);
1377 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1378 TF_CHECK_PARMS1(rm_db->db);
1380 cfg_type = rm_db->db[parms->subtype].cfg_type;
1382 /* Bail out if not controlled by HCAPI */
1383 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1386 *parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
/**
 * Look up the per-entry slice count for a subtype.
 */
1391 tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)
1393 struct tf_rm_new_db *rm_db;
1394 enum tf_rm_elem_cfg_type cfg_type;
1396 TF_CHECK_PARMS2(parms, parms->rm_db);
1397 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1398 TF_CHECK_PARMS1(rm_db->db);
1400 cfg_type = rm_db->db[parms->subtype].cfg_type;
1402 /* Bail out if not controlled by HCAPI */
1403 if (cfg_type == TF_RM_ELEM_CFG_NULL)
1406 *parms->slices = rm_db->db[parms->subtype].slices;
/**
 * Report the number of in-use entries in the subtype's BA pool; a
 * missing pool means nothing was ever allocated for the element.
 */
1412 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
1415 struct tf_rm_new_db *rm_db;
1416 enum tf_rm_elem_cfg_type cfg_type;
1418 TF_CHECK_PARMS2(parms, parms->rm_db);
1419 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1420 TF_CHECK_PARMS1(rm_db->db);
1422 cfg_type = rm_db->db[parms->subtype].cfg_type;
1424 /* Bail out if not a BA pool */
1425 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1426 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1427 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1430 /* Bail silently (no logging), if the pool is not valid there
1431 * was no elements allocated for it.
1433 if (rm_db->db[parms->subtype].pool == NULL) {
1438 *parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
1442 /* Only used for table bulk get at this time
1445 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
1447 struct tf_rm_new_db *rm_db;
1448 enum tf_rm_elem_cfg_type cfg_type;
1449 uint32_t base_index;
1452 struct bitalloc *pool;
1455 TF_CHECK_PARMS2(parms, parms->rm_db);
1456 rm_db = (struct tf_rm_new_db *)parms->rm_db;
1457 TF_CHECK_PARMS1(rm_db->db);
1459 cfg_type = rm_db->db[parms->subtype].cfg_type;
1461 /* Bail out if not a BA pool */
1462 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
1463 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
1464 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
1467 rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
1471 base_index = rm_db->db[subtype].alloc.entry.start;
1472 stride = rm_db->db[subtype].alloc.entry.stride;
1474 if (parms->starting_index < base_index ||
1475 parms->starting_index + parms->num_entries > base_index + stride)