1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
10 #include <cfa_resource_types.h>
13 #include "tf_common.h"
15 #include "tf_session.h"
16 #include "tf_device.h"
/*
 * NOTE(review): this capture appears line-sampled; comment delimiters and
 * some struct members/closing braces are missing from view. The two
 * structures below describe (a) a single RM element and (b) the RM DB
 * that aggregates an array of such elements per direction/module.
 */
24 * Generic RM Element data type that an RM DB is build upon.
26 struct tf_rm_element {
28 * RM Element configuration type. If Private then the
29 * hcapi_type can be ignored. If Null then the element is not
30 * valid for the device.
32 enum tf_rm_elem_cfg_type cfg_type;
35 * HCAPI RM Type for the element.
40 * HCAPI RM allocated range information for the element.
42 struct tf_rm_alloc_info alloc;
45 * Bit allocator pool for the element. Pool size is controlled
46 * by the struct tf_session_resources at time of session creation.
47 * Null indicates that the element is not used for the device.
49 struct bitalloc *pool;
/* Fields below belong to the RM DB container (tf_rm_new_db); its
 * "struct" line is not visible in this capture — confirm against
 * the full source.
 */
57 * Number of elements in the DB
62 * Direction this DB controls.
67 * Module type, used for logging purposes.
69 enum tf_device_module_type type;
72 * The DB consists of an array of elements
74 struct tf_rm_element *db;
/*
 * Counts how many HCAPI-type cfg entries carry a non-zero reservation
 * request, writing the result through the valid_count out-parameter.
 * Entries of NULL cfg type with a non-zero reservation are logged as
 * unsupported, except for the EM module whose split cfg array would
 * produce false positives (per the inline comment below).
 *
 * NOTE(review): the pre-existing doc comment below ("Adjust an index...")
 * looks copy-pasted from tf_rm_adjust_index and does not describe this
 * function — confirm and fix in the full source.
 */
78 * Adjust an index according to the allocation information.
80 * All resources are controlled in a 0 based pool. Some resources, by
81 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
82 * need to be adjusted before they are handed out.
85 * Pointer to the DB configuration
88 * Pointer to the allocation values associated with the module
91 * Number of DB configuration elements
94 * Number of HCAPI entries with a reservation value greater than 0
98 * - EOPNOTSUPP - Operation not supported
101 tf_rm_count_hcapi_reservations(enum tf_dir dir,
102 enum tf_device_module_type type,
103 struct tf_rm_element_cfg *cfg,
104 uint16_t *reservations,
106 uint16_t *valid_count)
111 for (i = 0; i < count; i++) {
112 if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
113 cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
117 /* Only log msg if a type is attempted reserved and
118 * not supported. We ignore EM module as its using a
119 * split configuration array thus it would fail for
120 * this type of check.
122 if (type != TF_DEVICE_MODULE_TYPE_EM &&
123 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
124 reservations[i] > 0) {
126 "%s, %s, %s allocation of %d not supported\n",
127 tf_device_module_type_2_str(type),
129 tf_device_module_type_subtype_2_str(type, i),
/* Direction of base-index adjustment applied by tf_rm_adjust_index(). */
138 * Resource Manager Adjust of base index definitions.
140 enum tf_rm_adjust_type {
141 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
142 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
/*
 * Converts between the internal 0-based pool index and the device's
 * (possibly non-zero-based) HW index by adding or subtracting the
 * element's allocation start (alloc.entry.start), per the requested
 * action. NOTE(review): switch scaffolding, break/default, and return
 * lines are not visible in this capture.
 */
146 * Adjust an index according to the allocation information.
148 * All resources are controlled in a 0 based pool. Some resources, by
149 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
150 * need to be adjusted before they are handed out.
153 * Pointer to the db, used for the lookup
159 * DB index for the element type
169 * - EOPNOTSUPP - Operation not supported
172 tf_rm_adjust_index(struct tf_rm_element *db,
173 enum tf_rm_adjust_type action,
181 base_index = db[db_index].alloc.entry.start;
184 case TF_RM_ADJUST_RM_BASE:
185 *adj_index = index - base_index;
187 case TF_RM_ADJUST_ADD_BASE:
188 *adj_index = index + base_index;
/*
 * Walks the residuals array (indexed like the DB) and logs every entry
 * with a non-zero outstanding count. Pure logging helper — no state is
 * modified.
 */
198 * Logs an array of found residual entries to the console.
201 * Receive or transmit direction
204 * Type of Device Module
207 * Number of entries in the residual array
210 * Pointer to an array of residual entries. Array is index same as
211 * the DB in which this function is used. Each entry holds residual
212 * value for that entry.
215 tf_rm_log_residuals(enum tf_dir dir,
216 enum tf_device_module_type type,
222 /* Walk the residual array and log the types that wasn't
223 * cleaned up to the console.
225 for (i = 0; i < count; i++) {
226 if (residuals[i] != 0)
228 "%s, %s was not cleaned up, %d outstanding\n",
230 tf_device_module_type_subtype_2_str(type, i),
/*
 * Scans an RM DB for elements still marked in-use ("residuals").
 * Pass 1: queries the in-use count per DB entry into a temp array and
 * sets *residuals_present on any non-zero count. Pass 2 (only if
 * residuals were found): builds a compact tf_rm_resc_entry array — one
 * entry per residual type, carrying its hcapi_type and allocated
 * start/stride — returned via *resv for the caller to hand to FW.
 * Ownership of *resv transfers to the caller (must be tfp_free'd).
 * The temp residuals array is freed locally on all visible paths.
 * NOTE(review): error checks, labels and several assignments between
 * the visible lines are missing from this capture.
 */
236 * Performs a check of the passed in DB for any lingering elements. If
237 * a resource type was found to not have been cleaned up by the caller
238 * then its residual values are recorded, logged and passed back in an
239 * allocate reservation array that the caller can pass to the FW for
243 * Pointer to the db, used for the lookup
246 * Pointer to the reservation size of the generated reservation
250 * Pointer Pointer to a reservation array. The reservation array is
251 * allocated after the residual scan and holds any found residual
252 * entries. Thus it can be smaller than the DB that the check was
253 * performed on. Array must be freed by the caller.
255 * [out] residuals_present
256 * Pointer to a bool flag indicating if residual was present in the
261 * - EOPNOTSUPP - Operation not supported
264 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
266 struct tf_rm_resc_entry **resv,
267 bool *residuals_present)
274 uint16_t *residuals = NULL;
276 struct tf_rm_get_inuse_count_parms iparms;
277 struct tf_rm_get_alloc_info_parms aparms;
278 struct tf_rm_get_hcapi_parms hparms;
279 struct tf_rm_alloc_info info;
280 struct tfp_calloc_parms cparms;
281 struct tf_rm_resc_entry *local_resv = NULL;
283 /* Create array to hold the entries that have residuals */
284 cparms.nitems = rm_db->num_entries;
285 cparms.size = sizeof(uint16_t);
286 cparms.alignment = 0;
287 rc = tfp_calloc(&cparms);
291 residuals = (uint16_t *)cparms.mem_va;
293 /* Traverse the DB and collect any residual elements */
294 iparms.rm_db = rm_db;
295 iparms.count = &count;
296 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
298 rc = tf_rm_get_inuse_count(&iparms);
299 /* Not a device supported entry, just skip */
303 goto cleanup_residuals;
307 residuals[i] = count;
308 *residuals_present = true;
312 if (*residuals_present) {
313 /* Populate a reduced resv array with only the entries
314 * that have residuals.
316 cparms.nitems = found;
317 cparms.size = sizeof(struct tf_rm_resc_entry);
318 cparms.alignment = 0;
319 rc = tfp_calloc(&cparms);
323 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
325 aparms.rm_db = rm_db;
326 hparms.rm_db = rm_db;
327 hparms.hcapi_type = &hcapi_type;
328 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
329 if (residuals[i] == 0)
333 rc = tf_rm_get_info(&aparms);
338 rc = tf_rm_get_hcapi_type(&hparms);
342 local_resv[f].type = hcapi_type;
343 local_resv[f].start = info.entry.start;
344 local_resv[f].stride = info.entry.stride;
350 tf_rm_log_residuals(rm_db->dir,
355 tfp_free((void *)residuals);
/* Error-path cleanup: release both temp arrays. The labels for these
 * cleanup chains are not visible here — verify against the full source.
 */
361 tfp_free((void *)local_resv);
364 tfp_free((void *)residuals);
/*
 * Creates an RM DB for one direction/module:
 *  1. QCAPS the device (tf_msg_session_resc_qcaps) for per-type maxima.
 *  2. Builds a reduced request (HCAPI-type entries with non-zero
 *     alloc_cnt only) and validates each request against QCAPS max.
 *  3. Sends the reservation (tf_msg_session_resc_alloc).
 *  4. Allocates the rm_db container plus its element array; for each
 *     fully-satisfied HCAPI_BA element, allocates and ba_init's a
 *     bitalloc pool sized from the reserved stride.
 * On success the opaque DB handle is returned via *parms->rm_db and the
 * temporary req/resv/query buffers are freed.
 * NOTE(review): error checks, 'continue's, j increments, labels and the
 * "db = rm_db->db" assignment are missing from this capture.
 */
370 tf_rm_create_db(struct tf *tfp,
371 struct tf_rm_create_db_parms *parms)
376 struct tf_session *tfs;
377 struct tf_dev_info *dev;
379 struct tfp_calloc_parms cparms;
380 struct tf_rm_resc_req_entry *query;
381 enum tf_rm_resc_resv_strategy resv_strategy;
382 struct tf_rm_resc_req_entry *req;
383 struct tf_rm_resc_entry *resv;
384 struct tf_rm_new_db *rm_db;
385 struct tf_rm_element *db;
387 uint16_t hcapi_items;
389 TF_CHECK_PARMS2(tfp, parms);
391 /* Retrieve the session information */
392 rc = tf_session_get_session_internal(tfp, &tfs);
396 /* Retrieve device information */
397 rc = tf_session_get_device(tfs, &dev);
401 /* Need device max number of elements for the RM QCAPS */
402 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
406 cparms.nitems = max_types;
407 cparms.size = sizeof(struct tf_rm_resc_req_entry);
408 cparms.alignment = 0;
409 rc = tfp_calloc(&cparms);
413 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
415 /* Get Firmware Capabilities */
416 rc = tf_msg_session_resc_qcaps(tfp,
424 /* Process capabilities against DB requirements. However, as a
425 * DB can hold elements that are not HCAPI we can reduce the
426 * req msg content by removing those out of the request yet
427 * the DB holds them all as to give a fast lookup. We can also
428 * remove entries where there are no request for elements.
430 tf_rm_count_hcapi_reservations(parms->dir,
437 /* Handle the case where a DB create request really ends up
438 * being empty. Unsupported (if not rare) case but possible
439 * that no resources are necessary for a 'direction'.
441 if (hcapi_items == 0) {
443 "%s: DB create request for Zero elements, DB Type:%s\n",
444 tf_dir_2_str(parms->dir),
445 tf_device_module_type_2_str(parms->type));
451 /* Alloc request, alignment already set */
452 cparms.nitems = (size_t)hcapi_items;
453 cparms.size = sizeof(struct tf_rm_resc_req_entry);
454 rc = tfp_calloc(&cparms);
457 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
459 /* Alloc reservation, alignment and nitems already set */
460 cparms.size = sizeof(struct tf_rm_resc_entry);
461 rc = tfp_calloc(&cparms);
464 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
466 /* Build the request */
467 for (i = 0, j = 0; i < parms->num_elements; i++) {
468 /* Skip any non HCAPI cfg elements */
469 if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
470 parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
471 /* Only perform reservation for entries that
474 if (parms->alloc_cnt[i] == 0)
477 /* Verify that we can get the full amount
478 * allocated per the qcaps availability.
480 if (parms->alloc_cnt[i] <=
481 query[parms->cfg[i].hcapi_type].max) {
482 req[j].type = parms->cfg[i].hcapi_type;
483 req[j].min = parms->alloc_cnt[i];
484 req[j].max = parms->alloc_cnt[i];
488 "%s: Resource failure, type:%d\n",
489 tf_dir_2_str(parms->dir),
490 parms->cfg[i].hcapi_type);
492 "req:%d, avail:%d\n",
494 query[parms->cfg[i].hcapi_type].max);
500 rc = tf_msg_session_resc_alloc(tfp,
508 /* Build the RM DB per the request */
510 cparms.size = sizeof(struct tf_rm_new_db);
511 rc = tfp_calloc(&cparms);
514 rm_db = (void *)cparms.mem_va;
516 /* Build the DB within RM DB */
517 cparms.nitems = parms->num_elements;
518 cparms.size = sizeof(struct tf_rm_element);
519 rc = tfp_calloc(&cparms);
522 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
525 for (i = 0, j = 0; i < parms->num_elements; i++) {
526 db[i].cfg_type = parms->cfg[i].cfg_type;
527 db[i].hcapi_type = parms->cfg[i].hcapi_type;
529 /* Skip any non HCAPI types as we didn't include them
530 * in the reservation request.
532 if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI &&
533 parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
536 /* If the element didn't request an allocation no need
537 * to create a pool nor verify if we got a reservation.
539 if (parms->alloc_cnt[i] == 0)
542 /* If the element had requested an allocation and that
543 * allocation was a success (full amount) then
546 if (parms->alloc_cnt[i] == resv[j].stride) {
547 db[i].alloc.entry.start = resv[j].start;
548 db[i].alloc.entry.stride = resv[j].stride;
550 /* Only allocate BA pool if so requested */
551 if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
553 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
554 sizeof(struct bitalloc));
555 /* Alloc request, alignment already set */
556 cparms.nitems = pool_size;
557 cparms.size = sizeof(struct bitalloc);
558 rc = tfp_calloc(&cparms);
561 "%s: Pool alloc failed, type:%d\n",
562 tf_dir_2_str(parms->dir),
566 db[i].pool = (struct bitalloc *)cparms.mem_va;
568 rc = ba_init(db[i].pool, resv[j].stride);
571 "%s: Pool init failed, type:%d\n",
572 tf_dir_2_str(parms->dir),
579 /* Bail out as we want what we requested for
580 * all elements, not any less.
583 "%s: Alloc failed, type:%d\n",
584 tf_dir_2_str(parms->dir),
587 "req:%d, alloc:%d\n",
594 rm_db->num_entries = parms->num_elements;
595 rm_db->dir = parms->dir;
596 rm_db->type = parms->type;
597 *parms->rm_db = (void *)rm_db;
599 #if (TF_RM_DEBUG == 1)
600 printf("%s: type:%d num_entries:%d\n",
601 tf_dir_2_str(parms->dir),
604 #endif /* (TF_RM_DEBUG == 1) */
606 tfp_free((void *)req);
607 tfp_free((void *)resv);
/* Error-path cleanup. NOTE(review): "db->pool" here frees only the
 * FIRST element's pool; per-element pools allocated in the loop above
 * appear to be leaked on this path — verify against the full source
 * and, if confirmed, free every db[i].pool before freeing db.
 */
612 tfp_free((void *)req);
613 tfp_free((void *)resv);
614 tfp_free((void *)db->pool);
615 tfp_free((void *)db);
616 tfp_free((void *)rm_db);
/*
 * Tears down an RM DB at session/device-unbind time. First runs the
 * residual check; if residuals exist, asks FW to flush them in one
 * message (tf_msg_session_resc_flush) — on flush failure it only logs,
 * since local cleanup must proceed regardless. Then frees every
 * element's BA pool (tfp_free is applied unconditionally; the existing
 * comment states a NULL pool is harmless) and finally the DB handle.
 * NOTE(review): interior error checks and some frees (e.g. of
 * rm_db->db itself) are not visible in this capture — confirm that the
 * element array is also released in the full source.
 */
623 tf_rm_free_db(struct tf *tfp,
624 struct tf_rm_free_db_parms *parms)
628 uint16_t resv_size = 0;
629 struct tf_rm_new_db *rm_db;
630 struct tf_rm_resc_entry *resv;
631 bool residuals_found = false;
633 TF_CHECK_PARMS2(parms, parms->rm_db);
635 /* Device unbind happens when the TF Session is closed and the
636 * session ref count is 0. Device unbind will cleanup each of
637 * its support modules, i.e. Identifier, thus we're ending up
638 * here to close the DB.
640 * On TF Session close it is assumed that the session has already
641 * cleaned up all its resources, individually, while
642 * destroying its flows.
644 * To assist in the 'cleanup checking' the DB is checked for any
645 * remaining elements and logged if found to be the case.
647 * Any such elements will need to be 'cleared' ahead of
648 * returning the resources to the HCAPI RM.
650 * RM will signal FW to flush the DB resources. FW will
651 * perform the invalidation. TF Session close will return the
652 * previous allocated elements to the RM and then close the
653 * HCAPI RM registration. That then saves several 'free' msgs
654 * from being required.
657 rm_db = (struct tf_rm_new_db *)parms->rm_db;
659 /* Check for residuals that the client didn't clean up */
660 rc = tf_rm_check_residuals(rm_db,
667 /* Invalidate any residuals followed by a DB traversal for
670 if (residuals_found) {
671 rc = tf_msg_session_resc_flush(tfp,
675 tfp_free((void *)resv);
676 /* On failure we still have to cleanup so we can only
677 * log that FW failed.
681 "%s: Internal Flush error, module:%s\n",
682 tf_dir_2_str(parms->dir),
683 tf_device_module_type_2_str(rm_db->type));
686 /* No need to check for configuration type, even if we do not
687 * have a BA pool we just delete on a null ptr, no harm
689 for (i = 0; i < rm_db->num_entries; i++)
690 tfp_free((void *)rm_db->db[i].pool);
692 tfp_free((void *)parms->rm_db);
/*
 * Allocates one index from a DB element's bitalloc pool. Only valid for
 * TF_RM_ELEM_CFG_HCAPI_BA elements with a non-NULL pool. Per the
 * priority comment below, priority 0 allocates from the top of the pool
 * (ba_alloc_reverse) and non-zero priority from the bottom (ba_alloc).
 * The raw pool id is base-adjusted (TF_RM_ADJUST_ADD_BASE) into the
 * device index returned via *parms->index; the unadjusted pool id is
 * optionally returned via *parms->base_index.
 * NOTE(review): the priority if/else, error returns, and closing braces
 * are partially missing from this capture.
 */
698 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
703 struct tf_rm_new_db *rm_db;
704 enum tf_rm_elem_cfg_type cfg_type;
706 TF_CHECK_PARMS2(parms, parms->rm_db);
708 rm_db = (struct tf_rm_new_db *)parms->rm_db;
709 cfg_type = rm_db->db[parms->db_index].cfg_type;
711 /* Bail out if not controlled by RM */
712 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
715 /* Bail out if the pool is not valid, should never happen */
716 if (rm_db->db[parms->db_index].pool == NULL) {
719 "%s: Invalid pool for this type:%d, rc:%s\n",
720 tf_dir_2_str(rm_db->dir),
727 * priority 0: allocate from top of the tcam i.e. high
728 * priority !0: allocate index from bottom i.e lowest
731 id = ba_alloc_reverse(rm_db->db[parms->db_index].pool);
733 id = ba_alloc(rm_db->db[parms->db_index].pool);
737 "%s: Allocation failed, rc:%s\n",
738 tf_dir_2_str(rm_db->dir),
743 /* Adjust for any non zero start value */
744 rc = tf_rm_adjust_index(rm_db->db,
745 TF_RM_ADJUST_ADD_BASE,
751 "%s: Alloc adjust of base index failed, rc:%s\n",
752 tf_dir_2_str(rm_db->dir),
757 *parms->index = index;
758 if (parms->base_index)
759 *parms->base_index = id;
/*
 * Returns a previously allocated index to its element's bitalloc pool.
 * Mirrors tf_rm_allocate: HCAPI_BA elements only, pool must be valid;
 * the device index is converted back to a 0-based pool index
 * (TF_RM_ADJUST_RM_BASE) before ba_free. Per the trailing comment,
 * ba_free failure is not logged because direction info is unavailable
 * at that point.
 */
765 tf_rm_free(struct tf_rm_free_parms *parms)
769 struct tf_rm_new_db *rm_db;
770 enum tf_rm_elem_cfg_type cfg_type;
772 TF_CHECK_PARMS2(parms, parms->rm_db);
774 rm_db = (struct tf_rm_new_db *)parms->rm_db;
775 cfg_type = rm_db->db[parms->db_index].cfg_type;
777 /* Bail out if not controlled by RM */
778 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
781 /* Bail out if the pool is not valid, should never happen */
782 if (rm_db->db[parms->db_index].pool == NULL) {
785 "%s: Invalid pool for this type:%d, rc:%s\n",
786 tf_dir_2_str(rm_db->dir),
792 /* Adjust for any non zero start value */
793 rc = tf_rm_adjust_index(rm_db->db,
794 TF_RM_ADJUST_RM_BASE,
801 rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
802 /* No logging direction matters and that is not available here */
/*
 * Queries whether a given device index is currently allocated in an
 * element's bitalloc pool. HCAPI_BA elements only; the index is
 * base-adjusted down (TF_RM_ADJUST_RM_BASE) and probed with ba_inuse.
 * The adjusted (pool-relative) index is optionally returned via
 * *parms->base_index; the in-use result goes to *parms->allocated.
 */
810 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
814 struct tf_rm_new_db *rm_db;
815 enum tf_rm_elem_cfg_type cfg_type;
817 TF_CHECK_PARMS2(parms, parms->rm_db);
819 rm_db = (struct tf_rm_new_db *)parms->rm_db;
820 cfg_type = rm_db->db[parms->db_index].cfg_type;
822 /* Bail out if not controlled by RM */
823 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
826 /* Bail out if the pool is not valid, should never happen */
827 if (rm_db->db[parms->db_index].pool == NULL) {
830 "%s: Invalid pool for this type:%d, rc:%s\n",
831 tf_dir_2_str(rm_db->dir),
837 /* Adjust for any non zero start value */
838 rc = tf_rm_adjust_index(rm_db->db,
839 TF_RM_ADJUST_RM_BASE,
846 if (parms->base_index)
847 *parms->base_index = adj_index;
848 *parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
/*
 * Copies an element's allocation info (start/stride range) out of the
 * DB via memcpy. Valid for both HCAPI and HCAPI_BA cfg types. The
 * destination pointer line is not visible here — presumably
 * parms->info; confirm against the full source.
 */
855 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
857 struct tf_rm_new_db *rm_db;
858 enum tf_rm_elem_cfg_type cfg_type;
860 TF_CHECK_PARMS2(parms, parms->rm_db);
862 rm_db = (struct tf_rm_new_db *)parms->rm_db;
863 cfg_type = rm_db->db[parms->db_index].cfg_type;
865 /* Bail out if not controlled by HCAPI */
866 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
867 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
871 &rm_db->db[parms->db_index].alloc,
872 sizeof(struct tf_rm_alloc_info));
/*
 * Returns the HCAPI resource type for a DB element via
 * *parms->hcapi_type. Valid for HCAPI and HCAPI_BA cfg types only.
 */
878 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
880 struct tf_rm_new_db *rm_db;
881 enum tf_rm_elem_cfg_type cfg_type;
883 TF_CHECK_PARMS2(parms, parms->rm_db);
885 rm_db = (struct tf_rm_new_db *)parms->rm_db;
886 cfg_type = rm_db->db[parms->db_index].cfg_type;
888 /* Bail out if not controlled by HCAPI */
889 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
890 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
893 *parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
/*
 * Reports how many entries of a DB element are currently allocated,
 * via *parms->count (ba_inuse_count). HCAPI_BA elements only. A NULL
 * pool is not an error here — per the comment below it simply means no
 * elements were ever allocated for that type, so the function bails
 * silently (the visible path suggests count is reported as zero;
 * confirm against the full source).
 */
899 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
902 struct tf_rm_new_db *rm_db;
903 enum tf_rm_elem_cfg_type cfg_type;
905 TF_CHECK_PARMS2(parms, parms->rm_db);
907 rm_db = (struct tf_rm_new_db *)parms->rm_db;
908 cfg_type = rm_db->db[parms->db_index].cfg_type;
910 /* Bail out if not controlled by RM */
911 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
914 /* Bail silently (no logging), if the pool is not valid there
915 * was no elements allocated for it.
917 if (rm_db->db[parms->db_index].pool == NULL) {
922 *parms->count = ba_inuse_count(rm_db->db[parms->db_index].pool);
929 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
931 struct tf_rm_new_db *rm_db;
932 enum tf_rm_elem_cfg_type cfg_type;
937 TF_CHECK_PARMS2(parms, parms->rm_db);
939 rm_db = (struct tf_rm_new_db *)parms->rm_db;
940 cfg_type = rm_db->db[parms->db_index].cfg_type;
942 /* Bail out if not controlled by RM */
943 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
946 /* Bail out if the pool is not valid, should never happen */
947 if (rm_db->db[parms->db_index].pool == NULL) {
950 "%s: Invalid pool for this type:%d, rc:%s\n",
951 tf_dir_2_str(rm_db->dir),
957 base_index = rm_db->db[parms->db_index].alloc.entry.start;
958 stride = rm_db->db[parms->db_index].alloc.entry.stride;
960 if (parms->starting_index < base_index ||
961 parms->starting_index + parms->num_entries > base_index + stride)