1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
10 #include <cfa_resource_types.h>
13 #include "tf_common.h"
15 #include "tf_session.h"
16 #include "tf_device.h"
21 * Generic RM Element data type that an RM DB is built upon.
23 struct tf_rm_element {
/* One per-resource-type entry of an RM DB; see tf_rm_new_db below.
 * NOTE(review): several member lines appear elided in this view
 * (e.g. the hcapi_type field declaration is referenced by the comment
 * at original line 32 but not visible here).
 */
25 * RM Element configuration type. If Private then the
26 * hcapi_type can be ignored. If Null then the element is not
27 * valid for the device.
29 enum tf_rm_elem_cfg_type cfg_type;
32 * HCAPI RM Type for the element.
37 * HCAPI RM allocated range information for the element.
39 struct tf_rm_alloc_info alloc;
42 * Bit allocator pool for the element. Pool size is controlled
43 * by the struct tf_session_resources at time of session creation.
44 * Null indicates that the element is not used for the device.
46 struct bitalloc *pool;
/* RM DB descriptor: owns the per-element array plus bookkeeping used
 * for allocation and logging.
 * NOTE(review): the struct's opening line ("struct tf_rm_new_db {")
 * and some member declarations are elided in this view.
 */
54 * Number of elements in the DB
59 * Direction this DB controls.
64 * Module type, used for logging purposes.
66 enum tf_device_module_type type;
69 * The DB consists of an array of elements
71 struct tf_rm_element *db;
75 * Count the HCAPI reservations requested in a DB configuration.
77 * Walks the configuration array and counts the HCAPI-type elements
78 * that carry a reservation value greater than zero. Reservations
79 * requested for element types the device marked as Null are logged.
82 * Pointer to the DB configuration
85 * Pointer to the allocation values associated with the module
88 * Number of DB configuration elements
91 * Number of HCAPI entries with a reservation value greater than 0
95 * - EOPNOTSUPP - Operation not supported
98 tf_rm_count_hcapi_reservations(enum tf_dir dir,
99 enum tf_device_module_type type,
100 struct tf_rm_element_cfg *cfg,
101 uint16_t *reservations,
103 uint16_t *valid_count)
/* NOTE(review): the 'count' parameter, local declarations, and several
 * body lines are elided in this view; the result is written out through
 * valid_count (presumably the number of HCAPI entries with a reservation
 * > 0 — confirm against the header comment above).
 */
108 for (i = 0; i < count; i++) {
/* Count only entries that are HCAPI-controlled (with or without a
 * bit-allocator pool) — the continuation of the condition is elided.
 */
109 if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
110 cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
114 /* Only log msg if a type is attempted reserved and
115 * not supported. We ignore EM module as it's using a
116 * split configuration array thus it would fail for
117 * this type of check.
119 if (type != TF_DEVICE_MODULE_TYPE_EM &&
120 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
121 reservations[i] > 0) {
/* Logged twice: once to the TF log (call elided above original
 * line 123) and once to stdout via printf below.
 */
123 "%s, %s, %s allocation not supported\n",
124 tf_device_module_type_2_str(type),
126 tf_device_module_type_subtype_2_str(type, i));
127 printf("%s, %s, %s allocation of %d not supported\n",
128 tf_device_module_type_2_str(type),
130 tf_device_module_type_subtype_2_str(type, i),
140 * Resource Manager Adjust of base index definitions.
/* Direction of the base-index adjustment performed by
 * tf_rm_adjust_index().
 */
142 enum tf_rm_adjust_type {
143 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
144 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
148 * Adjust an index according to the allocation information.
150 * All resources are controlled in a 0 based pool. Some resources, by
151 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
152 * need to be adjusted before they are handed out.
155 * Pointer to the db, used for the lookup
161 * DB index for the element type
171 * - EOPNOTSUPP - Operation not supported
174 tf_rm_adjust_index(struct tf_rm_element *db,
175 enum tf_rm_adjust_type action,
/* NOTE(review): the db_index/index/adj_index parameters, locals, the
 * switch statement line and its default/return handling are elided in
 * this view.
 */
/* Base of the element's allocated range; pool indices are 0-based
 * while HW indices start here.
 */
183 base_index = db[db_index].alloc.entry.start;
186 case TF_RM_ADJUST_RM_BASE:
187 *adj_index = index - base_index;
189 case TF_RM_ADJUST_ADD_BASE:
190 *adj_index = index + base_index;
200 * Logs an array of found residual entries to the console.
203 * Receive or transmit direction
206 * Type of Device Module
209 * Number of entries in the residual array
212 * Pointer to an array of residual entries. Array is index same as
213 * the DB in which this function is used. Each entry holds residual
214 * value for that entry.
217 tf_rm_log_residuals(enum tf_dir dir,
218 enum tf_device_module_type type,
/* NOTE(review): the count/residuals parameters and the logging call
 * itself (above original line 230) are elided in this view.
 */
224 /* Walk the residual array and log the types that weren't
225 * cleaned up to the console.
227 for (i = 0; i < count; i++) {
228 if (residuals[i] != 0)
230 "%s, %s was not cleaned up, %d outstanding\n",
232 tf_device_module_type_subtype_2_str(type, i),
238 * Performs a check of the passed in DB for any lingering elements. If
239 * a resource type was found to not have been cleaned up by the caller
240 * then its residual values are recorded, logged and passed back in an
241 * allocate reservation array that the caller can pass to the FW for
245 * Pointer to the db, used for the lookup
248 * Pointer to the reservation size of the generated reservation
252 * Pointer Pointer to a reservation array. The reservation array is
253 * allocated after the residual scan and holds any found residual
254 * entries. Thus it can be smaller than the DB that the check was
255 * performed on. Array must be freed by the caller.
257 * [out] residuals_present
258 * Pointer to a bool flag indicating if residual was present in the
263 * - EOPNOTSUPP - Operation not supported
266 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
268 struct tf_rm_resc_entry **resv,
269 bool *residuals_present)
/* Scans the DB for elements still in use, records their counts, and,
 * if any are found, builds a compact reservation array (*resv, caller
 * frees) describing only the residual entries.
 * NOTE(review): several locals, error checks, and the tail of the
 * function (including the cleanup labels) are elided in this view.
 */
276 uint16_t *residuals = NULL;
278 struct tf_rm_get_inuse_count_parms iparms;
279 struct tf_rm_get_alloc_info_parms aparms;
280 struct tf_rm_get_hcapi_parms hparms;
281 struct tf_rm_alloc_info info;
282 struct tfp_calloc_parms cparms;
283 struct tf_rm_resc_entry *local_resv = NULL;
285 /* Create array to hold the entries that have residuals */
286 cparms.nitems = rm_db->num_entries;
287 cparms.size = sizeof(uint16_t);
288 cparms.alignment = 0;
289 rc = tfp_calloc(&cparms);
293 residuals = (uint16_t *)cparms.mem_va;
295 /* Traverse the DB and collect any residual elements */
296 iparms.rm_db = rm_db;
297 iparms.count = &count;
298 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
300 rc = tf_rm_get_inuse_count(&iparms);
301 /* Not a device supported entry, just skip */
305 goto cleanup_residuals;
/* Record residual count and flag the caller; 'found' presumably
 * tallies residual entries for sizing local_resv below — confirm
 * against the elided lines.
 */
309 residuals[i] = count;
310 *residuals_present = true;
314 if (*residuals_present) {
315 /* Populate a reduced resv array with only the entries
316 * that have residuals.
318 cparms.nitems = found;
319 cparms.size = sizeof(struct tf_rm_resc_entry);
320 cparms.alignment = 0;
321 rc = tfp_calloc(&cparms);
325 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
327 aparms.rm_db = rm_db;
328 hparms.rm_db = rm_db;
329 hparms.hcapi_type = &hcapi_type;
330 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
331 if (residuals[i] == 0)
335 rc = tf_rm_get_info(&aparms);
340 rc = tf_rm_get_hcapi_type(&hparms);
/* Translate DB index into a FW reservation entry (HCAPI type +
 * allocated range).
 */
344 local_resv[f].type = hcapi_type;
345 local_resv[f].start = info.entry.start;
346 local_resv[f].stride = info.entry.stride;
352 tf_rm_log_residuals(rm_db->dir,
357 tfp_free((void *)residuals);
363 tfp_free((void *)local_resv);
366 tfp_free((void *)residuals);
372 tf_rm_create_db(struct tf *tfp,
373 struct tf_rm_create_db_parms *parms)
/* Creates an RM DB for one direction/module: queries FW capabilities
 * (QCAPS), requests the configured reservations from FW, then builds
 * the element array with allocation info and optional bit-allocator
 * pools. Result is returned through parms->rm_db.
 * NOTE(review): numerous error-check lines, 'continue' statements, and
 * the cleanup labels between the success and failure paths are elided
 * in this view.
 */
378 struct tf_session *tfs;
379 struct tf_dev_info *dev;
381 struct tfp_calloc_parms cparms;
382 struct tf_rm_resc_req_entry *query;
383 enum tf_rm_resc_resv_strategy resv_strategy;
384 struct tf_rm_resc_req_entry *req;
385 struct tf_rm_resc_entry *resv;
386 struct tf_rm_new_db *rm_db;
387 struct tf_rm_element *db;
389 uint16_t hcapi_items;
391 TF_CHECK_PARMS2(tfp, parms);
393 /* Retrieve the session information */
394 rc = tf_session_get_session(tfp, &tfs);
398 /* Retrieve device information */
399 rc = tf_session_get_device(tfs, &dev);
403 /* Need device max number of elements for the RM QCAPS */
404 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
408 cparms.nitems = max_types;
409 cparms.size = sizeof(struct tf_rm_resc_req_entry);
410 cparms.alignment = 0;
411 rc = tfp_calloc(&cparms);
415 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
417 /* Get Firmware Capabilities */
418 rc = tf_msg_session_resc_qcaps(tfp,
426 /* Process capabilities against DB requirements. However, as a
427 * DB can hold elements that are not HCAPI we can reduce the
428 * req msg content by removing those out of the request yet
429 * the DB holds them all as to give a fast lookup. We can also
430 * remove entries where there are no request for elements.
432 tf_rm_count_hcapi_reservations(parms->dir,
439 /* Handle the case where a DB create request really ends up
440 * being empty. Unsupported (if not rare) case but possible
441 * that no resources are necessary for a 'direction'.
443 if (hcapi_items == 0) {
445 "%s: DB create request for Zero elements, DB Type:%s\n",
446 tf_dir_2_str(parms->dir),
447 tf_device_module_type_2_str(parms->type));
453 /* Alloc request, alignment already set */
454 cparms.nitems = (size_t)hcapi_items;
455 cparms.size = sizeof(struct tf_rm_resc_req_entry);
456 rc = tfp_calloc(&cparms);
459 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
461 /* Alloc reservation, alignment and nitems already set */
462 cparms.size = sizeof(struct tf_rm_resc_entry);
463 rc = tfp_calloc(&cparms);
466 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
468 /* Build the request */
469 for (i = 0, j = 0; i < parms->num_elements; i++) {
470 /* Skip any non HCAPI cfg elements */
471 if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
472 parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
473 /* Only perform reservation for entries that
476 if (parms->alloc_cnt[i] == 0)
479 /* Verify that we can get the full amount
480 * allocated per the qcaps availability.
482 if (parms->alloc_cnt[i] <=
483 query[parms->cfg[i].hcapi_type].max) {
484 req[j].type = parms->cfg[i].hcapi_type;
485 req[j].min = parms->alloc_cnt[i];
486 req[j].max = parms->alloc_cnt[i];
/* Request exceeds QCAPS availability: log and fail (error
 * handling lines elided here).
 */
490 "%s: Resource failure, type:%d\n",
491 tf_dir_2_str(parms->dir),
492 parms->cfg[i].hcapi_type);
494 "req:%d, avail:%d\n",
496 query[parms->cfg[i].hcapi_type].max);
502 rc = tf_msg_session_resc_alloc(tfp,
510 /* Build the RM DB per the request */
512 cparms.size = sizeof(struct tf_rm_new_db);
513 rc = tfp_calloc(&cparms);
516 rm_db = (void *)cparms.mem_va;
518 /* Build the DB within RM DB */
519 cparms.nitems = parms->num_elements;
520 cparms.size = sizeof(struct tf_rm_element);
521 rc = tfp_calloc(&cparms);
524 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
/* NOTE(review): 'db' is used below; the line aliasing it to
 * rm_db->db appears elided in this view — confirm upstream.
 */
527 for (i = 0, j = 0; i < parms->num_elements; i++) {
528 db[i].cfg_type = parms->cfg[i].cfg_type;
529 db[i].hcapi_type = parms->cfg[i].hcapi_type;
531 /* Skip any non HCAPI types as we didn't include them
532 * in the reservation request.
534 if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI &&
535 parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
538 /* If the element didn't request an allocation no need
539 * to create a pool nor verify if we got a reservation.
541 if (parms->alloc_cnt[i] == 0)
544 /* If the element had requested an allocation and that
545 * allocation was a success (full amount) then
548 if (parms->alloc_cnt[i] == resv[j].stride) {
549 db[i].alloc.entry.start = resv[j].start;
550 db[i].alloc.entry.stride = resv[j].stride;
552 printf("Entry:%d Start:%d Stride:%d\n",
557 /* Only allocate BA pool if so requested */
558 if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
/* Pool sized in bitalloc units to cover 'stride' bits. */
560 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
561 sizeof(struct bitalloc));
562 /* Alloc request, alignment already set */
563 cparms.nitems = pool_size;
564 cparms.size = sizeof(struct bitalloc);
565 rc = tfp_calloc(&cparms);
568 "%s: Pool alloc failed, type:%d\n",
569 tf_dir_2_str(parms->dir),
573 db[i].pool = (struct bitalloc *)cparms.mem_va;
575 rc = ba_init(db[i].pool, resv[j].stride);
578 "%s: Pool init failed, type:%d\n",
579 tf_dir_2_str(parms->dir),
586 /* Bail out as we want what we requested for
587 * all elements, not any less.
590 "%s: Alloc failed, type:%d\n",
591 tf_dir_2_str(parms->dir),
594 "req:%d, alloc:%d\n",
/* Success: publish the DB to the caller. */
601 rm_db->num_entries = parms->num_elements;
602 rm_db->dir = parms->dir;
603 rm_db->type = parms->type;
604 *parms->rm_db = (void *)rm_db;
606 printf("%s: type:%d num_entries:%d\n",
607 tf_dir_2_str(parms->dir),
/* Normal-path frees of the temporary request/reservation arrays,
 * then the failure-path cleanup (labels elided).
 */
611 tfp_free((void *)req);
612 tfp_free((void *)resv);
617 tfp_free((void *)req);
618 tfp_free((void *)resv);
619 tfp_free((void *)db->pool);
620 tfp_free((void *)db);
621 tfp_free((void *)rm_db);
628 tf_rm_free_db(struct tf *tfp,
629 struct tf_rm_free_db_parms *parms)
/* Tears down an RM DB: checks for residual (unfreed) elements, asks FW
 * to flush them if any were found, then frees every element pool and
 * the DB itself.
 * NOTE(review): locals, return statements, and some error handling are
 * elided in this view.
 */
633 uint16_t resv_size = 0;
634 struct tf_rm_new_db *rm_db;
635 struct tf_rm_resc_entry *resv;
636 bool residuals_found = false;
638 TF_CHECK_PARMS2(parms, parms->rm_db);
640 /* Device unbind happens when the TF Session is closed and the
641 * session ref count is 0. Device unbind will cleanup each of
642 * its support modules, i.e. Identifier, thus we're ending up
643 * here to close the DB.
645 * On TF Session close it is assumed that the session has already
646 * cleaned up all its resources, individually, while
647 * destroying its flows.
649 * To assist in the 'cleanup checking' the DB is checked for any
650 * remaining elements and logged if found to be the case.
652 * Any such elements will need to be 'cleared' ahead of
653 * returning the resources to the HCAPI RM.
655 * RM will signal FW to flush the DB resources. FW will
656 * perform the invalidation. TF Session close will return the
657 * previous allocated elements to the RM and then close the
658 * HCAPI RM registration. That then saves several 'free' msgs
659 * from being required.
662 rm_db = (struct tf_rm_new_db *)parms->rm_db;
664 /* Check for residuals that the client didn't clean up */
665 rc = tf_rm_check_residuals(rm_db,
672 /* Invalidate any residuals followed by a DB traversal for
675 if (residuals_found) {
676 rc = tf_msg_session_resc_flush(tfp,
680 tfp_free((void *)resv);
681 /* On failure we still have to cleanup so we can only
682 * log that FW failed.
686 "%s: Internal Flush error, module:%s\n",
687 tf_dir_2_str(parms->dir),
688 tf_device_module_type_2_str(rm_db->type));
691 /* No need to check for configuration type, even if we do not
692 * have a BA pool we just delete on a null ptr, no harm
694 for (i = 0; i < rm_db->num_entries; i++)
695 tfp_free((void *)rm_db->db[i].pool);
697 tfp_free((void *)parms->rm_db);
703 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
/* Allocates one element index from the DB entry's bit-allocator pool
 * and returns the base-adjusted HW index via *parms->index. Only valid
 * for HCAPI_BA-controlled elements.
 * NOTE(review): locals, 'priority' conditional line, and return
 * statements are elided in this view.
 */
708 struct tf_rm_new_db *rm_db;
709 enum tf_rm_elem_cfg_type cfg_type;
711 TF_CHECK_PARMS2(parms, parms->rm_db);
713 rm_db = (struct tf_rm_new_db *)parms->rm_db;
714 cfg_type = rm_db->db[parms->db_index].cfg_type;
716 /* Bail out if not controlled by RM */
717 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
720 /* Bail out if the pool is not valid, should never happen */
721 if (rm_db->db[parms->db_index].pool == NULL) {
724 "%s: Invalid pool for this type:%d, rc:%s\n",
725 tf_dir_2_str(rm_db->dir),
732 * priority 0: allocate from top of the tcam i.e. high
733 * priority !0: allocate index from bottom i.e lowest
736 id = ba_alloc_reverse(rm_db->db[parms->db_index].pool);
738 id = ba_alloc(rm_db->db[parms->db_index].pool);
742 "%s: Allocation failed, rc:%s\n",
743 tf_dir_2_str(rm_db->dir),
748 /* Adjust for any non zero start value */
749 rc = tf_rm_adjust_index(rm_db->db,
750 TF_RM_ADJUST_ADD_BASE,
756 "%s: Alloc adjust of base index failed, rc:%s\n",
757 tf_dir_2_str(rm_db->dir),
762 *parms->index = index;
768 tf_rm_free(struct tf_rm_free_parms *parms)
/* Returns an element index to its bit-allocator pool: removes the base
 * offset from the HW index, then frees the resulting 0-based index.
 * Only valid for HCAPI_BA-controlled elements.
 * NOTE(review): locals and return statements are elided in this view.
 */
772 struct tf_rm_new_db *rm_db;
773 enum tf_rm_elem_cfg_type cfg_type;
775 TF_CHECK_PARMS2(parms, parms->rm_db);
777 rm_db = (struct tf_rm_new_db *)parms->rm_db;
778 cfg_type = rm_db->db[parms->db_index].cfg_type;
780 /* Bail out if not controlled by RM */
781 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
784 /* Bail out if the pool is not valid, should never happen */
785 if (rm_db->db[parms->db_index].pool == NULL) {
788 "%s: Invalid pool for this type:%d, rc:%s\n",
789 tf_dir_2_str(rm_db->dir),
795 /* Adjust for any non zero start value */
796 rc = tf_rm_adjust_index(rm_db->db,
797 TF_RM_ADJUST_RM_BASE,
804 rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
805 /* No logging direction matters and that is not available here */
813 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
/* Reports through *parms->allocated whether a given HW index is in use
 * in the element's bit-allocator pool (index is base-adjusted first).
 * Only valid for HCAPI_BA-controlled elements.
 * NOTE(review): locals and return statements are elided in this view.
 */
817 struct tf_rm_new_db *rm_db;
818 enum tf_rm_elem_cfg_type cfg_type;
820 TF_CHECK_PARMS2(parms, parms->rm_db);
822 rm_db = (struct tf_rm_new_db *)parms->rm_db;
823 cfg_type = rm_db->db[parms->db_index].cfg_type;
825 /* Bail out if not controlled by RM */
826 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
829 /* Bail out if the pool is not valid, should never happen */
830 if (rm_db->db[parms->db_index].pool == NULL) {
833 "%s: Invalid pool for this type:%d, rc:%s\n",
834 tf_dir_2_str(rm_db->dir),
840 /* Adjust for any non zero start value */
841 rc = tf_rm_adjust_index(rm_db->db,
842 TF_RM_ADJUST_RM_BASE,
849 *parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
856 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
/* Copies the element's allocated range information (start/stride) out
 * to the caller. Valid for both HCAPI and HCAPI_BA elements.
 * NOTE(review): the memcpy destination argument and return statements
 * are elided in this view.
 */
858 struct tf_rm_new_db *rm_db;
859 enum tf_rm_elem_cfg_type cfg_type;
861 TF_CHECK_PARMS2(parms, parms->rm_db);
863 rm_db = (struct tf_rm_new_db *)parms->rm_db;
864 cfg_type = rm_db->db[parms->db_index].cfg_type;
866 /* Bail out if not controlled by HCAPI */
867 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
868 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
872 &rm_db->db[parms->db_index].alloc,
873 sizeof(struct tf_rm_alloc_info));
879 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
/* Looks up the HCAPI resource type for a DB element and returns it
 * through *parms->hcapi_type. Bails out for elements not controlled by
 * HCAPI (neither HCAPI nor HCAPI_BA).
 * NOTE(review): return statements are elided in this view.
 */
881 struct tf_rm_new_db *rm_db;
882 enum tf_rm_elem_cfg_type cfg_type;
884 TF_CHECK_PARMS2(parms, parms->rm_db);
886 rm_db = (struct tf_rm_new_db *)parms->rm_db;
887 cfg_type = rm_db->db[parms->db_index].cfg_type;
889 /* Bail out if not controlled by HCAPI */
890 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
891 cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
894 *parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
900 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
/* Reports through *parms->count how many entries of the element's
 * bit-allocator pool are currently in use. Only meaningful for
 * HCAPI_BA-controlled elements; a NULL pool is treated as "nothing
 * allocated" (silent bail, see comment below).
 * NOTE(review): return statements are elided in this view.
 */
903 struct tf_rm_new_db *rm_db;
904 enum tf_rm_elem_cfg_type cfg_type;
906 TF_CHECK_PARMS2(parms, parms->rm_db);
908 rm_db = (struct tf_rm_new_db *)parms->rm_db;
909 cfg_type = rm_db->db[parms->db_index].cfg_type;
911 /* Bail out if not controlled by RM */
912 if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
915 /* Bail silently (no logging), if the pool is not valid there
916 * was no elements allocated for it.
918 if (rm_db->db[parms->db_index].pool == NULL) {
923 *parms->count = ba_inuse_count(rm_db->db[parms->db_index].pool);