1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
10 #include <cfa_resource_types.h>
13 #include "tf_common.h"
15 #include "tf_session.h"
16 #include "tf_device.h"
21 * Generic RM Element data type that an RM DB is built upon.
23 struct tf_rm_element {
25 * RM Element configuration type. If Private then the
26 * hcapi_type can be ignored. If Null then the element is not
27 * valid for the device.
29 enum tf_rm_elem_cfg_type cfg_type;
32 * HCAPI RM Type for the element.
37 * HCAPI RM allocated range information for the element.
39 struct tf_rm_alloc_info alloc;
42 * Bit allocator pool for the element. Pool size is controlled
43 * by the struct tf_session_resources at time of session creation.
44 * Null indicates that the element is not used for the device.
46 struct bitalloc *pool;
54 * Number of elements in the DB (size of the db array below)
59 * Direction this DB controls.
64 * Module type, used for logging purposes.
66 enum tf_device_module_type type;
69 * The DB consists of an array of elements
71 struct tf_rm_element *db;
75 * Count the number of HCAPI reservations requested for a module.
77 * Walks the module configuration array and counts the elements that
78 * are HCAPI controlled and carry a reservation request greater than
79 * zero. Attempts to reserve unsupported element types are logged.
82 * Pointer to the DB configuration
85 * Pointer to the allocation values associated with the module
88 * Number of DB configuration elements
91 * Number of HCAPI entries with a reservation value greater than 0
95 * - EOPNOTSUPP - Operation not supported
98 tf_rm_count_hcapi_reservations(enum tf_dir dir,
99 enum tf_device_module_type type,
100 struct tf_rm_element_cfg *cfg,
101 uint16_t *reservations,
103 uint16_t *valid_count)
/* Tally the cfg entries that are HCAPI controlled and have a
 * non-zero reservation request; the total is handed back through
 * valid_count.
 */
108 for (i = 0; i < count; i++) {
109 if (cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI &&
113 /* Only log msg if a type is attempted reserved and
114 * not supported. We ignore the EM module as it is using a
115 * split configuration array thus it would fail for
116 * this type of check.
118 if (type != TF_DEVICE_MODULE_TYPE_EM &&
119 cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
120 reservations[i] > 0) {
122 "%s, %s, %s allocation not supported\n",
123 tf_device_module_type_2_str(type),
125 tf_device_module_type_subtype_2_str(type, i));
/* NOTE(review): console printf duplicates the driver log
 * message above; consider removing the printf.
 */
126 printf("%s, %s, %s allocation of %d not supported\n",
127 tf_device_module_type_2_str(type),
129 tf_device_module_type_subtype_2_str(type, i),
139 * Resource Manager base-index adjustment actions (see tf_rm_adjust_index).
141 enum tf_rm_adjust_type {
142 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
143 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
147 * Adjust an index according to the allocation information.
149 * All resources are controlled in a 0 based pool. Some resources, by
150 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
151 * need to be adjusted before they are handed out.
154 * Pointer to the db, used for the lookup
160 * DB index for the element type
170 * - EOPNOTSUPP - Operation not supported
173 tf_rm_adjust_index(struct tf_rm_element *db,
174 enum tf_rm_adjust_type action,
/* Base of the element's allocated range; pool indices are 0 based
 * while the device range may start at a non-zero offset.
 */
182 base_index = db[db_index].alloc.entry.start;
185 case TF_RM_ADJUST_RM_BASE:
/* Device index -> 0-based pool index */
186 *adj_index = index - base_index;
188 case TF_RM_ADJUST_ADD_BASE:
/* 0-based pool index -> device index */
189 *adj_index = index + base_index;
199 * Logs an array of found residual entries to the console.
202 * Receive or transmit direction
205 * Type of Device Module
208 * Number of entries in the residual array
211 * Pointer to an array of residual entries. Array is index same as
212 * the DB in which this function is used. Each entry holds residual
213 * value for that entry.
216 tf_rm_log_residuals(enum tf_dir dir,
217 enum tf_device_module_type type,
223 /* Walk the residual array and log the types that weren't
224 * cleaned up to the console.
226 for (i = 0; i < count; i++) {
227 if (residuals[i] != 0)
229 "%s, %s was not cleaned up, %d outstanding\n",
231 tf_device_module_type_subtype_2_str(type, i),
237 * Performs a check of the passed in DB for any lingering elements. If
238 * a resource type was found to not have been cleaned up by the caller
239 * then its residual values are recorded, logged and passed back in an
240 * allocate reservation array that the caller can pass to the FW for
244 * Pointer to the db, used for the lookup
247 * Pointer to the reservation size of the generated reservation
251 * Pointer Pointer to a reservation array. The reservation array is
252 * allocated after the residual scan and holds any found residual
253 * entries. Thus it can be smaller than the DB that the check was
254 * performed on. Array must be freed by the caller.
256 * [out] residuals_present
257 * Pointer to a bool flag indicating if residual was present in the
262 * - EOPNOTSUPP - Operation not supported
265 tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
267 struct tf_rm_resc_entry **resv,
268 bool *residuals_present)
275 uint16_t *residuals = NULL;
277 struct tf_rm_get_inuse_count_parms iparms;
278 struct tf_rm_get_alloc_info_parms aparms;
279 struct tf_rm_get_hcapi_parms hparms;
280 struct tf_rm_alloc_info info;
281 struct tfp_calloc_parms cparms;
282 struct tf_rm_resc_entry *local_resv = NULL;
284 /* Create array to hold the entries that have residuals */
285 cparms.nitems = rm_db->num_entries;
286 cparms.size = sizeof(uint16_t);
287 cparms.alignment = 0;
288 rc = tfp_calloc(&cparms);
292 residuals = (uint16_t *)cparms.mem_va;
294 /* Traverse the DB and collect any residual elements */
295 iparms.rm_db = rm_db;
296 iparms.count = &count;
297 for (i = 0, found = 0; i < rm_db->num_entries; i++) {
299 rc = tf_rm_get_inuse_count(&iparms);
300 /* Not a device supported entry, just skip */
304 goto cleanup_residuals;
/* Record the per-entry outstanding count and flag that at
 * least one residual exists.
 */
308 residuals[i] = count;
309 *residuals_present = true;
313 if (*residuals_present) {
314 /* Populate a reduced resv array with only the entries
315 * that have residuals.
317 cparms.nitems = found;
318 cparms.size = sizeof(struct tf_rm_resc_entry);
319 cparms.alignment = 0;
320 rc = tfp_calloc(&cparms);
324 local_resv = (struct tf_rm_resc_entry *)cparms.mem_va;
326 aparms.rm_db = rm_db;
327 hparms.rm_db = rm_db;
328 hparms.hcapi_type = &hcapi_type;
329 for (i = 0, f = 0; i < rm_db->num_entries; i++) {
330 if (residuals[i] == 0)
334 rc = tf_rm_get_info(&aparms);
339 rc = tf_rm_get_hcapi_type(&hparms);
/* Translate the DB entry into an HCAPI reservation entry
 * (type + allocated range) for the caller to pass to FW.
 */
343 local_resv[f].type = hcapi_type;
344 local_resv[f].start = info.entry.start;
345 local_resv[f].stride = info.entry.stride;
351 tf_rm_log_residuals(rm_db->dir,
/* Success path: scratch residual array is no longer needed;
 * ownership of local_resv transfers to the caller via *resv.
 */
356 tfp_free((void *)residuals);
/* Error paths: release anything allocated locally */
362 tfp_free((void *)local_resv);
365 tfp_free((void *)residuals);
371 tf_rm_create_db(struct tf *tfp,
372 struct tf_rm_create_db_parms *parms)
377 struct tf_session *tfs;
378 struct tf_dev_info *dev;
380 struct tfp_calloc_parms cparms;
381 struct tf_rm_resc_req_entry *query;
382 enum tf_rm_resc_resv_strategy resv_strategy;
383 struct tf_rm_resc_req_entry *req;
384 struct tf_rm_resc_entry *resv;
385 struct tf_rm_new_db *rm_db;
386 struct tf_rm_element *db;
388 uint16_t hcapi_items;
390 TF_CHECK_PARMS2(tfp, parms);
392 /* Retrieve the session information */
393 rc = tf_session_get_session(tfp, &tfs);
397 /* Retrieve device information */
398 rc = tf_session_get_device(tfs, &dev);
402 /* Need device max number of elements for the RM QCAPS */
403 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
/* Scratch array to receive the per-type capability query */
407 cparms.nitems = max_types;
408 cparms.size = sizeof(struct tf_rm_resc_req_entry);
409 cparms.alignment = 0;
410 rc = tfp_calloc(&cparms);
414 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
416 /* Get Firmware Capabilities */
417 rc = tf_msg_session_resc_qcaps(tfp,
425 /* Process capabilities against DB requirements. However, as a
426 * DB can hold elements that are not HCAPI we can reduce the
427 * req msg content by removing those out of the request yet
428 * the DB holds them all as to give a fast lookup. We can also
429 * remove entries where there are no request for elements.
431 tf_rm_count_hcapi_reservations(parms->dir,
438 /* Handle the case where a DB create request really ends up
439 * being empty. Unsupported (if not rare) case but possible
440 * that no resources are necessary for a 'direction'.
442 if (hcapi_items == 0) {
444 "%s: DB create request for Zero elements, DB Type:%s\n",
445 tf_dir_2_str(parms->dir),
446 tf_device_module_type_2_str(parms->type));
452 /* Alloc request, alignment already set */
453 cparms.nitems = (size_t)hcapi_items;
454 cparms.size = sizeof(struct tf_rm_resc_req_entry);
455 rc = tfp_calloc(&cparms);
458 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
460 /* Alloc reservation, alignment and nitems already set */
461 cparms.size = sizeof(struct tf_rm_resc_entry);
462 rc = tfp_calloc(&cparms);
465 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
467 /* Build the request */
468 for (i = 0, j = 0; i < parms->num_elements; i++) {
469 /* Skip any non HCAPI cfg elements */
470 if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI) {
471 /* Only perform reservation for entries that
474 if (parms->alloc_cnt[i] == 0)
477 /* Verify that we can get the full amount
478 * allocated per the qcaps availability.
480 if (parms->alloc_cnt[i] <=
481 query[parms->cfg[i].hcapi_type].max) {
/* min == max: request exactly the configured count */
482 req[j].type = parms->cfg[i].hcapi_type;
483 req[j].min = parms->alloc_cnt[i];
484 req[j].max = parms->alloc_cnt[i];
488 "%s: Resource failure, type:%d\n",
489 tf_dir_2_str(parms->dir),
490 parms->cfg[i].hcapi_type);
492 "req:%d, avail:%d\n",
494 query[parms->cfg[i].hcapi_type].max);
500 rc = tf_msg_session_resc_alloc(tfp,
508 /* Build the RM DB per the request */
510 cparms.size = sizeof(struct tf_rm_new_db);
511 rc = tfp_calloc(&cparms);
514 rm_db = (void *)cparms.mem_va;
516 /* Build the DB within RM DB */
517 cparms.nitems = parms->num_elements;
518 cparms.size = sizeof(struct tf_rm_element);
519 rc = tfp_calloc(&cparms);
522 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
/* Populate each DB element; j tracks the compacted resv array
 * which only holds the HCAPI entries that were requested.
 */
525 for (i = 0, j = 0; i < parms->num_elements; i++) {
526 db[i].cfg_type = parms->cfg[i].cfg_type;
527 db[i].hcapi_type = parms->cfg[i].hcapi_type;
529 /* Skip any non HCAPI types as we didn't include them
530 * in the reservation request.
532 if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI)
535 /* If the element didn't request an allocation no need
536 * to create a pool nor verify if we got a reservation.
538 if (parms->alloc_cnt[i] == 0)
541 /* If the element had requested an allocation and that
542 * allocation was a success (full amount) then
545 if (parms->alloc_cnt[i] == resv[j].stride) {
546 db[i].alloc.entry.start = resv[j].start;
547 db[i].alloc.entry.stride = resv[j].stride;
/* NOTE(review): debug printf left in the fast path; should
 * probably be a driver log or removed.
 */
549 printf("Entry:%d Start:%d Stride:%d\n",
/* Size the bit-allocator pool in units of struct bitalloc */
555 pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
556 sizeof(struct bitalloc));
557 /* Alloc request, alignment already set */
558 cparms.nitems = pool_size;
559 cparms.size = sizeof(struct bitalloc);
560 rc = tfp_calloc(&cparms);
563 "%s: Pool alloc failed, type:%d\n",
564 tf_dir_2_str(parms->dir),
568 db[i].pool = (struct bitalloc *)cparms.mem_va;
570 rc = ba_init(db[i].pool, resv[j].stride);
573 "%s: Pool init failed, type:%d\n",
574 tf_dir_2_str(parms->dir),
580 /* Bail out as we want what we requested for
581 * all elements, not any less.
584 "%s: Alloc failed, type:%d\n",
585 tf_dir_2_str(parms->dir),
588 "req:%d, alloc:%d\n",
595 rm_db->num_entries = parms->num_elements;
596 rm_db->dir = parms->dir;
597 rm_db->type = parms->type;
/* Hand the completed DB back to the caller as an opaque handle */
598 *parms->rm_db = (void *)rm_db;
600 printf("%s: type:%d num_entries:%d\n",
601 tf_dir_2_str(parms->dir),
/* Success path: scratch request/reservation arrays are done */
605 tfp_free((void *)req);
606 tfp_free((void *)resv);
/* Error path cleanup */
611 tfp_free((void *)req);
612 tfp_free((void *)resv);
/* NOTE(review): db->pool frees only the first element's pool;
 * pools created for later elements appear to leak here — verify
 * against the elided loop above.
 */
613 tfp_free((void *)db->pool);
614 tfp_free((void *)db);
615 tfp_free((void *)rm_db);
622 tf_rm_free_db(struct tf *tfp,
623 struct tf_rm_free_db_parms *parms)
627 uint16_t resv_size = 0;
628 struct tf_rm_new_db *rm_db;
629 struct tf_rm_resc_entry *resv;
630 bool residuals_found = false;
632 TF_CHECK_PARMS2(parms, parms->rm_db);
634 /* Device unbind happens when the TF Session is closed and the
635 * session ref count is 0. Device unbind will cleanup each of
636 * its support modules, i.e. Identifier, thus we're ending up
637 * here to close the DB.
639 * On TF Session close it is assumed that the session has already
640 * cleaned up all its resources, individually, while
641 * destroying its flows.
643 * To assist in the 'cleanup checking' the DB is checked for any
644 * remaining elements and logged if found to be the case.
646 * Any such elements will need to be 'cleared' ahead of
647 * returning the resources to the HCAPI RM.
649 * RM will signal FW to flush the DB resources. FW will
650 * perform the invalidation. TF Session close will return the
651 * previous allocated elements to the RM and then close the
652 * HCAPI RM registration. That then saves several 'free' msgs
653 * from being required.
/* The opaque handle was produced by tf_rm_create_db */
656 rm_db = (struct tf_rm_new_db *)parms->rm_db;
658 /* Check for residuals that the client didn't clean up */
659 rc = tf_rm_check_residuals(rm_db,
666 /* Invalidate any residuals followed by a DB traversal for
669 if (residuals_found) {
670 rc = tf_msg_session_resc_flush(tfp,
/* resv was allocated by tf_rm_check_residuals; done with it */
674 tfp_free((void *)resv);
675 /* On failure we still have to cleanup so we can only
676 * log that FW failed.
680 "%s: Internal Flush error, module:%s\n",
681 tf_dir_2_str(parms->dir),
682 tf_device_module_type_2_str(rm_db->type));
/* Release every per-element bit-allocator pool, then the DB */
685 for (i = 0; i < rm_db->num_entries; i++)
686 tfp_free((void *)rm_db->db[i].pool);
688 tfp_free((void *)parms->rm_db);
694 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
699 struct tf_rm_new_db *rm_db;
700 enum tf_rm_elem_cfg_type cfg_type;
702 TF_CHECK_PARMS2(parms, parms->rm_db);
704 rm_db = (struct tf_rm_new_db *)parms->rm_db;
705 cfg_type = rm_db->db[parms->db_index].cfg_type;
707 /* Bail out if not controlled by RM */
708 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
709 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
712 /* Bail out if the pool is not valid, should never happen */
713 if (rm_db->db[parms->db_index].pool == NULL) {
716 "%s: Invalid pool for this type:%d, rc:%s\n",
717 tf_dir_2_str(rm_db->dir),
724 * priority 0: allocate from top of the tcam i.e. high
725 * priority !0: allocate index from bottom i.e lowest
728 id = ba_alloc_reverse(rm_db->db[parms->db_index].pool);
730 id = ba_alloc(rm_db->db[parms->db_index].pool);
734 "%s: Allocation failed, rc:%s\n",
735 tf_dir_2_str(rm_db->dir),
740 /* Adjust for any non zero start value */
/* id is a 0-based pool index; convert to the device index */
741 rc = tf_rm_adjust_index(rm_db->db,
742 TF_RM_ADJUST_ADD_BASE,
748 "%s: Alloc adjust of base index failed, rc:%s\n",
749 tf_dir_2_str(rm_db->dir),
754 *parms->index = index;
760 tf_rm_free(struct tf_rm_free_parms *parms)
764 struct tf_rm_new_db *rm_db;
765 enum tf_rm_elem_cfg_type cfg_type;
767 TF_CHECK_PARMS2(parms, parms->rm_db);
769 rm_db = (struct tf_rm_new_db *)parms->rm_db;
770 cfg_type = rm_db->db[parms->db_index].cfg_type;
772 /* Bail out if not controlled by RM */
773 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
774 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
777 /* Bail out if the pool is not valid, should never happen */
778 if (rm_db->db[parms->db_index].pool == NULL) {
781 "%s: Invalid pool for this type:%d, rc:%s\n",
782 tf_dir_2_str(rm_db->dir),
788 /* Adjust for any non zero start value */
/* Convert the device index back to a 0-based pool index */
789 rc = tf_rm_adjust_index(rm_db->db,
790 TF_RM_ADJUST_RM_BASE,
797 rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
798 /* No logging direction matters and that is not available here */
806 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
810 struct tf_rm_new_db *rm_db;
811 enum tf_rm_elem_cfg_type cfg_type;
813 TF_CHECK_PARMS2(parms, parms->rm_db);
815 rm_db = (struct tf_rm_new_db *)parms->rm_db;
816 cfg_type = rm_db->db[parms->db_index].cfg_type;
818 /* Bail out if not controlled by RM */
819 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
820 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
823 /* Bail out if the pool is not valid, should never happen */
824 if (rm_db->db[parms->db_index].pool == NULL) {
827 "%s: Invalid pool for this type:%d, rc:%s\n",
828 tf_dir_2_str(rm_db->dir),
834 /* Adjust for any non zero start value */
/* Convert the device index to a 0-based pool index, then ask the
 * bit allocator whether that index is in use.
 */
835 rc = tf_rm_adjust_index(rm_db->db,
836 TF_RM_ADJUST_RM_BASE,
843 *parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
850 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
852 struct tf_rm_new_db *rm_db;
853 enum tf_rm_elem_cfg_type cfg_type;
855 TF_CHECK_PARMS2(parms, parms->rm_db);
857 rm_db = (struct tf_rm_new_db *)parms->rm_db;
858 cfg_type = rm_db->db[parms->db_index].cfg_type;
860 /* Bail out if not controlled by RM */
861 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
862 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
/* Copy the element's allocation range info out to the caller */
866 &rm_db->db[parms->db_index].alloc,
867 sizeof(struct tf_rm_alloc_info));
873 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
875 struct tf_rm_new_db *rm_db;
876 enum tf_rm_elem_cfg_type cfg_type;
878 TF_CHECK_PARMS2(parms, parms->rm_db);
880 rm_db = (struct tf_rm_new_db *)parms->rm_db;
881 cfg_type = rm_db->db[parms->db_index].cfg_type;
883 /* Bail out if not controlled by RM */
884 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
885 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
/* Return the device HCAPI RM type stored for this DB element */
888 *parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
894 tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
897 struct tf_rm_new_db *rm_db;
898 enum tf_rm_elem_cfg_type cfg_type;
900 TF_CHECK_PARMS2(parms, parms->rm_db);
902 rm_db = (struct tf_rm_new_db *)parms->rm_db;
903 cfg_type = rm_db->db[parms->db_index].cfg_type;
905 /* Bail out if not controlled by RM */
906 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
907 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
910 /* Bail silently (no logging), if the pool is not valid there
911 * was no elements allocated for it.
913 if (rm_db->db[parms->db_index].pool == NULL) {
/* presumably ba_inuse_count returns the number of currently
 * allocated entries in the pool — confirm against bitalloc.
 */
918 *parms->count = ba_inuse_count(rm_db->db[parms->db_index].pool);