1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
10 #include <cfa_resource_types.h>
12 #include "tf_rm_new.h"
14 #include "tf_session.h"
15 #include "tf_device.h"
20 * Generic RM Element data type that an RM DB is built upon.
22 struct tf_rm_element {
24 * RM Element configuration type. If Private then the
25 * hcapi_type can be ignored. If Null then the element is not
26 * valid for the device.
28 enum tf_rm_elem_cfg_type cfg_type;
31 * HCAPI RM Type for the element.
36 * HCAPI RM allocated range information for the element.
38 struct tf_rm_alloc_info alloc;
41 * Bit allocator pool for the element. Pool size is controlled
42 * by the struct tf_session_resources at time of session creation.
43 * Null indicates that the element is not used for the device.
45 struct bitalloc *pool;
/* NOTE(review): the fields below belong to struct tf_rm_new_db (the
 * per-direction DB container); its opening declaration and the
 * num_entries/dir members are elided from this extract — confirm
 * against the full source.
 */
53 * Number of elements in the DB
58 * Direction this DB controls.
63 * The DB consists of an array of elements
65 struct tf_rm_element *db;
70 * Resource Manager Adjust of base index definitions.
/* Selects the direction of base-index translation performed by
 * tf_rm_adjust_index() below.
 */
72 enum tf_rm_adjust_type {
73 TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
74 TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
78 * Adjust an index according to the allocation information.
80 * All resources are controlled in a 0 based pool. Some resources, by
81 * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
82 * need to be adjusted before they are handed out.
85 * Pointer to the db, used for the lookup
91 * DB index for the element type
101 * - EOPNOTSUPP - Operation not supported
/* NOTE(review): the full parameter list (action, db_index, adj_index,
 * index), the switch opener, per-case break statements, the default
 * case and the return are elided from this extract.
 */
104 tf_rm_adjust_index(struct tf_rm_element *db,
105 enum tf_rm_adjust_type action,
/* Base of the element's HCAPI-allocated range; indices are stored
 * 0-based in the pool and translated to/from this base.
 */
113 base_index = db[db_index].alloc.entry.start;
116 case TF_RM_ADJUST_RM_BASE:
117 *adj_index = index - base_index;
119 case TF_RM_ADJUST_ADD_BASE:
120 *adj_index = index + base_index;
/* Creates an RM DB for one direction: queries firmware capabilities
 * (QCAPS), builds a per-element resource request from parms->cfg /
 * parms->alloc_num, reserves the resources via
 * tf_msg_session_resc_alloc(), then builds the element array and a
 * bitalloc pool per reserved element.
 * NOTE(review): several error-check branches, labels and returns are
 * elided from this extract.
 */
130 tf_rm_create_db(struct tf *tfp,
131 struct tf_rm_create_db_parms *parms)
135 struct tf_session *tfs;
136 struct tf_dev_info *dev;
138 struct tfp_calloc_parms cparms;
139 struct tf_rm_resc_req_entry *query;
140 enum tf_rm_resc_resv_strategy resv_strategy;
141 struct tf_rm_resc_req_entry *req;
142 struct tf_rm_resc_entry *resv;
143 struct tf_rm_new_db *rm_db;
144 struct tf_rm_element *db;
147 /* Retrieve the session information */
148 rc = tf_session_get_session(tfp, &tfs);
152 /* Retrieve device information */
153 rc = tf_session_get_device(tfs, &dev);
157 /* Need device max number of elements for the RM QCAPS */
158 rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
/* Scratch buffer sized for every device resource type; reused for the
 * request/reservation allocations below (alignment stays 0).
 */
162 cparms.nitems = max_types;
163 cparms.size = sizeof(struct tf_rm_resc_req_entry);
164 cparms.alignment = 0;
165 rc = tfp_calloc(&cparms);
169 query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
171 /* Get Firmware Capabilities */
172 rc = tf_msg_session_resc_qcaps(tfp,
180 /* Process capabilities against db requirements */
182 /* Alloc request, alignment already set */
183 cparms.nitems = parms->num_elements;
184 cparms.size = sizeof(struct tf_rm_resc_req_entry);
185 rc = tfp_calloc(&cparms);
188 req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
190 /* Alloc reservation, alignment and nitems already set */
191 cparms.size = sizeof(struct tf_rm_resc_entry);
192 rc = tfp_calloc(&cparms);
195 resv = (struct tf_rm_resc_entry *)cparms.mem_va;
197 /* Build the request */
198 for (i = 0; i < parms->num_elements; i++) {
199 /* Skip any non HCAPI cfg elements */
200 if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI) {
201 req[i].type = parms->cfg[i].hcapi_type;
202 /* Check that we can get the full amount allocated */
203 if (parms->alloc_num[i] <=
204 query[parms->cfg[i].hcapi_type].max) {
/* min == max: request exactly the configured amount; partial
 * grants are not acceptable for DB creation.
 */
205 req[i].min = parms->alloc_num[i];
206 req[i].max = parms->alloc_num[i];
209 "%s: Resource failure, type:%d\n",
210 tf_dir_2_str(parms->dir),
211 parms->cfg[i].hcapi_type);
213 "req:%d, avail:%d\n",
215 query[parms->cfg[i].hcapi_type].max);
219 /* Skip the element */
220 req[i].type = CFA_RESOURCE_TYPE_INVALID;
224 rc = tf_msg_session_resc_alloc(tfp,
232 /* Build the RM DB per the request */
234 cparms.size = sizeof(struct tf_rm_new_db)
235 rc = tfp_calloc(&cparms);
238 rm_db = (void *)cparms.mem_va;
240 /* Build the DB within RM DB */
241 cparms.nitems = parms->num_elements;
242 cparms.size = sizeof(struct tf_rm_element);
243 rc = tfp_calloc(&cparms);
246 rm_db->db = (struct tf_rm_element *)cparms.mem_va;
249 for (i = 0; i < parms->num_elements; i++) {
250 /* If allocation failed for a single entry the DB
251 * creation is considered a failure.
253 if (parms->alloc_num[i] != resv[i].stride) {
255 "%s: Alloc failed, type:%d\n",
256 tf_dir_2_str(parms->dir),
259 "req:%d, alloc:%d\n",
/* Copy the reservation result into the element entry. */
265 db[i].cfg_type = parms->cfg[i].cfg_type;
266 db[i].hcapi_type = parms->cfg[i].hcapi_type;
267 db[i].alloc.entry.start = resv[i].start;
268 db[i].alloc.entry.stride = resv[i].stride;
/* Bitalloc pool sized (in struct bitalloc units) to track
 * stride entries for this element.
 */
271 pool_size = (BITALLOC_SIZEOF(resv[i].stride) /
272 sizeof(struct bitalloc));
273 /* Alloc request, alignment already set */
274 cparms.nitems = pool_size;
275 cparms.size = sizeof(struct bitalloc);
276 rc = tfp_calloc(&cparms);
279 db[i].pool = (struct bitalloc *)cparms.mem_va;
282 rm_db->num_entries = i;
283 rm_db->dir = parms->dir;
284 parms->rm_db = (void *)rm_db;
286 tfp_free((void *)req);
287 tfp_free((void *)resv);
/* Error cleanup path.
 * NOTE(review): only db->pool (element 0's pool) is freed here;
 * pools allocated for later loop iterations appear to leak on this
 * path — confirm against the full source.
 */
292 tfp_free((void *)req);
293 tfp_free((void *)resv);
294 tfp_free((void *)db->pool);
295 tfp_free((void *)db);
296 tfp_free((void *)rm_db);
303 tf_rm_free_db(struct tf *tfp __rte_unused,
304 struct tf_rm_free_db_parms *parms)
308 struct tf_rm_new_db *rm_db;
310 /* Traverse the DB and clear each pool.
312 * Firmware is not cleared. It will be cleared on close only.
314 rm_db = (struct tf_rm_new_db *)parms->rm_db;
315 for (i = 0; i < rm_db->num_entries; i++)
316 tfp_free((void *)rm_db->db->pool);
318 tfp_free((void *)parms->rm_db);
/* Allocates a single entry of the given element type from its
 * bitalloc pool and adjusts the returned 0-based pool index up by the
 * element's HCAPI base before handing it to the caller.
 * NOTE(review): return statements and some error branches are elided
 * from this extract.
 */
324 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
328 struct tf_rm_new_db *rm_db;
329 enum tf_rm_elem_cfg_type cfg_type;
331 if (parms == NULL || parms->rm_db == NULL)
334 rm_db = (struct tf_rm_new_db *)parms->rm_db;
335 cfg_type = rm_db->db[parms->db_index].cfg_type;
337 /* Bail out if not controlled by RM */
338 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
339 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
/* ba_alloc returns a 0-based free index from the pool, or a
 * negative value when the pool is exhausted.
 */
342 id = ba_alloc(rm_db->db[parms->db_index].pool);
345 "%s: Allocation failed, rc:%s\n",
346 tf_dir_2_str(rm_db->dir),
351 /* Adjust for any non zero start value */
352 rc = tf_rm_adjust_index(rm_db->db,
353 TF_RM_ADJUST_ADD_BASE,
359 "%s: Alloc adjust of base index failed, rc:%s\n",
360 tf_dir_2_str(rm_db->dir),
/* Frees a single previously-allocated entry: converts the caller's
 * HCAPI index back to a 0-based pool index (RM_BASE) and returns it
 * to the bitalloc pool.
 * NOTE(review): return statements and some error branches are elided
 * from this extract.
 */
369 tf_rm_free(struct tf_rm_free_parms *parms)
373 struct tf_rm_new_db *rm_db;
374 enum tf_rm_elem_cfg_type cfg_type;
376 if (parms == NULL || parms->rm_db == NULL)
379 rm_db = (struct tf_rm_new_db *)parms->rm_db;
380 cfg_type = rm_db->db[parms->db_index].cfg_type;
382 /* Bail out if not controlled by RM */
383 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
384 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
387 /* Adjust for any non zero start value */
388 rc = tf_rm_adjust_index(rm_db->db,
389 TF_RM_ADJUST_RM_BASE,
396 rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
397 /* No logging direction matters and that is not available here */
/* Reports via *parms->allocated whether the given HCAPI index is
 * currently in use: the index is converted back to 0-based pool space
 * and checked with ba_inuse().
 * NOTE(review): return statements and some error branches are elided
 * from this extract.
 */
405 tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
409 struct tf_rm_new_db *rm_db;
410 enum tf_rm_elem_cfg_type cfg_type;
412 if (parms == NULL || parms->rm_db == NULL)
415 rm_db = (struct tf_rm_new_db *)parms->rm_db;
416 cfg_type = rm_db->db[parms->db_index].cfg_type;
418 /* Bail out if not controlled by RM */
419 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
420 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
423 /* Adjust for any non zero start value */
424 rc = tf_rm_adjust_index(rm_db->db,
425 TF_RM_ADJUST_RM_BASE,
432 *parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
/* Returns a pointer to the element's allocation info (start/stride
 * range) for RM-controlled element types. The pointer references DB
 * internal storage; it remains valid only as long as the DB exists.
 * NOTE(review): return statements are elided from this extract.
 */
439 tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
442 struct tf_rm_new_db *rm_db;
443 enum tf_rm_elem_cfg_type cfg_type;
445 if (parms == NULL || parms->rm_db == NULL)
448 rm_db = (struct tf_rm_new_db *)parms->rm_db;
449 cfg_type = rm_db->db[parms->db_index].cfg_type;
451 /* Bail out if not controlled by RM */
452 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
453 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
456 parms->info = &rm_db->db[parms->db_index].alloc;
/* Looks up the HCAPI resource type for a DB element and writes it to
 * *parms->hcapi_type, for RM-controlled element types only.
 * NOTE(review): the function's tail (return statements) continues
 * past the end of this extract.
 */
462 tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
465 struct tf_rm_new_db *rm_db;
466 enum tf_rm_elem_cfg_type cfg_type;
468 if (parms == NULL || parms->rm_db == NULL)
471 rm_db = (struct tf_rm_new_db *)parms->rm_db;
472 cfg_type = rm_db->db[parms->db_index].cfg_type;
474 /* Bail out if not controlled by RM */
475 if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
476 cfg_type != TF_RM_ELEM_CFG_PRIVATE)
479 *parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;