1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "cfa_resource_types.h"
26 /* Number of pointers per page_size */
27 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
/* Per-direction EM-extension resource-manager databases; populated in
 * tf_em_ext_common_bind() and freed in tf_em_ext_common_unbind().
 */
32 void *eem_db[TF_DIR_MAX];
35 * Init flag, set on bind and cleared on unbind
/* Host vs external memory backing for EM tables; captured at bind time
 * from tf_em_cfg_parms (see tf_em_ext_common_bind).
 */
42 static enum tf_mem_type mem_type;
44 /** Table scope array */
45 struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
47 /* API defined in tf_em.h */
/*
 * Look up the table scope control block for a table scope id.
 *
 * First verifies via the RM database that the id is an allocated
 * TBL_SCOPE entry, then linearly scans tbl_scopes[] for a matching id.
 * Returns a pointer to the matching control block; presumably NULL on
 * failure (the tail of the function is not visible in this view —
 * NOTE(review): confirm against the full source).
 */
48 struct tf_tbl_scope_cb *
49 tbl_scope_cb_find(uint32_t tbl_scope_id)
52 struct tf_rm_is_allocated_parms parms = { 0 };
55 /* Check that id is valid */
/* Allocation state is tracked on the RX database only; RX/TX share
 * the table-scope id space here.
 */
56 parms.rm_db = eem_db[TF_DIR_RX];
57 parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
58 parms.index = tbl_scope_id;
59 parms.allocated = &allocated;
61 i = tf_rm_is_allocated(&parms);
/* Bail out unless the RM reports the entry as in use */
63 if (i < 0 || allocated != TF_RM_ALLOCATED_ENTRY_IN_USE)
66 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
67 if (tbl_scopes[i].tbl_scope_id == tbl_scope_id)
68 return &tbl_scopes[i];
/*
 * Create an external-table index pool for one direction of a table
 * scope: allocates an array of uint32_t indexes, initializes a stack
 * over it, and pushes entry offsets so allocations pop ready-to-use
 * byte offsets.
 *
 * [in] dir               RX/TX direction
 * [in] tbl_scope_cb      table scope owning the pool
 * [in] entry_sz_bytes    size of each pool entry in bytes
 * (additional parameters elided from this view)
 */
75 tf_create_tbl_pool_external(enum tf_dir dir,
76 struct tf_tbl_scope_cb *tbl_scope_cb,
78 uint32_t entry_sz_bytes)
80 struct tfp_calloc_parms parms;
84 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
86 parms.nitems = num_entries;
87 parms.size = sizeof(uint32_t);
90 if (tfp_calloc(&parms) != 0) {
91 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
92 tf_dir_2_str(dir), strerror(ENOMEM));
/* Stack storage lives in the calloc'd buffer just obtained */
98 rc = stack_init(num_entries, parms.mem_va, pool);
101 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
102 tf_dir_2_str(dir), strerror(-rc));
106 /* Save the malloced memory address so that it can
107 * be freed when the table scope is freed.
109 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
111 /* Fill pool with indexes in reverse
/* Start at the byte offset of the last entry and work downward so
 * the lowest offset ends up on top of the stack.
 */
113 j = (num_entries - 1) * entry_sz_bytes;
115 for (i = 0; i < num_entries; i++) {
116 rc = stack_push(pool, j);
118 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
119 tf_dir_2_str(dir), strerror(-rc));
124 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* Sanity check: after pushing every index the stack must be full */
131 if (!stack_is_full(pool)) {
133 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
134 tf_dir_2_str(dir), strerror(-rc));
/* Error path: release the pool memory on failure */
139 tfp_free((void *)parms.mem_va);
144 * Destroy External Tbl pool of memory indexes.
/* [in] dir    RX/TX direction whose pool is being destroyed */
149 * pointer to the table scope
152 tf_destroy_tbl_pool_external(enum tf_dir dir,
153 struct tf_tbl_scope_cb *tbl_scope_cb)
/* Free the index array saved by tf_create_tbl_pool_external() */
155 uint32_t *ext_act_pool_mem =
156 tbl_scope_cb->ext_act_pool_mem[dir];
158 tfp_free(ext_act_pool_mem);
162 * Allocate External Tbl entry from the scope pool.
165 * Pointer to Truflow Handle
167 * Allocation parameters
170 * 0 - Success, entry allocated - no search support
171 * -ENOMEM -EINVAL -EOPNOTSUPP
172 * - Failure, entry not allocated, out of resources
175 tf_tbl_ext_alloc(struct tf *tfp,
176 struct tf_tbl_alloc_parms *parms)
180 struct tf_tbl_scope_cb *tbl_scope_cb;
183 TF_CHECK_PARMS2(tfp, parms);
185 /* Get the pool info from the table scope
187 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
189 if (tbl_scope_cb == NULL) {
191 "%s, table scope not allocated\n",
192 tf_dir_2_str(parms->dir));
195 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
197 /* Allocate an element
/* Pop a pre-loaded byte offset from the direction's index stack;
 * an empty stack means the scope is out of external entries.
 */
199 rc = stack_pop(pool, &index);
203 "%s, Allocation failed, type:%d\n",
204 tf_dir_2_str(parms->dir),
214 * Free External Tbl entry to the scope pool.
217 * Pointer to Truflow Handle
219 * Allocation parameters
222 * 0 - Success, entry freed
224 * - Failure, entry not successfully freed for these reasons
230 tf_tbl_ext_free(struct tf *tfp,
231 struct tf_tbl_free_parms *parms)
235 struct tf_tbl_scope_cb *tbl_scope_cb;
238 TF_CHECK_PARMS2(tfp, parms);
240 /* Get the pool info from the table scope
242 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
244 if (tbl_scope_cb == NULL) {
246 "%s, table scope error\n",
247 tf_dir_2_str(parms->dir));
250 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Return the index to the pool; a full stack indicates a double
 * free or pool corruption, reported below.
 */
254 rc = stack_push(pool, index);
258 "%s, consistency error, stack full, type:%d, idx:%d\n",
259 tf_dir_2_str(parms->dir),
/*
 * Compute the hash mask for an EM key table of num_entries slots.
 * num_entries is expected to be a power of two; the branch bodies
 * (returning 0 for invalid sizes, presumably) are elided from this
 * view — NOTE(review): confirm against the full source.
 */
267 tf_em_get_key_mask(int num_entries)
269 uint32_t mask = num_entries - 1;
271 if (num_entries & TF_EM_MAX_MASK)
274 if (num_entries > TF_EM_MAX_ENTRY)
/*
 * Assemble a 64B EM key entry from the record header and the input
 * key. Copies word1/pointer from the result header, then the key
 * bytes (TF_HW_EM_KEY_MAX_SIZE + 4 bytes) into the entry.
 * (The in_key parameter line is elided from this view.)
 */
281 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
283 struct cfa_p4_eem_64b_entry *key_entry)
285 key_entry->hdr.word1 = result->word1;
286 key_entry->hdr.pointer = result->pointer;
287 memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
292 * Return the number of page table pages needed to
293 * reference the given number of next level pages.
299 * Size of each EM page
302 * Number of EM page table pages
305 tf_em_page_tbl_pgcnt(uint32_t num_pages,
/* Ceiling division: each page holds MAX_PAGE_PTRS(page_size) pointers */
308 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
309 MAX_PAGE_PTRS(page_size);
313 * Given the number of data pages, page_size and the maximum
314 * number of page table levels (already determined), size
315 * the number of page table pages required at each level.
318 * Max number of levels
320 * [in] num_data_pages
321 * Number of EM data pages
330 tf_em_size_page_tbls(int max_lvl,
331 uint64_t num_data_pages,
/* Walk down from the deepest level: the data pages sit at max_lvl
 * and each shallower level holds pointers to the level below.
 */
335 if (max_lvl == TF_PT_LVL_0) {
336 page_cnt[TF_PT_LVL_0] = num_data_pages;
337 } else if (max_lvl == TF_PT_LVL_1) {
338 page_cnt[TF_PT_LVL_1] = num_data_pages;
339 page_cnt[TF_PT_LVL_0] =
340 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
341 } else if (max_lvl == TF_PT_LVL_2) {
342 page_cnt[TF_PT_LVL_2] = num_data_pages;
343 page_cnt[TF_PT_LVL_1] =
344 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
345 page_cnt[TF_PT_LVL_0] =
346 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
353 * Given the page size, size of each data item (entry size),
354 * and the total number of entries needed, determine the number
355 * of page table levels and the number of data pages required.
364 * Number of entries needed
366 * [out] num_data_pages
367 * Number of pages required
370 * Success - Number of EM page levels required
371 * -ENOMEM - Out of memory
374 tf_em_size_page_tbl_lvl(uint32_t page_size,
376 uint32_t num_entries,
377 uint64_t *num_data_pages)
379 uint64_t lvl_data_size = page_size;
380 int lvl = TF_PT_LVL_0;
384 data_size = (uint64_t)num_entries * entry_size;
/* Grow the addressable size level by level until it covers
 * data_size; each extra level multiplies capacity by the number
 * of pointers per page.
 */
386 while (lvl_data_size < data_size) {
389 if (lvl == TF_PT_LVL_1)
390 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
392 else if (lvl == TF_PT_LVL_2)
393 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
394 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages = ceil(data_size / page_size) */
399 *num_data_pages = roundup(data_size, page_size) / page_size;
405 * Size the EM table based on capabilities
412 * - EINVAL - Parameter error
413 * - ENOMEM - Out of memory
416 tf_em_size_table(struct hcapi_cfa_em_table *tbl,
419 uint64_t num_data_pages;
422 uint32_t num_entries;
423 uint32_t cnt = TF_EM_MIN_ENTRIES;
425 /* Ignore entry if both size and number are zero */
426 if (!tbl->entry_size && !tbl->num_entries)
429 /* If only one is set then error */
430 if (!tbl->entry_size || !tbl->num_entries)
433 /* Determine number of page table levels and the number
434 * of data pages needed to process the given eem table.
436 if (tbl->type == TF_RECORD_TABLE) {
438 * For action records just a memory size is provided. Work
439 * backwards to resolve to number of entries
441 num_entries = tbl->num_entries / tbl->entry_size;
442 if (num_entries < TF_EM_MIN_ENTRIES) {
443 num_entries = TF_EM_MIN_ENTRIES;
/* Round num_entries up to the next supported power-of-two bucket */
445 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
450 num_entries = tbl->num_entries;
453 max_lvl = tf_em_size_page_tbl_lvl(page_size,
458 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
460 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
461 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* max_lvl is the deepest level index; levels in use = max_lvl + 1 */
466 tbl->num_lvl = max_lvl + 1;
467 tbl->num_data_pages = num_data_pages;
469 /* Determine the number of pages needed at each level */
470 page_cnt = tbl->page_cnt;
471 memset(page_cnt, 0, sizeof(tbl->page_cnt));
472 tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
475 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
477 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
478 " l0: %u l1: %u l2: %u\n",
480 (uint64_t)num_data_pages * page_size,
482 page_cnt[TF_PT_LVL_0],
483 page_cnt[TF_PT_LVL_1],
484 page_cnt[TF_PT_LVL_2]);
490 * Validates EM number of entries requested
493 * Pointer to table scope control block to be populated
496 * Pointer to input parameters
500 * -EINVAL - Parameter error
503 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
504 struct tf_alloc_tbl_scope_parms *parms)
/* --- Rx sizing: derive flow count from requested memory size --- */
508 if (parms->rx_mem_size_in_mb != 0) {
/* Two key-table copies (KEY0/KEY1), hence the factor of 2 */
509 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
510 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
512 uint32_t num_entries = (parms->rx_mem_size_in_mb *
513 TF_MEGABYTE) / (key_b + action_b);
515 if (num_entries < TF_EM_MIN_ENTRIES) {
516 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
518 parms->rx_mem_size_in_mb);
/* Round the Rx entry count up to a supported power-of-two bucket */
522 cnt = TF_EM_MIN_ENTRIES;
523 while (num_entries > cnt &&
524 cnt <= TF_EM_MAX_ENTRIES)
527 if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): this Rx-path error logs "Tx" and prints
 * tx_num_flows_in_k — looks like a copy/paste slip; confirm
 * against the full source before changing.
 */
528 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
530 (parms->tx_num_flows_in_k * TF_KILOBYTE));
534 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Validate derived Rx flow count against device capabilities */
536 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
538 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
539 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
541 "EEM: Invalid number of Rx flows "
542 "requested:%u max:%u\n",
543 parms->rx_num_flows_in_k * TF_KILOBYTE,
544 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
548 /* must be a power-of-2 supported value
549 * in the range 32K - 128M
551 cnt = TF_EM_MIN_ENTRIES;
552 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
553 cnt <= TF_EM_MAX_ENTRIES)
556 if (cnt > TF_EM_MAX_ENTRIES) {
558 "EEM: Invalid number of Rx requested: %u\n",
559 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* --- Tx sizing: mirrors the Rx path above --- */
564 if (parms->tx_mem_size_in_mb != 0) {
565 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
566 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
568 uint32_t num_entries = (parms->tx_mem_size_in_mb *
569 (TF_KILOBYTE * TF_KILOBYTE)) /
572 if (num_entries < TF_EM_MIN_ENTRIES) {
/* NOTE(review): Tx-path message prints rx_mem_size_in_mb —
 * likely a copy/paste slip; confirm against the full source.
 */
574 "EEM: Insufficient memory requested:%uMB\n",
575 parms->rx_mem_size_in_mb);
579 cnt = TF_EM_MIN_ENTRIES;
580 while (num_entries > cnt &&
581 cnt <= TF_EM_MAX_ENTRIES)
584 if (cnt > TF_EM_MAX_ENTRIES) {
586 "EEM: Invalid number of Tx requested: %u\n",
587 (parms->tx_num_flows_in_k * TF_KILOBYTE));
591 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
593 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
595 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
596 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
598 "EEM: Invalid number of Tx flows "
599 "requested:%u max:%u\n",
600 (parms->tx_num_flows_in_k * TF_KILOBYTE),
601 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
605 cnt = TF_EM_MIN_ENTRIES;
606 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
607 cnt <= TF_EM_MAX_ENTRIES)
610 if (cnt > TF_EM_MAX_ENTRIES) {
612 "EEM: Invalid number of Tx requested: %u\n",
613 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count requires a key of at least one byte */
618 if (parms->rx_num_flows_in_k != 0 &&
619 parms->rx_max_key_sz_in_bits / 8 == 0) {
621 "EEM: Rx key size required: %u\n",
622 (parms->rx_max_key_sz_in_bits));
626 if (parms->tx_num_flows_in_k != 0 &&
627 parms->tx_max_key_sz_in_bits / 8 == 0) {
629 "EEM: Tx key size required: %u\n",
630 (parms->tx_max_key_sz_in_bits));
/* --- Populate the per-direction EM table descriptors --- */
634 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
635 parms->rx_num_flows_in_k * TF_KILOBYTE;
636 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
637 parms->rx_max_key_sz_in_bits / 8;
639 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
640 parms->rx_num_flows_in_k * TF_KILOBYTE;
641 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
642 parms->rx_max_key_sz_in_bits / 8;
644 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
645 parms->rx_num_flows_in_k * TF_KILOBYTE;
646 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
647 parms->rx_max_action_entry_sz_in_bits / 8;
/* EFC table is unused in this configuration */
649 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
652 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
653 parms->tx_num_flows_in_k * TF_KILOBYTE;
654 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
655 parms->tx_max_key_sz_in_bits / 8;
657 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
658 parms->tx_num_flows_in_k * TF_KILOBYTE;
659 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
660 parms->tx_max_key_sz_in_bits / 8;
662 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
663 parms->tx_num_flows_in_k * TF_KILOBYTE;
664 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
665 parms->tx_max_action_entry_sz_in_bits / 8;
667 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
672 /** insert EEM entry API
676 * TF_ERR - unable to get lock
678 * insert callback returns:
680 * TF_ERR_EM_DUP - key is already in table
683 tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
684 struct tf_insert_em_entry_parms *parms)
691 struct cfa_p4_eem_64b_entry key_entry;
693 enum hcapi_cfa_em_table_type table_type;
695 struct hcapi_cfa_hwop op;
696 struct hcapi_cfa_key_tbl key_tbl;
697 struct hcapi_cfa_key_data key_obj;
698 struct hcapi_cfa_key_loc key_loc;
702 /* Get mask to use on hash */
703 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
709 dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
/* One 64-bit hash of the key yields both bucket indexes: high word
 * for KEY0, low word for KEY1 (cuckoo-style dual placement).
 */
712 big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
713 (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
714 key0_hash = (uint32_t)(big_hash >> 32);
715 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
717 key0_index = key0_hash & mask;
718 key1_index = key1_hash & mask;
721 TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
722 TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
725 * Use the "result" arg to populate all of the key entry then
726 * store the byte swapped "raw" entry in a local copy ready
727 * for insertion in to the table.
729 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
730 ((uint8_t *)parms->key),
734 * Try to add to Key0 table, if that does not work then
735 * try the key1 table.
738 op.opcode = HCAPI_CFA_HWOPS_ADD;
740 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
741 key_tbl.page_size = TF_EM_PAGE_SIZE;
742 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
743 key_obj.data = (uint8_t *)&key_entry;
744 key_obj.size = TF_EM_KEY_RECORD_SIZE;
746 rc = hcapi_cfa_key_hw_op(&op,
752 table_type = TF_KEY0_TABLE;
/* KEY0 insert failed; retry the same entry in the KEY1 table */
757 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
758 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
760 rc = hcapi_cfa_key_hw_op(&op,
767 table_type = TF_KEY1_TABLE;
/* Encode which table/index holds the entry into the caller's
 * flow id / flow handle for later delete.
 */
773 TF_SET_FLOW_ID(parms->flow_id,
775 TF_GFID_TABLE_EXTERNAL,
777 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
788 /** delete EEM hash entry API
792 * -EINVAL - parameter error
793 * TF_NO_SESSION - bad session ID
794 * TF_ERR_TBL_SCOPE - invalid table scope
795 * TF_ERR_TBL_IF - invalid table interface
797 * insert callback returns
799 * TF_NO_EM_MATCH - entry not found
802 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
803 struct tf_delete_em_entry_parms *parms)
805 enum hcapi_cfa_em_table_type hash_type;
807 struct hcapi_cfa_hwop op;
808 struct hcapi_cfa_key_tbl key_tbl;
809 struct hcapi_cfa_key_data key_obj;
810 struct hcapi_cfa_key_loc key_loc;
/* The flow handle encodes which key table (0/1) and which index
 * the entry was inserted at — see tf_insert_eem_entry().
 */
813 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
814 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
816 op.opcode = HCAPI_CFA_HWOPS_DEL;
818 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
819 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
820 key_tbl.page_size = TF_EM_PAGE_SIZE;
821 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
823 key_obj.size = TF_EM_KEY_RECORD_SIZE;
825 rc = hcapi_cfa_key_hw_op(&op,
836 /** insert EM hash entry API
/* Thin wrapper: resolves the table scope control block from the
 * parms, then delegates to tf_insert_eem_entry().
 */
843 tf_em_insert_ext_entry(struct tf *tfp __rte_unused,
844 struct tf_insert_em_entry_parms *parms)
846 struct tf_tbl_scope_cb *tbl_scope_cb;
848 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
849 if (tbl_scope_cb == NULL) {
850 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
854 return tf_insert_eem_entry
859 /** Delete EM hash entry API
/* Thin wrapper: resolves the table scope control block, then
 * delegates to tf_delete_eem_entry().
 */
866 tf_em_delete_ext_entry(struct tf *tfp __rte_unused,
867 struct tf_delete_em_entry_parms *parms)
869 struct tf_tbl_scope_cb *tbl_scope_cb;
871 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
872 if (tbl_scope_cb == NULL) {
873 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
877 return tf_delete_eem_entry(tbl_scope_cb, parms);
/*
 * Bind the EM-extension module: create one RM database per direction
 * (when table-scope resources were requested) and record the memory
 * type for later allocations.
 */
882 tf_em_ext_common_bind(struct tf *tfp,
883 struct tf_em_cfg_parms *parms)
887 struct tf_rm_create_db_parms db_cfg = { 0 };
888 uint8_t db_exists = 0;
890 TF_CHECK_PARMS2(tfp, parms);
894 "EM Ext DB already initialized\n";
898 db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
899 db_cfg.num_elements = parms->num_elements;
900 db_cfg.cfg = parms->cfg;
902 for (i = 0; i < TF_DIR_MAX; i++) {
904 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
906 /* Check if we got any request to support EEM, if so
907 * we build an EM Ext DB holding Table Scopes.
909 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
912 db_cfg.rm_db = &eem_db[i];
913 rc = tf_rm_create_db(tfp, &db_cfg);
916 "%s: EM Ext DB creation failed\n",
/* Remember host vs external backing for subsequent EM table ops */
927 mem_type = parms->mem_type;
/*
 * Unbind the EM-extension module: free each direction's RM database
 * created in tf_em_ext_common_bind(). Bails out early if nothing was
 * initialized.
 */
933 tf_em_ext_common_unbind(struct tf *tfp)
937 struct tf_rm_free_db_parms fparms = { 0 };
939 TF_CHECK_PARMS1(tfp);
941 /* Bail if nothing has been initialized */
944 "No EM Ext DBs created\n");
948 for (i = 0; i < TF_DIR_MAX; i++) {
950 fparms.rm_db = eem_db[i];
951 rc = tf_rm_free_db(tfp, &fparms);
964 * Sets the specified external table type element.
966 * This API sets the specified element data
969 * Pointer to TF handle
972 * Pointer to table set parameters
975 * - (0) if successful.
976 * - (-EINVAL) on failure.
978 int tf_tbl_ext_common_set(struct tf *tfp,
979 struct tf_tbl_set_parms *parms)
982 struct tf_tbl_scope_cb *tbl_scope_cb;
983 uint32_t tbl_scope_id;
984 struct hcapi_cfa_hwop op;
985 struct hcapi_cfa_key_tbl key_tbl;
986 struct hcapi_cfa_key_data key_obj;
987 struct hcapi_cfa_key_loc key_loc;
989 TF_CHECK_PARMS2(tfp, parms);
991 if (parms->data == NULL) {
993 "%s, invalid parms->data\n",
994 tf_dir_2_str(parms->dir));
998 tbl_scope_id = parms->tbl_scope_id;
1000 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1002 "%s, Table scope not allocated\n",
1003 tf_dir_2_str(parms->dir));
1007 /* Get the table scope control block associated with the
1010 tbl_scope_cb = tbl_scope_cb_find(tbl_scope_id);
1012 if (tbl_scope_cb == NULL) {
1014 "%s, table scope error\n",
1015 tf_dir_2_str(parms->dir));
/* Write the caller's data into the action-record table at the
 * requested offset via the key HW-op layer.
 */
1019 op.opcode = HCAPI_CFA_HWOPS_PUT;
1021 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1022 key_tbl.page_size = TF_EM_PAGE_SIZE;
1023 key_obj.offset = parms->idx;
1024 key_obj.data = parms->data;
1025 key_obj.size = parms->data_sz_in_bytes;
1027 rc = hcapi_cfa_key_hw_op(&op,
/* Allocate a table scope: delegates to the device-specific
 * tf_em_ext_alloc() implementation.
 */
1036 tf_em_ext_common_alloc(struct tf *tfp,
1037 struct tf_alloc_tbl_scope_parms *parms)
1039 return tf_em_ext_alloc(tfp, parms);
/* Free a table scope: delegates to the device-specific
 * tf_em_ext_free() implementation.
 */
1043 tf_em_ext_common_free(struct tf *tfp,
1044 struct tf_free_tbl_scope_parms *parms)
1046 return tf_em_ext_free(tfp, parms);
1049 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1050 struct tf_map_tbl_scope_parms *parms)
1053 struct tf_session *tfs;
1054 struct tf_tbl_scope_cb *tbl_scope_cb;
1055 struct tf_global_cfg_parms gcfg_parms = { 0 };
1056 struct tfp_calloc_parms aparms;
1057 uint32_t *data, *mask;
1058 uint32_t sz_in_bytes = 8;
1059 struct tf_dev_info *dev;
1061 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
1063 if (tbl_scope_cb == NULL) {
1064 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1065 parms->tbl_scope_id);
1069 /* Retrieve the session information */
1070 rc = tf_session_get_session_internal(tfp, &tfs);
1074 /* Retrieve the device information */
1075 rc = tf_session_get_device(tfs, &dev);
1079 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1082 "Map table scope operation not supported, rc:%s\n",
1088 aparms.size = sizeof(uint32_t);
1089 aparms.alignment = 0;
1091 if (tfp_calloc(&aparms) != 0) {
1092 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1096 data = aparms.mem_va;
1098 if (tfp_calloc(&aparms) != 0) {
1099 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1104 mask = aparms.mem_va;
1106 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1108 (uint8_t *)data, (uint8_t *)mask,
1113 "Map table scope config failure, rc:%s\n",
1118 /* Note that TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF is same as below enum */
1119 gcfg_parms.type = TF_GLOBAL_CFG_TYPE_MAX;
1120 gcfg_parms.offset = 0;
1121 gcfg_parms.config = (uint8_t *)data;
1122 gcfg_parms.config_mask = (uint8_t *)mask;
1123 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1126 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1129 "Map tbl scope, set failed, rc:%s\n",