1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "cfa_resource_types.h"
26 /* Number of pointers per page_size */
27 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
/* Per-direction EM-external RM database handles. Created in
 * tf_em_ext_common_bind() and released in tf_em_ext_common_unbind().
 */
32 void *eem_db[TF_DIR_MAX];
35 * Init flag, set on bind and cleared on unbind
/* Host vs system memory selection, captured from the bind parameters
 * (see tf_em_ext_common_bind()).
 */
42 static enum tf_mem_type mem_type;
44 /** Table scope array */
45 struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
47 /* API defined in tf_em.h */
/**
 * Find the table scope control block for a given table scope id.
 *
 * First validates the id against the RX-direction EM RM database
 * (table scopes are tracked there), then linearly scans the global
 * tbl_scopes[] array for a matching tbl_scope_id.
 *
 * Returns a pointer to the matching control block, or (presumably,
 * in elided lines) NULL when the id is not allocated or not found —
 * callers below treat NULL as "not found".
 */
48 struct tf_tbl_scope_cb *
49 tbl_scope_cb_find(uint32_t tbl_scope_id)
52 struct tf_rm_is_allocated_parms parms = { 0 };
55 /* Check that id is valid */
56 parms.rm_db = eem_db[TF_DIR_RX];
57 parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
58 parms.index = tbl_scope_id;
59 parms.allocated = &allocated;
61 i = tf_rm_is_allocated(&parms);
/* Reject ids the RM layer does not report as in-use */
63 if (i < 0 || allocated != TF_RM_ALLOCATED_ENTRY_IN_USE)
66 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
67 if (tbl_scopes[i].tbl_scope_id == tbl_scope_id)
68 return &tbl_scopes[i];
/**
 * Create the external-table index pool for one direction.
 *
 * Allocates an array of num_entries uint32_t slots, initializes a
 * stack over it, and pushes one byte offset per entry (in reverse
 * order, so the lowest offset is popped first by tf_tbl_ext_alloc()).
 * The backing allocation is remembered in ext_act_pool_mem[dir] so it
 * can be freed when the table scope is destroyed.
 */
75 tf_create_tbl_pool_external(enum tf_dir dir,
76 struct tf_tbl_scope_cb *tbl_scope_cb,
78 uint32_t entry_sz_bytes)
80 struct tfp_calloc_parms parms;
84 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
86 parms.nitems = num_entries;
87 parms.size = sizeof(uint32_t);
90 if (tfp_calloc(&parms) != 0) {
91 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
92 tf_dir_2_str(dir), strerror(ENOMEM));
98 rc = stack_init(num_entries, parms.mem_va, pool);
101 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
102 tf_dir_2_str(dir), strerror(-rc));
106 /* Save the malloced memory address so that it can
107 * be freed when the table scope is freed.
109 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
111 /* Fill pool with indexes in reverse
/* Start at the offset of the last entry; the (elided) loop body
 * presumably decrements j by entry_sz_bytes per iteration.
 */
113 j = (num_entries - 1) * entry_sz_bytes;
115 for (i = 0; i < num_entries; i++) {
116 rc = stack_push(pool, j);
118 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
119 tf_dir_2_str(dir), strerror(-rc));
124 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* Sanity check: after pushing every index the stack must be full */
131 if (!stack_is_full(pool)) {
133 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
134 tf_dir_2_str(dir), strerror(-rc));
/* Error-path cleanup: release the pool backing memory.
 * NOTE(review): appears to be reached only on failure — confirm
 * against the elided labels/returns.
 */
139 tfp_free((void *)parms.mem_va);
144 * Destroy External Tbl pool of memory indexes.
149 * pointer to the table scope
/* Frees the index-pool backing memory saved by
 * tf_create_tbl_pool_external() for the given direction.
 */
152 tf_destroy_tbl_pool_external(enum tf_dir dir,
153 struct tf_tbl_scope_cb *tbl_scope_cb)
155 uint32_t *ext_act_pool_mem =
156 tbl_scope_cb->ext_act_pool_mem[dir];
158 tfp_free(ext_act_pool_mem);
162 * Allocate External Tbl entry from the scope pool.
165 * Pointer to Truflow Handle
167 * Allocation parameters
170 * 0 - Success, entry allocated - no search support
171 * -ENOMEM -EINVAL -EOPNOTSUPP
172 * - Failure, entry not allocated, out of resources
/* Pops one free index (a byte offset, see
 * tf_create_tbl_pool_external()) from the per-direction pool of the
 * table scope identified by parms->tbl_scope_id.
 */
175 tf_tbl_ext_alloc(struct tf *tfp,
176 struct tf_tbl_alloc_parms *parms)
180 struct tf_tbl_scope_cb *tbl_scope_cb;
183 TF_CHECK_PARMS2(tfp, parms);
185 /* Get the pool info from the table scope
187 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
189 if (tbl_scope_cb == NULL) {
191 "%s, table scope not allocated\n",
192 tf_dir_2_str(parms->dir));
195 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
197 /* Allocate an element
199 rc = stack_pop(pool, &index);
203 "%s, Allocation failed, type:%d\n",
204 tf_dir_2_str(parms->dir),
214 * Free External Tbl entry to the scope pool.
217 * Pointer to Truflow Handle
219 * Allocation parameters
222 * 0 - Success, entry freed
224 * - Failure, entry not successfully freed for these reasons
/* Returns a previously allocated index to the per-direction pool.
 * A full stack on push indicates a double free / accounting bug and
 * is reported as a consistency error.
 */
230 tf_tbl_ext_free(struct tf *tfp,
231 struct tf_tbl_free_parms *parms)
235 struct tf_tbl_scope_cb *tbl_scope_cb;
238 TF_CHECK_PARMS2(tfp, parms);
240 /* Get the pool info from the table scope
242 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
244 if (tbl_scope_cb == NULL) {
246 "%s, table scope error\n",
247 tf_dir_2_str(parms->dir));
250 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
254 rc = stack_push(pool, index);
258 "%s, consistency error, stack full, type:%d, idx:%d\n",
259 tf_dir_2_str(parms->dir),
/* Compute the hash mask for an EM key table.
 * mask = num_entries - 1 is only a valid mask when num_entries is a
 * power of two; the two guards below reject other/over-limit sizes
 * (return values are in elided lines).
 */
267 tf_em_get_key_mask(int num_entries)
269 uint32_t mask = num_entries - 1;
271 if (num_entries & TF_EM_MAX_MASK)
274 if (num_entries > TF_EM_MAX_ENTRY)
/* Build a 64B EM key entry: copy the record header (word1 + pointer)
 * from "result" and the raw key bytes into key_entry. The key copy
 * length is the max HW key size plus 4 bytes (header/metadata bytes
 * carried with the key by the callers in this file).
 */
281 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
283 struct cfa_p4_eem_64b_entry *key_entry)
285 key_entry->hdr.word1 = result->word1;
286 key_entry->hdr.pointer = result->pointer;
287 memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
292 * Return the number of page table pages needed to
293 * reference the given number of next level pages.
299 * Size of each EM page
302 * Number of EM page table pages
/* Ceiling division: each page-table page holds
 * MAX_PAGE_PTRS(page_size) pointers to next-level pages.
 */
305 tf_em_page_tbl_pgcnt(uint32_t num_pages,
308 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
309 MAX_PAGE_PTRS(page_size);
314 * Given the number of data pages, page_size and the maximum
315 * number of page table levels (already determined), size
316 * the number of page table pages required at each level.
319 * Max number of levels
321 * [in] num_data_pages
322 * Number of EM data pages
/* Fills page_cnt[] top-down: the deepest level holds the data pages,
 * and each level above holds enough page-table pages to point at the
 * level below (via tf_em_page_tbl_pgcnt()).
 */
331 tf_em_size_page_tbls(int max_lvl,
332 uint64_t num_data_pages,
336 if (max_lvl == TF_PT_LVL_0) {
337 page_cnt[TF_PT_LVL_0] = num_data_pages;
338 } else if (max_lvl == TF_PT_LVL_1) {
339 page_cnt[TF_PT_LVL_1] = num_data_pages;
340 page_cnt[TF_PT_LVL_0] =
341 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
342 } else if (max_lvl == TF_PT_LVL_2) {
343 page_cnt[TF_PT_LVL_2] = num_data_pages;
344 page_cnt[TF_PT_LVL_1] =
345 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
346 page_cnt[TF_PT_LVL_0] =
347 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
354 * Given the page size, size of each data item (entry size),
355 * and the total number of entries needed, determine the number
356 * of page table levels and the number of data pages required.
365 * Number of entries needed
367 * [out] num_data_pages
368 * Number of pages required
371 * Success - Number of EM page levels required
372 * -ENOMEM - Out of memory
/* Walks levels upward until the addressable size at the current
 * level covers data_size (= num_entries * entry_size); each extra
 * level multiplies capacity by MAX_PAGE_PTRS(page_size).
 */
375 tf_em_size_page_tbl_lvl(uint32_t page_size,
377 uint32_t num_entries,
378 uint64_t *num_data_pages)
380 uint64_t lvl_data_size = page_size;
381 int lvl = TF_PT_LVL_0;
385 data_size = (uint64_t)num_entries * entry_size;
387 while (lvl_data_size < data_size) {
390 if (lvl == TF_PT_LVL_1)
391 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
393 else if (lvl == TF_PT_LVL_2)
394 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
395 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages = ceil(data_size / page_size) */
400 *num_data_pages = roundup(data_size, page_size) / page_size;
406 * Size the EM table based on capabilities
413 * - EINVAL - Parameter error
414 * - ENOMEM - Out of memory
/* Sizes one EM table: derives the entry count (for action/record
 * tables the caller supplies a byte size, which is converted back to
 * entries and rounded up to a supported power-of-two count), computes
 * page-table levels and data pages, then fills in num_lvl,
 * num_data_pages and the per-level page_cnt[] array.
 */
417 tf_em_size_table(struct hcapi_cfa_em_table *tbl,
420 uint64_t num_data_pages;
423 uint32_t num_entries;
424 uint32_t cnt = TF_EM_MIN_ENTRIES;
426 /* Ignore entry if both size and number are zero */
427 if (!tbl->entry_size && !tbl->num_entries)
430 /* If only one is set then error */
431 if (!tbl->entry_size || !tbl->num_entries)
434 /* Determine number of page table levels and the number
435 * of data pages needed to process the given eem table.
437 if (tbl->type == TF_RECORD_TABLE) {
439 * For action records just a memory size is provided. Work
440 * backwards to resolve to number of entries
442 num_entries = tbl->num_entries / tbl->entry_size;
443 if (num_entries < TF_EM_MIN_ENTRIES) {
444 num_entries = TF_EM_MIN_ENTRIES;
/* Round cnt up to the next supported power-of-two count
 * (loop body presumably doubles cnt in elided lines).
 */
446 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
451 num_entries = tbl->num_entries;
454 max_lvl = tf_em_size_page_tbl_lvl(page_size,
459 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
461 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
462 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* max_lvl is the index of the deepest level; total levels = +1 */
467 tbl->num_lvl = max_lvl + 1;
468 tbl->num_data_pages = num_data_pages;
470 /* Determine the number of pages needed at each level */
471 page_cnt = tbl->page_cnt;
472 memset(page_cnt, 0, sizeof(tbl->page_cnt));
473 tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
476 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
478 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
479 " l0: %u l1: %u l2: %u\n",
481 (uint64_t)num_data_pages * page_size,
483 page_cnt[TF_PT_LVL_0],
484 page_cnt[TF_PT_LVL_1],
485 page_cnt[TF_PT_LVL_2]);
491 * Validates EM number of entries requested
494 * Pointer to table scope control block to be populated
497 * Pointer to input parameters
501 * -EINVAL - Parameter error
/* Validates and normalizes the requested Rx/Tx EM sizing: converts a
 * memory-size request (MB) into a supported power-of-two flow count,
 * range-checks against device caps, requires a non-zero key size when
 * flows are requested, and finally populates the KEY0/KEY1/RECORD/EFC
 * table num_entries/entry_size fields in the control block.
 */
504 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
505 struct tf_alloc_tbl_scope_parms *parms)
509 if (parms->rx_mem_size_in_mb != 0) {
/* Per-flow cost: two key copies (KEY0 + KEY1 tables) plus one
 * action record; +1 rounds the key bytes up.
 */
510 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
511 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
513 uint32_t num_entries = (parms->rx_mem_size_in_mb *
514 TF_MEGABYTE) / (key_b + action_b);
516 if (num_entries < TF_EM_MIN_ENTRIES) {
517 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
519 parms->rx_mem_size_in_mb);
523 cnt = TF_EM_MIN_ENTRIES;
524 while (num_entries > cnt &&
525 cnt <= TF_EM_MAX_ENTRIES)
528 if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): this is the Rx branch but the message says "Tx"
 * and logs tx_num_flows_in_k — looks like a copy/paste defect;
 * should reference the Rx request. Left as-is (excerpt).
 */
529 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
531 (parms->tx_num_flows_in_k * TF_KILOBYTE));
535 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
537 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
539 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
540 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
542 "EEM: Invalid number of Rx flows "
543 "requested:%u max:%u\n",
544 parms->rx_num_flows_in_k * TF_KILOBYTE,
545 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
549 /* must be a power-of-2 supported value
550 * in the range 32K - 128M
552 cnt = TF_EM_MIN_ENTRIES;
553 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
554 cnt <= TF_EM_MAX_ENTRIES)
557 if (cnt > TF_EM_MAX_ENTRIES) {
559 "EEM: Invalid number of Rx requested: %u\n",
560 (parms->rx_num_flows_in_k * TF_KILOBYTE));
565 if (parms->tx_mem_size_in_mb != 0) {
566 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
567 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
569 uint32_t num_entries = (parms->tx_mem_size_in_mb *
570 (TF_KILOBYTE * TF_KILOBYTE)) /
573 if (num_entries < TF_EM_MIN_ENTRIES) {
575 "EEM: Insufficient memory requested:%uMB\n",
/* NOTE(review): Tx branch logging the Rx memory size —
 * presumably should be tx_mem_size_in_mb; left as-is (excerpt).
 */
576 parms->rx_mem_size_in_mb);
580 cnt = TF_EM_MIN_ENTRIES;
581 while (num_entries > cnt &&
582 cnt <= TF_EM_MAX_ENTRIES)
585 if (cnt > TF_EM_MAX_ENTRIES) {
587 "EEM: Invalid number of Tx requested: %u\n",
588 (parms->tx_num_flows_in_k * TF_KILOBYTE));
592 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
594 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
596 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
597 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
599 "EEM: Invalid number of Tx flows "
600 "requested:%u max:%u\n",
601 (parms->tx_num_flows_in_k * TF_KILOBYTE),
602 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
606 cnt = TF_EM_MIN_ENTRIES;
607 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
608 cnt <= TF_EM_MAX_ENTRIES)
611 if (cnt > TF_EM_MAX_ENTRIES) {
613 "EEM: Invalid number of Tx requested: %u\n",
614 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count with a key that rounds to zero bytes is
 * unusable — reject for both directions.
 */
619 if (parms->rx_num_flows_in_k != 0 &&
620 parms->rx_max_key_sz_in_bits / 8 == 0) {
622 "EEM: Rx key size required: %u\n",
623 (parms->rx_max_key_sz_in_bits))
627 if (parms->tx_num_flows_in_k != 0 &&
628 parms->tx_max_key_sz_in_bits / 8 == 0) {
630 "EEM: Tx key size required: %u\n",
631 (parms->tx_max_key_sz_in_bits));
/* Populate the per-direction EM table geometry from the validated
 * request; EFC tables are explicitly disabled (0 entries).
 */
635 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
636 parms->rx_num_flows_in_k * TF_KILOBYTE;
637 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
638 parms->rx_max_key_sz_in_bits / 8;
640 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
641 parms->rx_num_flows_in_k * TF_KILOBYTE;
642 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
643 parms->rx_max_key_sz_in_bits / 8;
645 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
646 parms->rx_num_flows_in_k * TF_KILOBYTE;
647 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
648 parms->rx_max_action_entry_sz_in_bits / 8;
650 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
653 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
654 parms->tx_num_flows_in_k * TF_KILOBYTE;
655 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
656 parms->tx_max_key_sz_in_bits / 8;
658 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
659 parms->tx_num_flows_in_k * TF_KILOBYTE;
660 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
661 parms->tx_max_key_sz_in_bits / 8;
663 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
664 parms->tx_num_flows_in_k * TF_KILOBYTE;
665 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
666 parms->tx_max_action_entry_sz_in_bits / 8;
668 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
673 /** insert EEM entry API
677 * TF_ERR - unable to get lock
679 * insert callback returns:
681 * TF_ERR_EM_DUP - key is already in table
/* Inserts an EEM entry using cuckoo-style two-table placement:
 * a 64-bit hash of the key yields two candidate indexes (high word
 * for KEY0, low word for KEY1); insertion is attempted into KEY0
 * first and falls back to KEY1, then the flow id/handle are encoded
 * with the winning table and index.
 */
684 tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
685 struct tf_insert_em_entry_parms *parms)
692 struct cfa_p4_eem_64b_entry key_entry;
694 enum hcapi_cfa_em_table_type table_type;
696 struct hcapi_cfa_hwop op;
697 struct hcapi_cfa_key_tbl key_tbl;
698 struct hcapi_cfa_key_data key_obj;
699 struct hcapi_cfa_key_loc key_loc;
703 /* Get mask to use on hash */
704 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
710 dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
/* One 64-bit hash provides both candidate indexes */
713 big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
714 (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
715 key0_hash = (uint32_t)(big_hash >> 32);
716 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
718 key0_index = key0_hash & mask;
719 key1_index = key1_hash & mask;
722 TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
723 TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
726 * Use the "result" arg to populate all of the key entry then
727 * store the byte swapped "raw" entry in a local copy ready
728 * for insertion in to the table.
730 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
731 ((uint8_t *)parms->key),
735 * Try to add to Key0 table, if that does not work then
736 * try the key1 table.
739 op.opcode = HCAPI_CFA_HWOPS_ADD;
741 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
742 key_tbl.page_size = TF_EM_PAGE_SIZE;
743 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
744 key_obj.data = (uint8_t *)&key_entry;
745 key_obj.size = TF_EM_KEY_RECORD_SIZE;
747 rc = hcapi_cfa_key_hw_op(&op,
753 table_type = TF_KEY0_TABLE;
/* KEY0 insert failed (presumably rc checked in elided lines) —
 * retry against the KEY1 table at the alternate index.
 */
758 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
759 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
761 rc = hcapi_cfa_key_hw_op(&op,
768 table_type = TF_KEY1_TABLE;
/* Encode the external GFID + table/index into the caller's
 * flow id and flow handle.
 */
774 TF_SET_FLOW_ID(parms->flow_id,
776 TF_GFID_TABLE_EXTERNAL,
778 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
789 /** delete EEM hash entry API
793 * -EINVAL - parameter error
794 * TF_NO_SESSION - bad session ID
795 * TF_ERR_TBL_SCOPE - invalid table scope
796 * TF_ERR_TBL_IF - invalid table interface
798 * insert callback returns
800 * TF_NO_EM_MATCH - entry not found
/* Deletes an EEM entry: the table (KEY0 vs KEY1) and record index
 * are decoded from the flow handle set at insert time, then a HW
 * delete op is issued at that offset.
 */
803 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
804 struct tf_delete_em_entry_parms *parms)
806 enum hcapi_cfa_em_table_type hash_type;
808 struct hcapi_cfa_hwop op;
809 struct hcapi_cfa_key_tbl key_tbl;
810 struct hcapi_cfa_key_data key_obj;
811 struct hcapi_cfa_key_loc key_loc;
814 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
815 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
817 op.opcode = HCAPI_CFA_HWOPS_DEL;
/* hash_type 0 selects KEY0, anything else KEY1 */
819 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
820 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
821 key_tbl.page_size = TF_EM_PAGE_SIZE;
822 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
824 key_obj.size = TF_EM_KEY_RECORD_SIZE;
826 rc = hcapi_cfa_key_hw_op(&op,
837 /** insert EM hash entry API
/* Thin wrapper: resolve the table scope control block from
 * parms->tbl_scope_id and delegate to tf_insert_eem_entry().
 */
844 tf_em_insert_ext_entry(struct tf *tfp __rte_unused,
845 struct tf_insert_em_entry_parms *parms)
847 struct tf_tbl_scope_cb *tbl_scope_cb;
849 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
850 if (tbl_scope_cb == NULL) {
851 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
855 return tf_insert_eem_entry
860 /** Delete EM hash entry API
/* Thin wrapper: resolve the table scope control block from
 * parms->tbl_scope_id and delegate to tf_delete_eem_entry().
 */
867 tf_em_delete_ext_entry(struct tf *tfp __rte_unused,
868 struct tf_delete_em_entry_parms *parms)
870 struct tf_tbl_scope_cb *tbl_scope_cb;
872 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
873 if (tbl_scope_cb == NULL) {
874 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
878 return tf_delete_eem_entry(tbl_scope_cb, parms);
/* Bind the EM-external module: for each direction that requests
 * table-scope resources, create an RM database in eem_db[]. Records
 * the configured memory type in the file-scope mem_type. Errors out
 * if already initialized (per the elided init-flag check).
 */
883 tf_em_ext_common_bind(struct tf *tfp,
884 struct tf_em_cfg_parms *parms)
888 struct tf_rm_create_db_parms db_cfg = { 0 };
889 uint8_t db_exists = 0;
891 TF_CHECK_PARMS2(tfp, parms);
895 "EM Ext DB already initialized\n");
899 db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
900 db_cfg.num_elements = parms->num_elements;
901 db_cfg.cfg = parms->cfg;
903 for (i = 0; i < TF_DIR_MAX; i++) {
905 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
907 /* Check if we got any request to support EEM, if so
908 * we build an EM Ext DB holding Table Scopes.
910 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
913 db_cfg.rm_db = &eem_db[i];
914 rc = tf_rm_create_db(tfp, &db_cfg);
917 "%s: EM Ext DB creation failed\n",
928 mem_type = parms->mem_type;
/* Unbind the EM-external module: free each per-direction RM database
 * created by tf_em_ext_common_bind(). Bails (per the elided init-flag
 * check) when nothing was initialized.
 */
934 tf_em_ext_common_unbind(struct tf *tfp)
938 struct tf_rm_free_db_parms fparms = { 0 };
940 TF_CHECK_PARMS1(tfp);
942 /* Bail if nothing has been initialized */
945 "No EM Ext DBs created\n");
949 for (i = 0; i < TF_DIR_MAX; i++) {
951 fparms.rm_db = eem_db[i];
952 rc = tf_rm_free_db(tfp, &fparms);
965 * Sets the specified external table type element.
967 * This API sets the specified element data
970 * Pointer to TF handle
973 * Pointer to table set parameters
976 * - (0) if successful.
977 * - (-EINVAL) on failure.
/* Writes caller-supplied data into the external action RECORD table
 * at byte offset parms->idx via a HW PUT op. Validates the data
 * pointer, the table scope id, and that the scope's control block
 * exists before issuing the op.
 */
979 int tf_tbl_ext_common_set(struct tf *tfp,
980 struct tf_tbl_set_parms *parms)
983 struct tf_tbl_scope_cb *tbl_scope_cb;
984 uint32_t tbl_scope_id;
985 struct hcapi_cfa_hwop op;
986 struct hcapi_cfa_key_tbl key_tbl;
987 struct hcapi_cfa_key_data key_obj;
988 struct hcapi_cfa_key_loc key_loc;
990 TF_CHECK_PARMS2(tfp, parms);
992 if (parms->data == NULL) {
994 "%s, invalid parms->data\n",
995 tf_dir_2_str(parms->dir));
999 tbl_scope_id = parms->tbl_scope_id;
1001 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1003 "%s, Table scope not allocated\n",
1004 tf_dir_2_str(parms->dir));
1008 /* Get the table scope control block associated with the
1011 tbl_scope_cb = tbl_scope_cb_find(tbl_scope_id);
1013 if (tbl_scope_cb == NULL) {
1015 "%s, table scope error\n",
1016 tf_dir_2_str(parms->dir));
1020 op.opcode = HCAPI_CFA_HWOPS_PUT;
1022 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1023 key_tbl.page_size = TF_EM_PAGE_SIZE;
/* NOTE(review): offset is used directly (not scaled by a record
 * size) — parms->idx here is presumably already a byte offset, as
 * produced by the external pool; confirm against callers.
 */
1024 key_obj.offset = parms->idx;
1025 key_obj.data = parms->data;
1026 key_obj.size = parms->data_sz_in_bytes;
1028 rc = hcapi_cfa_key_hw_op(&op,
/* Dispatch table-scope allocation to the EM-external implementation. */
1037 tf_em_ext_common_alloc(struct tf *tfp,
1038 struct tf_alloc_tbl_scope_parms *parms)
1040 return tf_em_ext_alloc(tfp, parms);
/* Dispatch table-scope free to the EM-external implementation. */
1044 tf_em_ext_common_free(struct tf *tfp,
1045 struct tf_free_tbl_scope_parms *parms)
1047 return tf_em_ext_free(tfp, parms);
1050 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1051 struct tf_map_tbl_scope_parms *parms)
1054 struct tf_session *tfs;
1055 struct tf_tbl_scope_cb *tbl_scope_cb;
1056 struct tf_global_cfg_parms gcfg_parms = { 0 };
1057 struct tfp_calloc_parms aparms;
1058 uint32_t *data, *mask;
1059 uint32_t sz_in_bytes = 8;
1060 struct tf_dev_info *dev;
1062 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
1064 if (tbl_scope_cb == NULL) {
1065 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1066 parms->tbl_scope_id);
1070 /* Retrieve the session information */
1071 rc = tf_session_get_session_internal(tfp, &tfs);
1075 /* Retrieve the device information */
1076 rc = tf_session_get_device(tfs, &dev);
1080 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1083 "Map table scope operation not supported, rc:%s\n",
1089 aparms.size = sizeof(uint32_t);
1090 aparms.alignment = 0;
1092 if (tfp_calloc(&aparms) != 0) {
1093 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1097 data = aparms.mem_va;
1099 if (tfp_calloc(&aparms) != 0) {
1100 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1105 mask = aparms.mem_va;
1107 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1109 (uint8_t *)data, (uint8_t *)mask,
1114 "Map table scope config failure, rc:%s\n",
1120 (enum tf_global_config_type)TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF;
1121 gcfg_parms.offset = 0;
1122 gcfg_parms.config = (uint8_t *)data;
1123 gcfg_parms.config_mask = (uint8_t *)mask;
1124 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1127 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1130 "Map tbl scope, set failed, rc:%s\n",