1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "hcapi_cfa.h"
/* Number of pointers per page_size */
#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
* Init flag, set on bind and cleared on unbind
/* Memory type of the external EM tables; recorded in
 * tf_em_ext_common_bind() from parms->mem_type. Further consumers are
 * not visible in this chunk of the file.
 */
static enum tf_mem_type mem_type;
/* API defined in tf_em.h */
/*
 * Create the external action-table index pool for one direction.
 *
 * Allocates an array of uint32_t slots, builds a stack on top of it,
 * and pushes one byte offset per entry so later allocations can be
 * served by stack_pop(). The backing memory pointer is saved in the
 * table scope so it can be released when the scope is freed.
 * NOTE(review): interior lines (returns, some braces) are elided in
 * this view of the file.
 */
tf_create_tbl_pool_external(enum tf_dir dir,
struct tf_tbl_scope_cb *tbl_scope_cb,
uint32_t entry_sz_bytes)
struct tfp_calloc_parms parms;
struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
/* One uint32_t slot per pool entry */
parms.nitems = num_entries;
parms.size = sizeof(uint32_t);
if (tfp_calloc(&parms) != 0) {
TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
tf_dir_2_str(dir), strerror(ENOMEM));
/* Build the free-index stack on the freshly allocated array */
rc = stack_init(num_entries, parms.mem_va, pool);
TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
tf_dir_2_str(dir), strerror(-rc));
/* Save the malloced memory address so that it can
 * be freed when the table scope is freed.
 */
tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
/* Fill pool with indexes in reverse
 * order: highest byte offset is pushed first so pops hand out the
 * lowest offsets first.
 */
j = (num_entries - 1) * entry_sz_bytes;
for (i = 0; i < num_entries; i++) {
rc = stack_push(pool, j);
TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
tf_dir_2_str(dir), strerror(-rc));
/* NOTE(review): "%d" prints dir numerically here, unlike the
 * "%s"/tf_dir_2_str() style used elsewhere — confirm intent.
 */
TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* Sanity check: every index must have been pushed */
if (!stack_is_full(pool)) {
TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
tf_dir_2_str(dir), strerror(-rc));
/* Presumably the error path: release the pool backing memory —
 * confirm against the full source.
 */
tfp_free((void *)parms.mem_va);
/*
 * Destroy External Tbl pool of memory indexes.
 *
 * [in] dir
 * direction of the pool to destroy
 * [in] tbl_scope_cb
 * pointer to the table scope
 */
tf_destroy_tbl_pool_external(enum tf_dir dir,
struct tf_tbl_scope_cb *tbl_scope_cb)
uint32_t *ext_act_pool_mem =
tbl_scope_cb->ext_act_pool_mem[dir];
/* Release the index-pool backing memory saved at create time */
tfp_free(ext_act_pool_mem);
/*
 * Looks up table scope control block using tbl_scope_id from tf_session.
 *
 * [in] tfp
 * Pointer to Truflow Handle
 * [in] tbl_scope_id
 * Table scope id to search for
 *
 * Returns:
 * - Pointer to the tf_tbl_scope_cb, if found.
 * - (NULL) on failure, not found.
 */
struct tf_tbl_scope_cb *
tf_em_ext_common_tbl_scope_find(struct tf *tfp,
uint32_t tbl_scope_id)
struct em_ext_db *ext_db;
void *ext_ptr = NULL;
struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
struct ll_entry *entry;
/* Fetch the EM external DB stored on the session */
rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
ext_db = (struct em_ext_db *)ext_ptr;
/* Walk the linked list of table scopes. The direct cast assumes the
 * ll_entry is the first member of tf_tbl_scope_cb — confirm in the
 * struct definition.
 */
for (entry = ext_db->tbl_scope_ll.head; entry != NULL;
entry = entry->next) {
tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
if (tbl_scope_cb->tbl_scope_id == tbl_scope_id)
/*
 * Allocate External Tbl entry from the scope pool.
 *
 * [in] tfp
 * Pointer to Truflow Handle
 * [in] parms
 * Allocation parameters
 *
 * Returns:
 * 0 - Success, entry allocated - no search support
 * -ENOMEM -EINVAL -EOPNOTSUPP
 * - Failure, entry not allocated, out of resources
 */
tf_tbl_ext_alloc(struct tf *tfp,
struct tf_tbl_alloc_parms *parms)
struct tf_tbl_scope_cb *tbl_scope_cb;
TF_CHECK_PARMS2(tfp, parms);
/* Resolve the table scope the caller is allocating from */
tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
"%s, table scope not allocated\n",
tf_dir_2_str(parms->dir));
pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Allocate an element
 * by popping the next free byte offset from the index stack.
 */
rc = stack_pop(pool, &index);
"%s, Allocation failed, type:%d\n",
tf_dir_2_str(parms->dir),
/*
 * Free External Tbl entry to the scope pool.
 *
 * [in] tfp
 * Pointer to Truflow Handle
 * [in] parms
 * Allocation parameters
 *
 * Returns:
 * 0 - Success, entry freed
 * - Failure, entry not successfully freed for these reasons
 */
tf_tbl_ext_free(struct tf *tfp,
struct tf_tbl_free_parms *parms)
struct tf_tbl_scope_cb *tbl_scope_cb;
TF_CHECK_PARMS2(tfp, parms);
/* Resolve the owning table scope */
tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
"%s, table scope error\n",
tf_dir_2_str(parms->dir));
pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Return the index to the pool; a full stack here would indicate a
 * double free / consistency error (see message below).
 */
rc = stack_push(pool, index);
"%s, consistency error, stack full, type:%d, idx:%d\n",
tf_dir_2_str(parms->dir),
/*
 * Compute the hash mask for an EM key table of num_entries slots.
 * num_entries - 1 is only a valid mask when num_entries is a power of
 * two; the checks below reject unsupported sizes (their rejection
 * return paths are elided in this view).
 */
tf_em_get_key_mask(int num_entries)
uint32_t mask = num_entries - 1;
if (num_entries & TF_EM_MAX_MASK)
if (num_entries > TF_EM_MAX_ENTRY)
/*
 * Build a 64B EM key entry from a result record header plus raw key.
 * Copies word1/pointer from the result into the entry header and
 * TF_P4_HW_EM_KEY_MAX_SIZE + 4 key bytes into the key field.
 */
tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
struct cfa_p4_eem_64b_entry *key_entry)
key_entry->hdr.word1 = result->word1;
key_entry->hdr.pointer = result->pointer;
memcpy(key_entry->key, in_key, TF_P4_HW_EM_KEY_MAX_SIZE + 4);
/*
 * Return the number of page table pages needed to
 * reference the given number of next level pages.
 *
 * [in] num_pages
 * Number of next-level pages to reference
 * [in] page_size
 * Size of each EM page
 *
 * Returns:
 * Number of EM page table pages
 */
tf_em_page_tbl_pgcnt(uint32_t num_pages,
/* Each page table page holds MAX_PAGE_PTRS(page_size) pointers;
 * round up so a partial page still counts as one.
 */
return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
MAX_PAGE_PTRS(page_size);
/*
 * Given the number of data pages, page_size and the maximum
 * number of page table levels (already determined), size
 * the number of page table pages required at each level.
 *
 * [in] max_lvl
 * Max number of levels
 * [in] num_data_pages
 * Number of EM data pages
 */
tf_em_size_page_tbls(int max_lvl,
uint64_t num_data_pages,
/* Single level: data pages are referenced directly */
if (max_lvl == TF_PT_LVL_0) {
page_cnt[TF_PT_LVL_0] = num_data_pages;
} else if (max_lvl == TF_PT_LVL_1) {
/* Two levels: L1 holds data pages, L0 references L1 */
page_cnt[TF_PT_LVL_1] = num_data_pages;
page_cnt[TF_PT_LVL_0] =
tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
} else if (max_lvl == TF_PT_LVL_2) {
/* Three levels: L2 holds data, L1 references L2, L0 references L1 */
page_cnt[TF_PT_LVL_2] = num_data_pages;
page_cnt[TF_PT_LVL_1] =
tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
page_cnt[TF_PT_LVL_0] =
tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
/*
 * Given the page size, size of each data item (entry size),
 * and the total number of entries needed, determine the number
 * of page table levels and the number of data pages required.
 *
 * [in] num_entries
 * Number of entries needed
 * [out] num_data_pages
 * Number of pages required
 *
 * Returns:
 * Success - Number of EM page levels required
 * -ENOMEM - Out of memory
 */
tf_em_size_page_tbl_lvl(uint32_t page_size,
uint32_t num_entries,
uint64_t *num_data_pages)
uint64_t lvl_data_size = page_size;
int lvl = TF_PT_LVL_0;
data_size = (uint64_t)num_entries * entry_size;
/* Grow the addressable size level by level until it covers the
 * requested data size. Each level multiplies capacity by the number
 * of pointers a page can hold.
 */
while (lvl_data_size < data_size) {
if (lvl == TF_PT_LVL_1)
lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
else if (lvl == TF_PT_LVL_2)
lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
MAX_PAGE_PTRS(page_size) * page_size;
/* Round the raw data size up to whole pages */
*num_data_pages = roundup(data_size, page_size) / page_size;
/*
 * Size the EM table based on capabilities
 *
 * Returns:
 * - EINVAL - Parameter error
 * - ENOMEM - Out of memory
 */
tf_em_size_table(struct hcapi_cfa_em_table *tbl,
uint64_t num_data_pages;
uint32_t num_entries;
uint32_t cnt = TF_EM_MIN_ENTRIES;
/* Ignore entry if both size and number are zero */
if (!tbl->entry_size && !tbl->num_entries)
/* If only one is set then error */
if (!tbl->entry_size || !tbl->num_entries)
/* Determine number of page table levels and the number
 * of data pages needed to process the given eem table.
 */
if (tbl->type == TF_RECORD_TABLE) {
/*
 * For action records just a memory size is provided. Work
 * backwards to resolve to number of entries
 */
num_entries = tbl->num_entries / tbl->entry_size;
if (num_entries < TF_EM_MIN_ENTRIES) {
num_entries = TF_EM_MIN_ENTRIES;
/* Round up to the next supported entry count */
while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
num_entries = tbl->num_entries;
max_lvl = tf_em_size_page_tbl_lvl(page_size,
TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
"table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* max_lvl is 0-based; store the count of levels */
tbl->num_lvl = max_lvl + 1;
tbl->num_data_pages = num_data_pages;
/* Determine the number of pages needed at each level */
page_cnt = tbl->page_cnt;
memset(page_cnt, 0, sizeof(tbl->page_cnt));
tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
"EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
" l0: %u l1: %u l2: %u\n",
(uint64_t)num_data_pages * page_size,
page_cnt[TF_PT_LVL_0],
page_cnt[TF_PT_LVL_1],
page_cnt[TF_PT_LVL_2]);
/*
 * Validates EM number of entries requested
 *
 * Derives a supported flow count from either an explicit memory size
 * (MB) or an explicit flow count, checks it against device caps, then
 * populates the per-direction em_ctx_info table sizes.
 *
 * [in/out] tbl_scope_cb
 * Pointer to table scope control block to be populated
 * [in] parms
 * Pointer to input parameters
 *
 * Returns:
 * -EINVAL - Parameter error
 */
tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
struct tf_alloc_tbl_scope_parms *parms)
/* Rx sizing: a memory budget in MB takes precedence */
if (parms->rx_mem_size_in_mb != 0) {
/* Both key tables store a copy of the key, hence the factor 2 */
uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
uint32_t num_entries = (parms->rx_mem_size_in_mb *
TF_MEGABYTE) / (key_b + action_b);
if (num_entries < TF_EM_MIN_ENTRIES) {
TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
parms->rx_mem_size_in_mb);
/* Round up to the next supported power-of-2 count */
cnt = TF_EM_MIN_ENTRIES;
while (num_entries > cnt &&
cnt <= TF_EM_MAX_ENTRIES)
if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): this is the Rx branch but the message says "Tx"
 * and logs tx_num_flows_in_k — looks like a copy/paste slip;
 * confirm against the full source before changing.
 */
TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
(parms->tx_num_flows_in_k * TF_KILOBYTE));
parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Explicit flow count path: range-check against device caps */
if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
(parms->rx_num_flows_in_k * TF_KILOBYTE) >
tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
"EEM: Invalid number of Rx flows "
"requested:%u max:%u\n",
parms->rx_num_flows_in_k * TF_KILOBYTE,
tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
/* must be a power-of-2 supported value
 * in the range 32K - 128M
 */
cnt = TF_EM_MIN_ENTRIES;
while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
cnt <= TF_EM_MAX_ENTRIES)
if (cnt > TF_EM_MAX_ENTRIES) {
"EEM: Invalid number of Rx requested: %u\n",
(parms->rx_num_flows_in_k * TF_KILOBYTE));
/* Tx sizing: mirrors the Rx logic above */
if (parms->tx_mem_size_in_mb != 0) {
uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
uint32_t num_entries = (parms->tx_mem_size_in_mb *
(TF_KILOBYTE * TF_KILOBYTE)) /
if (num_entries < TF_EM_MIN_ENTRIES) {
/* NOTE(review): Tx branch logs rx_mem_size_in_mb — suspected
 * copy/paste; confirm against the full source.
 */
"EEM: Insufficient memory requested:%uMB\n",
parms->rx_mem_size_in_mb);
cnt = TF_EM_MIN_ENTRIES;
while (num_entries > cnt &&
cnt <= TF_EM_MAX_ENTRIES)
if (cnt > TF_EM_MAX_ENTRIES) {
"EEM: Invalid number of Tx requested: %u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
(parms->tx_num_flows_in_k * TF_KILOBYTE) >
tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
"EEM: Invalid number of Tx flows "
"requested:%u max:%u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE),
tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
cnt = TF_EM_MIN_ENTRIES;
while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
cnt <= TF_EM_MAX_ENTRIES)
if (cnt > TF_EM_MAX_ENTRIES) {
"EEM: Invalid number of Tx requested: %u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count requires a non-zero key size */
if (parms->rx_num_flows_in_k != 0 &&
parms->rx_max_key_sz_in_bits / 8 == 0) {
"EEM: Rx key size required: %u\n",
(parms->rx_max_key_sz_in_bits));
if (parms->tx_num_flows_in_k != 0 &&
parms->tx_max_key_sz_in_bits / 8 == 0) {
"EEM: Tx key size required: %u\n",
(parms->tx_max_key_sz_in_bits));
/* Populate Rx EM context table sizes */
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
parms->rx_max_key_sz_in_bits / 8;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
parms->rx_max_key_sz_in_bits / 8;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
parms->rx_max_action_entry_sz_in_bits / 8;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].entry_size =
parms->rx_max_action_entry_sz_in_bits / 8;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].entry_size =
parms->rx_max_key_sz_in_bits / 8;
/* Populate Tx EM context table sizes */
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
parms->tx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
parms->tx_max_key_sz_in_bits / 8;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
parms->tx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
parms->tx_max_key_sz_in_bits / 8;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
parms->tx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
parms->tx_max_action_entry_sz_in_bits / 8;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
/* NOTE(review): TX ACTION_TABLE sized from rx_num_flows_in_k — in a
 * Tx block this looks like a copy/paste slip (tx expected); verify
 * against the full source before changing.
 */
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].entry_size =
parms->tx_max_action_entry_sz_in_bits / 8;
/* NOTE(review): same suspected rx/tx slip for TX EM_LKUP_TABLE */
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].num_entries =
parms->rx_num_flows_in_k * TF_KILOBYTE;
tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].entry_size =
parms->tx_max_key_sz_in_bits / 8;
/** insert EEM entry API
 *
 * Hashes the key, derives key0/key1 table indexes, builds the 64B key
 * entry, and attempts the HW add to the Key0 table first, falling back
 * to the Key1 table. On success the flow id/handle are populated.
 *
 * Returns:
 * TF_ERR - unable to get lock
 * insert callback returns:
 * TF_ERR_EM_DUP - key is already in table
 */
tf_insert_eem_entry(struct tf_dev_info *dev,
struct tf_tbl_scope_cb *tbl_scope_cb,
struct tf_insert_em_entry_parms *parms)
struct cfa_p4_eem_64b_entry key_entry;
enum hcapi_cfa_em_table_type table_type;
struct hcapi_cfa_hwop op;
struct hcapi_cfa_key_tbl key_tbl;
struct hcapi_cfa_key_data key_obj;
struct hcapi_cfa_key_loc key_loc;
/* Get mask to use on hash */
mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
dump_raw((uint8_t *)parms->key, TF_P4_HW_EM_KEY_MAX_SIZE + 4, "In Key");
/* Device must provide a key-hash op */
if (dev->ops->tf_dev_cfa_key_hash == NULL)
/* 64-bit hash: upper 32 bits index Key0, lower 32 bits index Key1 */
big_hash = dev->ops->tf_dev_cfa_key_hash((uint64_t *)parms->key,
(TF_P4_HW_EM_KEY_MAX_SIZE + 4) * 8);
key0_hash = (uint32_t)(big_hash >> 32);
key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
key0_index = key0_hash & mask;
key1_index = key1_hash & mask;
TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
/*
 * Use the "result" arg to populate all of the key entry then
 * store the byte swapped "raw" entry in a local copy ready
 * for insertion in to the table.
 */
tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
((uint8_t *)parms->key),
/*
 * Try to add to Key0 table, if that does not work then
 * try the key1 table.
 */
op.opcode = HCAPI_CFA_HWOPS_ADD;
(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
key_tbl.page_size = TF_EM_PAGE_SIZE;
key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
key_obj.data = (uint8_t *)&key_entry;
key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
table_type = TF_KEY0_TABLE;
/* Key0 add failed (presumably): retry against the Key1 table */
(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
table_type = TF_KEY1_TABLE;
/* Encode the resulting location into the caller's flow id/handle */
TF_SET_FLOW_ID(parms->flow_id,
TF_GFID_TABLE_EXTERNAL,
TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
/** delete EEM hash entry API
 *
 * Decodes the hash table (key0/key1) and index from the flow handle
 * and issues a HW delete op against that key record.
 *
 * Returns:
 * -EINVAL - parameter error
 * TF_NO_SESSION - bad session ID
 * TF_ERR_TBL_SCOPE - invalid table scope
 * TF_ERR_TBL_IF - invalid table interface
 * insert callback returns
 * TF_NO_EM_MATCH - entry not found
 */
tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
struct tf_delete_em_entry_parms *parms)
enum hcapi_cfa_em_table_type hash_type;
struct hcapi_cfa_hwop op;
struct hcapi_cfa_key_tbl key_tbl;
struct hcapi_cfa_key_data key_obj;
struct hcapi_cfa_key_loc key_loc;
/* Recover which key table and which slot the entry lives in */
TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
op.opcode = HCAPI_CFA_HWOPS_DEL;
/* hash_type 0 selects Key0, otherwise Key1 */
(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
[(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
key_tbl.page_size = TF_EM_PAGE_SIZE;
key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
/** insert EM hash entry API
 *
 * Session/device-aware wrapper: resolves the table scope, session and
 * device, then delegates to tf_insert_eem_entry().
 */
tf_em_insert_ext_entry(struct tf *tfp,
struct tf_insert_em_entry_parms *parms)
struct tf_tbl_scope_cb *tbl_scope_cb;
struct tf_session *tfs;
struct tf_dev_info *dev;
tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
/* Retrieve the device information */
rc = tf_session_get_device(tfs, &dev);
return tf_insert_eem_entry
/** Delete EM hash entry API
 *
 * Thin wrapper: resolves the table scope by id and delegates to
 * tf_delete_eem_entry().
 */
tf_em_delete_ext_entry(struct tf *tfp,
struct tf_delete_em_entry_parms *parms)
struct tf_tbl_scope_cb *tbl_scope_cb;
tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
return tf_delete_eem_entry(tbl_scope_cb, parms);
/*
 * Bind the EM external DB to the session: allocates the em_ext_db,
 * initializes the table-scope list, and creates a per-direction RM DB
 * when table-scope resources were requested. Records mem_type last.
 */
tf_em_ext_common_bind(struct tf *tfp,
struct tf_em_cfg_parms *parms)
struct tf_rm_create_db_parms db_cfg = { 0 };
uint8_t db_exists = 0;
struct em_ext_db *ext_db;
struct tfp_calloc_parms cparms;
TF_CHECK_PARMS2(tfp, parms);
/* Double-bind guard (condition line elided in this view) */
"EM Ext DB already initialized\n");
/* Allocate and initialize the external DB container */
cparms.size = sizeof(struct em_ext_db);
cparms.alignment = 0;
if (tfp_calloc(&cparms) != 0) {
TFP_DRV_LOG(ERR, "em_ext_db alloc error %s\n",
ext_db = cparms.mem_va;
ll_init(&ext_db->tbl_scope_ll);
for (i = 0; i < TF_DIR_MAX; i++)
ext_db->eem_db[i] = NULL;
tf_session_set_em_ext_db(tfp, ext_db);
db_cfg.module = TF_MODULE_TYPE_EM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
for (i = 0; i < TF_DIR_MAX; i++) {
db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
/* Check if we got any request to support EEM, if so
 * we build an EM Ext DB holding Table Scopes.
 */
if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
db_cfg.rm_db = (void *)&ext_db->eem_db[i];
rc = tf_rm_create_db(tfp, &db_cfg);
"%s: EM Ext DB creation failed\n",
mem_type = parms->mem_type;
/*
 * Unbind the EM external DB from the session: frees every remaining
 * table scope via the device op, releases the per-direction RM DBs,
 * and clears the session's ext DB pointer.
 */
tf_em_ext_common_unbind(struct tf *tfp)
struct tf_rm_free_db_parms fparms = { 0 };
struct em_ext_db *ext_db = NULL;
struct tf_session *tfs = NULL;
struct tf_dev_info *dev;
struct ll_entry *entry;
struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
void *ext_ptr = NULL;
struct tf_free_tbl_scope_parms tparms = { 0 };
TF_CHECK_PARMS1(tfp);
/* Bail if nothing has been initialized */
"No EM Ext DBs created\n");
rc = tf_session_get_session_internal(tfp, &tfs);
TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
/* Retrieve the device information */
rc = tf_session_get_device(tfs, &dev);
"Failed to lookup device, rc:%s\n",
rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
"Failed to get em_ext_db from session, rc:%s\n",
ext_db = (struct em_ext_db *)ext_ptr;
/* Free all remaining table scopes; advance 'entry' before the free
 * path may unlink the current node.
 */
entry = ext_db->tbl_scope_ll.head;
while (entry != NULL) {
tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
entry = entry->next;
tparms.tbl_scope_id = tbl_scope_cb->tbl_scope_id;
if (dev->ops->tf_dev_free_tbl_scope) {
dev->ops->tf_dev_free_tbl_scope(tfp, &tparms);
/* should not reach here */
ll_delete(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
tfp_free(tbl_scope_cb);
/* Free the per-direction RM DBs that were created on bind */
for (i = 0; i < TF_DIR_MAX; i++) {
if (ext_db->eem_db[i] == NULL)
fparms.rm_db = ext_db->eem_db[i];
rc = tf_rm_free_db(tfp, &fparms);
ext_db->eem_db[i] = NULL;
tf_session_set_em_ext_db(tfp, NULL);
/*
 * Sets the specified external table type element.
 *
 * This API sets the specified element data
 *
 * [in] tfp
 * Pointer to TF handle
 * [in] parms
 * Pointer to table set parameters
 *
 * Returns
 * - (0) if successful.
 * - (-EINVAL) on failure.
 */
int tf_tbl_ext_common_set(struct tf *tfp,
struct tf_tbl_set_parms *parms)
struct tf_tbl_scope_cb *tbl_scope_cb;
uint32_t tbl_scope_id;
struct hcapi_cfa_hwop op;
struct hcapi_cfa_key_tbl key_tbl;
struct hcapi_cfa_key_data key_obj;
struct hcapi_cfa_key_loc key_loc;
TF_CHECK_PARMS2(tfp, parms);
if (parms->data == NULL) {
"%s, invalid parms->data\n",
tf_dir_2_str(parms->dir));
tbl_scope_id = parms->tbl_scope_id;
if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
"%s, Table scope not allocated\n",
tf_dir_2_str(parms->dir));
tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, tbl_scope_id);
if (tbl_scope_cb == NULL) {
"%s, table scope error\n",
tf_dir_2_str(parms->dir));
/* Write the caller's data at the given offset of the record table */
op.opcode = HCAPI_CFA_HWOPS_PUT;
(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
key_tbl.page_size = TF_EM_PAGE_SIZE;
key_obj.offset = parms->idx;
key_obj.data = parms->data;
key_obj.size = parms->data_sz_in_bytes;
rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
/* Allocate a table scope: delegates to the device-specific
 * tf_em_ext_alloc() implementation.
 */
tf_em_ext_common_alloc(struct tf *tfp,
struct tf_alloc_tbl_scope_parms *parms)
return tf_em_ext_alloc(tfp, parms);
/* Free a table scope: delegates to the device-specific
 * tf_em_ext_free() implementation.
 */
tf_em_ext_common_free(struct tf *tfp,
struct tf_free_tbl_scope_parms *parms)
return tf_em_ext_free(tfp, parms);
1154 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1155 struct tf_map_tbl_scope_parms *parms)
1158 struct tf_session *tfs;
1159 struct tf_tbl_scope_cb *tbl_scope_cb;
1160 struct tf_global_cfg_parms gcfg_parms = { 0 };
1161 struct tfp_calloc_parms aparms;
1162 uint32_t *data, *mask;
1163 uint32_t sz_in_bytes = 8;
1164 struct tf_dev_info *dev;
1166 /* Retrieve the session information */
1167 rc = tf_session_get_session_internal(tfp, &tfs);
1171 /* Retrieve the device information */
1172 rc = tf_session_get_device(tfs, &dev);
1176 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
1177 if (tbl_scope_cb == NULL) {
1178 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1179 parms->tbl_scope_id);
1183 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1186 "Map table scope operation not supported, rc:%s\n",
1192 aparms.size = sizeof(uint32_t);
1193 aparms.alignment = 0;
1195 if (tfp_calloc(&aparms) != 0) {
1196 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1200 data = aparms.mem_va;
1202 if (tfp_calloc(&aparms) != 0) {
1203 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1208 mask = aparms.mem_va;
1210 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1212 (uint8_t *)data, (uint8_t *)mask,
1217 "Map table scope config failure, rc:%s\n",
1222 /* Note that TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF is same as below enum */
1223 gcfg_parms.type = TF_GLOBAL_CFG_TYPE_MAX;
1224 gcfg_parms.offset = 0;
1225 gcfg_parms.config = (uint8_t *)data;
1226 gcfg_parms.config_mask = (uint8_t *)mask;
1227 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1230 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1233 "Map tbl scope, set failed, rc:%s\n",