1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "hcapi_cfa.h"
26 /* Number of pointers per page_size */
27 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
/* EEM backing memory type for this session; recorded from the bind
 * parameters in tf_em_ext_common_bind().
 */
32 static enum tf_mem_type mem_type;
34 /* API defined in tf_em.h */
/*
 * tf_create_tbl_pool_external - build the per-direction pool of free
 * external action-record offsets for a table scope.
 *
 * Allocates a uint32_t array (tfp_calloc), initializes a stack over it,
 * and pushes the byte offset of each entry in reverse order so the lowest
 * offset is popped first.  The backing memory is saved in
 * ext_act_pool_mem[dir] so it can be freed with the table scope.
 *
 * NOTE(review): this excerpt is missing lines (error returns, closing
 * braces, the decrement of 'j'); comments cover only the visible code.
 */
36 tf_create_tbl_pool_external(enum tf_dir dir,
37 struct tf_tbl_scope_cb *tbl_scope_cb,
39 uint32_t entry_sz_bytes)
41 struct tfp_calloc_parms parms;
/* Per-direction stack of free external action-record offsets. */
45 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
/* Backing store: one uint32_t slot per pool entry. */
47 parms.nitems = num_entries;
48 parms.size = sizeof(uint32_t);
51 if (tfp_calloc(&parms) != 0) {
52 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
53 tf_dir_2_str(dir), strerror(ENOMEM));
/* Stack bookkeeping lives directly in the freshly allocated array. */
59 rc = stack_init(num_entries, parms.mem_va, pool);
62 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
63 tf_dir_2_str(dir), strerror(-rc));
67 /* Save the malloced memory address so that it can
68 * be freed when the table scope is freed.
70 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
72 /* Fill pool with indexes in reverse
/* Start at the offset of the last entry; presumably decremented by
 * entry_sz_bytes each iteration (decrement not visible here -- confirm).
 */
74 j = (num_entries - 1) * entry_sz_bytes;
76 for (i = 0; i < num_entries; i++) {
77 rc = stack_push(pool, j);
79 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
80 tf_dir_2_str(dir), strerror(-rc));
/* NOTE(review): "%d" where sibling logs pass tf_dir_2_str() (a string);
 * the argument list is not visible here -- confirm the format specifier
 * against the full file.
 */
85 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* A correctly pre-filled pool must be full; anything else is an error. */
92 if (!stack_is_full(pool)) {
94 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
95 tf_dir_2_str(dir), strerror(-rc));
/* Failure path: release the index array. */
100 tfp_free((void *)parms.mem_va);
105 * Destroy External Tbl pool of memory indexes.
110 * pointer to the table scope
/*
 * tf_destroy_tbl_pool_external - free the index array backing the
 * external action pool for @dir (allocated by
 * tf_create_tbl_pool_external()).
 */
113 tf_destroy_tbl_pool_external(enum tf_dir dir,
114 struct tf_tbl_scope_cb *tbl_scope_cb)
116 uint32_t *ext_act_pool_mem =
117 tbl_scope_cb->ext_act_pool_mem[dir];
119 tfp_free(ext_act_pool_mem);
123 * Looks up table scope control block using tbl_scope_id from tf_session.
126 * Pointer to Truflow Handle
131 * - Pointer to the tf_tbl_scope_cb, if found.
132 * - (NULL) on failure, not found.
/*
 * tf_em_ext_common_tbl_scope_find - look up a table-scope control block
 * by id in the session's EM-ext linked list.  Returns the matching
 * control block, or NULL when the session lookup fails or nothing
 * matches (return paths not fully visible in this excerpt).
 */
134 struct tf_tbl_scope_cb *
135 tf_em_ext_common_tbl_scope_find(struct tf *tfp,
136 uint32_t tbl_scope_id)
139 struct em_ext_db *ext_db;
140 void *ext_ptr = NULL;
141 struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
142 struct ll_entry *entry;
/* The EM-ext database is stored as an opaque pointer in the session. */
144 rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
148 ext_db = (struct em_ext_db *)ext_ptr;
/* Cast relies on the ll_entry being the first member of
 * tf_tbl_scope_cb -- TODO confirm the struct layout in the header.
 */
150 for (entry = ext_db->tbl_scope_ll.head; entry != NULL;
151 entry = entry->next) {
152 tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
153 if (tbl_scope_cb->tbl_scope_id == tbl_scope_id)
161 * Allocate External Tbl entry from the scope pool.
164 * Pointer to Truflow Handle
166 * Allocation parameters
169 * 0 - Success, entry allocated - no search support
170 * -ENOMEM -EINVAL -EOPNOTSUPP
171 * - Failure, entry not allocated, out of resources
/*
 * tf_tbl_ext_alloc - allocate one external table entry by popping a free
 * offset from the per-direction scope pool.  Visible flow: resolve the
 * table scope, pop from its stack; the index hand-back and error returns
 * are not visible in this excerpt.
 */
174 tf_tbl_ext_alloc(struct tf *tfp,
175 struct tf_tbl_alloc_parms *parms)
179 struct tf_tbl_scope_cb *tbl_scope_cb;
182 TF_CHECK_PARMS2(tfp, parms);
/* Resolve the table scope this allocation belongs to. */
184 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
185 if (tbl_scope_cb == NULL) {
187 "%s, table scope not allocated\n",
188 tf_dir_2_str(parms->dir));
192 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
194 /* Allocate an element
/* Empty stack means the scope pool is exhausted. */
196 rc = stack_pop(pool, &index);
200 "%s, Allocation failed, type:%d\n",
201 tf_dir_2_str(parms->dir),
211 * Free External Tbl entry to the scope pool.
214 * Pointer to Truflow Handle
216 * Allocation parameters
219 * 0 - Success, entry freed
221 * - Failure, entry not successfully freed for these reasons
/*
 * tf_tbl_ext_free - return an external table entry to the per-direction
 * scope pool by pushing its offset back onto the stack.  A full stack at
 * push time indicates a double free / accounting bug, reported as a
 * consistency error.
 */
227 tf_tbl_ext_free(struct tf *tfp,
228 struct tf_tbl_free_parms *parms)
232 struct tf_tbl_scope_cb *tbl_scope_cb;
235 TF_CHECK_PARMS2(tfp, parms);
237 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
238 if (tbl_scope_cb == NULL) {
240 "%s, table scope error\n",
241 tf_dir_2_str(parms->dir));
244 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Give the offset back to the free pool. */
248 rc = stack_push(pool, index);
252 "%s, consistency error, stack full, type:%d, idx:%d\n",
253 tf_dir_2_str(parms->dir),
/*
 * tf_em_get_key_mask - derive the hash-bucket mask for a power-of-2
 * sized EM key table (num_entries - 1).  The visible guards reject
 * counts with bits outside TF_EM_MAX_MASK and counts above
 * TF_EM_MAX_ENTRY; the values returned on those paths are not visible
 * in this excerpt.
 */
261 tf_em_get_key_mask(int num_entries)
263 uint32_t mask = num_entries - 1;
265 if (num_entries & TF_EM_MAX_MASK)
268 if (num_entries > TF_EM_MAX_ENTRY)
/*
 * tf_em_create_key_entry - assemble a 64B EM key entry: copy the result
 * header (word1 + record pointer) then the lookup key bytes
 * (TF_P4_HW_EM_KEY_MAX_SIZE + 4) into the caller-provided entry.
 */
275 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
277 struct cfa_p4_eem_64b_entry *key_entry)
279 key_entry->hdr.word1 = result->word1;
280 key_entry->hdr.pointer = result->pointer;
281 memcpy(key_entry->key, in_key, TF_P4_HW_EM_KEY_MAX_SIZE + 4);
286 * Return the number of page table pages needed to
287 * reference the given number of next level pages.
293 * Size of each EM page
296 * Number of EM page table pages
/*
 * tf_em_page_tbl_pgcnt - pages needed at one page-table level to
 * reference num_pages next-level pages: ceil(num_pages / ptrs-per-page).
 */
299 tf_em_page_tbl_pgcnt(uint32_t num_pages,
302 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
303 MAX_PAGE_PTRS(page_size);
307 * Given the number of data pages, page_size and the maximum
308 * number of page table levels (already determined), size
309 * the number of page table pages required at each level.
312 * Max number of levels
314 * [in] num_data_pages
315 * Number of EM data pages
/*
 * tf_em_size_page_tbls - fill page_cnt[] with the number of pages needed
 * at each page-table level, working down from the deepest level (which
 * holds the data pages) to level 0.
 */
324 tf_em_size_page_tbls(int max_lvl,
325 uint64_t num_data_pages,
/* One level: data pages are referenced directly. */
329 if (max_lvl == TF_PT_LVL_0) {
330 page_cnt[TF_PT_LVL_0] = num_data_pages;
/* Two levels: L1 holds the data, L0 points at L1. */
331 } else if (max_lvl == TF_PT_LVL_1) {
332 page_cnt[TF_PT_LVL_1] = num_data_pages;
333 page_cnt[TF_PT_LVL_0] =
334 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
/* Three levels: L2 holds the data, L1 points at L2, L0 at L1. */
335 } else if (max_lvl == TF_PT_LVL_2) {
336 page_cnt[TF_PT_LVL_2] = num_data_pages;
337 page_cnt[TF_PT_LVL_1] =
338 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
339 page_cnt[TF_PT_LVL_0] =
340 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
347 * Given the page size, size of each data item (entry size),
348 * and the total number of entries needed, determine the number
349 * of page table levels and the number of data pages required.
358 * Number of entries needed
360 * [out] num_data_pages
361 * Number of pages required
364 * Success - Number of EM page levels required
365 * -ENOMEM - Out of memory
/*
 * tf_em_size_page_tbl_lvl - determine how many page-table levels are
 * needed to map num_entries * entry_size bytes, and output the number of
 * data pages.  Each additional level multiplies the addressable size by
 * MAX_PAGE_PTRS(page_size).
 */
368 tf_em_size_page_tbl_lvl(uint32_t page_size,
370 uint32_t num_entries,
371 uint64_t *num_data_pages)
/* Level 0 alone addresses exactly one page worth of data. */
373 uint64_t lvl_data_size = page_size;
374 int lvl = TF_PT_LVL_0;
378 data_size = (uint64_t)num_entries * entry_size;
/* Grow the addressable size level by level until data_size fits
 * (the increment of 'lvl' is not visible in this excerpt).
 */
380 while (lvl_data_size < data_size) {
383 if (lvl == TF_PT_LVL_1)
384 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
386 else if (lvl == TF_PT_LVL_2)
387 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
388 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages = ceil(data_size / page_size). */
393 *num_data_pages = roundup(data_size, page_size) / page_size;
399 * Size the EM table based on capabilities
406 * - EINVAL - Parameter error
407 * - ENOMEM - Out of memory
/*
 * tf_em_size_table - size one EM table: resolve the entry count, compute
 * the page-table depth and per-level page counts, and store the results
 * in tbl->num_lvl, tbl->num_data_pages and tbl->page_cnt[].
 */
410 tf_em_size_table(struct hcapi_cfa_em_table *tbl,
413 uint64_t num_data_pages;
416 uint32_t num_entries;
417 uint32_t cnt = TF_EM_MIN_ENTRIES;
419 /* Ignore entry if both size and number are zero */
420 if (!tbl->entry_size && !tbl->num_entries)
423 /* If only one is set then error */
424 if (!tbl->entry_size || !tbl->num_entries)
427 /* Determine number of page table levels and the number
428 * of data pages needed to process the given eem table.
430 if (tbl->type == TF_RECORD_TABLE) {
432 * For action records just a memory size is provided. Work
433 * backwards to resolve to number of entries
435 num_entries = tbl->num_entries / tbl->entry_size;
436 if (num_entries < TF_EM_MIN_ENTRIES) {
437 num_entries = TF_EM_MIN_ENTRIES;
/* Round the count up to a supported power of two (the doubling of
 * 'cnt' is not visible in this excerpt).
 */
439 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
444 num_entries = tbl->num_entries;
447 max_lvl = tf_em_size_page_tbl_lvl(page_size,
452 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
454 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
455 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* max_lvl is a 0-based deepest level; store the level count. */
460 tbl->num_lvl = max_lvl + 1;
461 tbl->num_data_pages = num_data_pages;
463 /* Determine the number of pages needed at each level */
464 page_cnt = tbl->page_cnt;
465 memset(page_cnt, 0, sizeof(tbl->page_cnt));
466 tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
469 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
471 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
472 " l0: %u l1: %u l2: %u\n",
474 (uint64_t)num_data_pages * page_size,
476 page_cnt[TF_PT_LVL_0],
477 page_cnt[TF_PT_LVL_1],
478 page_cnt[TF_PT_LVL_2]);
484 * Validates EM number of entries requested
487 * Pointer to table scope control block to be populated
490 * Pointer to input parameters
494 * -EINVAL - Parameter error
/*
 * tf_em_validate_num_entries - validate and normalize the requested Rx
 * and Tx EEM sizing.  Each direction may be specified either as a memory
 * budget in MB (converted to an entry count from worst-case key+action
 * bytes per flow) or directly as a flow count in K; the count is rounded
 * to a supported power of two and checked against device caps, then the
 * per-direction em_tables[] num_entries/entry_size fields are populated.
 *
 * NOTE(review): this excerpt is missing lines (error returns, braces,
 * the 'cnt *= 2' doublings); several copy/paste defects are flagged
 * inline below.
 */
497 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
498 struct tf_alloc_tbl_scope_parms *parms)
/* ---- Rx sizing ---- */
502 if (parms->rx_mem_size_in_mb != 0) {
/* Worst-case bytes per flow: two key copies plus one action record. */
503 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
504 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
506 uint32_t num_entries = (parms->rx_mem_size_in_mb *
507 TF_MEGABYTE) / (key_b + action_b);
509 if (num_entries < TF_EM_MIN_ENTRIES) {
510 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
512 parms->rx_mem_size_in_mb);
/* Round the derived count up to the next supported power of two. */
516 cnt = TF_EM_MIN_ENTRIES;
517 while (num_entries > cnt &&
518 cnt <= TF_EM_MAX_ENTRIES)
521 if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): this is the Rx branch, but the message says "Tx" and
 * the logged value is tx_num_flows_in_k -- copy/paste defect; it should
 * report the Rx request.
 */
522 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
524 (parms->tx_num_flows_in_k * TF_KILOBYTE));
528 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Flow count supplied directly: must fall within device capabilities. */
530 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
532 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
533 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
535 "EEM: Invalid number of Rx flows "
536 "requested:%u max:%u\n",
537 parms->rx_num_flows_in_k * TF_KILOBYTE,
538 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
542 /* must be a power-of-2 supported value
543 * in the range 32K - 128M
545 cnt = TF_EM_MIN_ENTRIES;
546 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
547 cnt <= TF_EM_MAX_ENTRIES)
550 if (cnt > TF_EM_MAX_ENTRIES) {
552 "EEM: Invalid number of Rx requested: %u\n",
553 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* ---- Tx sizing (mirrors the Rx logic above) ---- */
558 if (parms->tx_mem_size_in_mb != 0) {
559 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
560 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
/* NOTE(review): Tx uses (TF_KILOBYTE * TF_KILOBYTE) where Rx uses
 * TF_MEGABYTE -- presumably the same value; confirm and unify.
 */
562 uint32_t num_entries = (parms->tx_mem_size_in_mb *
563 (TF_KILOBYTE * TF_KILOBYTE)) /
566 if (num_entries < TF_EM_MIN_ENTRIES) {
568 "EEM: Insufficient memory requested:%uMB\n",
/* NOTE(review): Tx branch logs rx_mem_size_in_mb -- should be
 * tx_mem_size_in_mb (copy/paste defect).
 */
569 parms->rx_mem_size_in_mb);
573 cnt = TF_EM_MIN_ENTRIES;
574 while (num_entries > cnt &&
575 cnt <= TF_EM_MAX_ENTRIES)
578 if (cnt > TF_EM_MAX_ENTRIES) {
580 "EEM: Invalid number of Tx requested: %u\n",
581 (parms->tx_num_flows_in_k * TF_KILOBYTE));
585 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
587 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
589 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
590 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
592 "EEM: Invalid number of Tx flows "
593 "requested:%u max:%u\n",
594 (parms->tx_num_flows_in_k * TF_KILOBYTE),
595 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
599 cnt = TF_EM_MIN_ENTRIES;
600 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
601 cnt <= TF_EM_MAX_ENTRIES)
604 if (cnt > TF_EM_MAX_ENTRIES) {
606 "EEM: Invalid number of Tx requested: %u\n",
607 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A direction that carries flows must also specify a key size. */
612 if (parms->rx_num_flows_in_k != 0 &&
613 parms->rx_max_key_sz_in_bits / 8 == 0) {
615 "EEM: Rx key size required: %u\n",
616 (parms->rx_max_key_sz_in_bits));
620 if (parms->tx_num_flows_in_k != 0 &&
621 parms->tx_max_key_sz_in_bits / 8 == 0) {
623 "EEM: Tx key size required: %u\n",
624 (parms->tx_max_key_sz_in_bits));
/* ---- Populate Rx table sizing ---- */
628 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
629 parms->rx_num_flows_in_k * TF_KILOBYTE;
630 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
631 parms->rx_max_key_sz_in_bits / 8;
633 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
634 parms->rx_num_flows_in_k * TF_KILOBYTE;
635 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
636 parms->rx_max_key_sz_in_bits / 8;
638 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
639 parms->rx_num_flows_in_k * TF_KILOBYTE;
640 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
641 parms->rx_max_action_entry_sz_in_bits / 8;
643 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
646 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].num_entries =
647 parms->rx_num_flows_in_k * TF_KILOBYTE;
648 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].entry_size =
649 parms->rx_max_action_entry_sz_in_bits / 8;
651 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].num_entries =
652 parms->rx_num_flows_in_k * TF_KILOBYTE;
653 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].entry_size =
654 parms->rx_max_key_sz_in_bits / 8;
/* ---- Populate Tx table sizing ---- */
657 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
658 parms->tx_num_flows_in_k * TF_KILOBYTE;
659 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
660 parms->tx_max_key_sz_in_bits / 8;
662 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
663 parms->tx_num_flows_in_k * TF_KILOBYTE;
664 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
665 parms->tx_max_key_sz_in_bits / 8;
667 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
668 parms->tx_num_flows_in_k * TF_KILOBYTE;
669 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
670 parms->tx_max_action_entry_sz_in_bits / 8;
672 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
/* NOTE(review): TX ACTION_TABLE entry count uses rx_num_flows_in_k --
 * almost certainly should be tx_num_flows_in_k (copy/paste from the Rx
 * block, all other TX tables use the tx count); confirm before relying
 * on Tx sizing.
 */
675 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].num_entries =
676 parms->rx_num_flows_in_k * TF_KILOBYTE;
677 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].entry_size =
678 parms->tx_max_action_entry_sz_in_bits / 8;
/* NOTE(review): same rx/tx copy-paste for the TX EM_LKUP_TABLE count. */
680 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].num_entries =
681 parms->rx_num_flows_in_k * TF_KILOBYTE;
682 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].entry_size =
683 parms->tx_max_key_sz_in_bits / 8;
688 /** insert EEM entry API
692 * TF_ERR - unable to get lock
694 * insert callback returns:
696 * TF_ERR_EM_DUP - key is already in table
/*
 * tf_insert_eem_entry - insert a key + record into external EM.  Hashes
 * the key into two candidate bucket indices (key0/key1), builds the 64B
 * key entry, then attempts a HW ADD into the KEY0 table and falls back
 * to KEY1 on failure; finally encodes the flow id/handle (macro argument
 * lists only partially visible in this excerpt).
 */
699 tf_insert_eem_entry(struct tf_dev_info *dev,
700 struct tf_tbl_scope_cb *tbl_scope_cb,
701 struct tf_insert_em_entry_parms *parms)
708 struct cfa_p4_eem_64b_entry key_entry;
710 enum hcapi_cfa_em_table_type table_type;
712 struct hcapi_cfa_hwop op;
713 struct hcapi_cfa_key_tbl key_tbl;
714 struct hcapi_cfa_key_data key_obj;
715 struct hcapi_cfa_key_loc key_loc;
719 /* Get mask to use on hash */
720 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
726 dump_raw((uint8_t *)parms->key, TF_P4_HW_EM_KEY_MAX_SIZE + 4, "In Key");
/* The device must provide a CFA key-hash operation. */
729 if (dev->ops->tf_dev_cfa_key_hash == NULL)
/* 64-bit hash over the key bits: the high 32 bits index the KEY0 table,
 * the low 32 bits index the KEY1 table.
 */
732 big_hash = dev->ops->tf_dev_cfa_key_hash((uint64_t *)parms->key,
733 (TF_P4_HW_EM_KEY_MAX_SIZE + 4) * 8)
734 key0_hash = (uint32_t)(big_hash >> 32);
735 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
737 key0_index = key0_hash & mask;
738 key1_index = key1_hash & mask;
741 TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
742 TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
745 * Use the "result" arg to populate all of the key entry then
746 * store the byte swapped "raw" entry in a local copy ready
747 * for insertion in to the table.
749 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
750 ((uint8_t *)parms->key),
754 * Try to add to Key0 table, if that does not work then
755 * try the key1 table.
758 op.opcode = HCAPI_CFA_HWOPS_ADD;
760 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
761 key_tbl.page_size = TF_EM_PAGE_SIZE;
/* 'index' presumably holds key0_index here (assignment not visible in
 * this excerpt -- confirm against the full file).
 */
762 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
763 key_obj.data = (uint8_t *)&key_entry;
764 key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
766 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
772 table_type = TF_KEY0_TABLE;
/* KEY0 insert failed: retry the same entry in the KEY1 table. */
777 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
778 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
780 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
787 table_type = TF_KEY1_TABLE;
/* Record which table took the entry in the caller-visible flow id and
 * flow handle.
 */
793 TF_SET_FLOW_ID(parms->flow_id,
795 TF_GFID_TABLE_EXTERNAL,
797 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
808 /** delete EEM hash entry API
812 * -EINVAL - parameter error
813 * TF_NO_SESSION - bad session ID
814 * TF_ERR_TBL_SCOPE - invalid table scope
815 * TF_ERR_TBL_IF - invalid table interface
817 * insert callback returns
819 * TF_NO_EM_MATCH - entry not found
/*
 * tf_delete_eem_entry - delete an external EM entry via a HW DEL op.
 * The flow handle encodes which hash table (KEY0/KEY1) holds the entry
 * and its index; the op targets the corresponding byte offset.
 */
822 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
823 struct tf_delete_em_entry_parms *parms)
825 enum hcapi_cfa_em_table_type hash_type;
827 struct hcapi_cfa_hwop op;
828 struct hcapi_cfa_key_tbl key_tbl;
829 struct hcapi_cfa_key_data key_obj;
830 struct hcapi_cfa_key_loc key_loc;
/* Decode table selector and record index from the opaque flow handle. */
833 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
834 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
836 op.opcode = HCAPI_CFA_HWOPS_DEL;
/* hash_type 0 -> KEY0 table, anything else -> KEY1 table. */
838 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
839 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
840 key_tbl.page_size = TF_EM_PAGE_SIZE;
841 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
843 key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
845 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
856 /** insert EM hash entry API
/*
 * tf_em_insert_ext_entry - session-level wrapper: resolve the table
 * scope, session and device for tfp, then delegate the insert to
 * tf_insert_eem_entry().
 */
863 tf_em_insert_ext_entry(struct tf *tfp,
864 struct tf_insert_em_entry_parms *parms)
867 struct tf_tbl_scope_cb *tbl_scope_cb;
868 struct tf_session *tfs;
869 struct tf_dev_info *dev;
871 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
872 if (tbl_scope_cb == NULL) {
873 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
877 /* Retrieve the session information */
878 rc = tf_session_get_session_internal(tfp, &tfs);
882 /* Retrieve the device information */
883 rc = tf_session_get_device(tfs, &dev);
887 return tf_insert_eem_entry
893 /** Delete EM hash entry API
/*
 * tf_em_delete_ext_entry - session-level wrapper: resolve the table
 * scope for the delete request and delegate to tf_delete_eem_entry().
 */
900 tf_em_delete_ext_entry(struct tf *tfp,
901 struct tf_delete_em_entry_parms *parms)
903 struct tf_tbl_scope_cb *tbl_scope_cb;
905 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
906 if (tbl_scope_cb == NULL) {
907 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
911 return tf_delete_eem_entry(tbl_scope_cb, parms);
/*
 * tf_em_ext_common_bind - create and register the session's EM-ext
 * database (empty table-scope list, per-direction RM DBs created only
 * when table scopes were requested), then record the configured memory
 * type in the file-scope 'mem_type'.
 */
916 tf_em_ext_common_bind(struct tf *tfp,
917 struct tf_em_cfg_parms *parms)
921 struct tf_rm_create_db_parms db_cfg = { 0 };
922 struct em_ext_db *ext_db;
923 struct tfp_calloc_parms cparms;
925 TF_CHECK_PARMS2(tfp, parms);
928 cparms.size = sizeof(struct em_ext_db);
929 cparms.alignment = 0;
930 if (tfp_calloc(&cparms) != 0) {
931 TFP_DRV_LOG(ERR, "em_ext_db alloc error %s\n",
/* Fresh database: empty scope list, no per-direction RM DBs yet. */
936 ext_db = cparms.mem_va;
937 ll_init(&ext_db->tbl_scope_ll);
938 for (i = 0; i < TF_DIR_MAX; i++)
939 ext_db->eem_db[i] = NULL;
940 tf_session_set_em_ext_db(tfp, ext_db);
942 db_cfg.module = TF_MODULE_TYPE_EM;
943 db_cfg.num_elements = parms->num_elements;
944 db_cfg.cfg = parms->cfg;
946 for (i = 0; i < TF_DIR_MAX; i++) {
948 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
950 /* Check if we got any request to support EEM, if so
951 * we build an EM Ext DB holding Table Scopes.
953 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
956 db_cfg.rm_db = (void *)&ext_db->eem_db[i];
957 rc = tf_rm_create_db(tfp, &db_cfg);
960 "%s: EM Ext DB creation failed\n",
/* Remember the EEM backing memory type for later operations. */
967 mem_type = parms->mem_type;
/*
 * tf_em_ext_common_unbind - tear down the session's EM-ext state: free
 * every remaining table scope (preferably through the device
 * free-tbl-scope op), free each direction's RM DB, and clear the
 * session's EM-ext pointer.
 */
973 tf_em_ext_common_unbind(struct tf *tfp)
977 struct tf_rm_free_db_parms fparms = { 0 };
978 struct em_ext_db *ext_db = NULL;
979 struct tf_session *tfs = NULL;
980 struct tf_dev_info *dev;
981 struct ll_entry *entry;
982 struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
983 void *ext_ptr = NULL;
984 struct tf_free_tbl_scope_parms tparms = { 0 };
986 TF_CHECK_PARMS1(tfp);
988 rc = tf_session_get_session_internal(tfp, &tfs);
990 TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
995 /* Retrieve the device information */
996 rc = tf_session_get_device(tfs, &dev);
999 "Failed to lookup device, rc:%s\n",
1004 rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
1007 "Failed to get em_ext_db from session, rc:%s\n",
1011 ext_db = (struct em_ext_db *)ext_ptr;
/* Walk the scope list; advance 'entry' before the node it points at
 * can be freed.
 */
1013 entry = ext_db->tbl_scope_ll.head;
1014 while (entry != NULL) {
1015 tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
1016 entry = entry->next;
1017 tparms.tbl_scope_id = tbl_scope_cb->tbl_scope_id;
1019 if (dev->ops->tf_dev_free_tbl_scope) {
1020 dev->ops->tf_dev_free_tbl_scope(tfp, &tparms);
1022 /* should not reach here */
1023 ll_delete(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
1024 tfp_free(tbl_scope_cb);
/* Free each direction's RM database, if one was created at bind. */
1028 for (i = 0; i < TF_DIR_MAX; i++) {
1029 if (ext_db->eem_db[i] == NULL)
1033 fparms.rm_db = ext_db->eem_db[i];
1034 rc = tf_rm_free_db(tfp, &fparms);
1038 ext_db->eem_db[i] = NULL;
/* Detach the (now torn down) EM-ext database from the session. */
1042 tf_session_set_em_ext_db(tfp, NULL);
1048 * Sets the specified external table type element.
1050 * This API sets the specified element data
1053 * Pointer to TF handle
1056 * Pointer to table set parameters
1059 * - (0) if successful.
1060 * - (-EINVAL) on failure.
/*
 * tf_tbl_ext_common_set - write caller-supplied data into the external
 * RECORD table at parms->idx via a HW PUT op.  Validates the data
 * pointer, the scope id and the scope lookup before issuing the op.
 */
1062 int tf_tbl_ext_common_set(struct tf *tfp,
1063 struct tf_tbl_set_parms *parms)
1066 struct tf_tbl_scope_cb *tbl_scope_cb;
1067 uint32_t tbl_scope_id;
1068 struct hcapi_cfa_hwop op;
1069 struct hcapi_cfa_key_tbl key_tbl;
1070 struct hcapi_cfa_key_data key_obj;
1071 struct hcapi_cfa_key_loc key_loc;
1073 TF_CHECK_PARMS2(tfp, parms);
1075 if (parms->data == NULL) {
1077 "%s, invalid parms->data\n",
1078 tf_dir_2_str(parms->dir));
1082 tbl_scope_id = parms->tbl_scope_id;
1084 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1086 "%s, Table scope not allocated\n",
1087 tf_dir_2_str(parms->dir));
1091 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, tbl_scope_id);
1092 if (tbl_scope_cb == NULL) {
1094 "%s, table scope error\n",
1095 tf_dir_2_str(parms->dir));
/* PUT the caller's bytes at the requested offset of the per-direction
 * external record table.
 */
1099 op.opcode = HCAPI_CFA_HWOPS_PUT;
1101 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1102 key_tbl.page_size = TF_EM_PAGE_SIZE;
1103 key_obj.offset = parms->idx;
1104 key_obj.data = parms->data;
1105 key_obj.size = parms->data_sz_in_bytes;
1107 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
/* Thin wrapper: allocate a table scope via the common tf_em_ext_alloc(). */
1116 tf_em_ext_common_alloc(struct tf *tfp,
1117 struct tf_alloc_tbl_scope_parms *parms)
1119 return tf_em_ext_alloc(tfp, parms);
/* Thin wrapper: free a table scope via the common tf_em_ext_free(). */
1123 tf_em_ext_common_free(struct tf *tfp,
1124 struct tf_free_tbl_scope_parms *parms)
1126 return tf_em_ext_free(tfp, parms);
1129 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1130 struct tf_map_tbl_scope_parms *parms)
1133 struct tf_session *tfs;
1134 struct tf_tbl_scope_cb *tbl_scope_cb;
1135 struct tf_global_cfg_parms gcfg_parms = { 0 };
1136 struct tfp_calloc_parms aparms;
1137 uint32_t *data, *mask;
1138 uint32_t sz_in_bytes = 8;
1139 struct tf_dev_info *dev;
1141 /* Retrieve the session information */
1142 rc = tf_session_get_session_internal(tfp, &tfs);
1146 /* Retrieve the device information */
1147 rc = tf_session_get_device(tfs, &dev);
1151 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
1152 if (tbl_scope_cb == NULL) {
1153 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1154 parms->tbl_scope_id);
1158 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1161 "Map table scope operation not supported, rc:%s\n",
1167 aparms.size = sizeof(uint32_t);
1168 aparms.alignment = 0;
1170 if (tfp_calloc(&aparms) != 0) {
1171 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1175 data = aparms.mem_va;
1177 if (tfp_calloc(&aparms) != 0) {
1178 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1183 mask = aparms.mem_va;
1185 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1187 (uint8_t *)data, (uint8_t *)mask,
1192 "Map table scope config failure, rc:%s\n",
1197 /* Note that TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF is same as below enum */
1198 gcfg_parms.type = TF_GLOBAL_CFG_TYPE_MAX;
1199 gcfg_parms.offset = 0;
1200 gcfg_parms.config = (uint8_t *)data;
1201 gcfg_parms.config_mask = (uint8_t *)mask;
1202 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1205 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1208 "Map tbl scope, set failed, rc:%s\n",