1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "hcapi_cfa.h"
27 /** Invalid table scope id (sentinel checked in tf_tbl_ext_common_set()) */
28 #define TF_TBL_SCOPE_INVALID 0xffffffff
30 /* Number of pointers per page_size */
31 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
/*
 * EM memory type cached at bind time; written once in
 * tf_em_ext_common_bind() from parms->mem_type.
 */
36 static enum tf_mem_type mem_type;
38 /* API defined in tf_em.h */
/*
 * Create the external-table free pool for one direction of a table scope.
 *
 * Allocates an array of num_entries uint32_t slots via tfp_calloc(),
 * initializes a stack over that memory, then pushes entry byte-offsets in
 * reverse order so the lowest offset pops first.  The backing memory is
 * saved in tbl_scope_cb->ext_act_pool_mem[dir] for later free by
 * tf_destroy_tbl_pool_external().  Error/return paths are elided in this
 * excerpt.
 */
40 tf_create_tbl_pool_external(enum tf_dir dir,
41 struct tf_tbl_scope_cb *tbl_scope_cb,
43 uint32_t entry_sz_bytes)
45 struct tfp_calloc_parms parms;
49 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
51 parms.nitems = num_entries;
52 parms.size = sizeof(uint32_t);
55 if (tfp_calloc(&parms) != 0) {
56 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
57 tf_dir_2_str(dir), strerror(ENOMEM));
/* Build the stack over the freshly allocated array. */
63 rc = stack_init(num_entries, parms.mem_va, pool);
66 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
67 tf_dir_2_str(dir), strerror(-rc));
71 /* Save the malloced memory address so that it can
72 * be freed when the table scope is freed.
74 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
76 /* Fill pool with indexes in reverse
/* Start at the highest entry offset and work down. */
78 j = (num_entries - 1) * entry_sz_bytes;
80 for (i = 0; i < num_entries; i++) {
81 rc = stack_push(pool, j);
83 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
84 tf_dir_2_str(dir), strerror(-rc));
89 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* Sanity check: every index must have been pushed. */
96 if (!stack_is_full(pool)) {
98 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
99 tf_dir_2_str(dir), strerror(-rc));
/* Failure cleanup: release the pool memory. */
104 tfp_free((void *)parms.mem_va);
109 * Destroy External Tbl pool of memory indexes.
 *
 * Frees the index-pool memory that tf_create_tbl_pool_external() saved in
 * tbl_scope_cb->ext_act_pool_mem[dir].
 *
114 * pointer to the table scope
117 tf_destroy_tbl_pool_external(enum tf_dir dir,
118 struct tf_tbl_scope_cb *tbl_scope_cb)
120 uint32_t *ext_act_pool_mem =
121 tbl_scope_cb->ext_act_pool_mem[dir];
123 tfp_free(ext_act_pool_mem);
127 * Looks up table scope control block using tbl_scope_id from tf_session.
130 * Pointer to Truflow Handle
135 * - Pointer to the tf_tbl_scope_cb, if found.
136 * - (NULL) on failure, not found.
138 struct tf_tbl_scope_cb *
139 tf_em_ext_common_tbl_scope_find(struct tf *tfp,
140 uint32_t tbl_scope_id)
143 struct em_ext_db *ext_db;
144 void *ext_ptr = NULL;
145 struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
146 struct ll_entry *entry;
/* Fetch the session-scoped EM external DB that holds the scope list. */
148 rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
152 ext_db = (struct em_ext_db *)ext_ptr;
/*
 * Walk the linked list of table scopes.  The cast from ll_entry assumes
 * the ll_entry is the first member of tf_tbl_scope_cb — TODO confirm
 * struct layout in tf_em.h.
 */
154 for (entry = ext_db->tbl_scope_ll.head; entry != NULL;
155 entry = entry->next) {
156 tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
157 if (tbl_scope_cb->tbl_scope_id == tbl_scope_id)
165 * Allocate External Tbl entry from the scope pool.
168 * Pointer to Truflow Handle
170 * Allocation parameters
173 * 0 - Success, entry allocated - no search support
174 * -ENOMEM -EINVAL -EOPNOTSUPP
175 * - Failure, entry not allocated, out of resources
178 tf_tbl_ext_alloc(struct tf *tfp,
179 struct tf_tbl_alloc_parms *parms)
183 struct tf_tbl_scope_cb *tbl_scope_cb;
186 TF_CHECK_PARMS2(tfp, parms);
/* Resolve the scope control block; fail if the scope was never allocated. */
188 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
189 if (tbl_scope_cb == NULL) {
191 "%s, table scope not allocated\n",
192 tf_dir_2_str(parms->dir));
/* Pop a free entry offset from the per-direction pool. */
196 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
198 /* Allocate an element
200 rc = stack_pop(pool, &index);
204 "%s, Allocation failed, type:%d\n",
205 tf_dir_2_str(parms->dir),
215 * Free External Tbl entry to the scope pool.
218 * Pointer to Truflow Handle
220 * Allocation parameters
223 * 0 - Success, entry freed
225 * - Failure, entry not successfully freed for these reasons
231 tf_tbl_ext_free(struct tf *tfp,
232 struct tf_tbl_free_parms *parms)
236 struct tf_tbl_scope_cb *tbl_scope_cb;
239 TF_CHECK_PARMS2(tfp, parms);
/* Resolve the scope control block for the id being freed into. */
241 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
242 if (tbl_scope_cb == NULL) {
244 "%s, table scope error\n",
245 tf_dir_2_str(parms->dir));
/*
 * Return the entry offset to the free pool.  A full stack here means an
 * index was double-freed (consistency error).
 */
248 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
252 rc = stack_push(pool, index);
256 "%s, consistency error, stack full, type:%d, idx:%d\n",
257 tf_dir_2_str(parms->dir),
/*
 * Compute the hash-bucket mask for an EM key table.
 *
 * mask = num_entries - 1 is only a valid mask when num_entries is a power
 * of two; the two range checks below (bodies elided) appear to reject
 * out-of-range counts — TODO confirm against TF_EM_MAX_MASK/TF_EM_MAX_ENTRY
 * definitions.
 */
265 tf_em_get_key_mask(int num_entries)
267 uint32_t mask = num_entries - 1;
269 if (num_entries & TF_EM_MAX_MASK)
272 if (num_entries > TF_EM_MAX_ENTRY)
/*
 * Assemble a 64-byte EM key entry: copy the record header (word1/pointer)
 * from "result" and TF_P4_HW_EM_KEY_MAX_SIZE + 4 bytes of key material
 * from the input key into key_entry.  Caller must provide a key buffer at
 * least that large — TODO confirm callers honour this.
 */
279 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
281 struct cfa_p4_eem_64b_entry *key_entry)
283 key_entry->hdr.word1 = result->word1;
284 key_entry->hdr.pointer = result->pointer;
285 memcpy(key_entry->key, in_key, TF_P4_HW_EM_KEY_MAX_SIZE + 4);
290 * Return the number of page table pages needed to
291 * reference the given number of next level pages.
297 * Size of each EM page
300 * Number of EM page table pages
303 tf_em_page_tbl_pgcnt(uint32_t num_pages,
/* ceil(num_pages / pointers-per-page): one slot per next-level page. */
306 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
307 MAX_PAGE_PTRS(page_size);
312 * Given the number of data pages, page_size and the maximum
313 * number of page table levels (already determined), size
314 * the number of page table pages required at each level.
317 * Max number of levels
319 * [in] num_data_pages
320 * Number of EM data pages
329 tf_em_size_page_tbls(int max_lvl,
330 uint64_t num_data_pages,
/*
 * The deepest level holds the data pages directly; each shallower level
 * holds pointer pages sized by tf_em_page_tbl_pgcnt() on the level below.
 */
334 if (max_lvl == TF_PT_LVL_0) {
335 page_cnt[TF_PT_LVL_0] = num_data_pages;
336 } else if (max_lvl == TF_PT_LVL_1) {
337 page_cnt[TF_PT_LVL_1] = num_data_pages;
338 page_cnt[TF_PT_LVL_0] =
339 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
340 } else if (max_lvl == TF_PT_LVL_2) {
341 page_cnt[TF_PT_LVL_2] = num_data_pages;
342 page_cnt[TF_PT_LVL_1] =
343 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
344 page_cnt[TF_PT_LVL_0] =
345 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
352 * Given the page size, size of each data item (entry size),
353 * and the total number of entries needed, determine the number
354 * of page table levels and the number of data pages required.
363 * Number of entries needed
365 * [out] num_data_pages
366 * Number of pages required
369 * Success - Number of EM page levels required
370 * -ENOMEM - Out of memory
373 tf_em_size_page_tbl_lvl(uint32_t page_size,
375 uint32_t num_entries,
376 uint64_t *num_data_pages)
378 uint64_t lvl_data_size = page_size;
379 int lvl = TF_PT_LVL_0;
383 data_size = (uint64_t)num_entries * entry_size;
/*
 * Grow the addressable capacity level by level until it covers the
 * requested data size: lvl1 covers MAX_PAGE_PTRS pages, lvl2 covers
 * MAX_PAGE_PTRS^2 pages.  (Level increment is on an elided line.)
 */
385 while (lvl_data_size < data_size) {
388 if (lvl == TF_PT_LVL_1)
389 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
391 else if (lvl == TF_PT_LVL_2)
392 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
393 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages = ceil(data_size / page_size). */
398 *num_data_pages = roundup(data_size, page_size) / page_size;
404 * Size the EM table based on capabilities
411 * - EINVAL - Parameter error
412 * - ENOMEM - Out of memory
415 tf_em_size_table(struct hcapi_cfa_em_table *tbl,
418 uint64_t num_data_pages;
421 uint32_t num_entries;
422 uint32_t cnt = TF_EM_MIN_ENTRIES;
424 /* Ignore entry if both size and number are zero */
425 if (!tbl->entry_size && !tbl->num_entries)
428 /* If only one is set then error */
429 if (!tbl->entry_size || !tbl->num_entries)
432 /* Determine number of page table levels and the number
433 * of data pages needed to process the given eem table.
435 if (tbl->type == TF_RECORD_TABLE) {
437 * For action records just a memory size is provided. Work
438 * backwards to resolve to number of entries
440 num_entries = tbl->num_entries / tbl->entry_size;
441 if (num_entries < TF_EM_MIN_ENTRIES) {
442 num_entries = TF_EM_MIN_ENTRIES;
/* Round up to the next supported power-of-two entry count. */
444 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
449 num_entries = tbl->num_entries;
452 max_lvl = tf_em_size_page_tbl_lvl(page_size,
457 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
459 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
460 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* Record sizing results: levels are 0-based, hence +1. */
465 tbl->num_lvl = max_lvl + 1;
466 tbl->num_data_pages = num_data_pages;
468 /* Determine the number of pages needed at each level */
469 page_cnt = tbl->page_cnt;
470 memset(page_cnt, 0, sizeof(tbl->page_cnt));
471 tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
474 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
476 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
477 " l0: %u l1: %u l2: %u\n",
479 (uint64_t)num_data_pages * page_size,
481 page_cnt[TF_PT_LVL_0],
482 page_cnt[TF_PT_LVL_1],
483 page_cnt[TF_PT_LVL_2]);
489 * Validates EM number of entries requested
 *
 * Converts the caller's RX/TX memory-size or flow-count requests into
 * supported power-of-two entry counts, checks them against the per-
 * direction capability limits in tbl_scope_cb->em_caps, then populates the
 * KEY0/KEY1/RECORD/EFC/ACTION/EM_LKUP table sizes in em_ctx_info for both
 * directions.
 *
492 * Pointer to table scope control block to be populated
495 * Pointer to input parameters
499 * -EINVAL - Parameter error
502 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
503 struct tf_alloc_tbl_scope_parms *parms)
/* --- RX: derive entry count from requested memory size (MB). --- */
507 if (parms->rx_mem_size_in_mb != 0) {
508 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
509 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
511 uint32_t num_entries = (parms->rx_mem_size_in_mb *
512 TF_MEGABYTE) / (key_b + action_b);
514 if (num_entries < TF_EM_MIN_ENTRIES) {
515 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
517 parms->rx_mem_size_in_mb);
/* Round up to the next supported power-of-two count. */
521 cnt = TF_EM_MIN_ENTRIES;
522 while (num_entries > cnt &&
523 cnt <= TF_EM_MAX_ENTRIES)
526 if (cnt > TF_EM_MAX_ENTRIES) {
/*
 * NOTE(review): this is the RX branch but the log says "Tx" and
 * prints tx_num_flows_in_k — looks like a copy/paste defect;
 * should reference the Rx request.
 */
527 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
529 (parms->tx_num_flows_in_k * TF_KILOBYTE));
533 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* --- RX: validate flow count against device capabilities. --- */
535 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
537 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
538 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
540 "EEM: Invalid number of Rx flows "
541 "requested:%u max:%u\n",
542 parms->rx_num_flows_in_k * TF_KILOBYTE,
543 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
547 /* must be a power-of-2 supported value
548 * in the range 32K - 128M
550 cnt = TF_EM_MIN_ENTRIES;
551 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
552 cnt <= TF_EM_MAX_ENTRIES)
555 if (cnt > TF_EM_MAX_ENTRIES) {
557 "EEM: Invalid number of Rx requested: %u\n",
558 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* --- TX: derive entry count from requested memory size (MB). --- */
563 if (parms->tx_mem_size_in_mb != 0) {
564 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
565 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
567 uint32_t num_entries = (parms->tx_mem_size_in_mb *
568 (TF_KILOBYTE * TF_KILOBYTE)) /
571 if (num_entries < TF_EM_MIN_ENTRIES) {
/*
 * NOTE(review): TX branch logs rx_mem_size_in_mb — should
 * presumably be tx_mem_size_in_mb (copy/paste defect).
 */
573 "EEM: Insufficient memory requested:%uMB\n",
574 parms->rx_mem_size_in_mb);
578 cnt = TF_EM_MIN_ENTRIES;
579 while (num_entries > cnt &&
580 cnt <= TF_EM_MAX_ENTRIES)
583 if (cnt > TF_EM_MAX_ENTRIES) {
585 "EEM: Invalid number of Tx requested: %u\n",
586 (parms->tx_num_flows_in_k * TF_KILOBYTE));
590 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
/* --- TX: validate flow count against device capabilities. --- */
592 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
594 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
595 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
597 "EEM: Invalid number of Tx flows "
598 "requested:%u max:%u\n",
599 (parms->tx_num_flows_in_k * TF_KILOBYTE),
600 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
604 cnt = TF_EM_MIN_ENTRIES;
605 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
606 cnt <= TF_EM_MAX_ENTRIES)
609 if (cnt > TF_EM_MAX_ENTRIES) {
611 "EEM: Invalid number of Tx requested: %u\n",
612 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow request with a zero-byte key is unusable. */
617 if (parms->rx_num_flows_in_k != 0 &&
618 parms->rx_max_key_sz_in_bits / 8 == 0) {
620 "EEM: Rx key size required: %u\n",
621 (parms->rx_max_key_sz_in_bits));
625 if (parms->tx_num_flows_in_k != 0 &&
626 parms->tx_max_key_sz_in_bits / 8 == 0) {
628 "EEM: Tx key size required: %u\n",
629 (parms->tx_max_key_sz_in_bits));
/* --- Populate RX table sizing from the validated request. --- */
633 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
634 parms->rx_num_flows_in_k * TF_KILOBYTE;
635 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
636 parms->rx_max_key_sz_in_bits / 8;
638 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
639 parms->rx_num_flows_in_k * TF_KILOBYTE;
640 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
641 parms->rx_max_key_sz_in_bits / 8;
643 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
644 parms->rx_num_flows_in_k * TF_KILOBYTE;
645 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
646 parms->rx_max_action_entry_sz_in_bits / 8;
648 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
651 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].num_entries =
652 parms->rx_num_flows_in_k * TF_KILOBYTE;
653 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].entry_size =
654 parms->rx_max_action_entry_sz_in_bits / 8;
656 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].num_entries =
657 parms->rx_num_flows_in_k * TF_KILOBYTE;
658 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].entry_size =
659 parms->rx_max_key_sz_in_bits / 8;
/* --- Populate TX table sizing from the validated request. --- */
662 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
663 parms->tx_num_flows_in_k * TF_KILOBYTE;
664 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
665 parms->tx_max_key_sz_in_bits / 8;
667 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
668 parms->tx_num_flows_in_k * TF_KILOBYTE;
669 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
670 parms->tx_max_key_sz_in_bits / 8;
672 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
673 parms->tx_num_flows_in_k * TF_KILOBYTE;
674 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
675 parms->tx_max_action_entry_sz_in_bits / 8;
677 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
/*
 * NOTE(review): the next two TX tables are sized from
 * rx_num_flows_in_k; every other TX table here uses
 * tx_num_flows_in_k — apparent copy/paste defect, verify against
 * upstream before fixing.
 */
680 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].num_entries =
681 parms->rx_num_flows_in_k * TF_KILOBYTE;
682 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].entry_size =
683 parms->tx_max_action_entry_sz_in_bits / 8;
685 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].num_entries =
686 parms->rx_num_flows_in_k * TF_KILOBYTE;
687 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].entry_size =
688 parms->tx_max_key_sz_in_bits / 8;
693 /** insert EEM entry API
 *
 * Hashes the key with the device hash op, splits the 64-bit hash into
 * key0/key1 bucket indexes, builds the 64-byte key entry, then attempts a
 * hardware ADD into the KEY0 table and falls back to KEY1.  On success the
 * flow id/handle fields are set for the caller.
 *
697 * TF_ERR - unable to get lock
699 * insert callback returns:
701 * TF_ERR_EM_DUP - key is already in table
704 tf_insert_eem_entry(struct tf_dev_info *dev,
705 struct tf_tbl_scope_cb *tbl_scope_cb,
706 struct tf_insert_em_entry_parms *parms)
713 struct cfa_p4_eem_64b_entry key_entry;
715 enum hcapi_cfa_em_table_type table_type;
717 struct hcapi_cfa_hwop op;
718 struct hcapi_cfa_key_tbl key_tbl;
719 struct hcapi_cfa_key_data key_obj;
720 struct hcapi_cfa_key_loc key_loc;
724 /* Get mask to use on hash */
725 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
/* Device must supply a key-hash op for EEM inserts. */
730 if (dev->ops->tf_dev_cfa_key_hash == NULL)
733 big_hash = dev->ops->tf_dev_cfa_key_hash((uint64_t *)parms->key,
734 (TF_P4_HW_EM_KEY_MAX_SIZE + 4) * 8);
/* Upper 32 bits index KEY0, lower 32 bits index KEY1. */
735 key0_hash = (uint32_t)(big_hash >> 32);
736 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
738 key0_index = key0_hash & mask;
739 key1_index = key1_hash & mask;
742 * Use the "result" arg to populate all of the key entry then
743 * store the byte swapped "raw" entry in a local copy ready
744 * for insertion in to the table.
746 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
747 ((uint8_t *)parms->key),
751 * Try to add to Key0 table, if that does not work then
752 * try the key1 table.
755 op.opcode = HCAPI_CFA_HWOPS_ADD;
757 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
758 key_tbl.page_size = TF_EM_PAGE_SIZE;
/* "index" is assigned on an elided line — presumably key0_index here. */
759 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
760 key_obj.data = (uint8_t *)&key_entry;
761 key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
763 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
769 table_type = TF_KEY0_TABLE;
/* KEY0 insert failed; retry against the KEY1 table. */
774 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
775 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
777 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
784 table_type = TF_KEY1_TABLE;
/* Encode the external GFID and table info into the flow id/handle. */
790 TF_SET_FLOW_ID(parms->flow_id,
792 TF_GFID_TABLE_EXTERNAL,
794 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
805 /** delete EEM hash entry API
 *
 * Decodes the hash table (KEY0/KEY1) and entry index from the flow
 * handle, then issues a hardware DEL op against that key table.
 *
809 * -EINVAL - parameter error
810 * TF_NO_SESSION - bad session ID
811 * TF_ERR_TBL_SCOPE - invalid table scope
812 * TF_ERR_TBL_IF - invalid table interface
814 * insert callback returns
816 * TF_NO_EM_MATCH - entry not found
819 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
820 struct tf_delete_em_entry_parms *parms)
822 enum hcapi_cfa_em_table_type hash_type;
824 struct hcapi_cfa_hwop op;
825 struct hcapi_cfa_key_tbl key_tbl;
826 struct hcapi_cfa_key_data key_obj;
827 struct hcapi_cfa_key_loc key_loc;
/* The flow handle carries which hash table and which index to delete. */
830 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
831 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
833 op.opcode = HCAPI_CFA_HWOPS_DEL;
835 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
836 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
837 key_tbl.page_size = TF_EM_PAGE_SIZE;
838 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
840 key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
842 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
853 /** insert EM hash entry API
 *
 * Public wrapper: resolves the table scope, session and device, then
 * delegates to tf_insert_eem_entry().
 */
860 tf_em_insert_ext_entry(struct tf *tfp,
861 struct tf_insert_em_entry_parms *parms)
864 struct tf_tbl_scope_cb *tbl_scope_cb;
865 struct tf_session *tfs;
866 struct tf_dev_info *dev;
868 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
869 if (tbl_scope_cb == NULL) {
870 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
874 /* Retrieve the session information */
875 rc = tf_session_get_session_internal(tfp, &tfs);
879 /* Retrieve the device information */
880 rc = tf_session_get_device(tfs, &dev);
884 return tf_insert_eem_entry
890 /** Delete EM hash entry API
 *
 * Public wrapper: resolves the table scope then delegates to
 * tf_delete_eem_entry().
 */
897 tf_em_delete_ext_entry(struct tf *tfp,
898 struct tf_delete_em_entry_parms *parms)
900 struct tf_tbl_scope_cb *tbl_scope_cb;
902 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
903 if (tbl_scope_cb == NULL) {
904 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
908 return tf_delete_eem_entry(tbl_scope_cb, parms);
/*
 * Bind the EM external module to the session.
 *
 * Allocates the em_ext_db, initializes its table-scope linked list and
 * per-direction RM DB slots, attaches it to the session, then creates an
 * RM DB per direction for which table-scope resources were requested.
 * Finally caches parms->mem_type in the file-scope mem_type.
 */
913 tf_em_ext_common_bind(struct tf *tfp,
914 struct tf_em_cfg_parms *parms)
918 struct tf_rm_create_db_parms db_cfg = { 0 };
919 struct em_ext_db *ext_db;
920 struct tfp_calloc_parms cparms;
922 TF_CHECK_PARMS2(tfp, parms);
925 cparms.size = sizeof(struct em_ext_db);
926 cparms.alignment = 0;
927 if (tfp_calloc(&cparms) != 0) {
928 TFP_DRV_LOG(ERR, "em_ext_db alloc error %s\n",
/* Fresh DB: empty scope list, no per-direction RM DBs yet. */
933 ext_db = cparms.mem_va;
934 ll_init(&ext_db->tbl_scope_ll);
935 for (i = 0; i < TF_DIR_MAX; i++)
936 ext_db->eem_db[i] = NULL;
937 tf_session_set_em_ext_db(tfp, ext_db);
939 db_cfg.module = TF_MODULE_TYPE_EM;
940 db_cfg.num_elements = parms->num_elements;
941 db_cfg.cfg = parms->cfg;
943 for (i = 0; i < TF_DIR_MAX; i++) {
945 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
947 /* Check if we got any request to support EEM, if so
948 * we build an EM Ext DB holding Table Scopes.
950 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
953 db_cfg.rm_db = (void *)&ext_db->eem_db[i];
954 rc = tf_rm_create_db(tfp, &db_cfg);
957 "%s: EM Ext DB creation failed\n",
/* Remember the configured memory type for later EM operations. */
964 mem_type = parms->mem_type;
/*
 * Unbind the EM external module from the session.
 *
 * Frees every remaining table scope (via the device free-tbl-scope op when
 * available, otherwise by unlinking and freeing directly), tears down the
 * per-direction RM DBs, and clears the session's em_ext_db pointer.
 */
970 tf_em_ext_common_unbind(struct tf *tfp)
974 struct tf_rm_free_db_parms fparms = { 0 };
975 struct em_ext_db *ext_db = NULL;
976 struct tf_session *tfs = NULL;
977 struct tf_dev_info *dev;
978 struct ll_entry *entry;
979 struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
980 void *ext_ptr = NULL;
981 struct tf_free_tbl_scope_parms tparms = { 0 };
983 TF_CHECK_PARMS1(tfp);
985 rc = tf_session_get_session_internal(tfp, &tfs);
987 TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
992 /* Retrieve the device information */
993 rc = tf_session_get_device(tfs, &dev);
996 "Failed to lookup device, rc:%s\n",
1001 rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
1004 "Failed to get em_ext_db from session, rc:%s\n",
1008 ext_db = (struct em_ext_db *)ext_ptr;
1010 if (ext_db != NULL) {
/*
 * Walk and free all table scopes.  "entry" is advanced before the
 * current node is freed so the iteration stays safe.
 */
1011 entry = ext_db->tbl_scope_ll.head;
1012 while (entry != NULL) {
1013 tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
1014 entry = entry->next;
1015 tparms.tbl_scope_id =
1016 tbl_scope_cb->tbl_scope_id;
1018 if (dev->ops->tf_dev_free_tbl_scope) {
1019 dev->ops->tf_dev_free_tbl_scope(tfp,
1022 /* should not reach here */
1023 ll_delete(&ext_db->tbl_scope_ll,
1024 &tbl_scope_cb->ll_entry);
1025 tfp_free(tbl_scope_cb);
/* Free the per-direction RM DBs created at bind time. */
1029 for (i = 0; i < TF_DIR_MAX; i++) {
1030 if (ext_db->eem_db[i] == NULL)
1034 fparms.rm_db = ext_db->eem_db[i];
1035 rc = tf_rm_free_db(tfp, &fparms);
1039 ext_db->eem_db[i] = NULL;
/* Detach the DB from the session. */
1045 tf_session_set_em_ext_db(tfp, NULL);
1051 * Sets the specified external table type element.
1053 * This API sets the specified element data
 *
 * Writes parms->data (parms->data_sz_in_bytes bytes) into the scope's
 * RECORD table at byte offset parms->idx via a hardware PUT op.
 *
1056 * Pointer to TF handle
1059 * Pointer to table set parameters
1062 * - (0) if successful.
1063 * - (-EINVAL) on failure.
1065 int tf_tbl_ext_common_set(struct tf *tfp,
1066 struct tf_tbl_set_parms *parms)
1069 struct tf_tbl_scope_cb *tbl_scope_cb;
1070 uint32_t tbl_scope_id;
1071 struct hcapi_cfa_hwop op;
1072 struct hcapi_cfa_key_tbl key_tbl;
1073 struct hcapi_cfa_key_data key_obj;
1074 struct hcapi_cfa_key_loc key_loc;
1076 TF_CHECK_PARMS2(tfp, parms);
1078 if (parms->data == NULL) {
1080 "%s, invalid parms->data\n",
1081 tf_dir_2_str(parms->dir));
1085 tbl_scope_id = parms->tbl_scope_id;
/* Reject the sentinel id before attempting the lookup. */
1087 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1089 "%s, Table scope not allocated\n",
1090 tf_dir_2_str(parms->dir));
1094 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, tbl_scope_id);
1095 if (tbl_scope_cb == NULL) {
1097 "%s, table scope error\n",
1098 tf_dir_2_str(parms->dir));
/* PUT the caller's record bytes into the RECORD table at parms->idx. */
1102 op.opcode = HCAPI_CFA_HWOPS_PUT;
1104 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1105 key_tbl.page_size = TF_EM_PAGE_SIZE;
1106 key_obj.offset = parms->idx;
1107 key_obj.data = parms->data;
1108 key_obj.size = parms->data_sz_in_bytes;
1110 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
/* Thin wrapper: delegate table-scope allocation to tf_em_ext_alloc(). */
1119 tf_em_ext_common_alloc(struct tf *tfp,
1120 struct tf_alloc_tbl_scope_parms *parms)
1122 return tf_em_ext_alloc(tfp, parms);
/* Thin wrapper: delegate table-scope free to tf_em_ext_free(). */
1126 tf_em_ext_common_free(struct tf *tfp,
1127 struct tf_free_tbl_scope_parms *parms)
1129 return tf_em_ext_free(tfp, parms);
/*
 * Map a table scope to PARIFs.
 *
 * Resolves session/device and the table scope, builds data/mask words via
 * the device's map-parif op, then pushes them to firmware as a global
 * config message.  NOTE(review): the function continues past the end of
 * this excerpt; cleanup/free paths for the two tfp_calloc() buffers are
 * not visible here — confirm they are freed on all paths.
 */
1132 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1133 struct tf_map_tbl_scope_parms *parms)
1136 struct tf_session *tfs;
1137 struct tf_tbl_scope_cb *tbl_scope_cb;
1138 struct tf_global_cfg_parms gcfg_parms = { 0 };
1139 struct tfp_calloc_parms aparms;
1140 uint32_t *data, *mask;
1141 uint32_t sz_in_bytes = 8;
1142 struct tf_dev_info *dev;
1144 /* Retrieve the session information */
1145 rc = tf_session_get_session_internal(tfp, &tfs);
1149 /* Retrieve the device information */
1150 rc = tf_session_get_device(tfs, &dev);
1154 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
1155 if (tbl_scope_cb == NULL) {
1156 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1157 parms->tbl_scope_id);
1161 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1164 "Map table scope operation not supported, rc:%s\n",
/* Allocate the data word, then (same aparms) the mask word. */
1170 aparms.size = sizeof(uint32_t);
1171 aparms.alignment = 0;
1173 if (tfp_calloc(&aparms) != 0) {
1174 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1178 data = aparms.mem_va;
1180 if (tfp_calloc(&aparms) != 0) {
1181 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1186 mask = aparms.mem_va;
/* Device op fills data/mask for the requested PARIF bitmask. */
1188 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1190 (uint8_t *)data, (uint8_t *)mask,
1195 "Map table scope config failure, rc:%s\n",
1200 /* Note that TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF is same as below enum */
1201 gcfg_parms.type = TF_GLOBAL_CFG_TYPE_MAX;
1202 gcfg_parms.offset = 0;
1203 gcfg_parms.config = (uint8_t *)data;
1204 gcfg_parms.config_mask = (uint8_t *)mask;
1205 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1208 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1211 "Map tbl scope, set failed, rc:%s\n",