1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2021 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "hcapi_cfa.h"
26 /* Number of pointers per page_size */
27 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
/* EM memory type selected for this device; cached at bind time
 * (assigned from parms->mem_type in tf_em_ext_common_bind).
 */
32 static enum tf_mem_type mem_type;
34 /* API defined in tf_em.h */
/* Create the external action-table index pool for one direction:
 * calloc an array of uint32_t slots, wrap it in a stack, and push the
 * byte offset of every entry so later allocations pop free offsets.
 */
36 tf_create_tbl_pool_external(enum tf_dir dir,
37 struct tf_tbl_scope_cb *tbl_scope_cb,
39 uint32_t entry_sz_bytes)
41 struct tfp_calloc_parms parms;
45 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
/* One uint32_t slot per pool entry. */
47 parms.nitems = num_entries;
48 parms.size = sizeof(uint32_t);
51 if (tfp_calloc(&parms) != 0) {
52 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
53 tf_dir_2_str(dir), strerror(ENOMEM));
59 rc = stack_init(num_entries, parms.mem_va, pool);
62 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
63 tf_dir_2_str(dir), strerror(-rc));
67 /* Save the malloced memory address so that it can
68 * be freed when the table scope is freed.
70 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
72 /* Fill pool with indexes in reverse
/* Start at the byte offset of the last entry and work downward. */
74 j = (num_entries - 1) * entry_sz_bytes;
76 for (i = 0; i < num_entries; i++) {
77 rc = stack_push(pool, j);
79 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
80 tf_dir_2_str(dir), strerror(-rc));
/* NOTE(review): "%d" as the first specifier differs from the
 * "%s" + tf_dir_2_str() pattern of the sibling logs above — looks
 * like a format mismatch; argument list not visible here, confirm.
 */
85 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* Sanity check: every index must have been pushed onto the stack. */
92 if (!stack_is_full(pool)) {
94 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
95 tf_dir_2_str(dir), strerror(-rc));
/* Presumably the error-cleanup path: release the pool backing memory. */
100 tfp_free((void *)parms.mem_va);
105 * Destroy External Tbl pool of memory indexes.
110 * pointer to the table scope
/* Frees the index-array backing memory saved in ext_act_pool_mem[dir]
 * by tf_create_tbl_pool_external().
 */
113 tf_destroy_tbl_pool_external(enum tf_dir dir,
114 struct tf_tbl_scope_cb *tbl_scope_cb)
116 uint32_t *ext_act_pool_mem =
117 tbl_scope_cb->ext_act_pool_mem[dir];
119 tfp_free(ext_act_pool_mem);
123 * Looks up table scope control block using tbl_scope_id from tf_session.
126 * Pointer to Truflow Handle
131 * - Pointer to the tf_tbl_scope_cb, if found.
132 * - (NULL) on failure, not found.
134 struct tf_tbl_scope_cb *
135 tf_em_ext_common_tbl_scope_find(struct tf *tfp,
136 uint32_t tbl_scope_id)
139 struct em_ext_db *ext_db;
140 void *ext_ptr = NULL;
141 struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
142 struct ll_entry *entry;
/* Fetch the session's EM-external DB, which owns the linked list of
 * table-scope control blocks.
 */
144 rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
148 ext_db = (struct em_ext_db *)ext_ptr;
/* Linear scan of the table-scope list. The cast from ll_entry assumes
 * the ll_entry is located at the start of tf_tbl_scope_cb — TODO
 * confirm against the struct definition.
 */
150 for (entry = ext_db->tbl_scope_ll.head; entry != NULL;
151 entry = entry->next) {
152 tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
153 if (tbl_scope_cb->tbl_scope_id == tbl_scope_id)
161 * Allocate External Tbl entry from the scope pool.
164 * Pointer to Truflow Handle
166 * Allocation parameters
169 * 0 - Success, entry allocated - no search support
170 * -ENOMEM -EINVAL -EOPNOTSUPP
171 * - Failure, entry not allocated, out of resources
/* Pops a free entry offset from the per-direction external action pool
 * of the table scope identified by parms->tbl_scope_id.
 */
174 tf_tbl_ext_alloc(struct tf *tfp,
175 struct tf_tbl_alloc_parms *parms)
179 struct tf_tbl_scope_cb *tbl_scope_cb;
182 TF_CHECK_PARMS2(tfp, parms);
/* Resolve the table scope control block; NULL means the scope was
 * never allocated for this session.
 */
184 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
185 if (tbl_scope_cb == NULL) {
187 "%s, table scope not allocated\n",
188 tf_dir_2_str(parms->dir));
192 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
194 /* Allocate an element
196 rc = stack_pop(pool, &index);
200 "%s, Allocation failed, type:%d\n",
201 tf_dir_2_str(parms->dir),
211 * Free External Tbl entry to the scope pool.
214 * Pointer to Truflow Handle
216 * Allocation parameters
219 * 0 - Success, entry freed
221 * - Failure, entry not successfully freed for these reasons
/* Returns a previously-allocated entry offset to the per-direction
 * external action pool (inverse of tf_tbl_ext_alloc).
 */
227 tf_tbl_ext_free(struct tf *tfp,
228 struct tf_tbl_free_parms *parms)
232 struct tf_tbl_scope_cb *tbl_scope_cb;
235 TF_CHECK_PARMS2(tfp, parms);
237 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
238 if (tbl_scope_cb == NULL) {
240 "%s, table scope error\n",
241 tf_dir_2_str(parms->dir));
244 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Push the freed index back; a full stack here indicates a double free
 * or accounting bug.
 */
248 rc = stack_push(pool, index);
252 "%s, consistency error, stack full, type:%d, idx:%d\n",
253 tf_dir_2_str(parms->dir),
/* Derive the hash bucket mask for an EM key table. For a power-of-two
 * num_entries the mask is num_entries - 1; the guards below reject
 * values outside the supported range (bodies elided in this listing).
 */
261 tf_em_get_key_mask(int num_entries)
263 uint32_t mask = num_entries - 1;
265 if (num_entries & TF_EM_MAX_MASK)
268 if (num_entries > TF_EM_MAX_ENTRY)
/* Assemble a 64B EM key entry: copy the result header fields, then the
 * raw key bytes (in_key parameter is on a line elided from this
 * listing). Copies TF_P4_HW_EM_KEY_MAX_SIZE + 4 bytes of key material.
 */
275 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
277 struct cfa_p4_eem_64b_entry *key_entry)
279 key_entry->hdr.word1 = result->word1;
280 key_entry->hdr.pointer = result->pointer;
281 memcpy(key_entry->key, in_key, TF_P4_HW_EM_KEY_MAX_SIZE + 4);
286 * Return the number of page table pages needed to
287 * reference the given number of next level pages.
293 * Size of each EM page
296 * Number of EM page table pages
/* Ceiling division: pages needed so that every next-level page pointer
 * fits, given MAX_PAGE_PTRS(page_size) pointers per page.
 */
299 tf_em_page_tbl_pgcnt(uint32_t num_pages,
302 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
303 MAX_PAGE_PTRS(page_size);
308 * Given the number of data pages, page_size and the maximum
309 * number of page table levels (already determined), size
310 * the number of page table pages required at each level.
313 * Max number of levels
315 * [in] num_data_pages
316 * Number of EM data pages
/* Fill page_cnt[] top-down: the deepest level holds the data pages and
 * each level above holds enough pointer pages to reference the level
 * below (via tf_em_page_tbl_pgcnt).
 */
325 tf_em_size_page_tbls(int max_lvl,
326 uint64_t num_data_pages,
330 if (max_lvl == TF_PT_LVL_0) {
331 page_cnt[TF_PT_LVL_0] = num_data_pages;
332 } else if (max_lvl == TF_PT_LVL_1) {
333 page_cnt[TF_PT_LVL_1] = num_data_pages;
334 page_cnt[TF_PT_LVL_0] =
335 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
336 } else if (max_lvl == TF_PT_LVL_2) {
337 page_cnt[TF_PT_LVL_2] = num_data_pages;
338 page_cnt[TF_PT_LVL_1] =
339 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
340 page_cnt[TF_PT_LVL_0] =
341 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
348 * Given the page size, size of each data item (entry size),
349 * and the total number of entries needed, determine the number
350 * of page table levels and the number of data pages required.
359 * Number of entries needed
361 * [out] num_data_pages
362 * Number of pages required
365 * Success - Number of EM page levels required
366 * -ENOMEM - Out of memory
369 tf_em_size_page_tbl_lvl(uint32_t page_size,
371 uint32_t num_entries,
372 uint64_t *num_data_pages)
374 uint64_t lvl_data_size = page_size;
375 int lvl = TF_PT_LVL_0;
/* Total bytes of EM data to map; 64-bit math to avoid overflow. */
379 data_size = (uint64_t)num_entries * entry_size;
/* Grow the addressable size level by level: LVL_1 covers
 * MAX_PAGE_PTRS pages, LVL_2 covers MAX_PAGE_PTRS^2 pages.
 */
381 while (lvl_data_size < data_size) {
384 if (lvl == TF_PT_LVL_1)
385 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
387 else if (lvl == TF_PT_LVL_2)
388 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
389 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages = ceil(data_size / page_size). */
394 *num_data_pages = roundup(data_size, page_size) / page_size;
400 * Size the EM table based on capabilities
407 * - EINVAL - Parameter error
408 * - ENOMEM - Out of memory
411 tf_em_size_table(struct hcapi_cfa_em_table *tbl,
414 uint64_t num_data_pages;
417 uint32_t num_entries;
418 uint32_t cnt = TF_EM_MIN_ENTRIES;
420 /* Ignore entry if both size and number are zero */
421 if (!tbl->entry_size && !tbl->num_entries)
424 /* If only one is set then error */
425 if (!tbl->entry_size || !tbl->num_entries)
428 /* Determine number of page table levels and the number
429 * of data pages needed to process the given eem table.
431 if (tbl->type == TF_RECORD_TABLE) {
433 * For action records just a memory size is provided. Work
434 * backwards to resolve to number of entries
/* num_entries here is a byte size for record tables; convert, clamp to
 * the minimum, then round up to a supported power-of-two count.
 */
436 num_entries = tbl->num_entries / tbl->entry_size;
437 if (num_entries < TF_EM_MIN_ENTRIES) {
438 num_entries = TF_EM_MIN_ENTRIES;
440 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
445 num_entries = tbl->num_entries;
448 max_lvl = tf_em_size_page_tbl_lvl(page_size,
453 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
455 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
456 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* tf_em_size_page_tbl_lvl returns the deepest level index; levels in
 * use is that plus one.
 */
461 tbl->num_lvl = max_lvl + 1;
462 tbl->num_data_pages = num_data_pages;
464 /* Determine the number of pages needed at each level */
465 page_cnt = tbl->page_cnt;
466 memset(page_cnt, 0, sizeof(tbl->page_cnt));
467 tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
470 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
472 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
473 " l0: %u l1: %u l2: %u\n",
475 (uint64_t)num_data_pages * page_size,
477 page_cnt[TF_PT_LVL_0],
478 page_cnt[TF_PT_LVL_1],
479 page_cnt[TF_PT_LVL_2]);
485 * Validates EM number of entries requested
488 * Pointer to table scope control block to be populated
491 * Pointer to input parameters
495 * -EINVAL - Parameter error
/* Validate/derive the Rx and Tx EM flow counts. When a mem-size in MB
 * is given, the flow count is derived from it; otherwise the caller's
 * *_num_flows_in_k is validated against device caps and the supported
 * power-of-two range. On success the per-direction em_tables[] entry
 * counts and entry sizes are populated in tbl_scope_cb.
 */
498 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
499 struct tf_alloc_tbl_scope_parms *parms)
503 if (parms->rx_mem_size_in_mb != 0) {
504 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
505 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
507 uint32_t num_entries = (parms->rx_mem_size_in_mb *
508 TF_MEGABYTE) / (key_b + action_b);
510 if (num_entries < TF_EM_MIN_ENTRIES) {
511 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
513 parms->rx_mem_size_in_mb);
517 cnt = TF_EM_MIN_ENTRIES;
518 while (num_entries > cnt &&
519 cnt <= TF_EM_MAX_ENTRIES)
522 if (cnt > TF_EM_MAX_ENTRIES) {
/* Fixed copy/paste from the Tx branch: this is the Rx path, and the
 * quantity being validated is the derived num_entries (the Tx flow
 * count is unrelated here).
 */
523 TFP_DRV_LOG(ERR, "EEM: Invalid number of Rx requested: "
525 num_entries);
529 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
531 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
533 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
534 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
536 "EEM: Invalid number of Rx flows "
537 "requested:%u max:%u\n",
538 parms->rx_num_flows_in_k * TF_KILOBYTE,
539 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
543 /* must be a power-of-2 supported value
544 * in the range 32K - 128M
546 cnt = TF_EM_MIN_ENTRIES;
547 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
548 cnt <= TF_EM_MAX_ENTRIES)
551 if (cnt > TF_EM_MAX_ENTRIES) {
553 "EEM: Invalid number of Rx requested: %u\n",
554 (parms->rx_num_flows_in_k * TF_KILOBYTE));
559 if (parms->tx_mem_size_in_mb != 0) {
560 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
561 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
/* Use TF_MEGABYTE for consistency with the Rx branch above
 * (identical value, one spelling).
 */
563 uint32_t num_entries = (parms->tx_mem_size_in_mb *
564 TF_MEGABYTE) /
567 if (num_entries < TF_EM_MIN_ENTRIES) {
569 "EEM: Insufficient memory requested:%uMB\n",
/* Fixed copy/paste: this is the Tx path, so report the Tx size. */
570 parms->tx_mem_size_in_mb);
574 cnt = TF_EM_MIN_ENTRIES;
575 while (num_entries > cnt &&
576 cnt <= TF_EM_MAX_ENTRIES)
579 if (cnt > TF_EM_MAX_ENTRIES) {
581 "EEM: Invalid number of Tx requested: %u\n",
582 (parms->tx_num_flows_in_k * TF_KILOBYTE));
586 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
588 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
590 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
591 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
593 "EEM: Invalid number of Tx flows "
594 "requested:%u max:%u\n",
595 (parms->tx_num_flows_in_k * TF_KILOBYTE),
596 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
600 cnt = TF_EM_MIN_ENTRIES;
601 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
602 cnt <= TF_EM_MAX_ENTRIES)
605 if (cnt > TF_EM_MAX_ENTRIES) {
607 "EEM: Invalid number of Tx requested: %u\n",
608 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count with a zero-byte key is unusable. */
613 if (parms->rx_num_flows_in_k != 0 &&
614 parms->rx_max_key_sz_in_bits / 8 == 0) {
616 "EEM: Rx key size required: %u\n",
617 (parms->rx_max_key_sz_in_bits));
621 if (parms->tx_num_flows_in_k != 0 &&
622 parms->tx_max_key_sz_in_bits / 8 == 0) {
624 "EEM: Tx key size required: %u\n",
625 (parms->tx_max_key_sz_in_bits));
/* Populate the Rx EM context tables from the validated Rx counts. */
629 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
630 parms->rx_num_flows_in_k * TF_KILOBYTE;
631 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
632 parms->rx_max_key_sz_in_bits / 8;
634 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
635 parms->rx_num_flows_in_k * TF_KILOBYTE;
636 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
637 parms->rx_max_key_sz_in_bits / 8;
639 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
640 parms->rx_num_flows_in_k * TF_KILOBYTE;
641 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
642 parms->rx_max_action_entry_sz_in_bits / 8;
644 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
647 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].num_entries =
648 parms->rx_num_flows_in_k * TF_KILOBYTE;
649 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_ACTION_TABLE].entry_size =
650 parms->rx_max_action_entry_sz_in_bits / 8;
652 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].num_entries =
653 parms->rx_num_flows_in_k * TF_KILOBYTE;
654 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EM_LKUP_TABLE].entry_size =
655 parms->rx_max_key_sz_in_bits / 8;
/* Populate the Tx EM context tables from the validated Tx counts. */
658 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
659 parms->tx_num_flows_in_k * TF_KILOBYTE;
660 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
661 parms->tx_max_key_sz_in_bits / 8;
663 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
664 parms->tx_num_flows_in_k * TF_KILOBYTE;
665 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
666 parms->tx_max_key_sz_in_bits / 8;
668 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
669 parms->tx_num_flows_in_k * TF_KILOBYTE;
670 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
671 parms->tx_max_action_entry_sz_in_bits / 8;
673 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
/* Fixed copy/paste: Tx tables were sized from rx_num_flows_in_k; every
 * sibling TF_DIR_TX table uses the Tx flow count.
 */
676 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].num_entries =
677 parms->tx_num_flows_in_k * TF_KILOBYTE;
678 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_ACTION_TABLE].entry_size =
679 parms->tx_max_action_entry_sz_in_bits / 8;
681 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].num_entries =
682 parms->tx_num_flows_in_k * TF_KILOBYTE;
683 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EM_LKUP_TABLE].entry_size =
684 parms->tx_max_key_sz_in_bits / 8;
689 /** insert EEM entry API
693 * TF_ERR - unable to get lock
695 * insert callback returns:
697 * TF_ERR_EM_DUP - key is already in table
/* Insert an external EM (EEM) entry: hash the key into two candidate
 * buckets (key0/key1), build the 64B entry, and attempt the hardware
 * ADD op against the KEY0 table first, then KEY1.
 */
700 tf_insert_eem_entry(struct tf_dev_info *dev,
701 struct tf_tbl_scope_cb *tbl_scope_cb,
702 struct tf_insert_em_entry_parms *parms)
709 struct cfa_p4_eem_64b_entry key_entry;
711 enum hcapi_cfa_em_table_type table_type;
713 struct hcapi_cfa_hwop op;
714 struct hcapi_cfa_key_tbl key_tbl;
715 struct hcapi_cfa_key_data key_obj;
716 struct hcapi_cfa_key_loc key_loc;
720 /* Get mask to use on hash */
721 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
/* Device-specific key hash; 64-bit result is split into two 32-bit
 * hashes, one per key table.
 */
726 if (dev->ops->tf_dev_cfa_key_hash == NULL)
729 big_hash = dev->ops->tf_dev_cfa_key_hash((uint64_t *)parms->key,
730 (TF_P4_HW_EM_KEY_MAX_SIZE + 4) * 8);
731 key0_hash = (uint32_t)(big_hash >> 32);
732 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
734 key0_index = key0_hash & mask;
735 key1_index = key1_hash & mask;
738 * Use the "result" arg to populate all of the key entry then
739 * store the byte swapped "raw" entry in a local copy ready
740 * for insertion in to the table.
742 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
743 ((uint8_t *)parms->key),
747 * Try to add to Key0 table, if that does not work then
748 * try the key1 table.
751 op.opcode = HCAPI_CFA_HWOPS_ADD;
753 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
754 key_tbl.page_size = TF_EM_PAGE_SIZE;
755 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
756 key_obj.data = (uint8_t *)&key_entry;
757 key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
759 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
765 table_type = TF_KEY0_TABLE;
/* KEY0 insert failed (e.g. bucket occupied) — retry against KEY1. */
770 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
771 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
773 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
780 table_type = TF_KEY1_TABLE;
/* Encode which table/index holds the entry into the caller's flow id
 * and flow handle for later delete.
 */
786 TF_SET_FLOW_ID(parms->flow_id,
788 TF_GFID_TABLE_EXTERNAL,
790 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
801 /** delete EEM hash entry API
805 * -EINVAL - parameter error
806 * TF_NO_SESSION - bad session ID
807 * TF_ERR_TBL_SCOPE - invalid table scope
808 * TF_ERR_TBL_IF - invalid table interface
810 * insert callback returns
812 * TF_NO_EM_MATCH - entry not found
/* Delete an EEM entry previously inserted by tf_insert_eem_entry. The
 * key table (KEY0 vs KEY1) and the record index are recovered from the
 * flow handle, then a hardware DEL op is issued at that offset.
 */
815 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
816 struct tf_delete_em_entry_parms *parms)
818 enum hcapi_cfa_em_table_type hash_type;
820 struct hcapi_cfa_hwop op;
821 struct hcapi_cfa_key_tbl key_tbl;
822 struct hcapi_cfa_key_data key_obj;
823 struct hcapi_cfa_key_loc key_loc;
826 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
827 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
829 op.opcode = HCAPI_CFA_HWOPS_DEL;
/* hash_type 0 selects KEY0, anything else selects KEY1. */
831 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
832 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
833 key_tbl.page_size = TF_EM_PAGE_SIZE;
834 key_obj.offset = index * TF_P4_EM_KEY_RECORD_SIZE;
836 key_obj.size = TF_P4_EM_KEY_RECORD_SIZE;
838 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
849 /** insert EM hash entry API
/* Public wrapper: resolve the table scope, session and device, then
 * delegate to tf_insert_eem_entry.
 */
856 tf_em_insert_ext_entry(struct tf *tfp,
857 struct tf_insert_em_entry_parms *parms)
860 struct tf_tbl_scope_cb *tbl_scope_cb;
861 struct tf_session *tfs;
862 struct tf_dev_info *dev;
864 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
865 if (tbl_scope_cb == NULL) {
866 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
870 /* Retrieve the session information */
871 rc = tf_session_get_session_internal(tfp, &tfs);
875 /* Retrieve the device information */
876 rc = tf_session_get_device(tfs, &dev);
880 return tf_insert_eem_entry
886 /** Delete EM hash entry API
/* Public wrapper: resolve the table scope by id and delegate to
 * tf_delete_eem_entry.
 */
893 tf_em_delete_ext_entry(struct tf *tfp,
894 struct tf_delete_em_entry_parms *parms)
896 struct tf_tbl_scope_cb *tbl_scope_cb;
898 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
899 if (tbl_scope_cb == NULL) {
900 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
904 return tf_delete_eem_entry(tbl_scope_cb, parms);
/* Bind the EM-external module to a session: allocate the em_ext_db,
 * init its table-scope list, create per-direction RM DBs when the
 * resource request includes table scopes, and cache the memory type.
 */
909 tf_em_ext_common_bind(struct tf *tfp,
910 struct tf_em_cfg_parms *parms)
914 struct tf_rm_create_db_parms db_cfg = { 0 };
915 struct em_ext_db *ext_db;
916 struct tfp_calloc_parms cparms;
918 TF_CHECK_PARMS2(tfp, parms);
921 cparms.size = sizeof(struct em_ext_db);
922 cparms.alignment = 0;
923 if (tfp_calloc(&cparms) != 0) {
924 TFP_DRV_LOG(ERR, "em_ext_db alloc error %s\n",
/* Fresh DB: empty table-scope list, no per-direction EEM RM DBs yet. */
929 ext_db = cparms.mem_va;
930 ll_init(&ext_db->tbl_scope_ll);
931 for (i = 0; i < TF_DIR_MAX; i++)
932 ext_db->eem_db[i] = NULL;
933 tf_session_set_em_ext_db(tfp, ext_db);
935 db_cfg.module = TF_MODULE_TYPE_EM;
936 db_cfg.num_elements = parms->num_elements;
937 db_cfg.cfg = parms->cfg;
939 for (i = 0; i < TF_DIR_MAX; i++) {
941 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
943 /* Check if we got any request to support EEM, if so
944 * we build an EM Ext DB holding Table Scopes.
946 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
949 db_cfg.rm_db = (void *)&ext_db->eem_db[i];
950 rc = tf_rm_create_db(tfp, &db_cfg);
953 "%s: EM Ext DB creation failed\n",
/* Remember the configured memory type in the file-scope cache. */
960 mem_type = parms->mem_type;
/* Unbind the EM-external module: free every remaining table scope
 * (preferring the device free op), free each direction's RM DB, and
 * clear the session's em_ext_db pointer.
 */
966 tf_em_ext_common_unbind(struct tf *tfp)
970 struct tf_rm_free_db_parms fparms = { 0 };
971 struct em_ext_db *ext_db = NULL;
972 struct tf_session *tfs = NULL;
973 struct tf_dev_info *dev;
974 struct ll_entry *entry;
975 struct tf_tbl_scope_cb *tbl_scope_cb = NULL;
976 void *ext_ptr = NULL;
977 struct tf_free_tbl_scope_parms tparms = { 0 };
979 TF_CHECK_PARMS1(tfp);
981 rc = tf_session_get_session_internal(tfp, &tfs);
983 TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
988 /* Retrieve the device information */
989 rc = tf_session_get_device(tfs, &dev);
992 "Failed to lookup device, rc:%s\n",
997 rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
1000 "Failed to get em_ext_db from session, rc:%s\n",
1004 ext_db = (struct em_ext_db *)ext_ptr;
1006 if (ext_db != NULL) {
/* Advance the cursor before freeing: the free ops unlink/release the
 * current node.
 */
1007 entry = ext_db->tbl_scope_ll.head;
1008 while (entry != NULL) {
1009 tbl_scope_cb = (struct tf_tbl_scope_cb *)entry;
1010 entry = entry->next;
1011 tparms.tbl_scope_id =
1012 tbl_scope_cb->tbl_scope_id;
1014 if (dev->ops->tf_dev_free_tbl_scope) {
1015 dev->ops->tf_dev_free_tbl_scope(tfp,
1018 /* should not reach here */
1019 ll_delete(&ext_db->tbl_scope_ll,
1020 &tbl_scope_cb->ll_entry);
1021 tfp_free(tbl_scope_cb);
1025 for (i = 0; i < TF_DIR_MAX; i++) {
1026 if (ext_db->eem_db[i] == NULL)
1030 fparms.rm_db = ext_db->eem_db[i];
1031 rc = tf_rm_free_db(tfp, &fparms);
1035 ext_db->eem_db[i] = NULL;
1041 tf_session_set_em_ext_db(tfp, NULL);
1047 * Sets the specified external table type element.
1049 * This API sets the specified element data
1052 * Pointer to TF handle
1055 * Pointer to table set parameters
1058 * - (0) if successful.
1059 * - (-EINVAL) on failure.
1061 int tf_tbl_ext_common_set(struct tf *tfp,
1062 struct tf_tbl_set_parms *parms)
1065 struct tf_tbl_scope_cb *tbl_scope_cb;
1066 uint32_t tbl_scope_id;
1067 struct hcapi_cfa_hwop op;
1068 struct hcapi_cfa_key_tbl key_tbl;
1069 struct hcapi_cfa_key_data key_obj;
1070 struct hcapi_cfa_key_loc key_loc;
1072 TF_CHECK_PARMS2(tfp, parms);
1074 if (parms->data == NULL) {
1076 "%s, invalid parms->data\n",
1077 tf_dir_2_str(parms->dir));
1081 tbl_scope_id = parms->tbl_scope_id;
1083 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1085 "%s, Table scope not allocated\n",
1086 tf_dir_2_str(parms->dir));
1090 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, tbl_scope_id);
1091 if (tbl_scope_cb == NULL) {
1093 "%s, table scope error\n",
1094 tf_dir_2_str(parms->dir));
/* Write caller data into the external RECORD (action) table via a
 * hardware PUT op at byte offset parms->idx.
 */
1098 op.opcode = HCAPI_CFA_HWOPS_PUT;
1100 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1101 key_tbl.page_size = TF_EM_PAGE_SIZE;
1102 key_obj.offset = parms->idx;
1103 key_obj.data = parms->data;
1104 key_obj.size = parms->data_sz_in_bytes;
1106 rc = cfa_p4_devops.hcapi_cfa_key_hw_op(&op,
/* Thin dispatch wrapper: table-scope allocation is implemented in
 * tf_em_ext_alloc.
 */
1115 tf_em_ext_common_alloc(struct tf *tfp,
1116 struct tf_alloc_tbl_scope_parms *parms)
1118 return tf_em_ext_alloc(tfp, parms);
/* Thin dispatch wrapper: table-scope free is implemented in
 * tf_em_ext_free.
 */
1122 tf_em_ext_common_free(struct tf *tfp,
1123 struct tf_free_tbl_scope_parms *parms)
1125 return tf_em_ext_free(tfp, parms);
/* Map a table scope to a PARIF bitmask: build data/mask words via the
 * device op, then push them as a global config message to firmware.
 * (Definition continues past the end of this listing.)
 */
1128 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1129 struct tf_map_tbl_scope_parms *parms)
1132 struct tf_session *tfs;
1133 struct tf_tbl_scope_cb *tbl_scope_cb;
1134 struct tf_global_cfg_parms gcfg_parms = { 0 };
1135 struct tfp_calloc_parms aparms;
1136 uint32_t *data, *mask;
1137 uint32_t sz_in_bytes = 8;
1138 struct tf_dev_info *dev;
1140 /* Retrieve the session information */
1141 rc = tf_session_get_session_internal(tfp, &tfs);
1145 /* Retrieve the device information */
1146 rc = tf_session_get_device(tfs, &dev);
1150 tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
1151 if (tbl_scope_cb == NULL) {
1152 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1153 parms->tbl_scope_id);
1157 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1160 "Map table scope operation not supported, rc:%s\n",
/* Two scratch buffers share aparms sizing: one for the config data
 * word, one for the mask word.
 */
1166 aparms.size = sizeof(uint32_t);
1167 aparms.alignment = 0;
1169 if (tfp_calloc(&aparms) != 0) {
1170 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1174 data = aparms.mem_va;
1176 if (tfp_calloc(&aparms) != 0) {
1177 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1182 mask = aparms.mem_va;
1184 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1186 (uint8_t *)data, (uint8_t *)mask,
1191 "Map table scope config failure, rc:%s\n",
1196 /* Note that TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF is same as below enum */
1197 gcfg_parms.type = TF_GLOBAL_CFG_TYPE_MAX;
1198 gcfg_parms.offset = 0;
1199 gcfg_parms.config = (uint8_t *)data;
1200 gcfg_parms.config_mask = (uint8_t *)mask;
1201 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1204 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1207 "Map tbl scope, set failed, rc:%s\n",