1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "cfa_resource_types.h"
26 /* Number of pointers per page_size */
27 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
/* Per-direction EM Ext (EEM) RM database handles; set up on bind,
 * torn down on unbind.
 */
32 void *eem_db[TF_DIR_MAX];
/* NOTE(review): the declaration this comment fragment documents is
 * not visible in this chunk — confirm against the full file.
 */
35 * Init flag, set on bind and cleared on unbind
/* Memory backing type recorded at bind time (see tf_em_ext_common_bind). */
42 static enum tf_mem_type mem_type;
44 /** Table scope array */
45 struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
47 /** Table scope reversal table
49 * Table scope are allocated from 15 to 0 within HCAPI RM. Because of the
50 * association between PFs and legacy table scopes, reverse table scope ids.
51 * 15 indicates 0, 14 indicates 1, etc... The application will only see the 0
52 * based number. The firmware will only use the 0 based number. Only HCAPI RM
53 * and Truflow RM believe the number is 15. When HCAPI RM support allocation
54 * from low to high is supported, this adjust function can be removed.
/* Symmetric mapping: reverse[reverse[i]] == i, so one table serves
 * both user->RM and RM->user conversion.
 */
56 const uint32_t tbl_scope_reverse[TF_NUM_TBL_SCOPE] = {
57 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
60 tf_tbl_scope_adjust(uint32_t tbl_scope_id)
62 if (tbl_scope_id < TF_NUM_TBL_SCOPE)
63 return tbl_scope_reverse[tbl_scope_id];
65 return TF_TBL_SCOPE_INVALID;
69 /* API defined in tf_em.h */
/* Find the table scope control block for a user-visible table scope
 * id.  The id is first converted to the RM numbering and validated
 * against the RM database, then the tbl_scopes[] array is scanned for
 * the matching user-visible id.  Returns NULL on any failure.
 *
 * NOTE(review): this chunk is missing lines here — declarations of
 * 'i'/'allocated', the early-return bodies, the final NULL return and
 * the closing brace.  Confirm against the full file.
 */
70 struct tf_tbl_scope_cb *
71 tbl_scope_cb_find(uint32_t tbl_scope_id)
74 struct tf_rm_is_allocated_parms parms = { 0 };
76 uint32_t rm_tbl_scope_id;
/* User id -> RM id (15 - id style reversal). */
78 rm_tbl_scope_id = tf_tbl_scope_adjust(tbl_scope_id);
80 if (rm_tbl_scope_id == TF_TBL_SCOPE_INVALID)
83 /* Check that id is valid */
84 parms.rm_db = eem_db[TF_DIR_RX];
85 parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
86 parms.index = rm_tbl_scope_id;
87 parms.allocated = &allocated;
89 i = tf_rm_is_allocated(&parms);
91 if (i < 0 || allocated != TF_RM_ALLOCATED_ENTRY_IN_USE)
/* Linear scan; TF_NUM_TBL_SCOPE is small (16). */
94 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
95 if (tbl_scopes[i].tbl_scope_id == tbl_scope_id)
96 return &tbl_scopes[i];
/* Allocate a table scope id from the session RM pool (RX db holds the
 * scopes) and return the user-visible 0-based id via *tbl_scope_id.
 * Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): missing from this chunk — declaration of 'rc', the
 * error-path returns after each log, and the closing brace.  Confirm
 * against the full file.
 */
102 int tf_tbl_scope_alloc(uint32_t *tbl_scope_id)
105 struct tf_rm_allocate_parms parms = { 0 };
106 uint32_t rm_tbl_scope_id;
107 uint32_t usr_tbl_scope_id = TF_TBL_SCOPE_INVALID;
109 /* Get Table Scope control block from the session pool */
110 parms.rm_db = eem_db[TF_DIR_RX];
111 parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
112 parms.index = &rm_tbl_scope_id;
114 rc = tf_rm_allocate(&parms);
117 "Failed to allocate table scope rc:%s\n",
/* RM hands out ids high-to-low; convert to the 0-based user id. */
122 usr_tbl_scope_id = tf_tbl_scope_adjust(rm_tbl_scope_id);
124 if (usr_tbl_scope_id == TF_TBL_SCOPE_INVALID) {
126 "Invalid table scope allocated id:%d\n",
127 (int)rm_tbl_scope_id);
130 *tbl_scope_id = usr_tbl_scope_id;
/* Return a user-visible table scope id to the session RM pool.  The
 * id is converted to the RM numbering before the free.
 *
 * NOTE(review): missing from this chunk — declaration of 'rc', the
 * error return inside the invalid-id branch, the final return and the
 * closing brace.  Confirm against the full file.
 */
134 int tf_tbl_scope_free(uint32_t tbl_scope_id)
136 struct tf_rm_free_parms parms = { 0 };
137 uint32_t rm_tbl_scope_id;
/* User id -> RM id before talking to the RM database. */
140 rm_tbl_scope_id = tf_tbl_scope_adjust(tbl_scope_id);
142 if (rm_tbl_scope_id == TF_TBL_SCOPE_INVALID) {
144 "Invalid table scope allocated id:%d\n",
149 parms.rm_db = eem_db[TF_DIR_RX];
150 parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
151 parms.index = rm_tbl_scope_id;
153 rc = tf_rm_free(&parms);
/* Create the external action-record pool for one direction of a table
 * scope: allocate an index array, initialize a stack over it, and push
 * entry byte-offsets in reverse so stack_pop() hands them out starting
 * from offset 0.
 *
 * [in] dir - receive or transmit direction
 * [in] tbl_scope_cb - table scope owning the pool
 * [in] num_entries - number of pool entries
 * [in] entry_sz_bytes - byte stride between entries
 *
 * NOTE(review): missing from this chunk — the return type line,
 * declarations of 'rc'/'i'/'j', the decrement of 'j' in the fill
 * loop, error-path gotos/returns and the closing brace.  Confirm
 * against the full file.
 */
158 tf_create_tbl_pool_external(enum tf_dir dir,
159 struct tf_tbl_scope_cb *tbl_scope_cb,
160 uint32_t num_entries,
161 uint32_t entry_sz_bytes)
163 struct tfp_calloc_parms parms;
167 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
169 parms.nitems = num_entries;
170 parms.size = sizeof(uint32_t);
173 if (tfp_calloc(&parms) != 0) {
174 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
175 tf_dir_2_str(dir), strerror(ENOMEM));
179 /* Create empty stack
181 rc = stack_init(num_entries, parms.mem_va, pool);
184 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
185 tf_dir_2_str(dir), strerror(-rc));
189 /* Save the malloced memory address so that it can
190 * be freed when the table scope is freed.
192 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
194 /* Fill pool with indexes in reverse
196 j = (num_entries - 1) * entry_sz_bytes;
198 for (i = 0; i < num_entries; i++) {
199 rc = stack_push(pool, j);
201 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
202 tf_dir_2_str(dir), strerror(-rc));
/* NOTE(review): "%d" here vs "%s" + tf_dir_2_str() in the sibling
 * logs — looks like an inconsistent format specifier; verify the
 * argument list in the full file.
 */
207 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* Sanity check: every index must have been pushed. */
214 if (!stack_is_full(pool)) {
216 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
217 tf_dir_2_str(dir), strerror(-rc));
/* Error path: release the index array. */
222 tfp_free((void *)parms.mem_va);
227 * Destroy External Tbl pool of memory indexes.
232 * pointer to the table scope
235 tf_destroy_tbl_pool_external(enum tf_dir dir,
236 struct tf_tbl_scope_cb *tbl_scope_cb)
238 uint32_t *ext_act_pool_mem =
239 tbl_scope_cb->ext_act_pool_mem[dir];
241 tfp_free(ext_act_pool_mem);
245 * Allocate External Tbl entry from the scope pool.
248 * Pointer to Truflow Handle
250 * Allocation parameters
253 * 0 - Success, entry allocated - no search support
254 * -ENOMEM -EINVAL -EOPNOTSUPP
255 * - Failure, entry not allocated, out of resources
258 tf_tbl_ext_alloc(struct tf *tfp,
259 struct tf_tbl_alloc_parms *parms)
263 struct tf_tbl_scope_cb *tbl_scope_cb;
266 TF_CHECK_PARMS2(tfp, parms);
268 /* Get the pool info from the table scope
270 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
272 if (tbl_scope_cb == NULL) {
274 "%s, table scope not allocated\n",
275 tf_dir_2_str(parms->dir));
278 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
280 /* Allocate an element
282 rc = stack_pop(pool, &index);
286 "%s, Allocation failed, type:%d\n",
287 tf_dir_2_str(parms->dir),
297 * Free External Tbl entry to the scope pool.
300 * Pointer to Truflow Handle
302 * Allocation parameters
305 * 0 - Success, entry freed
307 * - Failure, entry not successfully freed for these reasons
/* NOTE(review): missing from this chunk — the return type line,
 * declarations of 'rc'/'index'/'pool', the read of parms->idx into
 * 'index', error returns and the closing brace.  Confirm against the
 * full file.
 */
313 tf_tbl_ext_free(struct tf *tfp,
314 struct tf_tbl_free_parms *parms)
318 struct tf_tbl_scope_cb *tbl_scope_cb;
321 TF_CHECK_PARMS2(tfp, parms);
323 /* Get the pool info from the table scope
325 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
327 if (tbl_scope_cb == NULL) {
329 "%s, table scope error\n",
330 tf_dir_2_str(parms->dir));
333 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Push the offset back; failure implies a double free or a
 * corrupted pool (stack already full).
 */
337 rc = stack_push(pool, index);
341 "%s, consistency error, stack full, type:%d, idx:%d\n",
342 tf_dir_2_str(parms->dir),
350 tf_em_get_key_mask(int num_entries)
352 uint32_t mask = num_entries - 1;
354 if (num_entries & TF_EM_MAX_MASK)
357 if (num_entries > TF_EM_MAX_ENTRY)
364 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
366 struct cfa_p4_eem_64b_entry *key_entry)
368 key_entry->hdr.word1 = result->word1;
369 key_entry->hdr.pointer = result->pointer;
370 memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
375 * Return the number of page table pages needed to
376 * reference the given number of next level pages.
382 * Size of each EM page
385 * Number of EM page table pages
388 tf_em_page_tbl_pgcnt(uint32_t num_pages,
391 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
392 MAX_PAGE_PTRS(page_size);
397 * Given the number of data pages, page_size and the maximum
398 * number of page table levels (already determined), size
399 * the number of page table pages required at each level.
402 * Max number of levels
404 * [in] num_data_pages
405 * Number of EM data pages
414 tf_em_size_page_tbls(int max_lvl,
415 uint64_t num_data_pages,
419 if (max_lvl == TF_PT_LVL_0) {
420 page_cnt[TF_PT_LVL_0] = num_data_pages;
421 } else if (max_lvl == TF_PT_LVL_1) {
422 page_cnt[TF_PT_LVL_1] = num_data_pages;
423 page_cnt[TF_PT_LVL_0] =
424 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
425 } else if (max_lvl == TF_PT_LVL_2) {
426 page_cnt[TF_PT_LVL_2] = num_data_pages;
427 page_cnt[TF_PT_LVL_1] =
428 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
429 page_cnt[TF_PT_LVL_0] =
430 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
437 * Given the page size, size of each data item (entry size),
438 * and the total number of entries needed, determine the number
439 * of page table levels and the number of data pages required.
448 * Number of entries needed
450 * [out] num_data_pages
451 * Number of pages required
454 * Success - Number of EM page levels required
455 * -ENOMEM - Out of memory
458 tf_em_size_page_tbl_lvl(uint32_t page_size,
460 uint32_t num_entries,
461 uint64_t *num_data_pages)
463 uint64_t lvl_data_size = page_size;
464 int lvl = TF_PT_LVL_0;
468 data_size = (uint64_t)num_entries * entry_size;
470 while (lvl_data_size < data_size) {
473 if (lvl == TF_PT_LVL_1)
474 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
476 else if (lvl == TF_PT_LVL_2)
477 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
478 MAX_PAGE_PTRS(page_size) * page_size;
483 *num_data_pages = roundup(data_size, page_size) / page_size;
489 * Size the EM table based on capabilities
496 * - EINVAL - Parameter error
497 * - ENOMEM - Out of memory
/* NOTE(review): missing from this chunk — the return type line,
 * declarations of 'max_lvl'/'page_size'/'page_cnt', several early
 * returns, the cnt doubling inside the while loop, the else branch
 * and the closing brace.  Confirm against the full file.
 */
500 tf_em_size_table(struct hcapi_cfa_em_table *tbl,
503 uint64_t num_data_pages;
506 uint32_t num_entries;
507 uint32_t cnt = TF_EM_MIN_ENTRIES;
509 /* Ignore entry if both size and number are zero */
510 if (!tbl->entry_size && !tbl->num_entries)
513 /* If only one is set then error */
514 if (!tbl->entry_size || !tbl->num_entries)
517 /* Determine number of page table levels and the number
518 * of data pages needed to process the given eem table.
520 if (tbl->type == TF_RECORD_TABLE) {
522 * For action records just a memory size is provided. Work
523 * backwards to resolve to number of entries
525 num_entries = tbl->num_entries / tbl->entry_size;
/* Round up to the next supported power-of-2 entry count. */
526 if (num_entries < TF_EM_MIN_ENTRIES) {
527 num_entries = TF_EM_MIN_ENTRIES;
529 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
534 num_entries = tbl->num_entries;
537 max_lvl = tf_em_size_page_tbl_lvl(page_size,
542 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
544 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
545 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* Record sizing results: levels are 0-based, hence +1. */
550 tbl->num_lvl = max_lvl + 1;
551 tbl->num_data_pages = num_data_pages;
553 /* Determine the number of pages needed at each level */
554 page_cnt = tbl->page_cnt;
555 memset(page_cnt, 0, sizeof(tbl->page_cnt));
556 tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
559 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
561 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
562 " l0: %u l1: %u l2: %u\n",
564 (uint64_t)num_data_pages * page_size,
566 page_cnt[TF_PT_LVL_0],
567 page_cnt[TF_PT_LVL_1],
568 page_cnt[TF_PT_LVL_2]);
574 * Validates EM number of entries requested
577 * Pointer to table scope control block to be populated
580 * Pointer to input parameters
584 * -EINVAL - Parameter error
/* Validates and normalizes the requested RX/TX EEM sizing.  When a
 * memory size (MB) is given, it is converted to a power-of-2 flow
 * count; the result is range-checked against device capabilities and
 * then written into the per-direction em_ctx_info tables.
 *
 * NOTE(review): missing from this chunk — the return type line,
 * declaration of 'cnt', cnt doubling inside each while loop, error
 * returns, the final return and closing brace.  Confirm against the
 * full file.
 */
587 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
588 struct tf_alloc_tbl_scope_parms *parms)
/* --- RX sizing --- */
592 if (parms->rx_mem_size_in_mb != 0) {
/* Key budget: two key tables (KEY0/KEY1), hence the factor 2. */
593 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
594 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
596 uint32_t num_entries = (parms->rx_mem_size_in_mb *
597 TF_MEGABYTE) / (key_b + action_b);
599 if (num_entries < TF_EM_MIN_ENTRIES) {
600 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
602 parms->rx_mem_size_in_mb);
/* Round the entry count up to a supported power of 2. */
606 cnt = TF_EM_MIN_ENTRIES;
607 while (num_entries > cnt &&
608 cnt <= TF_EM_MAX_ENTRIES)
611 if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): copy-paste bug — this is the RX branch but the
 * message says "Tx" and logs tx_num_flows_in_k; should reference the
 * Rx request.
 */
612 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
614 (parms->tx_num_flows_in_k * TF_KILOBYTE));
618 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
620 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
622 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
623 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
625 "EEM: Invalid number of Rx flows "
626 "requested:%u max:%u\n",
627 parms->rx_num_flows_in_k * TF_KILOBYTE,
628 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
632 /* must be a power-of-2 supported value
633 * in the range 32K - 128M
635 cnt = TF_EM_MIN_ENTRIES;
636 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
637 cnt <= TF_EM_MAX_ENTRIES)
640 if (cnt > TF_EM_MAX_ENTRIES) {
642 "EEM: Invalid number of Rx requested: %u\n",
643 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* --- TX sizing (mirrors the RX path) --- */
648 if (parms->tx_mem_size_in_mb != 0) {
649 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
650 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
652 uint32_t num_entries = (parms->tx_mem_size_in_mb *
653 (TF_KILOBYTE * TF_KILOBYTE)) /
656 if (num_entries < TF_EM_MIN_ENTRIES) {
658 "EEM: Insufficient memory requested:%uMB\n",
/* NOTE(review): copy-paste bug — TX branch logging the RX memory
 * size; should be parms->tx_mem_size_in_mb.
 */
659 parms->rx_mem_size_in_mb);
663 cnt = TF_EM_MIN_ENTRIES;
664 while (num_entries > cnt &&
665 cnt <= TF_EM_MAX_ENTRIES)
668 if (cnt > TF_EM_MAX_ENTRIES) {
670 "EEM: Invalid number of Tx requested: %u\n",
671 (parms->tx_num_flows_in_k * TF_KILOBYTE));
675 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
677 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
679 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
680 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
682 "EEM: Invalid number of Tx flows "
683 "requested:%u max:%u\n",
684 (parms->tx_num_flows_in_k * TF_KILOBYTE),
685 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
689 cnt = TF_EM_MIN_ENTRIES;
690 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
691 cnt <= TF_EM_MAX_ENTRIES)
694 if (cnt > TF_EM_MAX_ENTRIES) {
696 "EEM: Invalid number of Tx requested: %u\n",
697 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count with a zero-byte key is unusable. */
702 if (parms->rx_num_flows_in_k != 0 &&
703 parms->rx_max_key_sz_in_bits / 8 == 0) {
705 "EEM: Rx key size required: %u\n",
706 (parms->rx_max_key_sz_in_bits));
710 if (parms->tx_num_flows_in_k != 0 &&
711 parms->tx_max_key_sz_in_bits / 8 == 0) {
713 "EEM: Tx key size required: %u\n",
714 (parms->tx_max_key_sz_in_bits));
/* Populate the per-direction EM context tables with the validated
 * sizing (KEY0/KEY1 mirror each other; EFC unused, so 0 entries).
 */
718 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
719 parms->rx_num_flows_in_k * TF_KILOBYTE;
720 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
721 parms->rx_max_key_sz_in_bits / 8;
723 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
724 parms->rx_num_flows_in_k * TF_KILOBYTE;
725 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
726 parms->rx_max_key_sz_in_bits / 8;
728 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
729 parms->rx_num_flows_in_k * TF_KILOBYTE;
730 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
731 parms->rx_max_action_entry_sz_in_bits / 8;
733 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
736 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
737 parms->tx_num_flows_in_k * TF_KILOBYTE;
738 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
739 parms->tx_max_key_sz_in_bits / 8;
741 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
742 parms->tx_num_flows_in_k * TF_KILOBYTE;
743 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
744 parms->tx_max_key_sz_in_bits / 8;
746 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
747 parms->tx_num_flows_in_k * TF_KILOBYTE;
748 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
749 parms->tx_max_action_entry_sz_in_bits / 8;
751 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
756 /** insert EEM entry API
760 * TF_ERR - unable to get lock
762 * insert callback returns:
764 * TF_ERR_EM_DUP - key is already in table
/* Hashes the key into KEY0/KEY1 indexes, builds the 64B key entry and
 * tries KEY0 first, falling back to KEY1, then encodes flow id and
 * flow handle for the caller.
 *
 * NOTE(review): missing from this chunk — the return type line, many
 * declarations ('rc', hashes, 'index', 'gfid', ...), several returns
 * and closing braces.  Confirm against the full file.
 */
767 tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
768 struct tf_insert_em_entry_parms *parms)
775 struct cfa_p4_eem_64b_entry key_entry;
777 enum hcapi_cfa_em_table_type table_type;
779 struct hcapi_cfa_hwop op;
780 struct hcapi_cfa_key_tbl key_tbl;
781 struct hcapi_cfa_key_data key_obj;
782 struct hcapi_cfa_key_loc key_loc;
/* Deferred system-memory mapping: first insert triggers the mmap. */
786 #if (TF_EM_SYSMEM_DELAY_EXPORT == 1)
787 if (!tbl_scope_cb->valid) {
788 rc = offload_system_mmap(tbl_scope_cb);
791 struct tf_rm_free_parms fparms = { 0 };
792 uint32_t rm_tbl_scope_id;
795 "System alloc mmap failed\n");
798 tf_tbl_scope_adjust(parms->tbl_scope_id);
800 if (rm_tbl_scope_id == TF_TBL_SCOPE_INVALID)
803 /* Free Table control block */
804 fparms.rm_db = eem_db[TF_DIR_RX];
805 fparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
/* NOTE(review): this frees the USER id, while the error path in
 * tf_tbl_ext_common_set frees the RM-adjusted rm_tbl_scope_id —
 * one of the two looks wrong; verify which id the RM db expects.
 */
806 fparms.index = parms->tbl_scope_id;
811 tbl_scope_cb->valid = true;
814 /* Get mask to use on hash */
815 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
821 dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
/* One 64-bit hash yields both table indexes: high word for KEY0,
 * low word for KEY1.
 */
824 big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
825 (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
826 key0_hash = (uint32_t)(big_hash >> 32);
827 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
829 key0_index = key0_hash & mask;
830 key1_index = key1_hash & mask;
833 TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
834 TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
837 * Use the "result" arg to populate all of the key entry then
838 * store the byte swapped "raw" entry in a local copy ready
839 * for insertion in to the table.
841 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
842 ((uint8_t *)parms->key),
846 * Try to add to Key0 table, if that does not work then
847 * try the key1 table.
850 op.opcode = HCAPI_CFA_HWOPS_ADD;
852 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
853 key_tbl.page_size = TF_EM_PAGE_SIZE;
854 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
855 key_obj.data = (uint8_t *)&key_entry;
856 key_obj.size = TF_EM_KEY_RECORD_SIZE;
858 rc = hcapi_cfa_key_hw_op(&op,
864 table_type = TF_KEY0_TABLE;
/* KEY0 add failed (likely collision) — retry in KEY1. */
869 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
870 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
872 rc = hcapi_cfa_key_hw_op(&op,
879 table_type = TF_KEY1_TABLE;
/* Encode external GFID and table into the caller's flow id/handle. */
885 TF_SET_FLOW_ID(parms->flow_id,
887 TF_GFID_TABLE_EXTERNAL,
889 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
900 /** delete EEM hash entry API
904 * -EINVAL - parameter error
905 * TF_NO_SESSION - bad session ID
906 * TF_ERR_TBL_SCOPE - invalid table scope
907 * TF_ERR_TBL_IF - invalid table interface
909 * insert callback returns
911 * TF_NO_EM_MATCH - entry not found
/* Decodes hash table (KEY0/KEY1) and index from the flow handle and
 * issues a hardware DEL op against that entry.
 *
 * NOTE(review): missing from this chunk — the return type line,
 * declarations of 'rc'/'index', key_obj.data assignment, the final
 * return and closing brace.  Confirm against the full file.
 */
914 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
915 struct tf_delete_em_entry_parms *parms)
917 enum hcapi_cfa_em_table_type hash_type;
919 struct hcapi_cfa_hwop op;
920 struct hcapi_cfa_key_tbl key_tbl;
921 struct hcapi_cfa_key_data key_obj;
922 struct hcapi_cfa_key_loc key_loc;
/* Flow handle encodes which key table the entry landed in. */
925 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
926 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
928 op.opcode = HCAPI_CFA_HWOPS_DEL;
930 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
931 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
932 key_tbl.page_size = TF_EM_PAGE_SIZE;
933 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
935 key_obj.size = TF_EM_KEY_RECORD_SIZE;
937 rc = hcapi_cfa_key_hw_op(&op,
948 /** insert EM hash entry API
955 tf_em_insert_ext_entry(struct tf *tfp __rte_unused,
956 struct tf_insert_em_entry_parms *parms)
958 struct tf_tbl_scope_cb *tbl_scope_cb;
960 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
961 if (tbl_scope_cb == NULL) {
962 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
966 return tf_insert_eem_entry
971 /** Delete EM hash entry API
978 tf_em_delete_ext_entry(struct tf *tfp __rte_unused,
979 struct tf_delete_em_entry_parms *parms)
981 struct tf_tbl_scope_cb *tbl_scope_cb;
983 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
984 if (tbl_scope_cb == NULL) {
985 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
989 return tf_delete_eem_entry(tbl_scope_cb, parms);
/* Bind the EM Ext module: create one RM database per direction when
 * the session requested any table scopes, and record the memory type.
 *
 * NOTE(review): missing from this chunk — the return type line,
 * declarations of 'rc'/'i', the init-flag check body, error returns,
 * use of 'db_exists', the final return and closing brace.  Confirm
 * against the full file.
 */
994 tf_em_ext_common_bind(struct tf *tfp,
995 struct tf_em_cfg_parms *parms)
999 struct tf_rm_create_db_parms db_cfg = { 0 };
1000 uint8_t db_exists = 0;
1002 TF_CHECK_PARMS2(tfp, parms);
1006 "EM Ext DB already initialized\n");
1010 db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
1011 db_cfg.num_elements = parms->num_elements;
1012 db_cfg.cfg = parms->cfg;
1014 for (i = 0; i < TF_DIR_MAX; i++) {
1016 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
1018 /* Check if we got any request to support EEM, if so
1019 * we build an EM Ext DB holding Table Scopes.
1021 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
1024 db_cfg.rm_db = &eem_db[i];
1025 rc = tf_rm_create_db(tfp, &db_cfg);
1028 "%s: EM Ext DB creation failed\n",
/* Remember backing type for later alloc/set paths. */
1039 mem_type = parms->mem_type;
/* Unbind the EM Ext module: free the per-direction RM databases
 * created at bind time.
 *
 * NOTE(review): missing from this chunk — the return type line,
 * declarations of 'rc'/'i', the init-flag check, NULL-db skip inside
 * the loop, clearing of state, the final return and closing brace.
 * Confirm against the full file.
 */
1045 tf_em_ext_common_unbind(struct tf *tfp)
1049 struct tf_rm_free_db_parms fparms = { 0 };
1051 TF_CHECK_PARMS1(tfp);
1053 /* Bail if nothing has been initialized */
1056 "No EM Ext DBs created\n");
1060 for (i = 0; i < TF_DIR_MAX; i++) {
1062 fparms.rm_db = eem_db[i];
1063 rc = tf_rm_free_db(tfp, &fparms);
1076 * Sets the specified external table type element.
1078 * This API sets the specified element data
1081 * Pointer to TF handle
1084 * Pointer to table set parameters
1087 * - (0) if successful.
1088 * - (-EINVAL) on failure.
/* NOTE(review): missing from this chunk — declaration of 'rc',
 * error returns after the log calls, the hw-op argument list tail,
 * the final return and closing brace.  Confirm against the full
 * file.
 */
1090 int tf_tbl_ext_common_set(struct tf *tfp,
1091 struct tf_tbl_set_parms *parms)
1094 struct tf_tbl_scope_cb *tbl_scope_cb;
1095 uint32_t tbl_scope_id;
1096 struct hcapi_cfa_hwop op;
1097 struct hcapi_cfa_key_tbl key_tbl;
1098 struct hcapi_cfa_key_data key_obj;
1099 struct hcapi_cfa_key_loc key_loc;
1101 TF_CHECK_PARMS2(tfp, parms);
1103 if (parms->data == NULL) {
1105 "%s, invalid parms->data\n",
1106 tf_dir_2_str(parms->dir));
1110 tbl_scope_id = parms->tbl_scope_id;
1112 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1114 "%s, Table scope not allocated\n",
1115 tf_dir_2_str(parms->dir));
1119 /* Get the table scope control block associated with the
1122 tbl_scope_cb = tbl_scope_cb_find(tbl_scope_id);
1124 if (tbl_scope_cb == NULL) {
1126 "%s, table scope error\n",
1127 tf_dir_2_str(parms->dir));
/* Deferred system-memory mapping: first set triggers the mmap and,
 * on failure, returns the scope to the RM pool.
 */
1131 #if (TF_EM_SYSMEM_DELAY_EXPORT == 1)
1132 if (!tbl_scope_cb->valid) {
1133 rc = offload_system_mmap(tbl_scope_cb);
1136 struct tf_rm_free_parms fparms = { 0 };
1137 uint32_t rm_tbl_scope_id;
1139 /* TODO: support allocation of table scope from
1140 * min in HCAPI RM. For now call adjust function
1141 * on value obtained from RM.
1144 tf_tbl_scope_adjust(parms->tbl_scope_id);
1146 if (rm_tbl_scope_id == TF_TBL_SCOPE_INVALID)
1150 "System alloc mmap failed\n");
1151 /* Free Table control block */
1152 fparms.rm_db = eem_db[TF_DIR_RX];
1153 fparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
1154 fparms.index = rm_tbl_scope_id;
1155 tf_rm_free(&fparms);
1159 tbl_scope_cb->valid = true;
/* Write the record directly into the RECORD table pages. */
1163 op.opcode = HCAPI_CFA_HWOPS_PUT;
1165 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1166 key_tbl.page_size = TF_EM_PAGE_SIZE;
1167 key_obj.offset = parms->idx;
1168 key_obj.data = parms->data;
1169 key_obj.size = parms->data_sz_in_bytes;
1171 rc = hcapi_cfa_key_hw_op(&op,
/**
 * Allocate an external table scope.
 *
 * Thin dispatch wrapper: forwards to the device-specific
 * tf_em_ext_alloc() implementation.
 *
 * [in] tfp
 *   Pointer to TF handle
 * [in] parms
 *   Pointer to table scope alloc parameters
 *
 * Returns the result of tf_em_ext_alloc().
 */
int
tf_em_ext_common_alloc(struct tf *tfp,
		       struct tf_alloc_tbl_scope_parms *parms)
{
	return tf_em_ext_alloc(tfp, parms);
}
/**
 * Free an external table scope.
 *
 * Thin dispatch wrapper: forwards to the device-specific
 * tf_em_ext_free() implementation.
 *
 * [in] tfp
 *   Pointer to TF handle
 * [in] parms
 *   Pointer to table scope free parameters
 *
 * Returns the result of tf_em_ext_free().
 */
int
tf_em_ext_common_free(struct tf *tfp,
		      struct tf_free_tbl_scope_parms *parms)
{
	return tf_em_ext_free(tfp, parms);
}