endif
ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD), y)
-CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/tf_ulp -I$(SRCDIR)/tf_core
+CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/tf_ulp -I$(SRCDIR)/tf_core -I$(SRCDIR)/hcapi
include $(SRCDIR)/tf_ulp/Makefile
include $(SRCDIR)/tf_core/Makefile
+include $(SRCDIR)/hcapi/Makefile
endif
#
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+/*
+ * Name: cfa_p40_hw.h
+ *
+ * Description: header for SWE based on Truflow
+ *
+ * Date: taken from 12/16/19 17:18:12
+ *
+ * Note: This file was first generated using tflib_decode.py.
+ *
+ * Changes have been made because XML is not yet available for some
+ * additional tables (EEM Record and union table fields).
+ * Changes that are not autogenerated are noted in comments.
+ */
+
+#ifndef _CFA_P40_HW_H_
+#define _CFA_P40_HW_H_
+
+/**
+ * Valid TCAM entry. (for idx 5 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_VALID_BITPOS 166
+#define CFA_P40_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1
+/**
+ * Key type (pass). (for idx 5 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_BITPOS 164
+#define CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_NUM_BITS 2
+/**
+ * Tunnel HDR type. (for idx 5 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_BITPOS 160
+#define CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_NUM_BITS 4
+/**
+ * Number of VLAN tags in tunnel l2 header. (for idx 4 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_BITPOS 158
+#define CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_NUM_BITS 2
+/**
+ * Number of VLAN tags in l2 header. (for idx 4 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_BITPOS 156
+#define CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_NUM_BITS 2
+/**
+ * Tunnel/Inner Source/Dest. MAC Address.
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_MAC1_BITPOS 108
+#define CFA_P40_PROF_L2_CTXT_TCAM_MAC1_NUM_BITS 48
+/**
+ * Tunnel Outer VLAN Tag ID. (for idx 3 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_BITPOS 96
+#define CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_NUM_BITS 12
+/**
+ * Tunnel Inner VLAN Tag ID. (for idx 2 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_BITPOS 84
+#define CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_NUM_BITS 12
+/**
+ * Source Partition. (for idx 2 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_BITPOS 80
+#define CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_NUM_BITS 4
+/**
+ * Source Virtual I/F. (for idx 2 ...)
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_SVIF_BITPOS 72
+#define CFA_P40_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 8
+/**
+ * Tunnel/Inner Source/Dest. MAC Address.
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_MAC0_BITPOS 24
+#define CFA_P40_PROF_L2_CTXT_TCAM_MAC0_NUM_BITS 48
+/**
+ * Outer VLAN Tag ID.
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_OVID_BITPOS 12
+#define CFA_P40_PROF_L2_CTXT_TCAM_OVID_NUM_BITS 12
+/**
+ * Inner VLAN Tag ID.
+ */
+#define CFA_P40_PROF_L2_CTXT_TCAM_IVID_BITPOS 0
+#define CFA_P40_PROF_L2_CTXT_TCAM_IVID_NUM_BITS 12
+
+enum cfa_p40_prof_l2_ctxt_tcam_flds {
+ CFA_P40_PROF_L2_CTXT_TCAM_VALID_FLD = 0,
+ CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_FLD = 1,
+ CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_FLD = 2,
+ CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_FLD = 3,
+ CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_FLD = 4,
+ CFA_P40_PROF_L2_CTXT_TCAM_MAC1_FLD = 5,
+ CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_FLD = 6,
+ CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_FLD = 7,
+ CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_FLD = 8,
+ CFA_P40_PROF_L2_CTXT_TCAM_SVIF_FLD = 9,
+ CFA_P40_PROF_L2_CTXT_TCAM_MAC0_FLD = 10,
+ CFA_P40_PROF_L2_CTXT_TCAM_OVID_FLD = 11,
+ CFA_P40_PROF_L2_CTXT_TCAM_IVID_FLD = 12,
+ CFA_P40_PROF_L2_CTXT_TCAM_MAX_FLD
+};
+
+#define CFA_P40_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 167
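+
+/*
+ * Illustrative sketch, not autogenerated: a *_BITPOS/*_NUM_BITS pair and its
+ * matching *_FLD enum value are typically consumed through a
+ * struct hcapi_cfa_layout field array and written with hcapi_cfa_put_field()
+ * (see the HCAPI code elsewhere in this patch), e.g.
+ *
+ *   fld[CFA_P40_PROF_L2_CTXT_TCAM_SVIF_FLD].bitpos =
+ *       CFA_P40_PROF_L2_CTXT_TCAM_SVIF_BITPOS;
+ *   fld[CFA_P40_PROF_L2_CTXT_TCAM_SVIF_FLD].bitlen =
+ *       CFA_P40_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS;
+ *
+ * The member names bitpos/bitlen follow hcapi_cfa_put_field(); the array
+ * name fld is only an example.
+ */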
+
+/**
+ * Valid entry. (for idx 2 ...)
+ */
+#define CFA_P40_ACT_VEB_TCAM_VALID_BITPOS 79
+#define CFA_P40_ACT_VEB_TCAM_VALID_NUM_BITS 1
+/**
+ * reserved program to 0. (for idx 2 ...)
+ */
+#define CFA_P40_ACT_VEB_TCAM_RESERVED_BITPOS 78
+#define CFA_P40_ACT_VEB_TCAM_RESERVED_NUM_BITS 1
+/**
+ * PF Parif Number. (for idx 2 ...)
+ */
+#define CFA_P40_ACT_VEB_TCAM_PARIF_IN_BITPOS 74
+#define CFA_P40_ACT_VEB_TCAM_PARIF_IN_NUM_BITS 4
+/**
+ * Number of VLAN Tags. (for idx 2 ...)
+ */
+#define CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_BITPOS 72
+#define CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_NUM_BITS 2
+/**
+ * Dest. MAC Address.
+ */
+#define CFA_P40_ACT_VEB_TCAM_MAC_BITPOS 24
+#define CFA_P40_ACT_VEB_TCAM_MAC_NUM_BITS 48
+/**
+ * Outer VLAN Tag ID.
+ */
+#define CFA_P40_ACT_VEB_TCAM_OVID_BITPOS 12
+#define CFA_P40_ACT_VEB_TCAM_OVID_NUM_BITS 12
+/**
+ * Inner VLAN Tag ID.
+ */
+#define CFA_P40_ACT_VEB_TCAM_IVID_BITPOS 0
+#define CFA_P40_ACT_VEB_TCAM_IVID_NUM_BITS 12
+
+enum cfa_p40_act_veb_tcam_flds {
+ CFA_P40_ACT_VEB_TCAM_VALID_FLD = 0,
+ CFA_P40_ACT_VEB_TCAM_RESERVED_FLD = 1,
+ CFA_P40_ACT_VEB_TCAM_PARIF_IN_FLD = 2,
+ CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_FLD = 3,
+ CFA_P40_ACT_VEB_TCAM_MAC_FLD = 4,
+ CFA_P40_ACT_VEB_TCAM_OVID_FLD = 5,
+ CFA_P40_ACT_VEB_TCAM_IVID_FLD = 6,
+ CFA_P40_ACT_VEB_TCAM_MAX_FLD
+};
+
+#define CFA_P40_ACT_VEB_TCAM_TOTAL_NUM_BITS 80
+
+/**
+ * Entry is valid.
+ */
+#define CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_BITPOS 18
+#define CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_NUM_BITS 1
+/**
+ * Action Record Pointer
+ */
+#define CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_BITPOS 2
+#define CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_NUM_BITS 16
+/**
+ * for resolving TCAM/EM conflicts
+ */
+#define CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_BITPOS 0
+#define CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_NUM_BITS 2
+
+enum cfa_p40_lkup_tcam_record_mem_flds {
+ CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_FLD = 0,
+ CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_FLD = 1,
+ CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_FLD = 2,
+ CFA_P40_LKUP_TCAM_RECORD_MEM_MAX_FLD
+};
+
+#define CFA_P40_LKUP_TCAM_RECORD_MEM_TOTAL_NUM_BITS 19
+
+/**
+ * (for idx 1 ...)
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_BITPOS 62
+#define CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_NUM_BITS 2
+enum cfa_p40_prof_ctxt_remap_mem_tpid_anti_spoof_ctl {
+ CFA_P40_PROF_CTXT_REMAP_MEM_TPID_IGNORE = 0x0UL,
+
+ CFA_P40_PROF_CTXT_REMAP_MEM_TPID_DROP = 0x1UL,
+
+ CFA_P40_PROF_CTXT_REMAP_MEM_TPID_DEFAULT = 0x2UL,
+
+ CFA_P40_PROF_CTXT_REMAP_MEM_TPID_SPIF = 0x3UL,
+ CFA_P40_PROF_CTXT_REMAP_MEM_TPID_MAX = 0x3UL
+};
+/**
+ * (for idx 1 ...)
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_BITPOS 60
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_NUM_BITS 2
+enum cfa_p40_prof_ctxt_remap_mem_pri_anti_spoof_ctl {
+ CFA_P40_PROF_CTXT_REMAP_MEM_PRI_IGNORE = 0x0UL,
+
+ CFA_P40_PROF_CTXT_REMAP_MEM_PRI_DROP = 0x1UL,
+
+ CFA_P40_PROF_CTXT_REMAP_MEM_PRI_DEFAULT = 0x2UL,
+
+ CFA_P40_PROF_CTXT_REMAP_MEM_PRI_SPIF = 0x3UL,
+ CFA_P40_PROF_CTXT_REMAP_MEM_PRI_MAX = 0x3UL
+};
+/**
+ * Bypass Source Properties Lookup. (for idx 1 ...)
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_BITPOS 59
+#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_NUM_BITS 1
+/**
+ * SP Record Pointer. (for idx 1 ...)
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_BITPOS 43
+#define CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_NUM_BITS 16
+/**
+ * BD Action pointer passing enable. (for idx 1 ...)
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_BITPOS 42
+#define CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_NUM_BITS 1
+/**
+ * Default VLAN TPID. (for idx 1 ...)
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_BITPOS 39
+#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_NUM_BITS 3
+/**
+ * Allowed VLAN TPIDs. (for idx 1 ...)
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_BITPOS 33
+#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_NUM_BITS 6
+/**
+ * Default VLAN PRI.
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_BITPOS 30
+#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_NUM_BITS 3
+/**
+ * Allowed VLAN PRIs.
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_BITPOS 22
+#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_NUM_BITS 8
+/**
+ * Partition.
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_BITPOS 18
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_NUM_BITS 4
+/**
+ * Bypass Lookup.
+ */
+#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_BITPOS 17
+#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_NUM_BITS 1
+
+/**
+ * L2 Context Remap Data.
+ * Action bypass mode (1): {7'd0, prof_vnic[9:0]}. Note: byp_lkup_en should
+ * also be set.
+ * Action bypass mode (0):
+ *   byp_lkup_en(0) - {prof_func[6:0], l2_context[9:0]}
+ *   byp_lkup_en(1) - {1'b0, act_rec_ptr[15:0]}
+ */
+
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_BITPOS 0
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_NUM_BITS 12
+
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_BITPOS 10
+#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_NUM_BITS 7
+
+#define CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_BITPOS 0
+#define CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_NUM_BITS 10
+
+#define CFA_P40_PROF_CTXT_REMAP_MEM_ARP_BITPOS 0
+#define CFA_P40_PROF_CTXT_REMAP_MEM_ARP_NUM_BITS 16
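+
+/*
+ * Note, not autogenerated: the PROF_VNIC, PROF_FUNC/L2_CTXT and ARP defines
+ * above overlay the same low-order bits of the remap data and are selected
+ * by the modes described in the comment above: prof_vnic uses bits 11:0 in
+ * action bypass mode, prof_func (bits 16:10) over l2_ctxt (bits 9:0) is the
+ * normal layout, and act_rec_ptr (arp, bits 15:0) applies when byp_lkup_en
+ * is set.
+ */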
+
+enum cfa_p40_prof_ctxt_remap_mem_flds {
+ CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_FLD = 0,
+ CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_FLD = 1,
+ CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_FLD = 2,
+ CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_FLD = 3,
+ CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_FLD = 4,
+ CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_FLD = 5,
+ CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_FLD = 6,
+ CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_FLD = 7,
+ CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_FLD = 8,
+ CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_FLD = 9,
+ CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_FLD = 10,
+ CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_FLD = 11,
+ CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_FLD = 12,
+ CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_FLD = 13,
+ CFA_P40_PROF_CTXT_REMAP_MEM_ARP_FLD = 14,
+ CFA_P40_PROF_CTXT_REMAP_MEM_MAX_FLD
+};
+
+#define CFA_P40_PROF_CTXT_REMAP_MEM_TOTAL_NUM_BITS 64
+
+/**
+ * Bypass action pointer look up (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_BITPOS 37
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_NUM_BITS 1
+/**
+ * Exact match search enable (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_BITPOS 36
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_NUM_BITS 1
+/**
+ * Exact match profile
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_BITPOS 28
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_NUM_BITS 8
+/**
+ * Exact match key format
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_BITPOS 23
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_NUM_BITS 5
+/**
+ * Exact match key mask
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_BITPOS 13
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_NUM_BITS 10
+/**
+ * TCAM search enable
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_BITPOS 12
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_NUM_BITS 1
+/**
+ * TCAM profile
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_BITPOS 4
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_NUM_BITS 8
+/**
+ * TCAM key format
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_BITPOS 0
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_NUM_BITS 4
+
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_BITPOS 16
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_NUM_BITS 2
+
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_BITPOS 0
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_NUM_BITS 16
+
+enum cfa_p40_prof_profile_tcam_remap_mem_flds {
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_FLD = 0,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_FLD = 1,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_FLD = 2,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_FLD = 3,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_FLD = 4,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_FLD = 5,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_FLD = 6,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_FLD = 7,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_FLD = 8,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_FLD = 9,
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_MAX_FLD
+};
+
+#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TOTAL_NUM_BITS 38
+
+/**
+ * Valid TCAM entry (for idx 2 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_VALID_BITPOS 80
+#define CFA_P40_PROF_PROFILE_TCAM_VALID_NUM_BITS 1
+/**
+ * Packet type (for idx 2 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_BITPOS 76
+#define CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_NUM_BITS 4
+/**
+ * Pass through CFA (for idx 2 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_BITPOS 74
+#define CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_NUM_BITS 2
+/**
+ * Aggregate error (for idx 2 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_BITPOS 73
+#define CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_NUM_BITS 1
+/**
+ * Profile function (for idx 2 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_BITPOS 66
+#define CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_NUM_BITS 7
+/**
+ * Reserved for future use. Set to 0.
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_RESERVED_BITPOS 57
+#define CFA_P40_PROF_PROFILE_TCAM_RESERVED_NUM_BITS 9
+/**
+ * non-tunnel(0)/tunneled(1) packet (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_BITPOS 56
+#define CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_NUM_BITS 1
+/**
+ * Tunnel L2 tunnel valid (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_BITPOS 55
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_NUM_BITS 1
+/**
+ * Tunnel L2 header type (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_BITPOS 53
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_NUM_BITS 2
+/**
+ * Remapped tunnel L2 dest_type UC(0)/MC(2)/BC(3) (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_BITPOS 51
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_NUM_BITS 2
+/**
+ * Tunnel L2 1+ VLAN tags present (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_BITPOS 50
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_NUM_BITS 1
+/**
+ * Tunnel L2 2 VLAN tags present (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_BITPOS 49
+#define CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_NUM_BITS 1
+/**
+ * Tunnel L3 valid (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_BITPOS 48
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_NUM_BITS 1
+/**
+ * Tunnel L3 error (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_BITPOS 47
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_NUM_BITS 1
+/**
+ * Tunnel L3 header type (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_BITPOS 43
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_NUM_BITS 4
+/**
+ * Tunnel L3 header is IPV4 or IPV6. (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_BITPOS 42
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_NUM_BITS 1
+/**
+ * Tunnel L3 IPV6 src address is compressed (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_BITPOS 41
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_NUM_BITS 1
+/**
+ * Tunnel L3 IPV6 dest address is compressed (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_BITPOS 40
+#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_NUM_BITS 1
+/**
+ * Tunnel L4 valid (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_BITPOS 39
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_NUM_BITS 1
+/**
+ * Tunnel L4 error (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_BITPOS 38
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_NUM_BITS 1
+/**
+ * Tunnel L4 header type (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_BITPOS 34
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_NUM_BITS 4
+/**
+ * Tunnel L4 header is UDP or TCP (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_BITPOS 33
+#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_NUM_BITS 1
+/**
+ * Tunnel valid (for idx 1 ...)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_BITPOS 32
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_NUM_BITS 1
+/**
+ * Tunnel error
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_BITPOS 31
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_NUM_BITS 1
+/**
+ * Tunnel header type
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_BITPOS 27
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_NUM_BITS 4
+/**
+ * Tunnel header flags
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_BITPOS 24
+#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_NUM_BITS 3
+/**
+ * L2 header valid
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_BITPOS 23
+#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_NUM_BITS 1
+/**
+ * L2 header error
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_BITPOS 22
+#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_NUM_BITS 1
+/**
+ * L2 header type
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_BITPOS 20
+#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_NUM_BITS 2
+/**
+ * Remapped L2 dest_type UC(0)/MC(2)/BC(3)
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_BITPOS 18
+#define CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_NUM_BITS 2
+/**
+ * L2 header 1+ VLAN tags present
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_BITPOS 17
+#define CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_NUM_BITS 1
+/**
+ * L2 header 2 VLAN tags present
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_BITPOS 16
+#define CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_NUM_BITS 1
+/**
+ * L3 header valid
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L3_VALID_BITPOS 15
+#define CFA_P40_PROF_PROFILE_TCAM_L3_VALID_NUM_BITS 1
+/**
+ * L3 header error
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_BITPOS 14
+#define CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_NUM_BITS 1
+/**
+ * L3 header type
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_BITPOS 10
+#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_NUM_BITS 4
+/**
+ * L3 header is IPV4 or IPV6.
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_BITPOS 9
+#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_NUM_BITS 1
+/**
+ * L3 header IPV6 src address is compressed
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_BITPOS 8
+#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_NUM_BITS 1
+/**
+ * L3 header IPV6 dest address is compressed
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_BITPOS 7
+#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_NUM_BITS 1
+/**
+ * L4 header valid
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_BITPOS 6
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_NUM_BITS 1
+/**
+ * L4 header error
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_BITPOS 5
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_NUM_BITS 1
+/**
+ * L4 header type
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_BITPOS 1
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_NUM_BITS 4
+/**
+ * L4 header is UDP or TCP
+ */
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_BITPOS 0
+#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_NUM_BITS 1
+
+enum cfa_p40_prof_profile_tcam_flds {
+ CFA_P40_PROF_PROFILE_TCAM_VALID_FLD = 0,
+ CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_FLD = 1,
+ CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_FLD = 2,
+ CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_FLD = 3,
+ CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_FLD = 4,
+ CFA_P40_PROF_PROFILE_TCAM_RESERVED_FLD = 5,
+ CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_FLD = 6,
+ CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_FLD = 7,
+ CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_FLD = 8,
+ CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_FLD = 9,
+ CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_FLD = 10,
+ CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_FLD = 11,
+ CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_FLD = 12,
+ CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_FLD = 13,
+ CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_FLD = 14,
+ CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_FLD = 15,
+ CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_FLD = 16,
+ CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_FLD = 17,
+ CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_FLD = 18,
+ CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_FLD = 19,
+ CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_FLD = 20,
+ CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_FLD = 21,
+ CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_FLD = 22,
+ CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_FLD = 23,
+ CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_FLD = 24,
+ CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_FLD = 25,
+ CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_FLD = 26,
+ CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_FLD = 27,
+ CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_FLD = 28,
+ CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_FLD = 29,
+ CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_FLD = 30,
+ CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_FLD = 31,
+ CFA_P40_PROF_PROFILE_TCAM_L3_VALID_FLD = 32,
+ CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_FLD = 33,
+ CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_FLD = 34,
+ CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_FLD = 35,
+ CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_FLD = 36,
+ CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_FLD = 37,
+ CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_FLD = 38,
+ CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_FLD = 39,
+ CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_FLD = 40,
+ CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_FLD = 41,
+ CFA_P40_PROF_PROFILE_TCAM_MAX_FLD
+};
+
+#define CFA_P40_PROF_PROFILE_TCAM_TOTAL_NUM_BITS 81
+
+/**
+ * CFA flexible key layout definition
+ */
+enum cfa_p40_key_fld_id {
+ CFA_P40_KEY_FLD_ID_MAX
+};
+
+/**************************************************************************/
+/**
+ * Non-autogenerated fields
+ */
+
+/**
+ * Valid
+ */
+#define CFA_P40_EEM_KEY_TBL_VALID_BITPOS 0
+#define CFA_P40_EEM_KEY_TBL_VALID_NUM_BITS 1
+
+/**
+ * L1 Cacheable
+ */
+#define CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_BITPOS 1
+#define CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_NUM_BITS 1
+
+/**
+ * Strength
+ */
+#define CFA_P40_EEM_KEY_TBL_STRENGTH_BITPOS 2
+#define CFA_P40_EEM_KEY_TBL_STRENGTH_NUM_BITS 2
+
+/**
+ * Key Size
+ */
+#define CFA_P40_EEM_KEY_TBL_KEY_SZ_BITPOS 15
+#define CFA_P40_EEM_KEY_TBL_KEY_SZ_NUM_BITS 9
+
+/**
+ * Record Size
+ */
+#define CFA_P40_EEM_KEY_TBL_REC_SZ_BITPOS 24
+#define CFA_P40_EEM_KEY_TBL_REC_SZ_NUM_BITS 5
+
+/**
+ * Action Record Internal
+ */
+#define CFA_P40_EEM_KEY_TBL_ACT_REC_INT_BITPOS 29
+#define CFA_P40_EEM_KEY_TBL_ACT_REC_INT_NUM_BITS 1
+
+/**
+ * External Flow Counter
+ */
+#define CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_BITPOS 30
+#define CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_NUM_BITS 1
+
+/**
+ * Action Record Pointer
+ */
+#define CFA_P40_EEM_KEY_TBL_AR_PTR_BITPOS 31
+#define CFA_P40_EEM_KEY_TBL_AR_PTR_NUM_BITS 33
+
+/**
+ * EEM Key omitted - create using keybuilder
+ * Fields here cannot be larger than a uint64_t
+ */
+
+#define CFA_P40_EEM_KEY_TBL_TOTAL_NUM_BITS 64
+
+enum cfa_p40_eem_key_tbl_flds {
+ CFA_P40_EEM_KEY_TBL_VALID_FLD = 0,
+ CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_FLD = 1,
+ CFA_P40_EEM_KEY_TBL_STRENGTH_FLD = 2,
+ CFA_P40_EEM_KEY_TBL_KEY_SZ_FLD = 3,
+ CFA_P40_EEM_KEY_TBL_REC_SZ_FLD = 4,
+ CFA_P40_EEM_KEY_TBL_ACT_REC_INT_FLD = 5,
+ CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_FLD = 6,
+ CFA_P40_EEM_KEY_TBL_AR_PTR_FLD = 7,
+ CFA_P40_EEM_KEY_TBL_MAX_FLD
+};
+
+/**
+ * Mirror Destination 0 Source Property Record Pointer
+ */
+#define CFA_P40_MIRROR_TBL_SP_PTR_BITPOS 0
+#define CFA_P40_MIRROR_TBL_SP_PTR_NUM_BITS 11
+
+/**
+ * ignore or honor drop
+ */
+#define CFA_P40_MIRROR_TBL_IGN_DROP_BITPOS 13
+#define CFA_P40_MIRROR_TBL_IGN_DROP_NUM_BITS 1
+
+/**
+ * ingress or egress copy
+ */
+#define CFA_P40_MIRROR_TBL_COPY_BITPOS 14
+#define CFA_P40_MIRROR_TBL_COPY_NUM_BITS 1
+
+/**
+ * Mirror Destination enable.
+ */
+#define CFA_P40_MIRROR_TBL_EN_BITPOS 15
+#define CFA_P40_MIRROR_TBL_EN_NUM_BITS 1
+
+/**
+ * Action Record Pointer
+ */
+#define CFA_P40_MIRROR_TBL_AR_PTR_BITPOS 16
+#define CFA_P40_MIRROR_TBL_AR_PTR_NUM_BITS 16
+
+#define CFA_P40_MIRROR_TBL_TOTAL_NUM_BITS 32
+
+enum cfa_p40_mirror_tbl_flds {
+ CFA_P40_MIRROR_TBL_SP_PTR_FLD = 0,
+ CFA_P40_MIRROR_TBL_IGN_DROP_FLD = 1,
+ CFA_P40_MIRROR_TBL_COPY_FLD = 2,
+ CFA_P40_MIRROR_TBL_EN_FLD = 3,
+ CFA_P40_MIRROR_TBL_AR_PTR_FLD = 4,
+ CFA_P40_MIRROR_TBL_MAX_FLD
+};
+
+/**
+ * P45 Specific Updates (SR) - Non-autogenerated
+ */
+/**
+ * Valid TCAM entry.
+ */
+#define CFA_P45_PROF_L2_CTXT_TCAM_VALID_BITPOS 166
+#define CFA_P45_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1
+/**
+ * Source Partition.
+ */
+#define CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_BITPOS 166
+#define CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_NUM_BITS 4
+
+/**
+ * Source Virtual I/F.
+ */
+#define CFA_P45_PROF_L2_CTXT_TCAM_SVIF_BITPOS 72
+#define CFA_P45_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 12
+
+
+/* The SR layout of the l2 ctxt key is different from the Wh+. Switch to
+ * cfa_p45_hw.h definition when available.
+ */
+enum cfa_p45_prof_l2_ctxt_tcam_flds {
+ CFA_P45_PROF_L2_CTXT_TCAM_VALID_FLD = 0,
+ CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_FLD = 1,
+ CFA_P45_PROF_L2_CTXT_TCAM_KEY_TYPE_FLD = 2,
+ CFA_P45_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_FLD = 3,
+ CFA_P45_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_FLD = 4,
+ CFA_P45_PROF_L2_CTXT_TCAM_L2_NUMTAGS_FLD = 5,
+ CFA_P45_PROF_L2_CTXT_TCAM_MAC1_FLD = 6,
+ CFA_P45_PROF_L2_CTXT_TCAM_T_OVID_FLD = 7,
+ CFA_P45_PROF_L2_CTXT_TCAM_T_IVID_FLD = 8,
+ CFA_P45_PROF_L2_CTXT_TCAM_SVIF_FLD = 9,
+ CFA_P45_PROF_L2_CTXT_TCAM_MAC0_FLD = 10,
+ CFA_P45_PROF_L2_CTXT_TCAM_OVID_FLD = 11,
+ CFA_P45_PROF_L2_CTXT_TCAM_IVID_FLD = 12,
+ CFA_P45_PROF_L2_CTXT_TCAM_MAX_FLD
+};
+
+#define CFA_P45_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 171
+
+#endif /* _CFA_P40_HW_H_ */
+++ /dev/null
-/*
- * Copyright(c) 2019-2020 Broadcom Limited.
- * All rights reserved.
- */
-
-#include "bitstring.h"
-#include "hcapi_cfa_defs.h"
-#include <errno.h>
-#include "assert.h"
-
-/* HCAPI CFA common PUT APIs */
-int hcapi_cfa_put_field(uint64_t *data_buf,
- const struct hcapi_cfa_layout *layout,
- uint16_t field_id, uint64_t val)
-{
- assert(layout);
-
- if (field_id > layout->array_sz)
- /* Invalid field_id */
- return -EINVAL;
-
- if (layout->is_msb_order)
- bs_put_msb(data_buf,
- layout->field_array[field_id].bitpos,
- layout->field_array[field_id].bitlen, val);
- else
- bs_put_lsb(data_buf,
- layout->field_array[field_id].bitpos,
- layout->field_array[field_id].bitlen, val);
- return 0;
-}
-
-int hcapi_cfa_put_fields(uint64_t *obj_data,
- const struct hcapi_cfa_layout *layout,
- struct hcapi_cfa_data_obj *field_tbl,
- uint16_t field_tbl_sz)
-{
- int i;
- uint16_t bitpos;
- uint8_t bitlen;
- uint16_t field_id;
-
- assert(layout);
- assert(field_tbl);
-
- if (layout->is_msb_order) {
- for (i = 0; i < field_tbl_sz; i++) {
- field_id = field_tbl[i].field_id;
- if (field_id > layout->array_sz)
- return -EINVAL;
- bitpos = layout->field_array[field_id].bitpos;
- bitlen = layout->field_array[field_id].bitlen;
- bs_put_msb(obj_data, bitpos, bitlen,
- field_tbl[i].val);
- }
- } else {
- for (i = 0; i < field_tbl_sz; i++) {
- field_id = field_tbl[i].field_id;
- if (field_id > layout->array_sz)
- return -EINVAL;
- bitpos = layout->field_array[field_id].bitpos;
- bitlen = layout->field_array[field_id].bitlen;
- bs_put_lsb(obj_data, bitpos, bitlen,
- field_tbl[i].val);
- }
- }
- return 0;
-}
-
-/* HCAPI CFA common GET APIs */
-int hcapi_cfa_get_field(uint64_t *obj_data,
- const struct hcapi_cfa_layout *layout,
- uint16_t field_id,
- uint64_t *val)
-{
- assert(layout);
- assert(val);
-
- if (field_id > layout->array_sz)
- /* Invalid field_id */
- return -EINVAL;
-
- if (layout->is_msb_order)
- *val = bs_get_msb(obj_data,
- layout->field_array[field_id].bitpos,
- layout->field_array[field_id].bitlen);
- else
- *val = bs_get_lsb(obj_data,
- layout->field_array[field_id].bitpos,
- layout->field_array[field_id].bitlen);
- return 0;
-}
* Copyright(c) 2019-2020 Broadcom
* All rights reserved.
*/
-
+#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
HWRM_TFT_REG_SET = 822,
HWRM_TFT_TBL_TYPE_SET = 823,
HWRM_TFT_TBL_TYPE_GET = 824,
- HWRM_TFT_TBL_TYPE_GET_BULK = 825,
- TF_SUBTYPE_LAST = HWRM_TFT_TBL_TYPE_GET_BULK,
+ HWRM_TFT_TBL_TYPE_BULK_GET = 825,
+ TF_SUBTYPE_LAST = HWRM_TFT_TBL_TYPE_BULK_GET,
} tf_subtype_t;
/* Request and Response compile time checking */
struct tf_tbl_type_set_input;
struct tf_tbl_type_get_input;
struct tf_tbl_type_get_output;
-struct tf_tbl_type_get_bulk_input;
-struct tf_tbl_type_get_bulk_output;
+struct tf_tbl_type_bulk_get_input;
+struct tf_tbl_type_bulk_get_output;
/* Input params for session attach */
typedef struct tf_session_attach_input {
/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent */
#define TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX (0x0)
/* When set to 1, indicates the get apply to TX */
#define TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX (0x1)
- /* When set to 1, indicates the clear entry on read */
-#define TF_TBL_TYPE_GET_INPUT_FLAGS_CLEAR_ON_READ (0x2)
/* Type of the object to set */
uint32_t type;
/* Index to get */
} tf_tbl_type_get_output_t, *ptf_tbl_type_get_output_t;
/* Input params for table type get */
-typedef struct tf_tbl_type_get_bulk_input {
+typedef struct tf_tbl_type_bulk_get_input {
/* Session Id */
uint32_t fw_session_id;
/* flags */
uint16_t flags;
/* When set to 0, indicates the get apply to RX */
-#define TF_TBL_TYPE_GET_BULK_INPUT_FLAGS_DIR_RX (0x0)
+#define TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_RX (0x0)
/* When set to 1, indicates the get apply to TX */
-#define TF_TBL_TYPE_GET_BULK_INPUT_FLAGS_DIR_TX (0x1)
+#define TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_TX (0x1)
/* When set to 1, indicates the clear entry on read */
-#define TF_TBL_TYPE_GET_BULK_INPUT_FLAGS_CLEAR_ON_READ (0x2)
+#define TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_CLEAR_ON_READ (0x2)
/* Type of the object to set */
uint32_t type;
/* Starting index to get from */
uint32_t num_entries;
/* Host memory where data will be stored */
uint64_t host_addr;
-} tf_tbl_type_get_bulk_input_t, *ptf_tbl_type_get_bulk_input_t;
+} tf_tbl_type_bulk_get_input_t, *ptf_tbl_type_bulk_get_input_t;
/* Output params for table type get */
-typedef struct tf_tbl_type_get_bulk_output {
+typedef struct tf_tbl_type_bulk_get_output {
/* Size of the total data read in bytes */
uint16_t size;
-} tf_tbl_type_get_bulk_output_t, *ptf_tbl_type_get_bulk_output_t;
+} tf_tbl_type_bulk_get_output_t, *ptf_tbl_type_bulk_get_output_t;
#endif /* _HWRM_TF_H_ */
#include "tf_common.h"
#include "hwrm_tf.h"
-static inline uint32_t SWAP_WORDS32(uint32_t val32)
+static int tf_check_tcam_entry(enum tf_tcam_tbl_type tcam_tbl_type,
+ enum tf_device_type device,
+ uint16_t key_sz_in_bits,
+ uint16_t *num_slice_per_row)
{
- return (((val32 & 0x0000ffff) << 16) |
- ((val32 & 0xffff0000) >> 16));
-}
+ uint16_t key_bytes;
+ uint16_t slice_sz = 0;
+
+#define CFA_P4_WC_TCAM_SLICES_PER_ROW 2
+#define CFA_P4_WC_TCAM_SLICE_SIZE 12
+
+ if (tcam_tbl_type == TF_TCAM_TBL_TYPE_WC_TCAM) {
+ key_bytes = TF_BITS2BYTES_WORD_ALIGN(key_sz_in_bits);
+ if (device == TF_DEVICE_TYPE_WH) {
+ slice_sz = CFA_P4_WC_TCAM_SLICE_SIZE;
+ *num_slice_per_row = CFA_P4_WC_TCAM_SLICES_PER_ROW;
+ } else {
+ TFP_DRV_LOG(ERR,
+ "Unsupported device type %d\n",
+ device);
+ return -ENOTSUP;
+ }
-static void tf_seeds_init(struct tf_session *session)
-{
- int i;
- uint32_t r;
-
- /* Initialize the lfsr */
- rand_init();
-
- /* RX and TX use the same seed values */
- session->lkup_lkup3_init_cfg[TF_DIR_RX] =
- session->lkup_lkup3_init_cfg[TF_DIR_TX] =
- SWAP_WORDS32(rand32());
-
- for (i = 0; i < TF_LKUP_SEED_MEM_SIZE / 2; i++) {
- r = SWAP_WORDS32(rand32());
- session->lkup_em_seed_mem[TF_DIR_RX][i * 2] = r;
- session->lkup_em_seed_mem[TF_DIR_TX][i * 2] = r;
- r = SWAP_WORDS32(rand32());
- session->lkup_em_seed_mem[TF_DIR_RX][i * 2 + 1] = (r & 0x1);
- session->lkup_em_seed_mem[TF_DIR_TX][i * 2 + 1] = (r & 0x1);
+ if (key_bytes > *num_slice_per_row * slice_sz) {
+ TFP_DRV_LOG(ERR,
+ "%s: Key size %d is not supported\n",
+ tf_tcam_tbl_2_str(tcam_tbl_type),
+ key_bytes);
+ return -ENOTSUP;
+ }
+ } else { /* for other type of tcam */
+ *num_slice_per_row = 1;
}
+
+ return 0;
}
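+
+/*
+ * Illustrative example (assuming TF_BITS2BYTES_WORD_ALIGN rounds a bit count
+ * up to a whole, 4-byte-aligned byte count): on Wh+ a 160-bit WC TCAM key
+ * becomes 20 bytes, which fits within the 2 slices * 12 bytes = 24 bytes
+ * available per row, so tf_check_tcam_entry() returns 0; a key needing more
+ * than 24 bytes is rejected with -ENOTSUP.
+ */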
/**
uint8_t fw_session_id;
int dir;
- if (tfp == NULL || parms == NULL)
- return -EINVAL;
+ TF_CHECK_PARMS(tfp, parms);
/* Filter out any non-supported device types on the Core
* side. It is assumed that the Firmware will be supported if
* firmware open session succeeds.
*/
- if (parms->device_type != TF_DEVICE_TYPE_WH)
+ if (parms->device_type != TF_DEVICE_TYPE_WH) {
+ TFP_DRV_LOG(ERR,
+ "Unsupported device type %d\n",
+ parms->device_type);
return -ENOTSUP;
+ }
/* Build the beginning of session_id */
rc = sscanf(parms->ctrl_chan_name,
&slot,
&device);
if (rc != 4) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"Failed to scan device ctrl_chan_name\n");
return -EINVAL;
}
if (rc) {
/* Log error */
if (rc == -EEXIST)
- PMD_DRV_LOG(ERR,
- "Session is already open, rc:%d\n",
- rc);
+ TFP_DRV_LOG(ERR,
+ "Session is already open, rc:%s\n",
+ strerror(-rc));
else
- PMD_DRV_LOG(ERR,
- "Open message send failed, rc:%d\n",
- rc);
+ TFP_DRV_LOG(ERR,
+ "Open message send failed, rc:%s\n",
+ strerror(-rc));
parms->session_id.id = TF_FW_SESSION_ID_INVALID;
return rc;
rc = tfp_calloc(&alloc_parms);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "Failed to allocate session info, rc:%d\n",
- rc);
+ TFP_DRV_LOG(ERR,
+ "Failed to allocate session info, rc:%s\n",
+ strerror(-rc));
goto cleanup;
}
- tfp->session = alloc_parms.mem_va;
+ tfp->session = (struct tf_session_info *)alloc_parms.mem_va;
/* Allocate core data for the session */
alloc_parms.nitems = 1;
rc = tfp_calloc(&alloc_parms);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "Failed to allocate session data, rc:%d\n",
- rc);
+ TFP_DRV_LOG(ERR,
+ "Failed to allocate session data, rc:%s\n",
+ strerror(-rc));
goto cleanup;
}
session->session_id.internal.device = device;
session->session_id.internal.fw_session_id = fw_session_id;
+ /* Query for Session Config */
rc = tf_msg_session_qcfg(tfp);
if (rc) {
- /* Log error */
- PMD_DRV_LOG(ERR,
- "Query config message send failed, rc:%d\n",
- rc);
+ TFP_DRV_LOG(ERR,
+ "Query config message send failed, rc:%s\n",
+ strerror(-rc));
goto cleanup_close;
}
#if (TF_SHADOW == 1)
rc = tf_rm_shadow_db_init(tfs);
if (rc)
- PMD_DRV_LOG(ERR,
- "Shadow DB Initialization failed\n, rc:%d",
- rc);
+ TFP_DRV_LOG(ERR,
+ "Shadow DB Initialization failed, rc:%s\n",
+ strerror(-rc));
/* Add additional processing */
#endif /* TF_SHADOW */
}
/* Adjust the Session with what firmware allowed us to get */
rc = tf_rm_allocate_validate(tfp);
if (rc) {
- /* Log error */
+ TFP_DRV_LOG(ERR,
+ "Rm allocate validate failed, rc:%s\n",
+ strerror(-rc));
goto cleanup_close;
}
- /* Setup hash seeds */
- tf_seeds_init(session);
-
/* Initialize EM pool */
for (dir = 0; dir < TF_DIR_MAX; dir++) {
rc = tf_create_em_pool(session,
/* Return session ID */
parms->session_id = session->session_id;
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"Session created, session_id:%d\n",
parms->session_id.id);
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"domain:%d, bus:%d, device:%d, fw_session_id:%d\n",
parms->session_id.internal.domain,
parms->session_id.internal.bus,
#if (TF_SHARED == 1)
int rc;
- if (tfp == NULL)
- return -EINVAL;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
/* - Open the shared memory for the attach_chan_name
* - Point to the shared session for this Device instance
* than one client of the session.
*/
- if (tfp->session) {
- if (tfp->session->session_id.id != TF_SESSION_ID_INVALID) {
- rc = tf_msg_session_attach(tfp,
- parms->ctrl_chan_name,
- parms->session_id);
- }
+ if (tfp->session->session_id.id != TF_SESSION_ID_INVALID) {
+ rc = tf_msg_session_attach(tfp,
+ parms->ctrl_chan_name,
+ parms->session_id);
}
#endif /* TF_SHARED */
return -1;
union tf_session_id session_id;
int dir;
- if (tfp == NULL || tfp->session == NULL)
- return -EINVAL;
+ TF_CHECK_TFP_SESSION(tfp);
tfs = (struct tf_session *)(tfp->session->core_data);
rc = tf_msg_session_close(tfp);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "Message send failed, rc:%d\n",
- rc);
+ TFP_DRV_LOG(ERR,
+ "Message send failed, rc:%s\n",
+ strerror(-rc));
}
/* Update the ref_count */
tfp->session = NULL;
}
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"Session closed, session_id:%d\n",
session_id.id);
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"domain:%d, bus:%d, device:%d, fw_session_id:%d\n",
session_id.internal.domain,
session_id.internal.bus,
int tf_insert_em_entry(struct tf *tfp,
struct tf_insert_em_entry_parms *parms)
{
- struct tf_tbl_scope_cb *tbl_scope_cb;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ int rc;
- if (tfp == NULL || parms == NULL)
- return -EINVAL;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
- tbl_scope_cb = tbl_scope_cb_find((struct tf_session *)
- (tfp->session->core_data),
- parms->tbl_scope_id);
- if (tbl_scope_cb == NULL)
- return -EINVAL;
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
- /* Process the EM entry per Table Scope type */
- if (parms->mem == TF_MEM_EXTERNAL) {
- /* External EEM */
- return tf_insert_eem_entry((struct tf_session *)
- (tfp->session->core_data),
- tbl_scope_cb,
- parms);
- } else if (parms->mem == TF_MEM_INTERNAL) {
- /* Internal EM */
- return tf_insert_em_internal_entry(tfp, parms);
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = dev->ops->tf_dev_insert_em_entry(tfp, parms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: EM insert failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
}
return -EINVAL;
int tf_delete_em_entry(struct tf *tfp,
struct tf_delete_em_entry_parms *parms)
{
- struct tf_tbl_scope_cb *tbl_scope_cb;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ int rc;
- if (tfp == NULL || parms == NULL)
- return -EINVAL;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
- tbl_scope_cb = tbl_scope_cb_find((struct tf_session *)
- (tfp->session->core_data),
- parms->tbl_scope_id);
- if (tbl_scope_cb == NULL)
- return -EINVAL;
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
- if (parms->mem == TF_MEM_EXTERNAL)
- return tf_delete_eem_entry(tfp, parms);
- else
- return tf_delete_em_internal_entry(tfp, parms);
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = dev->ops->tf_dev_delete_em_entry(tfp, parms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: EM delete failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ return rc;
}
-/** allocate identifier resource
- *
- * Returns success or failure code.
- */
int tf_alloc_identifier(struct tf *tfp,
struct tf_alloc_identifier_parms *parms)
{
int id;
int rc;
- if (parms == NULL || tfp == NULL)
- return -EINVAL;
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR, "%s: session error\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
rc);
break;
case TF_IDENT_TYPE_L2_FUNC:
- PMD_DRV_LOG(ERR, "%s: unsupported %s\n",
+ TFP_DRV_LOG(ERR, "%s: unsupported %s\n",
tf_dir_2_str(parms->dir),
tf_ident_2_str(parms->ident_type));
rc = -EOPNOTSUPP;
break;
default:
- PMD_DRV_LOG(ERR, "%s: %s\n",
+ TFP_DRV_LOG(ERR, "%s: %s\n",
tf_dir_2_str(parms->dir),
tf_ident_2_str(parms->ident_type));
- rc = -EINVAL;
+ rc = -EOPNOTSUPP;
break;
}
if (rc) {
- PMD_DRV_LOG(ERR, "%s: identifier pool %s failure\n",
+ TFP_DRV_LOG(ERR, "%s: identifier pool %s failure, rc:%s\n",
tf_dir_2_str(parms->dir),
- tf_ident_2_str(parms->ident_type));
+ tf_ident_2_str(parms->ident_type),
+ strerror(-rc));
return rc;
}
id = ba_alloc(session_pool);
if (id == BA_FAIL) {
- PMD_DRV_LOG(ERR, "%s: %s: No resource available\n",
+ TFP_DRV_LOG(ERR, "%s: %s: No resource available\n",
tf_dir_2_str(parms->dir),
tf_ident_2_str(parms->ident_type));
return -ENOMEM;
int ba_rc;
struct tf_session *tfs;
- if (parms == NULL || tfp == NULL)
- return -EINVAL;
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR, "%s: Session error\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
rc);
break;
case TF_IDENT_TYPE_L2_FUNC:
- PMD_DRV_LOG(ERR, "%s: unsupported %s\n",
+ TFP_DRV_LOG(ERR, "%s: unsupported %s\n",
tf_dir_2_str(parms->dir),
tf_ident_2_str(parms->ident_type));
rc = -EOPNOTSUPP;
break;
default:
- PMD_DRV_LOG(ERR, "%s: invalid %s\n",
+ TFP_DRV_LOG(ERR, "%s: invalid %s\n",
tf_dir_2_str(parms->dir),
tf_ident_2_str(parms->ident_type));
- rc = -EINVAL;
+ rc = -EOPNOTSUPP;
break;
}
if (rc) {
- PMD_DRV_LOG(ERR, "%s: %s Identifier pool access failed\n",
+ TFP_DRV_LOG(ERR,
+ "%s: %s Identifier pool access failed, rc:%s\n",
tf_dir_2_str(parms->dir),
- tf_ident_2_str(parms->ident_type));
+ tf_ident_2_str(parms->ident_type),
+ strerror(-rc));
return rc;
}
ba_rc = ba_inuse(session_pool, (int)parms->id);
if (ba_rc == BA_FAIL || ba_rc == BA_ENTRY_FREE) {
- PMD_DRV_LOG(ERR, "%s: %s: Entry %d already free",
+ TFP_DRV_LOG(ERR, "%s: %s: Entry %d already free",
tf_dir_2_str(parms->dir),
tf_ident_2_str(parms->ident_type),
parms->id);
struct tf_alloc_tcam_entry_parms *parms)
{
int rc;
- int index = 0;
+ int index;
struct tf_session *tfs;
struct bitalloc *session_pool;
+ uint16_t num_slice_per_row;
- if (parms == NULL || tfp == NULL)
- return -EINVAL;
+ /* TEMP, due to device design. When the TCAM is modularized, the
+ * device type should be retrieved from the session.
+ */
+ enum tf_device_type device_type;
+ /* TEMP */
+ device_type = TF_DEVICE_TYPE_WH;
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR, "%s: session error\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
+ rc = tf_check_tcam_entry(parms->tcam_tbl_type,
+ device_type,
+ parms->key_sz_in_bits,
+ &num_slice_per_row);
+ /* Error logging handled by tf_check_tcam_entry */
+ if (rc)
+ return rc;
+
rc = tf_rm_lookup_tcam_type_pool(tfs,
parms->dir,
parms->tcam_tbl_type,
if (rc)
return rc;
- /*
- * priority 0: allocate from top of the tcam i.e. high
- * priority !0: allocate index from bottom i.e lowest
- */
- if (parms->priority) {
- for (index = session_pool->size - 1; index >= 0; index--) {
- if (ba_inuse(session_pool,
- index) == BA_ENTRY_FREE) {
- break;
- }
- }
- if (ba_alloc_index(session_pool,
- index) == BA_FAIL) {
- TFP_DRV_LOG(ERR,
- "%s: %s: ba_alloc index %d failed\n",
- tf_dir_2_str(parms->dir),
- tf_tcam_tbl_2_str(parms->tcam_tbl_type),
- index);
- return -ENOMEM;
- }
- } else {
- index = ba_alloc(session_pool);
- if (index == BA_FAIL) {
- TFP_DRV_LOG(ERR, "%s: %s: Out of resource\n",
- tf_dir_2_str(parms->dir),
- tf_tcam_tbl_2_str(parms->tcam_tbl_type));
- return -ENOMEM;
- }
+ index = ba_alloc(session_pool);
+ if (index == BA_FAIL) {
+ TFP_DRV_LOG(ERR, "%s: %s: No resource available\n",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->tcam_tbl_type));
+ return -ENOMEM;
}
+ index *= num_slice_per_row;
+
parms->idx = index;
return 0;
}
{
int rc;
int id;
+ int index;
struct tf_session *tfs;
struct bitalloc *session_pool;
+ uint16_t num_slice_per_row;
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
+ /* TEMP, due to device design. When the TCAM is modularized, the
+ * device type should be retrieved from the session.
+ */
+ enum tf_device_type device_type;
+ /* TEMP */
+ device_type = TF_DEVICE_TYPE_WH;
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "%s, Session info invalid\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
- /*
- * Each tcam send msg function should check for key sizes range
- */
+ rc = tf_check_tcam_entry(parms->tcam_tbl_type,
+ device_type,
+ parms->key_sz_in_bits,
+ &num_slice_per_row);
+ /* Error logging handled by tf_check_tcam_entry */
+ if (rc)
+ return rc;
rc = tf_rm_lookup_tcam_type_pool(tfs,
parms->dir,
if (rc)
return rc;
-
/* Verify that the entry has been previously allocated */
- id = ba_inuse(session_pool, parms->idx);
+ index = parms->idx / num_slice_per_row;
+
+ id = ba_inuse(session_pool, index);
if (id != 1) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"%s: %s: Invalid or not allocated index, idx:%d\n",
tf_dir_2_str(parms->dir),
tf_tcam_tbl_2_str(parms->tcam_tbl_type),
tf_get_tcam_entry(struct tf *tfp __rte_unused,
struct tf_get_tcam_entry_parms *parms __rte_unused)
{
- int rc = -EOPNOTSUPP;
-
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "%s, Session info invalid\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
-
- return rc;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
+ return -EOPNOTSUPP;
}
int
struct tf_free_tcam_entry_parms *parms)
{
int rc;
+ int index;
struct tf_session *tfs;
struct bitalloc *session_pool;
+ uint16_t num_slice_per_row = 1;
- if (parms == NULL || tfp == NULL)
- return -EINVAL;
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR, "%s: Session error\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
+ /* TEMP, due to device design. When the TCAM is modularized, the
+ * device type should be retrieved from the session.
+ */
+ enum tf_device_type device_type;
+ /* TEMP */
+ device_type = TF_DEVICE_TYPE_WH;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
+ rc = tf_check_tcam_entry(parms->tcam_tbl_type,
+ device_type,
+ 0,
+ &num_slice_per_row);
+ /* Error logging handled by tf_check_tcam_entry */
+ if (rc)
+ return rc;
+
rc = tf_rm_lookup_tcam_type_pool(tfs,
parms->dir,
parms->tcam_tbl_type,
if (rc)
return rc;
- rc = ba_inuse(session_pool, (int)parms->idx);
+ index = parms->idx / num_slice_per_row;
+
+ rc = ba_inuse(session_pool, index);
if (rc == BA_FAIL || rc == BA_ENTRY_FREE) {
- PMD_DRV_LOG(ERR, "%s: %s: Entry %d already free",
+ TFP_DRV_LOG(ERR, "%s: %s: Entry %d already free",
tf_dir_2_str(parms->dir),
tf_tcam_tbl_2_str(parms->tcam_tbl_type),
- parms->idx);
+ index);
return -EINVAL;
}
- ba_free(session_pool, (int)parms->idx);
+ ba_free(session_pool, index);
rc = tf_msg_tcam_entry_free(tfp, parms);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR, "%s: %s: Entry %d free failed",
+ TFP_DRV_LOG(ERR, "%s: %s: Entry %d free failed with err %s\n",
tf_dir_2_str(parms->dir),
tf_tcam_tbl_2_str(parms->tcam_tbl_type),
- parms->idx);
+ parms->idx,
+ strerror(-rc));
}
return rc;
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
-
+#include "hcapi/hcapi_cfa.h"
#include "tf_project.h"
/**
#define TF_ACT_REC_OFFSET_2_PTR(offset) ((offset) >> 4)
#define TF_ACT_REC_PTR_2_OFFSET(offset) ((offset) << 4)
+
/*
* Helper Macros
*/
*/
enum tf_device_type {
TF_DEVICE_TYPE_WH = 0, /**< Whitney+ */
- TF_DEVICE_TYPE_BRD2, /**< TBD */
- TF_DEVICE_TYPE_BRD3, /**< TBD */
- TF_DEVICE_TYPE_BRD4, /**< TBD */
+ TF_DEVICE_TYPE_SR, /**< Stingray */
+ TF_DEVICE_TYPE_THOR, /**< Thor */
+ TF_DEVICE_TYPE_SR2, /**< Stingray2 */
TF_DEVICE_TYPE_MAX /**< Maximum */
};
-/** Identifier resource types
+/**
+ * Identifier resource types
*/
enum tf_identifier_type {
- /** The L2 Context is returned from the L2 Ctxt TCAM lookup
+ /**
+ * The L2 Context is returned from the L2 Ctxt TCAM lookup
* and can be used in WC TCAM or EM keys to virtualize further
* lookups.
*/
TF_IDENT_TYPE_L2_CTXT,
- /** The WC profile func is returned from the L2 Ctxt TCAM lookup
+ /**
+ * The WC profile func is returned from the L2 Ctxt TCAM lookup
* to enable virtualization of the profile TCAM.
*/
TF_IDENT_TYPE_PROF_FUNC,
- /** The WC profile ID is included in the WC lookup key
+ /**
+ * The WC profile ID is included in the WC lookup key
* to enable virtualization of the WC TCAM hardware.
*/
TF_IDENT_TYPE_WC_PROF,
- /** The EM profile ID is included in the EM lookup key
+ /**
+ * The EM profile ID is included in the EM lookup key
* to enable virtualization of the EM hardware. (not required for SR2
* as it has table scope)
*/
TF_IDENT_TYPE_EM_PROF,
- /** The L2 func is included in the ILT result and from recycling to
+ /**
+ * The L2 func is included in the ILT result and from recycling to
* enable virtualization of further lookups.
*/
TF_IDENT_TYPE_L2_FUNC,
/* External */
- /** External table type - initially 1 poolsize entries.
+ /**
+ * External table type - initially 1 poolsize entries.
* All External table types are associated with a table
* scope. Internal types are not.
*/
TF_EM_TBL_TYPE_MAX
};
-/** TruFlow Session Information
+/**
+ * TruFlow Session Information
*
* Structure defining a TruFlow Session, also known as a Management
* session. This structure is initialized at time of
* tf_open_session(). It is passed to all of the TruFlow APIs as way
* to prescribe and isolate resources between different TruFlow ULP
* Applications.
+ *
+ * Ownership of the elements is split between ULP and TruFlow. Please
+ * see the individual elements.
*/
struct tf_session_info {
/**
uint32_t core_data_sz_bytes;
};
-/** TruFlow handle
+/**
+ * TruFlow handle
*
* Contains a pointer to the session info. Allocated by ULP and passed
* to TruFlow using tf_open_session(). TruFlow will populate the
* tf_open_session parameters definition.
*/
struct tf_open_session_parms {
- /** [in] ctrl_chan_name
+ /**
+ * [in] ctrl_chan_name
*
* String containing name of control channel interface to be
* used for this session to communicate with firmware.
* shared memory allocation.
*/
char ctrl_chan_name[TF_SESSION_NAME_MAX];
- /** [in] shadow_copy
+ /**
+ * [in] shadow_copy
*
* Boolean controlling the use and availability of shadow
* copy. Shadow copy will allow the TruFlow to keep track of
* control channel.
*/
bool shadow_copy;
- /** [in/out] session_id
+ /**
+ * [in/out] session_id
*
* Session_id is unique per session.
*
* The session_id allows a session to be shared between devices.
*/
union tf_session_id session_id;
- /** [in] device type
+ /**
+ * [in] device type
*
* Device type is passed, one of Wh+, SR, Thor, SR2
*/
struct tf_open_session_parms *parms);
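+/*
+ * Illustrative usage sketch (assumption, not part of the API): the control
+ * channel name is the PCI DBDF string of the port, which tf_open_session()
+ * parses into domain/bus/slot/device for the session id.
+ *
+ *   struct tf_open_session_parms oparms = { 0 };
+ *
+ *   strlcpy(oparms.ctrl_chan_name, "0000:0d:00.0", TF_SESSION_NAME_MAX);
+ *   oparms.device_type = TF_DEVICE_TYPE_WH;
+ *   oparms.shadow_copy = false;
+ *   rc = tf_open_session(tfp, &oparms);
+ */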
struct tf_attach_session_parms {
- /** [in] ctrl_chan_name
+ /**
+ * [in] ctrl_chan_name
*
* String containing name of control channel interface to be
* used for this session to communicate with firmware.
*/
char ctrl_chan_name[TF_SESSION_NAME_MAX];
- /** [in] attach_chan_name
+ /**
+ * [in] attach_chan_name
*
* String containing name of attach channel interface to be
* used for this session.
*/
char attach_chan_name[TF_SESSION_NAME_MAX];
- /** [in] session_id
+ /**
+ * [in] session_id
*
* Session_id is unique per session. For Attach the session_id
* should be the session_id that was returned on the first
*
* @ref tf_free_identifier
*/
-/** tf_alloc_identifier parameter definition
+/**
+ * tf_alloc_identifier parameter definition
*/
struct tf_alloc_identifier_parms {
/**
uint16_t id;
};
-/** tf_free_identifier parameter definition
+/**
+ * tf_free_identifier parameter definition
*/
struct tf_free_identifier_parms {
/**
uint16_t id;
};
-/** allocate identifier resource
+/**
+ * allocate identifier resource
*
* TruFlow core will allocate a free id from the per identifier resource type
* pool reserved for the session during tf_open(). No firmware is involved.
int tf_alloc_identifier_new(struct tf *tfp,
struct tf_alloc_identifier_parms *parms);
-/** free identifier resource
+/**
+ * free identifier resource
*
* TruFlow core will return an id back to the per identifier resource type pool
* reserved for the session. No firmware is involved. During tf_close, the
*/
-/** tf_alloc_tbl_scope_parms definition
+/**
+ * tf_alloc_tbl_scope_parms definition
*/
struct tf_alloc_tbl_scope_parms {
/**
*/
uint32_t rx_num_flows_in_k;
/**
- * [in] Brd4 only receive table access interface id
+ * [in] SR2 only receive table access interface id
*/
uint32_t rx_tbl_if_id;
/**
*/
uint32_t tx_num_flows_in_k;
/**
- * [in] Brd4 only receive table access interface id
+ * [in] SR2 only transmit table access interface id
*/
uint32_t tx_tbl_if_id;
/**
/**
* allocate a table scope
*
- * On Brd4 Firmware will allocate a scope ID. On other devices, the scope
+ * On SR2 Firmware will allocate a scope ID. On other devices, the scope
* is a software construct to identify an EEM table. This function will
* divide the hash memory/buckets and records according to the device
* device constraints based upon calculations using either the number of flows
*
* This API will allocate the table region in
* DRAM, program the PTU page table entries, and program the number of static
- * buckets (if Brd4) in the RX and TX CFAs. Buckets are assumed to start at
+ * buckets (if SR2) in the RX and TX CFAs. Buckets are assumed to start at
* 0 in the EM memory for the scope. Upon successful completion of this API,
* hash tables are fully initialized and ready for entries to be inserted.
*
*
* Firmware checks that the table scope ID is owned by the TruFlow
* session, verifies that no references to this table scope remains
- * (Brd4 ILT) or Profile TCAM entries for either CFA (RX/TX) direction,
+ * (SR2 ILT) or Profile TCAM entries for either CFA (RX/TX) direction,
* then frees the table scope ID.
*
* Returns success or failure code.
int tf_free_tbl_scope(struct tf *tfp,
struct tf_free_tbl_scope_parms *parms);
-
/**
* @page tcam TCAM Access
*
* @ref tf_free_tcam_entry
*/
-/** tf_alloc_tcam_entry parameter definition
+
+/**
+ * tf_alloc_tcam_entry parameter definition
*/
struct tf_alloc_tcam_entry_parms {
/**
*/
uint8_t *mask;
/**
- * [in] Priority of entry requested
- * 0: index from top i.e. highest priority first
- * !0: index from bottom i.e lowest priority first
+ * [in] Priority of entry requested (definition TBD)
*/
uint32_t priority;
/**
uint16_t idx;
};
-/** allocate TCAM entry
+/**
+ * allocate TCAM entry
*
* Allocate a TCAM entry - one of these types:
*
int tf_alloc_tcam_entry(struct tf *tfp,
struct tf_alloc_tcam_entry_parms *parms);
-/** tf_set_tcam_entry parameter definition
+/**
+ * tf_set_tcam_entry parameter definition
*/
struct tf_set_tcam_entry_parms {
/**
uint16_t result_sz_in_bits;
};
-/** set TCAM entry
+/**
+ * set TCAM entry
*
* Program a TCAM table entry for a TruFlow session.
*
int tf_set_tcam_entry(struct tf *tfp,
struct tf_set_tcam_entry_parms *parms);
-/** tf_get_tcam_entry parameter definition
+/**
+ * tf_get_tcam_entry parameter definition
*/
struct tf_get_tcam_entry_parms {
/**
uint16_t result_sz_in_bits;
};
-/*
+/**
* get TCAM entry
*
 * Read a TCAM table entry for a TruFlow session.
int tf_get_tcam_entry(struct tf *tfp,
struct tf_get_tcam_entry_parms *parms);
-/*
+/**
* tf_free_tcam_entry parameter definition
*/
struct tf_free_tcam_entry_parms {
uint16_t ref_cnt;
};
-/*
+/**
+ * free TCAM entry
+ *
* Free TCAM entry.
*
* Firmware checks to ensure the TCAM entries are owned by the TruFlow
* @ref tf_get_tbl_entry
*/
+
/**
* tf_alloc_tbl_entry parameter definition
*/
struct tf_get_tbl_entry_parms *parms);
/**
- * tf_get_bulk_tbl_entry parameter definition
+ * tf_bulk_get_tbl_entry parameter definition
*/
-struct tf_get_bulk_tbl_entry_parms {
+struct tf_bulk_get_tbl_entry_parms {
/**
* [in] Receive or transmit direction
*/
* [in] Type of object to get
*/
enum tf_tbl_type type;
- /**
- * [in] Clear hardware entries on reads only
- * supported for TF_TBL_TYPE_ACT_STATS_64
- */
- bool clear_on_read;
/**
* [in] Starting index to read from
*/
* Returns success or failure code. Failure will be returned if the
* provided data buffer is too small for the data type requested.
*/
-int tf_get_bulk_tbl_entry(struct tf *tfp,
- struct tf_get_bulk_tbl_entry_parms *parms);
+int tf_bulk_get_tbl_entry(struct tf *tfp,
+ struct tf_bulk_get_tbl_entry_parms *parms);
/**
* @page exact_match Exact Match Table
*/
uint32_t tbl_scope_id;
/**
- * [in] ID of table interface to use (Brd4 only)
+ * [in] ID of table interface to use (SR2 only)
*/
uint32_t tbl_if_id;
/**
*/
uint32_t tbl_scope_id;
/**
- * [in] ID of table interface to use (Brd4 only)
+ * [in] ID of table interface to use (SR2 only)
*/
uint32_t tbl_if_id;
/**
* [in] epoch group IDs of entry to delete
- * 2 element array with 2 ids. (Brd4 only)
+ * 2 element array with 2 ids. (SR2 only)
*/
uint16_t *epochs;
/**
*/
uint32_t tbl_scope_id;
/**
- * [in] ID of table interface to use (Brd4 only)
+ * [in] ID of table interface to use (SR2 only)
*/
uint32_t tbl_if_id;
/**
uint16_t em_record_sz_in_bits;
/**
* [in] epoch group IDs of entry to lookup
- * 2 element array with 2 ids. (Brd4 only)
+ * 2 element array with 2 ids. (SR2 only)
*/
uint16_t *epochs;
/**
* specified direction and table scope.
*
* When inserting an entry into an exact match table, the TruFlow library may
- * need to allocate a dynamic bucket for the entry (Brd4 only).
+ * need to allocate a dynamic bucket for the entry (SR2 only).
*
* The insertion of duplicate entries in an EM table is not permitted. If a
* TruFlow application can guarantee that it will never insert duplicates, it
*/
int tf_search_em_entry(struct tf *tfp,
struct tf_search_em_entry_parms *parms);
+
#endif /* _TF_CORE_H_ */
*/
int (*tf_dev_get_tcam)(struct tf *tfp,
struct tf_tcam_get_parms *parms);
+
+ /**
+ * Insert EM hash entry API
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to E/EM insert parameters
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Error
+ */
+ int (*tf_dev_insert_em_entry)(struct tf *tfp,
+ struct tf_insert_em_entry_parms *parms);
+
+ /**
+ * Delete EM hash entry API
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to E/EM delete parameters
+ *
+	 * Returns:
+ * 0 - Success
+ * -EINVAL - Error
+ */
+ int (*tf_dev_delete_em_entry)(struct tf *tfp,
+ struct tf_delete_em_entry_parms *parms);
};
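
The two hooks above are plain function pointers in the device ops table; callers in the core layer are expected to dispatch through them rather than calling the P4 implementations directly. A minimal sketch of such a dispatch follows; the type name struct tf_dev_ops and the way the ops table is obtained are assumptions for illustration only, not something this patch defines.

#include <errno.h>

/* Editorial sketch only, not part of the patch. Assumes the ops table type
 * is struct tf_dev_ops and has already been resolved for the bound device. */
static int
example_dev_em_insert(struct tf *tfp,
		      const struct tf_dev_ops *ops,
		      struct tf_insert_em_entry_parms *parms)
{
	/* Devices that do not implement the hook leave it NULL. */
	if (ops == NULL || ops->tf_dev_insert_em_entry == NULL)
		return -EOPNOTSUPP;

	return ops->tf_dev_insert_em_entry(tfp, parms);
}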
/**
#include "tf_identifier.h"
#include "tf_tbl_type.h"
#include "tf_tcam.h"
+#include "tf_em.h"
/**
* Device specific function that retrieves the MAX number of HCAPI
.tf_dev_alloc_search_tcam = tf_tcam_alloc_search,
.tf_dev_set_tcam = tf_tcam_set,
.tf_dev_get_tcam = tf_tcam_get,
+ .tf_dev_insert_em_entry = tf_em_insert_entry,
+ .tf_dev_delete_em_entry = tf_em_delete_entry,
};
#include "bnxt.h"
-/* Enable EEM table dump
- */
-#define TF_EEM_DUMP
-
-static struct tf_eem_64b_entry zero_key_entry;
static uint32_t tf_em_get_key_mask(int num_entries)
{
return mask;
}
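
The body of tf_em_get_key_mask() is elided by the diff context; conceptually it turns the Key0 table size into an index mask. A sketch of that idea, assuming the table size is a power of two (the real helper may also range-check num_entries):

#include <stdint.h>

/* Editorial sketch, not the patch's implementation: for a power-of-two
 * table size the index mask is simply size - 1. */
static uint32_t example_em_key_mask(uint32_t num_entries)
{
	if (num_entries == 0 || (num_entries & (num_entries - 1)) != 0)
		return 0;	/* not a power of two, no usable mask */

	return num_entries - 1;
}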
-/* CRC32i support for Key0 hash */
-#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8))
-#define crc32(x, y) crc32i(~0, x, y)
-
-static const uint32_t crc32tbl[] = { /* CRC polynomial 0xedb88320 */
-0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
-0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
-0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
-0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
-0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
-0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
-0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
-0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
-0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
-0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
-0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
-0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
-0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
-0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
-0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
-0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
-0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
-0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
-0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
-0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
-0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
-0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
-0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
-0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
-0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
-0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
-0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
-0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
-0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
-0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
-0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
-0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
-0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
-0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
-0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
-0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
-0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
-0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
-0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
-0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
-0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
-0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
-0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
-0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
-0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
-0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
-0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
-0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
-0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
-0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
-0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
-0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
-0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
-0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
-0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
-0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
-0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
-0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
-0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
-0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
-0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
-0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
-0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
-0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
-
-static uint32_t crc32i(uint32_t crc, const uint8_t *buf, size_t len)
-{
- int l;
-
- for (l = (len - 1); l >= 0; l--)
- crc = ucrc32(buf[l], crc);
-
- return ~crc;
-}
-
-static uint32_t tf_em_lkup_get_crc32_hash(struct tf_session *session,
- uint8_t *key,
- enum tf_dir dir)
-{
- int i;
- uint32_t index;
- uint32_t val1, val2;
- uint8_t temp[4];
- uint8_t *kptr = key;
-
- /* Do byte-wise XOR of the 52-byte HASH key first. */
- index = *key;
- kptr--;
-
- for (i = TF_HW_EM_KEY_MAX_SIZE - 2; i >= 0; i--) {
- index = index ^ *kptr;
- kptr--;
- }
-
- /* Get seeds */
- val1 = session->lkup_em_seed_mem[dir][index * 2];
- val2 = session->lkup_em_seed_mem[dir][index * 2 + 1];
-
- temp[3] = (uint8_t)(val1 >> 24);
- temp[2] = (uint8_t)(val1 >> 16);
- temp[1] = (uint8_t)(val1 >> 8);
- temp[0] = (uint8_t)(val1 & 0xff);
- val1 = 0;
-
- /* Start with seed */
- if (!(val2 & 0x1))
- val1 = crc32i(~val1, temp, 4);
-
- val1 = crc32i(~val1,
- (key - (TF_HW_EM_KEY_MAX_SIZE - 1)),
- TF_HW_EM_KEY_MAX_SIZE);
-
- /* End with seed */
- if (val2 & 0x1)
- val1 = crc32i(~val1, temp, 4);
-
- return val1;
-}
-
-static uint32_t tf_em_lkup_get_lookup3_hash(uint32_t lookup3_init_value,
- uint8_t *in_key)
-{
- uint32_t val1;
-
- val1 = hashword(((uint32_t *)in_key) + 1,
- TF_HW_EM_KEY_MAX_SIZE / (sizeof(uint32_t)),
- lookup3_init_value);
-
- return val1;
-}
-
-void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb,
- enum tf_dir dir,
- uint32_t offset,
- enum tf_em_table_type table_type)
-{
- int level = 0;
- int page = offset / TF_EM_PAGE_SIZE;
- void *addr = NULL;
- struct tf_em_ctx_mem_info *ctx = &tbl_scope_cb->em_ctx_info[dir];
-
- if (ctx == NULL)
- return NULL;
-
- if (dir != TF_DIR_RX && dir != TF_DIR_TX)
- return NULL;
-
- if (table_type < TF_KEY0_TABLE || table_type > TF_EFC_TABLE)
- return NULL;
-
- /*
- * Use the level according to the num_level of page table
- */
- level = ctx->em_tables[table_type].num_lvl - 1;
-
- addr = (void *)ctx->em_tables[table_type].pg_tbl[level].pg_va_tbl[page];
-
- return addr;
-}
-
-/** Read Key table entry
- *
- * Entry is read in to entry
- */
-static int tf_em_read_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
- struct tf_eem_64b_entry *entry,
- uint32_t entry_size,
- uint32_t index,
- enum tf_em_table_type table_type,
- enum tf_dir dir)
-{
- void *page;
- uint32_t entry_offset = (index * entry_size) % TF_EM_PAGE_SIZE;
-
- page = tf_em_get_table_page(tbl_scope_cb,
- dir,
- (index * entry_size),
- table_type);
-
- if (page == NULL)
- return -EINVAL;
-
- memcpy((uint8_t *)entry, (uint8_t *)page + entry_offset, entry_size);
- return 0;
-}
-
-static int tf_em_write_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
- struct tf_eem_64b_entry *entry,
- uint32_t entry_size,
- uint32_t index,
- enum tf_em_table_type table_type,
- enum tf_dir dir)
-{
- void *page;
- uint32_t entry_offset = (index * entry_size) % TF_EM_PAGE_SIZE;
-
- page = tf_em_get_table_page(tbl_scope_cb,
- dir,
- (index * entry_size),
- table_type);
-
- if (page == NULL)
- return -EINVAL;
-
- memcpy((uint8_t *)page + entry_offset, entry, entry_size);
-
- return 0;
-}
-
-static int tf_em_entry_exists(struct tf_tbl_scope_cb *tbl_scope_cb,
- struct tf_eem_64b_entry *entry,
- uint32_t index,
- enum tf_em_table_type table_type,
- enum tf_dir dir)
-{
- int rc;
- struct tf_eem_64b_entry table_entry;
-
- rc = tf_em_read_entry(tbl_scope_cb,
- &table_entry,
- TF_EM_KEY_RECORD_SIZE,
- index,
- table_type,
- dir);
-
- if (rc != 0)
- return -EINVAL;
-
- if (table_entry.hdr.word1 & (1 << TF_LKUP_RECORD_VALID_SHIFT)) {
- if (entry != NULL) {
- if (memcmp(&table_entry,
- entry,
- TF_EM_KEY_RECORD_SIZE) == 0)
- return -EEXIST;
- } else {
- return -EEXIST;
- }
-
- return -EBUSY;
- }
-
- return 0;
-}
-
-static void tf_em_create_key_entry(struct tf_eem_entry_hdr *result,
- uint8_t *in_key,
- struct tf_eem_64b_entry *key_entry)
+static void tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
+ uint8_t *in_key,
+ struct cfa_p4_eem_64b_entry *key_entry)
{
key_entry->hdr.word1 = result->word1;
- if (result->word1 & TF_LKUP_RECORD_ACT_REC_INT_MASK)
+ if (result->word1 & CFA_P4_EEM_ENTRY_ACT_REC_INT_MASK)
key_entry->hdr.pointer = result->pointer;
else
key_entry->hdr.pointer = result->pointer;
memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
-}
-
-/* tf_em_select_inject_table
- *
- * Returns:
- * 0 - Key does not exist in either table and can be inserted
- * at "index" in table "table".
- * EEXIST - Key does exist in table at "index" in table "table".
- * TF_ERR - Something went horribly wrong.
- */
-static int tf_em_select_inject_table(struct tf_tbl_scope_cb *tbl_scope_cb,
- enum tf_dir dir,
- struct tf_eem_64b_entry *entry,
- uint32_t key0_hash,
- uint32_t key1_hash,
- uint32_t *index,
- enum tf_em_table_type *table)
-{
- int key0_entry;
- int key1_entry;
-
- /*
- * Check KEY0 table.
- */
- key0_entry = tf_em_entry_exists(tbl_scope_cb,
- entry,
- key0_hash,
- TF_KEY0_TABLE,
- dir);
- /*
- * Check KEY1 table.
- */
- key1_entry = tf_em_entry_exists(tbl_scope_cb,
- entry,
- key1_hash,
- TF_KEY1_TABLE,
- dir);
-
- if (key0_entry == -EEXIST) {
- *table = TF_KEY0_TABLE;
- *index = key0_hash;
- return -EEXIST;
- } else if (key1_entry == -EEXIST) {
- *table = TF_KEY1_TABLE;
- *index = key1_hash;
- return -EEXIST;
- } else if (key0_entry == 0) {
- *table = TF_KEY0_TABLE;
- *index = key0_hash;
- return 0;
- } else if (key1_entry == 0) {
- *table = TF_KEY1_TABLE;
- *index = key1_hash;
- return 0;
- }
-
- return -EINVAL;
+#ifdef TF_EEM_DEBUG
+ dump_raw((uint8_t *)key_entry, TF_EM_KEY_RECORD_SIZE, "Create raw:");
+#endif
}
/** insert EEM entry API
* 0
* TF_ERR_EM_DUP - key is already in table
*/
-int tf_insert_eem_entry(struct tf_session *session,
- struct tf_tbl_scope_cb *tbl_scope_cb,
- struct tf_insert_em_entry_parms *parms)
+static int tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
+ struct tf_insert_em_entry_parms *parms)
{
uint32_t mask;
uint32_t key0_hash;
uint32_t key1_hash;
uint32_t key0_index;
uint32_t key1_index;
- struct tf_eem_64b_entry key_entry;
+ struct cfa_p4_eem_64b_entry key_entry;
uint32_t index;
- enum tf_em_table_type table_type;
+ enum hcapi_cfa_em_table_type table_type;
uint32_t gfid;
- int num_of_entry;
+ struct hcapi_cfa_hwop op;
+ struct hcapi_cfa_key_tbl key_tbl;
+ struct hcapi_cfa_key_data key_obj;
+ struct hcapi_cfa_key_loc key_loc;
+ uint64_t big_hash;
+ int rc;
/* Get mask to use on hash */
mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
if (!mask)
return -EINVAL;
- num_of_entry = TF_HW_EM_KEY_MAX_SIZE + 4;
+#ifdef TF_EEM_DEBUG
+ dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
+#endif
- key0_hash = tf_em_lkup_get_crc32_hash(session,
- &parms->key[num_of_entry] - 1,
- parms->dir);
- key0_index = key0_hash & mask;
+ big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
+ (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
+ key0_hash = (uint32_t)(big_hash >> 32);
+ key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
- key1_hash =
- tf_em_lkup_get_lookup3_hash(session->lkup_lkup3_init_cfg[parms->dir],
- parms->key);
+ key0_index = key0_hash & mask;
key1_index = key1_hash & mask;
+#ifdef TF_EEM_DEBUG
+ TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
+ TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
+#endif
/*
* Use the "result" arg to populate all of the key entry then
* store the byte swapped "raw" entry in a local copy ready
* for insertion into the table.
*/
- tf_em_create_key_entry((struct tf_eem_entry_hdr *)parms->em_record,
+ tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
((uint8_t *)parms->key),
&key_entry);
/*
- * Find which table to use
+	 * Try to add to the Key0 table; if that fails, fall back to
+	 * the Key1 table.
*/
- if (tf_em_select_inject_table(tbl_scope_cb,
- parms->dir,
- &key_entry,
- key0_index,
- key1_index,
- &index,
- &table_type) == 0) {
- if (table_type == TF_KEY0_TABLE) {
- TF_SET_GFID(gfid,
- key0_index,
- TF_KEY0_TABLE);
- } else {
- TF_SET_GFID(gfid,
- key1_index,
- TF_KEY1_TABLE);
- }
-
- /*
- * Inject
- */
- if (tf_em_write_entry(tbl_scope_cb,
- &key_entry,
- TF_EM_KEY_RECORD_SIZE,
- index,
- table_type,
- parms->dir) == 0) {
- TF_SET_FLOW_ID(parms->flow_id,
- gfid,
- TF_GFID_TABLE_EXTERNAL,
- parms->dir);
- TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
- 0,
- 0,
- 0,
- index,
- 0,
- table_type);
- return 0;
- }
+ index = key0_index;
+ op.opcode = HCAPI_CFA_HWOPS_ADD;
+ key_tbl.base0 = (uint8_t *)
+ &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
+ key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
+ key_obj.data = (uint8_t *)&key_entry;
+ key_obj.size = TF_EM_KEY_RECORD_SIZE;
+
+ rc = hcapi_cfa_key_hw_op(&op,
+ &key_tbl,
+ &key_obj,
+ &key_loc);
+
+ if (rc == 0) {
+ table_type = TF_KEY0_TABLE;
+ } else {
+ index = key1_index;
+
+ key_tbl.base0 = (uint8_t *)
+ &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
+ key_obj.offset =
+ (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
+
+ rc = hcapi_cfa_key_hw_op(&op,
+ &key_tbl,
+ &key_obj,
+ &key_loc);
+ if (rc != 0)
+ return rc;
+
+ table_type = TF_KEY1_TABLE;
}
- return -EINVAL;
+ TF_SET_GFID(gfid,
+ index,
+ table_type);
+ TF_SET_FLOW_ID(parms->flow_id,
+ gfid,
+ TF_GFID_TABLE_EXTERNAL,
+ parms->dir);
+ TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
+ 0,
+ 0,
+ 0,
+ index,
+ 0,
+ table_type);
+
+ return 0;
}
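
The insert path above derives both candidate indexes from one 64-bit hcapi_cfa_key_hash() result: the upper 32 bits feed the Key0 table, the lower 32 bits the Key1 table, and each is masked down to the table size. Condensed into a standalone helper (editorial sketch, not part of the patch):

#include <stdint.h>

/* Editorial sketch of the hash split performed in tf_insert_eem_entry(). */
static void example_split_big_hash(uint64_t big_hash, uint32_t mask,
				   uint32_t *key0_index, uint32_t *key1_index)
{
	uint32_t key0_hash = (uint32_t)(big_hash >> 32);	 /* Key0 table */
	uint32_t key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF); /* Key1 table */

	*key0_index = key0_hash & mask;
	*key1_index = key1_hash & mask;
}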
/**
* returns:
* 0 - Success
*/
-int tf_insert_em_internal_entry(struct tf *tfp,
- struct tf_insert_em_entry_parms *parms)
+static int tf_insert_em_internal_entry(struct tf *tfp,
+ struct tf_insert_em_entry_parms *parms)
{
int rc;
uint32_t gfid;
if (rc != 0)
return -1;
- TFP_DRV_LOG(INFO,
+ PMD_DRV_LOG(ERR,
"Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
index * TF_SESSION_EM_ENTRY_SIZE,
rptr_index,
* 0
* -EINVAL
*/
-int tf_delete_em_internal_entry(struct tf *tfp,
- struct tf_delete_em_entry_parms *parms)
+static int tf_delete_em_internal_entry(struct tf *tfp,
+ struct tf_delete_em_entry_parms *parms)
{
int rc;
struct tf_session *session =
* 0
* TF_NO_EM_MATCH - entry not found
*/
-int tf_delete_eem_entry(struct tf *tfp,
- struct tf_delete_em_entry_parms *parms)
+static int tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
+ struct tf_delete_em_entry_parms *parms)
{
- struct tf_session *session;
- struct tf_tbl_scope_cb *tbl_scope_cb;
- enum tf_em_table_type hash_type;
+ enum hcapi_cfa_em_table_type hash_type;
uint32_t index;
+ struct hcapi_cfa_hwop op;
+ struct hcapi_cfa_key_tbl key_tbl;
+ struct hcapi_cfa_key_data key_obj;
+ struct hcapi_cfa_key_loc key_loc;
+ int rc;
- if (parms == NULL)
+ if (parms->flow_handle == 0)
return -EINVAL;
- session = (struct tf_session *)tfp->session->core_data;
- if (session == NULL)
- return -EINVAL;
+ TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
+ TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
- tbl_scope_cb = tbl_scope_cb_find(session,
- parms->tbl_scope_id);
- if (tbl_scope_cb == NULL)
- return -EINVAL;
+ op.opcode = HCAPI_CFA_HWOPS_DEL;
+ key_tbl.base0 = (uint8_t *)
+ &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[(hash_type == 0 ?
+ TF_KEY0_TABLE :
+ TF_KEY1_TABLE)];
+ key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
+ key_obj.data = NULL;
+ key_obj.size = TF_EM_KEY_RECORD_SIZE;
+
+ rc = hcapi_cfa_key_hw_op(&op,
+ &key_tbl,
+ &key_obj,
+ &key_loc);
+
+	if (rc != 0)
+		return rc;
- if (parms->flow_handle == 0)
+ return 0;
+}
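
Both the add and the delete path locate the record the same way: the entry index scaled by the 64-byte key record size, taken modulo the EEM page size, is the offset handed to hcapi_cfa_key_hw_op(); the selected em_tables[] entry supplies the base. As a one-line helper (editorial sketch, not part of the patch):

#include <stdint.h>

/* Editorial sketch of the offset math used by both hw-op paths above. */
static inline uint32_t example_key_obj_offset(uint32_t index)
{
	return (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
}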
+
+/** insert EM hash entry API
+ *
+ * returns:
+ * 0 - Success
+ * -EINVAL - Error
+ */
+int tf_em_insert_entry(struct tf *tfp,
+ struct tf_insert_em_entry_parms *parms)
+{
+ struct tf_tbl_scope_cb *tbl_scope_cb;
+
+ tbl_scope_cb = tbl_scope_cb_find
+ ((struct tf_session *)(tfp->session->core_data),
+ parms->tbl_scope_id);
+ if (tbl_scope_cb == NULL) {
+ TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
return -EINVAL;
+ }
- TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
- TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
+ /* Process the EM entry per Table Scope type */
+ if (parms->mem == TF_MEM_EXTERNAL)
+ /* External EEM */
+ return tf_insert_eem_entry
+ (tbl_scope_cb, parms);
+ else if (parms->mem == TF_MEM_INTERNAL)
+ /* Internal EM */
+ return tf_insert_em_internal_entry(tfp, parms);
- if (tf_em_entry_exists(tbl_scope_cb,
- NULL,
- index,
- hash_type,
- parms->dir) == -EEXIST) {
- tf_em_write_entry(tbl_scope_cb,
- &zero_key_entry,
- TF_EM_KEY_RECORD_SIZE,
- index,
- hash_type,
- parms->dir);
+ return -EINVAL;
+}
- return 0;
+/** Delete EM hash entry API
+ *
+ * returns:
+ * 0 - Success
+ * -EINVAL - Error
+ */
+int tf_em_delete_entry(struct tf *tfp,
+ struct tf_delete_em_entry_parms *parms)
+{
+ struct tf_tbl_scope_cb *tbl_scope_cb;
+
+ tbl_scope_cb = tbl_scope_cb_find
+ ((struct tf_session *)(tfp->session->core_data),
+ parms->tbl_scope_id);
+ if (tbl_scope_cb == NULL) {
+ TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
+ return -EINVAL;
}
+ if (parms->mem == TF_MEM_EXTERNAL)
+ return tf_delete_eem_entry(tbl_scope_cb, parms);
+ else if (parms->mem == TF_MEM_INTERNAL)
+ return tf_delete_em_internal_entry(tfp, parms);
return -EINVAL;
}
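
tf_em_insert_entry() and tf_em_delete_entry() are the only EM entry points a device needs to expose; parms->mem routes each request to the external (EEM) or internal (EM) path. A hedged usage sketch follows, setting only the fields whose use is visible in this patch; the key and result pointers and any other mandatory fields are intentionally left as a comment.

/* Editorial usage sketch, not part of the patch. */
static int example_em_insert(struct tf *tfp, uint32_t tbl_scope_id)
{
	struct tf_insert_em_entry_parms parms = { 0 };

	parms.dir = TF_DIR_RX;
	parms.mem = TF_MEM_EXTERNAL;	/* routes to tf_insert_eem_entry() */
	parms.tbl_scope_id = tbl_scope_id;
	/* parms.key, parms.em_record and their sizes must be set here. */

	return tf_em_insert_entry(tfp, &parms);
}

The flow handle the insert packs via TF_SET_FIELDS_IN_FLOW_HANDLE() (table selector plus index) is what a later tf_em_delete_entry() call consumes.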
#include "tf_core.h"
#include "tf_session.h"
+#define SUPPORT_CFA_HW_P4 1
+#define SUPPORT_CFA_HW_P58 0
+#define SUPPORT_CFA_HW_P59 0
+#define SUPPORT_CFA_HW_ALL 0
+
+#include "hcapi/hcapi_cfa_defs.h"
+
#define TF_HW_EM_KEY_MAX_SIZE 52
#define TF_EM_KEY_RECORD_SIZE 64
#define TF_EM_INTERNAL_INDEX_MASK 0xFFFC
#define TF_EM_INTERNAL_ENTRY_MASK 0x3
-/** EEM Entry header
- *
- */
-struct tf_eem_entry_hdr {
- uint32_t pointer;
- uint32_t word1; /*
- * The header is made up of two words,
- * this is the first word. This field has multiple
- * subfields, there is no suitable single name for
- * it so just going with word1.
- */
-#define TF_LKUP_RECORD_VALID_SHIFT 31
-#define TF_LKUP_RECORD_VALID_MASK 0x80000000
-#define TF_LKUP_RECORD_L1_CACHEABLE_SHIFT 30
-#define TF_LKUP_RECORD_L1_CACHEABLE_MASK 0x40000000
-#define TF_LKUP_RECORD_STRENGTH_SHIFT 28
-#define TF_LKUP_RECORD_STRENGTH_MASK 0x30000000
-#define TF_LKUP_RECORD_RESERVED_SHIFT 17
-#define TF_LKUP_RECORD_RESERVED_MASK 0x0FFE0000
-#define TF_LKUP_RECORD_KEY_SIZE_SHIFT 8
-#define TF_LKUP_RECORD_KEY_SIZE_MASK 0x0001FF00
-#define TF_LKUP_RECORD_ACT_REC_SIZE_SHIFT 3
-#define TF_LKUP_RECORD_ACT_REC_SIZE_MASK 0x000000F8
-#define TF_LKUP_RECORD_ACT_REC_INT_SHIFT 2
-#define TF_LKUP_RECORD_ACT_REC_INT_MASK 0x00000004
-#define TF_LKUP_RECORD_EXT_FLOW_CTR_SHIFT 1
-#define TF_LKUP_RECORD_EXT_FLOW_CTR_MASK 0x00000002
-#define TF_LKUP_RECORD_ACT_PTR_MSB_SHIFT 0
-#define TF_LKUP_RECORD_ACT_PTR_MSB_MASK 0x00000001
-};
-
-/** EEM Entry
- * Each EEM entry is 512-bit (64-bytes)
- */
-struct tf_eem_64b_entry {
- /** Key is 448 bits - 56 bytes */
- uint8_t key[TF_EM_KEY_RECORD_SIZE - sizeof(struct tf_eem_entry_hdr)];
- /** Header is 8 bytes long */
- struct tf_eem_entry_hdr hdr;
-};
-
/** EM Entry
* Each EM entry is 512-bit (64-bytes) but ordered differently to
* EEM.
*/
struct tf_em_64b_entry {
/** Header is 8 bytes long */
- struct tf_eem_entry_hdr hdr;
+ struct cfa_p4_eem_entry_hdr hdr;
/** Key is 448 bits - 56 bytes */
- uint8_t key[TF_EM_KEY_RECORD_SIZE - sizeof(struct tf_eem_entry_hdr)];
+ uint8_t key[TF_EM_KEY_RECORD_SIZE - sizeof(struct cfa_p4_eem_entry_hdr)];
};
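
Since the key field is sized as TF_EM_KEY_RECORD_SIZE minus the header, every tf_em_64b_entry should come out at exactly 64 bytes. A compile-time guard along these lines would catch an accidental header change (editorial sketch, assuming the two-word header packs to 8 bytes with no padding):

/* Editorial sketch, not part of the patch: 8-byte header + 56-byte key
 * must add up to one 64-byte EM record. */
_Static_assert(sizeof(struct tf_em_64b_entry) == TF_EM_KEY_RECORD_SIZE,
	       "EM record must be exactly 64 bytes");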
/**
struct tf_tbl_scope_cb *tbl_scope_cb_find(struct tf_session *session,
uint32_t tbl_scope_id);
-int tf_insert_eem_entry(struct tf_session *session,
- struct tf_tbl_scope_cb *tbl_scope_cb,
- struct tf_insert_em_entry_parms *parms);
-
-int tf_insert_em_internal_entry(struct tf *tfp,
- struct tf_insert_em_entry_parms *parms);
-
-int tf_delete_eem_entry(struct tf *tfp,
- struct tf_delete_em_entry_parms *parms);
-
-int tf_delete_em_internal_entry(struct tf *tfp,
- struct tf_delete_em_entry_parms *parms);
-
void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb,
enum tf_dir dir,
uint32_t offset,
- enum tf_em_table_type table_type);
+ enum hcapi_cfa_em_table_type table_type);
+
+int tf_em_insert_entry(struct tf *tfp,
+ struct tf_insert_em_entry_parms *parms);
+int tf_em_delete_entry(struct tf *tfp,
+ struct tf_delete_em_entry_parms *parms);
#endif /* _TF_EM_H_ */
tfp_free(buf->va_addr);
}
+/**
+ * NEW HWRM direct messages
+ */
+
/**
* Sends session open request to TF Firmware
*/
HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
req.flags = tfp_cpu_to_le_16(flags);
- req.strength = (em_result->hdr.word1 & TF_LKUP_RECORD_STRENGTH_MASK) >>
- TF_LKUP_RECORD_STRENGTH_SHIFT;
+ req.strength =
+ (em_result->hdr.word1 & CFA_P4_EEM_ENTRY_STRENGTH_MASK) >>
+ CFA_P4_EEM_ENTRY_STRENGTH_SHIFT;
req.em_key_bitlen = em_parms->key_sz_in_bits;
req.action_ptr = em_result->hdr.pointer;
req.em_record_idx = *rptr_index;
}
int
-tf_msg_get_bulk_tbl_entry(struct tf *tfp,
- struct tf_get_bulk_tbl_entry_parms *params)
+tf_msg_bulk_get_tbl_entry(struct tf *tfp,
+ struct tf_bulk_get_tbl_entry_parms *params)
{
int rc;
struct tfp_send_msg_parms parms = { 0 };
- struct tf_tbl_type_get_bulk_input req = { 0 };
- struct tf_tbl_type_get_bulk_output resp = { 0 };
+ struct tf_tbl_type_bulk_get_input req = { 0 };
+ struct tf_tbl_type_bulk_get_output resp = { 0 };
struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
int data_size = 0;
/* Populate the request */
req.fw_session_id =
tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
- req.flags = tfp_cpu_to_le_16((params->dir) |
- ((params->clear_on_read) ?
- TF_TBL_TYPE_GET_BULK_INPUT_FLAGS_CLEAR_ON_READ : 0x0));
+ req.flags = tfp_cpu_to_le_16(params->dir);
req.type = tfp_cpu_to_le_32(params->type);
req.start_index = tfp_cpu_to_le_32(params->starting_idx);
req.num_entries = tfp_cpu_to_le_32(params->num_entries);
MSG_PREP(parms,
TF_KONG_MB,
HWRM_TF,
- HWRM_TFT_TBL_TYPE_GET_BULK,
+ HWRM_TFT_TBL_TYPE_BULK_GET,
req,
resp);
* Returns:
* 0 on Success else internal Truflow error
*/
-int tf_msg_get_bulk_tbl_entry(struct tf *tfp,
- struct tf_get_bulk_tbl_entry_parms *parms);
+int tf_msg_bulk_get_tbl_entry(struct tf *tfp,
+ struct tf_bulk_get_tbl_entry_parms *parms);
#endif /* _TF_MSG_H_ */
* IDs
*/
#define TF_NUM_WC_PROF_ID 256 /* < Number WC profile IDs */
-#define TF_NUM_WC_TCAM_ROW 256 /* Number slices per row in WC
- * TCAM. A slices is a WC TCAM entry.
- */
+#define TF_NUM_WC_TCAM_ROW 512 /* < Number of rows in WC TCAM */
#define TF_NUM_METER_PROF 256 /* < Number of meter profiles */
#define TF_NUM_METER 1024 /* < Number of meter instances */
#define TF_NUM_MIRROR 2 /* < Number of mirror instances */
#define TF_NUM_UPAR 2 /* < Number of UPAR instances */
-/* Wh+/Brd2 specific HW resources */
+/* Wh+/SR specific HW resources */
#define TF_NUM_SP_TCAM 512 /* < Number of Source Property TCAM
* entries
*/
-/* Brd2/Brd4 specific HW resources */
+/* SR/SR2 specific HW resources */
#define TF_NUM_L2_FUNC 256 /* < Number of L2 Func */
-/* Brd3, Brd4 common HW resources */
+/* Thor, SR2 common HW resources */
#define TF_NUM_FKB 1 /* < Number of Flexible Key Builder
* templates
*/
-/* Brd4 specific HW resources */
+/* SR2 specific HW resources */
#define TF_NUM_TBL_SCOPE 16 /* < Number of TBL scopes */
#define TF_NUM_EPOCH0 1 /* < Number of Epoch0 */
#define TF_NUM_EPOCH1 1 /* < Number of Epoch1 */
#define TF_RSVD_METER_INST_END_IDX_TX 0
/* Mirror */
-#define TF_RSVD_MIRROR_RX 1
+/* Not yet supported fully in the infra */
+#define TF_RSVD_MIRROR_RX 0
#define TF_RSVD_MIRROR_BEGIN_IDX_RX 0
#define TF_RSVD_MIRROR_END_IDX_RX 0
-#define TF_RSVD_MIRROR_TX 1
+#define TF_RSVD_MIRROR_TX 0
#define TF_RSVD_MIRROR_BEGIN_IDX_TX 0
#define TF_RSVD_MIRROR_END_IDX_TX 0
TF_RESC_TYPE_HW_METER_INST,
TF_RESC_TYPE_HW_MIRROR,
TF_RESC_TYPE_HW_UPAR,
- /* Wh+/Brd2 specific HW resources */
+ /* Wh+/SR specific HW resources */
TF_RESC_TYPE_HW_SP_TCAM,
- /* Brd2/Brd4 specific HW resources */
+ /* SR/SR2 specific HW resources */
TF_RESC_TYPE_HW_L2_FUNC,
- /* Brd3, Brd4 common HW resources */
+ /* Thor, SR2 common HW resources */
TF_RESC_TYPE_HW_FKB,
- /* Brd4 specific HW resources */
+ /* SR2 specific HW resources */
TF_RESC_TYPE_HW_TBL_SCOPE,
TF_RESC_TYPE_HW_EPOCH0,
TF_RESC_TYPE_HW_EPOCH1,
#include "tf_resources.h"
#include "tf_msg.h"
#include "bnxt.h"
+#include "tfp.h"
/**
* Internal macro to perform HW resource allocation check between what
{
int i;
- PMD_DRV_LOG(ERR, "QCAPS errors HW\n");
- PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
- PMD_DRV_LOG(ERR, " Elements:\n");
+ TFP_DRV_LOG(ERR, "QCAPS errors HW\n");
+ TFP_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR, " Elements:\n");
for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
if (*error_flag & 1 << i)
- PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
+ TFP_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
tf_hcapi_hw_2_str(i),
hw_query->hw_query[i].max,
tf_rm_rsvd_hw_value(dir, i));
{
int i;
- PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n");
- PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
- PMD_DRV_LOG(ERR, " Elements:\n");
+ TFP_DRV_LOG(ERR, "QCAPS errors SRAM\n");
+ TFP_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR, " Elements:\n");
for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
if (*error_flag & 1 << i)
- PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
+ TFP_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
tf_hcapi_sram_2_str(i),
sram_query->sram_query[i].max,
tf_rm_rsvd_sram_value(dir, i));
for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"%s, Alloc failed id:%d expect:%d got:%d\n",
tf_dir_2_str(dir),
i,
for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"%s, Alloc failed idx:%d expect:%d got:%d\n",
tf_dir_2_str(dir),
i,
rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, HW qcaps message send failed\n",
- tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR,
+ "%s, HW qcaps message send failed, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
goto cleanup;
}
rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, HW QCAPS validation failed, error_flag:0x%x\n",
+ TFP_DRV_LOG(ERR,
+			    "%s, HW QCAPS validation failed, "
+ "error_flag:0x%x, rc:%s\n",
tf_dir_2_str(dir),
- error_flag);
+ error_flag,
+ strerror(-rc));
tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
goto cleanup;
}
rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, HW alloc message send failed\n",
- tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR,
+ "%s, HW alloc message send failed, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
goto cleanup;
}
rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, HW Resource validation failed\n",
- tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR,
+ "%s, HW Resource validation failed, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
goto cleanup;
}
return 0;
cleanup:
+
return -1;
}
rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, SRAM qcaps message send failed\n",
- tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR,
+ "%s, SRAM qcaps message send failed, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
goto cleanup;
}
rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, SRAM QCAPS validation failed, error_flag:%x\n",
+ TFP_DRV_LOG(ERR,
+			    "%s, SRAM QCAPS validation failed, "
+ "error_flag:%x, rc:%s\n",
tf_dir_2_str(dir),
- error_flag);
+ error_flag,
+ strerror(-rc));
tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
goto cleanup;
}
sram_entries);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, SRAM alloc message send failed\n",
- tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR,
+ "%s, SRAM alloc message send failed, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
goto cleanup;
}
rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
if (rc) {
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, SRAM Resource allocation validation failed\n",
- tf_dir_2_str(dir));
+ TFP_DRV_LOG(ERR,
+ "%s, SRAM Resource allocation validation failed,"
+ " rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
goto cleanup;
}
return 0;
cleanup:
+
return -1;
}
flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
} else {
- PMD_DRV_LOG(ERR, "%s: TBL_SCOPE free_cnt:%d, entries:%d\n",
+ TFP_DRV_LOG(ERR, "%s, TBL_SCOPE free_cnt:%d, entries:%d\n",
tf_dir_2_str(dir),
free_cnt,
hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
*/
for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
if (hw_entries[i].stride != 0)
- PMD_DRV_LOG(ERR,
- "%s: %s was not cleaned up\n",
+ TFP_DRV_LOG(ERR,
+ "%s, %s was not cleaned up\n",
tf_dir_2_str(dir),
tf_hcapi_hw_2_str(i));
}
*/
for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
if (sram_entries[i].stride != 0)
- PMD_DRV_LOG(ERR,
- "%s: %s was not cleaned up\n",
+ TFP_DRV_LOG(ERR,
+ "%s, %s was not cleaned up\n",
tf_dir_2_str(dir),
tf_hcapi_sram_2_str(i));
}
if (rc) {
rc_close = -ENOTEMPTY;
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, lingering HW resources\n",
- tf_dir_2_str(i));
+ TFP_DRV_LOG(ERR,
+ "%s, lingering HW resources, rc:%s\n",
+ tf_dir_2_str(i),
+ strerror(-rc));
/* Log the entries to be flushed */
tf_rm_log_hw_flush(i, hw_flush_entries);
if (rc) {
rc_close = rc;
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, HW flush failed\n",
- tf_dir_2_str(i));
+ TFP_DRV_LOG(ERR,
+ "%s, HW flush failed, rc:%s\n",
+ tf_dir_2_str(i),
+ strerror(-rc));
}
}
if (rc) {
rc_close = -ENOTEMPTY;
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, lingering SRAM resources\n",
- tf_dir_2_str(i));
+ TFP_DRV_LOG(ERR,
+ "%s, lingering SRAM resources, rc:%s\n",
+ tf_dir_2_str(i),
+ strerror(-rc));
/* Log the entries to be flushed */
tf_rm_log_sram_flush(i, sram_flush_entries);
if (rc) {
rc_close = rc;
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, HW flush failed\n",
- tf_dir_2_str(i));
+ TFP_DRV_LOG(ERR,
+ "%s, HW flush failed, rc:%s\n",
+ tf_dir_2_str(i),
+ strerror(-rc));
}
}
if (rc) {
rc_close = rc;
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, HW free failed\n",
- tf_dir_2_str(i));
+ TFP_DRV_LOG(ERR,
+ "%s, HW free failed, rc:%s\n",
+ tf_dir_2_str(i),
+ strerror(-rc));
}
rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
if (rc) {
rc_close = rc;
/* Log error */
- PMD_DRV_LOG(ERR,
- "%s, SRAM free failed\n",
- tf_dir_2_str(i));
+ TFP_DRV_LOG(ERR,
+ "%s, SRAM free failed, rc:%s\n",
+ tf_dir_2_str(i),
+ strerror(-rc));
}
}
}
if (rc == -EOPNOTSUPP) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Tcam type not supported, type:%d\n",
- dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Tcam type not supported, type:%d\n",
+ tf_dir_2_str(dir),
type);
return rc;
} else if (rc == -1) {
- PMD_DRV_LOG(ERR,
- "%s:, Tcam type lookup failed, type:%d\n",
+ TFP_DRV_LOG(ERR,
+ "%s, Tcam type lookup failed, type:%d\n",
tf_dir_2_str(dir),
type);
return rc;
}
if (rc == -EOPNOTSUPP) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Table type not supported, type:%d\n",
- dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Table type not supported, type:%d\n",
+ tf_dir_2_str(dir),
type);
return rc;
} else if (rc == -1) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Table type lookup failed, type:%d\n",
- dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Table type lookup failed, type:%d\n",
+ tf_dir_2_str(dir),
type);
return rc;
}
#include "bnxt.h"
#include "tf_resources.h"
#include "tf_rm.h"
+#include "stack.h"
#include "tf_common.h"
#define PTU_PTE_VALID 0x1UL
* Pointer to the page table to free
*/
static void
-tf_em_free_pg_tbl(struct tf_em_page_tbl *tp)
+tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
{
uint32_t i;
for (i = 0; i < tp->pg_count; i++) {
if (!tp->pg_va_tbl[i]) {
- PMD_DRV_LOG(WARNING,
- "No map for page %d table %016" PRIu64 "\n",
+ TFP_DRV_LOG(WARNING,
+ "No mapping for page: %d table: %016" PRIu64 "\n",
i,
(uint64_t)(uintptr_t)tp);
continue;
* Pointer to the EM table to free
*/
static void
-tf_em_free_page_table(struct tf_em_table *tbl)
+tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
{
- struct tf_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp;
int i;
for (i = 0; i < tbl->num_lvl; i++) {
tp = &tbl->pg_tbl[i];
-
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"EEM: Freeing page table: size %u lvl %d cnt %u\n",
TF_EM_PAGE_SIZE,
i,
* -ENOMEM - Out of memory
*/
static int
-tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp,
+tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
uint32_t pg_count,
uint32_t pg_size)
{
* -ENOMEM - Out of memory
*/
static int
-tf_em_alloc_page_table(struct tf_em_table *tbl)
+tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
{
- struct tf_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp;
int rc = 0;
int i;
uint32_t j;
tbl->page_cnt[i],
TF_EM_PAGE_SIZE);
if (rc) {
- PMD_DRV_LOG(WARNING,
- "Failed to allocate page table: lvl: %d\n",
- i);
+ TFP_DRV_LOG(WARNING,
+ "Failed to allocate page table: lvl: %d, rc:%s\n",
+ i,
+ strerror(-rc));
goto cleanup;
}
for (j = 0; j < tp->pg_count; j++) {
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO,
"EEM: Allocated page table: size %u lvl %d cnt"
" %u VA:%p PA:%p\n",
TF_EM_PAGE_SIZE,
* Flag controlling if the page table is last
*/
static void
-tf_em_link_page_table(struct tf_em_page_tbl *tp,
- struct tf_em_page_tbl *tp_next,
+tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
+ struct hcapi_cfa_em_page_tbl *tp_next,
bool set_pte_last)
{
uint64_t *pg_pa = tp_next->pg_pa_tbl;
* Pointer to EM page table
*/
static void
-tf_em_setup_page_table(struct tf_em_table *tbl)
+tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
{
- struct tf_em_page_tbl *tp_next;
- struct tf_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp_next;
+ struct hcapi_cfa_em_page_tbl *tp;
bool set_pte_last = 0;
int i;
* - ENOMEM - Out of memory
*/
static int
-tf_em_size_table(struct tf_em_table *tbl)
+tf_em_size_table(struct hcapi_cfa_em_table *tbl)
{
uint64_t num_data_pages;
uint32_t *page_cnt;
tbl->num_entries,
&num_data_pages);
if (max_lvl < 0) {
- PMD_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
- PMD_DRV_LOG(WARNING,
+ TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
+ TFP_DRV_LOG(WARNING,
"table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
- tbl->type,
- (uint64_t)num_entries * tbl->entry_size,
+ tbl->type, (uint64_t)num_entries * tbl->entry_size,
TF_EM_PAGE_SIZE);
return -ENOMEM;
}
tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
page_cnt);
- PMD_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
- PMD_DRV_LOG(INFO,
+ TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
+ TFP_DRV_LOG(INFO,
"EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
max_lvl + 1,
(uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
struct tf_tbl_scope_cb *tbl_scope_cb,
int dir)
{
- struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
- struct tf_em_table *tbl;
+ struct hcapi_cfa_em_ctx_mem_info *ctxp =
+ &tbl_scope_cb->em_ctx_info[dir];
+ struct hcapi_cfa_em_table *tbl;
int i;
for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
struct tf_tbl_scope_cb *tbl_scope_cb,
int dir)
{
- struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
- struct tf_em_table *tbl;
+ struct hcapi_cfa_em_ctx_mem_info *ctxp =
+ &tbl_scope_cb->em_ctx_info[dir];
+ struct hcapi_cfa_em_table *tbl;
int rc = 0;
int i;
TF_MEGABYTE) / (key_b + action_b);
if (num_entries < TF_EM_MIN_ENTRIES) {
- PMD_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
+ TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
"%uMB\n",
parms->rx_mem_size_in_mb);
return -EINVAL;
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
+ TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
"%u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
TF_EM_MIN_ENTRIES ||
(parms->rx_num_flows_in_k * TF_KILOBYTE) >
tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Rx flows "
"requested:%u max:%u\n",
parms->rx_num_flows_in_k * TF_KILOBYTE,
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Rx requested: %u\n",
(parms->rx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
(key_b + action_b);
if (num_entries < TF_EM_MIN_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Insufficient memory requested:%uMB\n",
parms->rx_mem_size_in_mb);
return -EINVAL;
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Tx requested: %u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
TF_EM_MIN_ENTRIES ||
(parms->tx_num_flows_in_k * TF_KILOBYTE) >
tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Tx flows "
"requested:%u max:%u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE),
cnt *= 2;
if (cnt > TF_EM_MAX_ENTRIES) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Invalid number of Tx requested: %u\n",
(parms->tx_num_flows_in_k * TF_KILOBYTE));
return -EINVAL;
if (parms->rx_num_flows_in_k != 0 &&
(parms->rx_max_key_sz_in_bits / 8 == 0)) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Rx key size required: %u\n",
(parms->rx_max_key_sz_in_bits));
return -EINVAL;
if (parms->tx_num_flows_in_k != 0 &&
(parms->tx_max_key_sz_in_bits / 8 == 0)) {
- PMD_DRV_LOG(ERR,
+ TFP_DRV_LOG(ERR,
"EEM: Tx key size required: %u\n",
(parms->tx_max_key_sz_in_bits));
return -EINVAL;
if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
- parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
parms->type != TF_TBL_TYPE_ACT_STATS_64) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Type not supported, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Type not supported, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return -EOPNOTSUPP;
}
/* Verify that the entry has been previously allocated */
id = ba_inuse(session_pool, index);
if (id != 1) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Invalid or not allocated index, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
return -EINVAL;
parms->data,
parms->idx);
if (rc) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Set failed, type:%d, rc:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Set failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
parms->type,
- rc);
+ strerror(-rc));
}
return rc;
/* Verify that the entry has been previously allocated */
id = ba_inuse(session_pool, index);
if (id != 1) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Invalid or not allocated index, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
return -EINVAL;
parms->data,
parms->idx);
if (rc) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Get failed, type:%d, rc:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Get failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
parms->type,
- rc);
+ strerror(-rc));
}
return rc;
* -EINVAL - Parameter error
*/
static int
-tf_get_bulk_tbl_entry_internal(struct tf *tfp,
- struct tf_get_bulk_tbl_entry_parms *parms)
+tf_bulk_get_tbl_entry_internal(struct tf *tfp,
+ struct tf_bulk_get_tbl_entry_parms *parms)
{
int rc;
int id;
}
/* Get the entry */
- rc = tf_msg_get_bulk_tbl_entry(tfp, parms);
+ rc = tf_msg_bulk_get_tbl_entry(tfp, parms);
if (rc) {
TFP_DRV_LOG(ERR,
"%s, Bulk get failed, type:%d, rc:%s\n",
tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
struct tf_alloc_tbl_entry_parms *parms __rte_unused)
{
- PMD_DRV_LOG(ERR,
- "dir:%d, Entry Alloc with search not supported\n",
- parms->dir);
-
+ TFP_DRV_LOG(ERR,
+ "%s, Entry Alloc with search not supported\n",
+ tf_dir_2_str(parms->dir));
return -EOPNOTSUPP;
}
tf_free_tbl_entry_shadow(struct tf_session *tfs,
struct tf_free_tbl_entry_parms *parms)
{
- PMD_DRV_LOG(ERR,
- "dir:%d, Entry Free with search not supported\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, Entry Free with search not supported\n",
+ tf_dir_2_str(parms->dir));
return -EOPNOTSUPP;
}
parms.alignment = 0;
if (tfp_calloc(&parms) != 0) {
- PMD_DRV_LOG(ERR, "%d: TBL: external pool failure %s\n",
- dir, strerror(-ENOMEM));
+ TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
+ tf_dir_2_str(dir), strerror(ENOMEM));
return -ENOMEM;
}
rc = stack_init(num_entries, parms.mem_va, pool);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "%d: TBL: stack init failure %s\n",
- dir, strerror(-rc));
+ TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
+ tf_dir_2_str(dir), strerror(-rc));
goto cleanup;
}
for (i = 0; i < num_entries; i++) {
rc = stack_push(pool, j);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
+ TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
tf_dir_2_str(dir), strerror(-rc));
goto cleanup;
}
if (j < 0) {
- PMD_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
+ TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
dir, j);
goto cleanup;
}
if (!stack_is_full(pool)) {
rc = -EINVAL;
- PMD_DRV_LOG(ERR, "%d TBL: stack failure %s\n",
- dir, strerror(-rc));
+ TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
+ tf_dir_2_str(dir), strerror(-rc));
goto cleanup;
}
return 0;
struct tf_tbl_scope_cb *tbl_scope_cb;
struct stack *pool;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
- PMD_DRV_LOG(ERR,
- "%s, table scope not allocated\n",
- tf_dir_2_str(parms->dir));
+ TFP_DRV_LOG(ERR,
+ "%s, table scope not allocated\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
pool = &tbl_scope_cb->ext_act_pool[parms->dir];
rc = stack_pop(pool, &index);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Allocation failed, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Allocation failed, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return rc;
}
struct bitalloc *session_pool;
struct tf_session *tfs;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
- parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
parms->type != TF_TBL_TYPE_ACT_STATS_64) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Type not supported, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Type not supported, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return -EOPNOTSUPP;
}
if (id == -1) {
free_cnt = ba_free_count(session_pool);
- PMD_DRV_LOG(ERR,
- "dir:%d, Allocation failed, type:%d, free:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Allocation failed, type:%d, free:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
free_cnt);
return -ENOMEM;
struct tf_tbl_scope_cb *tbl_scope_cb;
struct stack *pool;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
if (tbl_scope_cb == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, table scope error\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
pool = &tbl_scope_cb->ext_act_pool[parms->dir];
rc = stack_push(pool, index);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "dir:%d, consistency error, stack full, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, consistency error, stack full, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
}
struct tf_session *tfs;
uint32_t index;
- /* Check parameters */
- if (tfp == NULL || parms == NULL) {
- PMD_DRV_LOG(ERR, "Invalid parameters\n");
- return -EINVAL;
- }
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
tfs = (struct tf_session *)(tfp->session->core_data);
parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
parms->type != TF_TBL_TYPE_ACT_STATS_64) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Type not supported, type:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Type not supported, type:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type);
return -EOPNOTSUPP;
}
/* Check if element was indeed allocated */
id = ba_inuse_free(session_pool, index);
if (id == -1) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Element not previously alloc'ed, type:%d, idx:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Element not previously alloc'ed, type:%d, idx:%d\n",
+ tf_dir_2_str(parms->dir),
parms->type,
index);
return -ENOMEM;
tbl_scope_cb = tbl_scope_cb_find(session,
parms->tbl_scope_id);
- if (tbl_scope_cb == NULL)
+ if (tbl_scope_cb == NULL) {
+ TFP_DRV_LOG(ERR, "Table scope error\n");
return -EINVAL;
+ }
/* Free Table control block */
ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
int rc;
enum tf_dir dir;
struct tf_tbl_scope_cb *tbl_scope_cb;
- struct tf_em_table *em_tables;
+ struct hcapi_cfa_em_table *em_tables;
int index;
struct tf_session *session;
struct tf_free_tbl_scope_parms free_parms;
- /* check parameters */
- if (parms == NULL || tfp->session == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
-
session = (struct tf_session *)tfp->session->core_data;
/* Get Table Scope control block from the session pool */
index = ba_alloc(session->tbl_scope_pool_rx);
if (index == -1) {
- PMD_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
+ TFP_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
"Control Block\n");
return -ENOMEM;
}
dir,
&tbl_scope_cb->em_caps[dir]);
if (rc) {
- PMD_DRV_LOG(ERR,
- "EEM: Unable to query for EEM capability\n");
+ TFP_DRV_LOG(ERR,
+ "EEM: Unable to query for EEM capability,"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup;
}
}
*/
rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
if (rc) {
- PMD_DRV_LOG(ERR,
- "EEM: Unable to register for EEM ctx\n");
+ TFP_DRV_LOG(ERR,
+ "EEM: Unable to register for EEM ctx,"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup;
}
parms->hw_flow_cache_flush_timer,
dir);
if (rc) {
- PMD_DRV_LOG(ERR,
- "TBL: Unable to configure EEM in firmware\n");
+ TFP_DRV_LOG(ERR,
+ "TBL: Unable to configure EEM in firmware"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup_full;
}
HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
if (rc) {
- PMD_DRV_LOG(ERR,
- "EEM: Unable to enable EEM in firmware\n");
+ TFP_DRV_LOG(ERR,
+ "EEM: Unable to enable EEM in firmware"
+ " rc:%s\n",
+ strerror(-rc));
goto cleanup_full;
}
em_tables[TF_RECORD_TABLE].num_entries,
em_tables[TF_RECORD_TABLE].entry_size);
if (rc) {
- PMD_DRV_LOG(ERR,
- "%d TBL: Unable to allocate idx pools %s\n",
- dir,
+ TFP_DRV_LOG(ERR,
+ "%s TBL: Unable to allocate idx pools %s\n",
+ tf_dir_2_str(dir),
strerror(-rc));
goto cleanup_full;
}
struct tf_tbl_scope_cb *tbl_scope_cb;
struct tf_session *session;
- if (tfp == NULL || parms == NULL || parms->data == NULL)
- return -EINVAL;
+ TF_CHECK_PARMS_SESSION(tfp, parms);
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
+ if (parms->data == NULL) {
+ TFP_DRV_LOG(ERR,
+ "%s, invalid parms->data\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
tbl_scope_id = parms->tbl_scope_id;
if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Table scope not allocated\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, Table scope not allocated\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
*/
tbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id);
- if (tbl_scope_cb == NULL)
- return -EINVAL;
+ if (tbl_scope_cb == NULL) {
+ TFP_DRV_LOG(ERR,
+ "%s, table scope error\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
/* External table, implicitly the Action table */
- base_addr = tf_em_get_table_page(tbl_scope_cb,
- parms->dir,
- offset,
- TF_RECORD_TABLE);
+ base_addr = (void *)(uintptr_t)
+ hcapi_get_table_page(&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE], offset);
+
if (base_addr == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Base address lookup failed\n",
- parms->dir);
+ TFP_DRV_LOG(ERR,
+ "%s, Base address lookup failed\n",
+ tf_dir_2_str(parms->dir));
return -EINVAL;
}
/* Internal table type processing */
rc = tf_set_tbl_entry_internal(tfp, parms);
if (rc) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Set failed, type:%d, rc:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Set failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
parms->type,
- rc);
+ strerror(-rc));
}
}
{
int rc = 0;
- if (tfp == NULL || parms == NULL)
- return -EINVAL;
-
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
if (parms->type == TF_TBL_TYPE_EXT) {
- PMD_DRV_LOG(ERR,
- "dir:%d, External table type not supported\n",
- parms->dir);
+ /* Not supported, yet */
+ TFP_DRV_LOG(ERR,
+ "%s, External table type not supported\n",
+ tf_dir_2_str(parms->dir));
rc = -EOPNOTSUPP;
} else {
/* Internal table type processing */
rc = tf_get_tbl_entry_internal(tfp, parms);
if (rc)
- PMD_DRV_LOG(ERR,
- "dir:%d, Get failed, type:%d, rc:%d\n",
- parms->dir,
+ TFP_DRV_LOG(ERR,
+ "%s, Get failed, type:%d, rc:%s\n",
+ tf_dir_2_str(parms->dir),
parms->type,
- rc);
+ strerror(-rc));
}
return rc;
/* API defined in tf_core.h */
int
-tf_get_bulk_tbl_entry(struct tf *tfp,
- struct tf_get_bulk_tbl_entry_parms *parms)
+tf_bulk_get_tbl_entry(struct tf *tfp,
+ struct tf_bulk_get_tbl_entry_parms *parms)
{
int rc = 0;
rc = -EOPNOTSUPP;
} else {
/* Internal table type processing */
- rc = tf_get_bulk_tbl_entry_internal(tfp, parms);
+ rc = tf_bulk_get_tbl_entry_internal(tfp, parms);
if (rc)
TFP_DRV_LOG(ERR,
"%s, Bulk get failed, type:%d, rc:%s\n",
{
int rc;
- /* check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
rc = tf_alloc_eem_tbl_scope(tfp, parms);
{
int rc;
- /* check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
/* free table scope and all associated resources */
rc = tf_free_eem_tbl_scope_cb(tfp, parms);
struct tf_session *tfs;
#endif /* TF_SHADOW */
- /* Check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
/*
* No shadow copy support for external tables, allocate and return
*/
}
#if (TF_SHADOW == 1)
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
-
tfs = (struct tf_session *)(tfp->session->core_data);
/* Search the Shadow DB for requested element. If not found go
rc = tf_alloc_tbl_entry_pool_internal(tfp, parms);
if (rc)
- PMD_DRV_LOG(ERR, "dir%d, Alloc failed, rc:%d\n",
- parms->dir,
- rc);
+ TFP_DRV_LOG(ERR, "%s, Alloc failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
return rc;
}
struct tf_session *tfs;
#endif /* TF_SHADOW */
- /* Check parameters */
- if (parms == NULL || tfp == NULL) {
- PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
- return -EINVAL;
- }
+ TF_CHECK_PARMS_SESSION(tfp, parms);
+
/*
* No shadow of external tables so just free the entry
*/
}
#if (TF_SHADOW == 1)
- if (tfp->session == NULL || tfp->session->core_data == NULL) {
- PMD_DRV_LOG(ERR,
- "dir:%d, Session info invalid\n",
- parms->dir);
- return -EINVAL;
- }
-
tfs = (struct tf_session *)(tfp->session->core_data);
/* Search the Shadow DB for requested element. If not found go
rc = tf_free_tbl_entry_pool_internal(tfp, parms);
if (rc)
- PMD_DRV_LOG(ERR, "dir:%d, Alloc failed, rc:%d\n",
- parms->dir,
- rc);
+ TFP_DRV_LOG(ERR, "%s, Alloc failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
return rc;
}
static void
-tf_dump_link_page_table(struct tf_em_page_tbl *tp,
- struct tf_em_page_tbl *tp_next)
+tf_dump_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
+ struct hcapi_cfa_em_page_tbl *tp_next)
{
uint64_t *pg_va;
uint32_t i;
{
struct tf_session *session;
struct tf_tbl_scope_cb *tbl_scope_cb;
- struct tf_em_page_tbl *tp;
- struct tf_em_page_tbl *tp_next;
- struct tf_em_table *tbl;
+ struct hcapi_cfa_em_page_tbl *tp;
+ struct hcapi_cfa_em_page_tbl *tp_next;
+ struct hcapi_cfa_em_table *tbl;
int i;
int j;
int dir;
tbl_scope_cb = tbl_scope_cb_find(session,
tbl_scope_id);
if (tbl_scope_cb == NULL)
- TFP_DRV_LOG(ERR, "No table scope\n");
+ PMD_DRV_LOG(ERR, "No table scope\n");
for (dir = 0; dir < TF_DIR_MAX; dir++) {
printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
struct tf_session;
-enum tf_pg_tbl_lvl {
- TF_PT_LVL_0,
- TF_PT_LVL_1,
- TF_PT_LVL_2,
- TF_PT_LVL_MAX
-};
-
-enum tf_em_table_type {
- TF_KEY0_TABLE,
- TF_KEY1_TABLE,
- TF_RECORD_TABLE,
- TF_EFC_TABLE,
- TF_MAX_TABLE
-};
-
-struct tf_em_page_tbl {
- uint32_t pg_count;
- uint32_t pg_size;
- void **pg_va_tbl;
- uint64_t *pg_pa_tbl;
-};
-
-struct tf_em_table {
- int type;
- uint32_t num_entries;
- uint16_t ctx_id;
- uint32_t entry_size;
- int num_lvl;
- uint32_t page_cnt[TF_PT_LVL_MAX];
- uint64_t num_data_pages;
- void *l0_addr;
- uint64_t l0_dma_addr;
- struct tf_em_page_tbl pg_tbl[TF_PT_LVL_MAX];
-};
-
-struct tf_em_ctx_mem_info {
- struct tf_em_table em_tables[TF_MAX_TABLE];
-};
-
/** table scope control block content */
struct tf_em_caps {
uint32_t flags;
struct tf_tbl_scope_cb {
uint32_t tbl_scope_id;
int index;
- struct tf_em_ctx_mem_info em_ctx_info[TF_DIR_MAX];
+ struct hcapi_cfa_em_ctx_mem_info em_ctx_info[TF_DIR_MAX];
struct tf_em_caps em_caps[TF_DIR_MAX];
struct stack ext_act_pool[TF_DIR_MAX];
uint32_t *ext_act_pool_mem[TF_DIR_MAX];
};
-/**
- * Hardware Page sizes supported for EEM:
- * 4K, 8K, 64K, 256K, 1M, 2M, 4M, 1G.
- *
- * Round-down other page sizes to the lower hardware page
- * size supported.
+/** Hardware Page sizes supported for EEM: 4K, 8K, 64K, 256K, 1M, 2M, 4M, 1G.
+ * Round down other page sizes to the next lower supported hardware page size.
*/
#define TF_EM_PAGE_SIZE_4K 12
#define TF_EM_PAGE_SIZE_8K 13