- Implement the modules RM, Device (WH+), Identifier.
- Update Session module.
- Implement new HWRMs for RM direct messaging.
- Add new parameter check macros and clean up the header includes for
i.e. tfp such that bnxt.h is not directly included in the new modules.
- Add cfa_resource_types, required for RM design.
Signed-off-by: Michael Wildt <michael.wildt@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
'tf_core/tf_rm.c',
'tf_core/tf_tbl.c',
'tf_core/tfp.c',
+ 'tf_core/tf_session.c',
+ 'tf_core/tf_device.c',
'tf_core/tf_device_p4.c',
'tf_core/tf_identifier.c',
'tf_core/tf_shadow_tbl.c',
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_msg.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_tbl.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_session.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_device.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_device_p4.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_identifier.c
#ifndef _CFA_RESOURCE_TYPES_H_
#define _CFA_RESOURCE_TYPES_H_
+/*
+ * This is the constant used to define invalid CFA
+ * resource types across all devices.
+ */
+#define CFA_RESOURCE_TYPE_INVALID 65535
/* L2 Context TCAM */
#define CFA_RESOURCE_TYPE_P59_L2_CTXT_TCAM 0x0UL
#define CFA_RESOURCE_TYPE_P59_LAST CFA_RESOURCE_TYPE_P59_VEB_TCAM
-/* SRAM Multicast Group */
-#define CFA_RESOURCE_TYPE_P58_SRAM_MCG 0x0UL
-/* SRAM Encap 8 byte record */
-#define CFA_RESOURCE_TYPE_P58_SRAM_ENCAP_8B 0x1UL
-/* SRAM Encap 16 byte record */
-#define CFA_RESOURCE_TYPE_P58_SRAM_ENCAP_16B 0x2UL
-/* SRAM Encap 64 byte record */
-#define CFA_RESOURCE_TYPE_P58_SRAM_ENCAP_64B 0x3UL
-/* SRAM Source Property MAC */
-#define CFA_RESOURCE_TYPE_P58_SRAM_SP_MAC 0x4UL
-/* SRAM Source Property MAC and IPv4 */
-#define CFA_RESOURCE_TYPE_P58_SRAM_SP_MAC_IPV4 0x5UL
-/* SRAM Source Property MAC and IPv6 */
-#define CFA_RESOURCE_TYPE_P58_SRAM_SP_MAC_IPV6 0x6UL
-/* SRAM Network Address Translation Source Port */
-#define CFA_RESOURCE_TYPE_P58_SRAM_NAT_SPORT 0x7UL
-/* SRAM Network Address Translation Destination Port */
-#define CFA_RESOURCE_TYPE_P58_SRAM_NAT_DPORT 0x8UL
-/* SRAM Network Address Translation Source IPv4 address */
-#define CFA_RESOURCE_TYPE_P58_SRAM_NAT_S_IPV4 0x9UL
-/* SRAM Network Address Translation Destination IPv4 address */
-#define CFA_RESOURCE_TYPE_P58_SRAM_NAT_D_IPV4 0xaUL
-/* SRAM Network Address Translation Source IPv4 address */
-#define CFA_RESOURCE_TYPE_P58_SRAM_NAT_S_IPV6 0xbUL
-/* SRAM Network Address Translation Destination IPv4 address */
-#define CFA_RESOURCE_TYPE_P58_SRAM_NAT_D_IPV6 0xcUL
+/* Multicast Group */
+#define CFA_RESOURCE_TYPE_P58_MCG 0x0UL
+/* Encap 8 byte record */
+#define CFA_RESOURCE_TYPE_P58_ENCAP_8B 0x1UL
+/* Encap 16 byte record */
+#define CFA_RESOURCE_TYPE_P58_ENCAP_16B 0x2UL
+/* Encap 64 byte record */
+#define CFA_RESOURCE_TYPE_P58_ENCAP_64B 0x3UL
+/* Source Property MAC */
+#define CFA_RESOURCE_TYPE_P58_SP_MAC 0x4UL
+/* Source Property MAC and IPv4 */
+#define CFA_RESOURCE_TYPE_P58_SP_MAC_IPV4 0x5UL
+/* Source Property MAC and IPv6 */
+#define CFA_RESOURCE_TYPE_P58_SP_MAC_IPV6 0x6UL
+/* Network Address Translation Source Port */
+#define CFA_RESOURCE_TYPE_P58_NAT_SPORT 0x7UL
+/* Network Address Translation Destination Port */
+#define CFA_RESOURCE_TYPE_P58_NAT_DPORT 0x8UL
+/* Network Address Translation Source IPv4 address */
+#define CFA_RESOURCE_TYPE_P58_NAT_S_IPV4 0x9UL
+/* Network Address Translation Destination IPv4 address */
+#define CFA_RESOURCE_TYPE_P58_NAT_D_IPV4 0xaUL
+/* Network Address Translation Source IPv6 address */
+#define CFA_RESOURCE_TYPE_P58_NAT_S_IPV6 0xbUL
+/* Network Address Translation Destination IPv6 address */
+#define CFA_RESOURCE_TYPE_P58_NAT_D_IPV6 0xcUL
/* Meter */
-#define CFA_RESOURCE_TYPE_P58_SRAM_METER 0xdUL
+#define CFA_RESOURCE_TYPE_P58_METER 0xdUL
/* Flow State */
-#define CFA_RESOURCE_TYPE_P58_SRAM_FLOW_STATE 0xeUL
+#define CFA_RESOURCE_TYPE_P58_FLOW_STATE 0xeUL
/* Full Action Records */
-#define CFA_RESOURCE_TYPE_P58_SRAM_FULL_ACTION 0xfUL
+#define CFA_RESOURCE_TYPE_P58_FULL_ACTION 0xfUL
/* Action Record Format 0 */
-#define CFA_RESOURCE_TYPE_P58_SRAM_FORMAT_0_ACTION 0x10UL
+#define CFA_RESOURCE_TYPE_P58_FORMAT_0_ACTION 0x10UL
/* Action Record Format 2 */
-#define CFA_RESOURCE_TYPE_P58_SRAM_FORMAT_2_ACTION 0x11UL
+#define CFA_RESOURCE_TYPE_P58_FORMAT_2_ACTION 0x11UL
/* Action Record Format 3 */
-#define CFA_RESOURCE_TYPE_P58_SRAM_FORMAT_3_ACTION 0x12UL
+#define CFA_RESOURCE_TYPE_P58_FORMAT_3_ACTION 0x12UL
/* Action Record Format 4 */
-#define CFA_RESOURCE_TYPE_P58_SRAM_FORMAT_4_ACTION 0x13UL
+#define CFA_RESOURCE_TYPE_P58_FORMAT_4_ACTION 0x13UL
/* L2 Context TCAM */
-#define CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM 0x14UL
+#define CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM 0x14UL
/* Profile Func */
-#define CFA_RESOURCE_TYPE_P58_PROF_FUNC 0x15UL
+#define CFA_RESOURCE_TYPE_P58_PROF_FUNC 0x15UL
/* Profile TCAM */
-#define CFA_RESOURCE_TYPE_P58_PROF_TCAM 0x16UL
+#define CFA_RESOURCE_TYPE_P58_PROF_TCAM 0x16UL
/* Exact Match Profile Id */
-#define CFA_RESOURCE_TYPE_P58_EM_PROF_ID 0x17UL
+#define CFA_RESOURCE_TYPE_P58_EM_PROF_ID 0x17UL
/* Wildcard Profile Id */
-#define CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID 0x18UL
+#define CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID 0x18UL
+/* Exact Match Record */
+#define CFA_RESOURCE_TYPE_P58_EM_REC 0x19UL
/* Wildcard TCAM */
-#define CFA_RESOURCE_TYPE_P58_WC_TCAM 0x19UL
+#define CFA_RESOURCE_TYPE_P58_WC_TCAM 0x1aUL
/* Meter profile */
-#define CFA_RESOURCE_TYPE_P58_METER_PROF 0x1aUL
+#define CFA_RESOURCE_TYPE_P58_METER_PROF 0x1bUL
-/* Meter */
+/* Mirror */
-#define CFA_RESOURCE_TYPE_P58_METER 0x1bUL
-/* Meter */
-#define CFA_RESOURCE_TYPE_P58_MIRROR 0x1cUL
+#define CFA_RESOURCE_TYPE_P58_MIRROR 0x1cUL
/* Source Property TCAM */
-#define CFA_RESOURCE_TYPE_P58_SP_TCAM 0x1dUL
+#define CFA_RESOURCE_TYPE_P58_SP_TCAM 0x1dUL
/* Exact Match Flexible Key Builder */
-#define CFA_RESOURCE_TYPE_P58_EM_FKB 0x1eUL
+#define CFA_RESOURCE_TYPE_P58_EM_FKB 0x1eUL
/* Wildcard Flexible Key Builder */
-#define CFA_RESOURCE_TYPE_P58_WC_FKB 0x1fUL
+#define CFA_RESOURCE_TYPE_P58_WC_FKB 0x1fUL
/* VEB TCAM */
-#define CFA_RESOURCE_TYPE_P58_VEB_TCAM 0x20UL
-#define CFA_RESOURCE_TYPE_P58_LAST CFA_RESOURCE_TYPE_P58_VEB_TCAM
+#define CFA_RESOURCE_TYPE_P58_VEB_TCAM 0x20UL
+#define CFA_RESOURCE_TYPE_P58_LAST CFA_RESOURCE_TYPE_P58_VEB_TCAM
-/* SRAM Multicast Group */
-#define CFA_RESOURCE_TYPE_P45_SRAM_MCG 0x0UL
-/* SRAM Encap 8 byte record */
-#define CFA_RESOURCE_TYPE_P45_SRAM_ENCAP_8B 0x1UL
-/* SRAM Encap 16 byte record */
-#define CFA_RESOURCE_TYPE_P45_SRAM_ENCAP_16B 0x2UL
-/* SRAM Encap 64 byte record */
-#define CFA_RESOURCE_TYPE_P45_SRAM_ENCAP_64B 0x3UL
-/* SRAM Source Property MAC */
-#define CFA_RESOURCE_TYPE_P45_SRAM_SP_MAC 0x4UL
-/* SRAM Source Property MAC and IPv4 */
-#define CFA_RESOURCE_TYPE_P45_SRAM_SP_MAC_IPV4 0x5UL
-/* SRAM Source Property MAC and IPv6 */
-#define CFA_RESOURCE_TYPE_P45_SRAM_SP_MAC_IPV6 0x6UL
-/* SRAM 64B Counters */
-#define CFA_RESOURCE_TYPE_P45_SRAM_COUNTER_64B 0x7UL
-/* SRAM Network Address Translation Source Port */
-#define CFA_RESOURCE_TYPE_P45_SRAM_NAT_SPORT 0x8UL
-/* SRAM Network Address Translation Destination Port */
-#define CFA_RESOURCE_TYPE_P45_SRAM_NAT_DPORT 0x9UL
-/* SRAM Network Address Translation Source IPv4 address */
-#define CFA_RESOURCE_TYPE_P45_SRAM_NAT_S_IPV4 0xaUL
-/* SRAM Network Address Translation Destination IPv4 address */
-#define CFA_RESOURCE_TYPE_P45_SRAM_NAT_D_IPV4 0xbUL
-/* SRAM Network Address Translation Source IPv6 address */
-#define CFA_RESOURCE_TYPE_P45_SRAM_NAT_S_IPV6 0xcUL
-/* SRAM Network Address Translation Destination IPv6 address */
-#define CFA_RESOURCE_TYPE_P45_SRAM_NAT_D_IPV6 0xdUL
+/* Multicast Group */
+#define CFA_RESOURCE_TYPE_P45_MCG 0x0UL
+/* Encap 8 byte record */
+#define CFA_RESOURCE_TYPE_P45_ENCAP_8B 0x1UL
+/* Encap 16 byte record */
+#define CFA_RESOURCE_TYPE_P45_ENCAP_16B 0x2UL
+/* Encap 64 byte record */
+#define CFA_RESOURCE_TYPE_P45_ENCAP_64B 0x3UL
+/* Source Property MAC */
+#define CFA_RESOURCE_TYPE_P45_SP_MAC 0x4UL
+/* Source Property MAC and IPv4 */
+#define CFA_RESOURCE_TYPE_P45_SP_MAC_IPV4 0x5UL
+/* Source Property MAC and IPv6 */
+#define CFA_RESOURCE_TYPE_P45_SP_MAC_IPV6 0x6UL
+/* 64B Counters */
+#define CFA_RESOURCE_TYPE_P45_COUNTER_64B 0x7UL
+/* Network Address Translation Source Port */
+#define CFA_RESOURCE_TYPE_P45_NAT_SPORT 0x8UL
+/* Network Address Translation Destination Port */
+#define CFA_RESOURCE_TYPE_P45_NAT_DPORT 0x9UL
+/* Network Address Translation Source IPv4 address */
+#define CFA_RESOURCE_TYPE_P45_NAT_S_IPV4 0xaUL
+/* Network Address Translation Destination IPv4 address */
+#define CFA_RESOURCE_TYPE_P45_NAT_D_IPV4 0xbUL
+/* Network Address Translation Source IPv6 address */
+#define CFA_RESOURCE_TYPE_P45_NAT_S_IPV6 0xcUL
+/* Network Address Translation Destination IPv6 address */
+#define CFA_RESOURCE_TYPE_P45_NAT_D_IPV6 0xdUL
/* Meter */
-#define CFA_RESOURCE_TYPE_P45_SRAM_METER 0xeUL
+#define CFA_RESOURCE_TYPE_P45_METER 0xeUL
/* Flow State */
-#define CFA_RESOURCE_TYPE_P45_SRAM_FLOW_STATE 0xfUL
+#define CFA_RESOURCE_TYPE_P45_FLOW_STATE 0xfUL
/* Full Action Records */
-#define CFA_RESOURCE_TYPE_P45_SRAM_FULL_ACTION 0x10UL
+#define CFA_RESOURCE_TYPE_P45_FULL_ACTION 0x10UL
/* Action Record Format 0 */
-#define CFA_RESOURCE_TYPE_P45_SRAM_FORMAT_0_ACTION 0x11UL
+#define CFA_RESOURCE_TYPE_P45_FORMAT_0_ACTION 0x11UL
/* Action Record Format 2 */
-#define CFA_RESOURCE_TYPE_P45_SRAM_FORMAT_2_ACTION 0x12UL
+#define CFA_RESOURCE_TYPE_P45_FORMAT_2_ACTION 0x12UL
/* Action Record Format 3 */
-#define CFA_RESOURCE_TYPE_P45_SRAM_FORMAT_3_ACTION 0x13UL
+#define CFA_RESOURCE_TYPE_P45_FORMAT_3_ACTION 0x13UL
/* Action Record Format 4 */
-#define CFA_RESOURCE_TYPE_P45_SRAM_FORMAT_4_ACTION 0x14UL
+#define CFA_RESOURCE_TYPE_P45_FORMAT_4_ACTION 0x14UL
/* L2 Context TCAM */
-#define CFA_RESOURCE_TYPE_P45_L2_CTXT_TCAM 0x15UL
+#define CFA_RESOURCE_TYPE_P45_L2_CTXT_TCAM 0x15UL
/* Profile Func */
-#define CFA_RESOURCE_TYPE_P45_PROF_FUNC 0x16UL
+#define CFA_RESOURCE_TYPE_P45_PROF_FUNC 0x16UL
/* Profile TCAM */
-#define CFA_RESOURCE_TYPE_P45_PROF_TCAM 0x17UL
+#define CFA_RESOURCE_TYPE_P45_PROF_TCAM 0x17UL
/* Exact Match Profile Id */
-#define CFA_RESOURCE_TYPE_P45_EM_PROF_ID 0x18UL
+#define CFA_RESOURCE_TYPE_P45_EM_PROF_ID 0x18UL
/* Exact Match Record */
-#define CFA_RESOURCE_TYPE_P45_EM_REC 0x19UL
+#define CFA_RESOURCE_TYPE_P45_EM_REC 0x19UL
/* Wildcard Profile Id */
-#define CFA_RESOURCE_TYPE_P45_WC_TCAM_PROF_ID 0x1aUL
+#define CFA_RESOURCE_TYPE_P45_WC_TCAM_PROF_ID 0x1aUL
/* Wildcard TCAM */
-#define CFA_RESOURCE_TYPE_P45_WC_TCAM 0x1bUL
+#define CFA_RESOURCE_TYPE_P45_WC_TCAM 0x1bUL
/* Meter profile */
-#define CFA_RESOURCE_TYPE_P45_METER_PROF 0x1cUL
-/* Meter */
-#define CFA_RESOURCE_TYPE_P45_METER 0x1dUL
+#define CFA_RESOURCE_TYPE_P45_METER_PROF 0x1cUL
-/* Meter */
+/* Mirror */
-#define CFA_RESOURCE_TYPE_P45_MIRROR 0x1eUL
+#define CFA_RESOURCE_TYPE_P45_MIRROR 0x1dUL
/* Source Property TCAM */
-#define CFA_RESOURCE_TYPE_P45_SP_TCAM 0x1fUL
+#define CFA_RESOURCE_TYPE_P45_SP_TCAM 0x1eUL
/* VEB TCAM */
-#define CFA_RESOURCE_TYPE_P45_VEB_TCAM 0x20UL
-#define CFA_RESOURCE_TYPE_P45_LAST CFA_RESOURCE_TYPE_P45_VEB_TCAM
+#define CFA_RESOURCE_TYPE_P45_VEB_TCAM 0x1fUL
+#define CFA_RESOURCE_TYPE_P45_LAST CFA_RESOURCE_TYPE_P45_VEB_TCAM
-/* SRAM Multicast Group */
-#define CFA_RESOURCE_TYPE_P4_SRAM_MCG 0x0UL
-/* SRAM Encap 8 byte record */
-#define CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_8B 0x1UL
-/* SRAM Encap 16 byte record */
-#define CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_16B 0x2UL
-/* SRAM Encap 64 byte record */
-#define CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_64B 0x3UL
-/* SRAM Source Property MAC */
-#define CFA_RESOURCE_TYPE_P4_SRAM_SP_MAC 0x4UL
-/* SRAM Source Property MAC and IPv4 */
-#define CFA_RESOURCE_TYPE_P4_SRAM_SP_MAC_IPV4 0x5UL
-/* SRAM Source Property MAC and IPv6 */
-#define CFA_RESOURCE_TYPE_P4_SRAM_SP_MAC_IPV6 0x6UL
-/* SRAM 64B Counters */
-#define CFA_RESOURCE_TYPE_P4_SRAM_COUNTER_64B 0x7UL
-/* SRAM Network Address Translation Source Port */
-#define CFA_RESOURCE_TYPE_P4_SRAM_NAT_SPORT 0x8UL
-/* SRAM Network Address Translation Destination Port */
-#define CFA_RESOURCE_TYPE_P4_SRAM_NAT_DPORT 0x9UL
-/* SRAM Network Address Translation Source IPv4 address */
-#define CFA_RESOURCE_TYPE_P4_SRAM_NAT_S_IPV4 0xaUL
-/* SRAM Network Address Translation Destination IPv4 address */
-#define CFA_RESOURCE_TYPE_P4_SRAM_NAT_D_IPV4 0xbUL
-/* SRAM Network Address Translation Source IPv6 address */
-#define CFA_RESOURCE_TYPE_P4_SRAM_NAT_S_IPV6 0xcUL
-/* SRAM Network Address Translation Destination IPv6 address */
-#define CFA_RESOURCE_TYPE_P4_SRAM_NAT_D_IPV6 0xdUL
+/* Multicast Group */
+#define CFA_RESOURCE_TYPE_P4_MCG 0x0UL
+/* Encap 8 byte record */
+#define CFA_RESOURCE_TYPE_P4_ENCAP_8B 0x1UL
+/* Encap 16 byte record */
+#define CFA_RESOURCE_TYPE_P4_ENCAP_16B 0x2UL
+/* Encap 64 byte record */
+#define CFA_RESOURCE_TYPE_P4_ENCAP_64B 0x3UL
+/* Source Property MAC */
+#define CFA_RESOURCE_TYPE_P4_SP_MAC 0x4UL
+/* Source Property MAC and IPv4 */
+#define CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4 0x5UL
+/* Source Property MAC and IPv6 */
+#define CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6 0x6UL
+/* 64B Counters */
+#define CFA_RESOURCE_TYPE_P4_COUNTER_64B 0x7UL
+/* Network Address Translation Source Port */
+#define CFA_RESOURCE_TYPE_P4_NAT_SPORT 0x8UL
+/* Network Address Translation Destination Port */
+#define CFA_RESOURCE_TYPE_P4_NAT_DPORT 0x9UL
+/* Network Address Translation Source IPv4 address */
+#define CFA_RESOURCE_TYPE_P4_NAT_S_IPV4 0xaUL
+/* Network Address Translation Destination IPv4 address */
+#define CFA_RESOURCE_TYPE_P4_NAT_D_IPV4 0xbUL
+/* Network Address Translation Source IPv6 address */
+#define CFA_RESOURCE_TYPE_P4_NAT_S_IPV6 0xcUL
+/* Network Address Translation Destination IPv6 address */
+#define CFA_RESOURCE_TYPE_P4_NAT_D_IPV6 0xdUL
/* Meter */
-#define CFA_RESOURCE_TYPE_P4_SRAM_METER 0xeUL
+#define CFA_RESOURCE_TYPE_P4_METER 0xeUL
/* Flow State */
-#define CFA_RESOURCE_TYPE_P4_SRAM_FLOW_STATE 0xfUL
+#define CFA_RESOURCE_TYPE_P4_FLOW_STATE 0xfUL
/* Full Action Records */
-#define CFA_RESOURCE_TYPE_P4_SRAM_FULL_ACTION 0x10UL
+#define CFA_RESOURCE_TYPE_P4_FULL_ACTION 0x10UL
/* Action Record Format 0 */
-#define CFA_RESOURCE_TYPE_P4_SRAM_FORMAT_0_ACTION 0x11UL
+#define CFA_RESOURCE_TYPE_P4_FORMAT_0_ACTION 0x11UL
/* Action Record Format 2 */
-#define CFA_RESOURCE_TYPE_P4_SRAM_FORMAT_2_ACTION 0x12UL
+#define CFA_RESOURCE_TYPE_P4_FORMAT_2_ACTION 0x12UL
/* Action Record Format 3 */
-#define CFA_RESOURCE_TYPE_P4_SRAM_FORMAT_3_ACTION 0x13UL
+#define CFA_RESOURCE_TYPE_P4_FORMAT_3_ACTION 0x13UL
/* Action Record Format 4 */
-#define CFA_RESOURCE_TYPE_P4_SRAM_FORMAT_4_ACTION 0x14UL
+#define CFA_RESOURCE_TYPE_P4_FORMAT_4_ACTION 0x14UL
/* L2 Context TCAM */
-#define CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM 0x15UL
+#define CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM 0x15UL
/* Profile Func */
-#define CFA_RESOURCE_TYPE_P4_PROF_FUNC 0x16UL
+#define CFA_RESOURCE_TYPE_P4_PROF_FUNC 0x16UL
/* Profile TCAM */
-#define CFA_RESOURCE_TYPE_P4_PROF_TCAM 0x17UL
+#define CFA_RESOURCE_TYPE_P4_PROF_TCAM 0x17UL
/* Exact Match Profile Id */
-#define CFA_RESOURCE_TYPE_P4_EM_PROF_ID 0x18UL
+#define CFA_RESOURCE_TYPE_P4_EM_PROF_ID 0x18UL
/* Exact Match Record */
-#define CFA_RESOURCE_TYPE_P4_EM_REC 0x19UL
+#define CFA_RESOURCE_TYPE_P4_EM_REC 0x19UL
/* Wildcard Profile Id */
-#define CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID 0x1aUL
+#define CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID 0x1aUL
/* Wildcard TCAM */
-#define CFA_RESOURCE_TYPE_P4_WC_TCAM 0x1bUL
+#define CFA_RESOURCE_TYPE_P4_WC_TCAM 0x1bUL
/* Meter profile */
-#define CFA_RESOURCE_TYPE_P4_METER_PROF 0x1cUL
-/* Meter */
-#define CFA_RESOURCE_TYPE_P4_METER 0x1dUL
+#define CFA_RESOURCE_TYPE_P4_METER_PROF 0x1cUL
-/* Meter */
+/* Mirror */
-#define CFA_RESOURCE_TYPE_P4_MIRROR 0x1eUL
+#define CFA_RESOURCE_TYPE_P4_MIRROR 0x1dUL
/* Source Property TCAM */
-#define CFA_RESOURCE_TYPE_P4_SP_TCAM 0x1fUL
-#define CFA_RESOURCE_TYPE_P4_LAST CFA_RESOURCE_TYPE_P4_SP_TCAM
+#define CFA_RESOURCE_TYPE_P4_SP_TCAM 0x1eUL
+#define CFA_RESOURCE_TYPE_P4_LAST CFA_RESOURCE_TYPE_P4_SP_TCAM
#endif /* _CFA_RESOURCE_TYPES_H_ */
} \
} while (0)
+
+#define TF_CHECK_PARMS1(parms) do { \
+ if ((parms) == NULL) { \
+ TFP_DRV_LOG(ERR, "Invalid Argument(s)\n"); \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define TF_CHECK_PARMS2(parms1, parms2) do { \
+ if ((parms1) == NULL || (parms2) == NULL) { \
+ TFP_DRV_LOG(ERR, "Invalid Argument(s)\n"); \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define TF_CHECK_PARMS3(parms1, parms2, parms3) do { \
+ if ((parms1) == NULL || \
+ (parms2) == NULL || \
+ (parms3) == NULL) { \
+ TFP_DRV_LOG(ERR, "Invalid Argument(s)\n"); \
+ return -EINVAL; \
+ } \
+ } while (0)
+
#endif /* _TF_COMMON_H_ */
/* Create empty stack
*/
- rc = stack_init(num_entries, parms.mem_va, pool);
+ rc = stack_init(num_entries, (uint32_t *)parms.mem_va, pool);
if (rc != 0) {
TFP_DRV_LOG(ERR, "EM pool stack init failure %s\n",
TF_SESSION_NAME_MAX);
/* Initialize Session */
- session->device_type = parms->device_type;
session->dev = NULL;
tf_rm_init(tfp);
/* Initialize EM pool */
for (dir = 0; dir < TF_DIR_MAX; dir++) {
- rc = tf_create_em_pool(session, dir, TF_SESSION_EM_POOL_SIZE);
+ rc = tf_create_em_pool(session,
+ (enum tf_dir)dir,
+ TF_SESSION_EM_POOL_SIZE);
if (rc) {
TFP_DRV_LOG(ERR,
"EM Pool initialization failed\n");
return -EINVAL;
}
+int
+tf_open_session_new(struct tf *tfp,
+ struct tf_open_session_parms *parms)
+{
+ int rc;
+ unsigned int domain, bus, slot, device;
+ struct tf_session_open_session_parms oparms;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+ /* Filter out any non-supported device types on the Core
+ * side. It is assumed that the Firmware will be supported if
+ * firmware open session succeeds.
+ */
+ if (parms->device_type != TF_DEVICE_TYPE_WH) {
+ TFP_DRV_LOG(ERR,
+ "Unsupported device type %d\n",
+ parms->device_type);
+ return -ENOTSUP;
+ }
+
+ /* Verify control channel and build the beginning of session_id */
+ rc = sscanf(parms->ctrl_chan_name,
+ "%x:%x:%x.%d",
+ &domain,
+ &bus,
+ &slot,
+ &device);
+ if (rc != 4) {
+ TFP_DRV_LOG(ERR,
+ "Failed to scan device ctrl_chan_name\n");
+ return -EINVAL;
+ }
+
+ parms->session_id.internal.domain = domain;
+ parms->session_id.internal.bus = bus;
+ parms->session_id.internal.device = device;
+ oparms.open_cfg = parms;
+
+ rc = tf_session_open_session(tfp, &oparms);
+ /* Logging handled by tf_session_open_session */
+ if (rc)
+ return rc;
+
+ TFP_DRV_LOG(INFO,
+ "Session created, session_id:%d\n",
+ parms->session_id.id);
+
+ TFP_DRV_LOG(INFO,
+ "domain:%d, bus:%d, device:%d, fw_session_id:%d\n",
+ parms->session_id.internal.domain,
+ parms->session_id.internal.bus,
+ parms->session_id.internal.device,
+ parms->session_id.internal.fw_session_id);
+
+ return 0;
+}
+
int
tf_attach_session(struct tf *tfp __rte_unused,
struct tf_attach_session_parms *parms __rte_unused)
return -1;
}
+int
+tf_attach_session_new(struct tf *tfp,
+ struct tf_attach_session_parms *parms)
+{
+ int rc;
+ unsigned int domain, bus, slot, device;
+ struct tf_session_attach_session_parms aparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Verify control channel */
+ rc = sscanf(parms->ctrl_chan_name,
+ "%x:%x:%x.%d",
+ &domain,
+ &bus,
+ &slot,
+ &device);
+ if (rc != 4) {
+ TFP_DRV_LOG(ERR,
+ "Failed to scan device ctrl_chan_name\n");
+ return -EINVAL;
+ }
+
+ /* Verify 'attach' channel */
+ rc = sscanf(parms->attach_chan_name,
+ "%x:%x:%x.%d",
+ &domain,
+ &bus,
+ &slot,
+ &device);
+ if (rc != 4) {
+ TFP_DRV_LOG(ERR,
+ "Failed to scan device attach_chan_name\n");
+ return -EINVAL;
+ }
+
+ /* Prepare return value of session_id, using ctrl_chan_name
+ * device values as it becomes the session id.
+ */
+ parms->session_id.internal.domain = domain;
+ parms->session_id.internal.bus = bus;
+ parms->session_id.internal.device = device;
+ aparms.attach_cfg = parms;
+ rc = tf_session_attach_session(tfp,
+ &aparms);
+	/* Logging handled by tf_session_attach_session */
+ if (rc)
+ return rc;
+
+ TFP_DRV_LOG(INFO,
+ "Attached to session, session_id:%d\n",
+ parms->session_id.id);
+
+ TFP_DRV_LOG(INFO,
+ "domain:%d, bus:%d, device:%d, fw_session_id:%d\n",
+ parms->session_id.internal.domain,
+ parms->session_id.internal.bus,
+ parms->session_id.internal.device,
+ parms->session_id.internal.fw_session_id);
+
+ return rc;
+}
+
int
tf_close_session(struct tf *tfp)
{
if (tfs->ref_count == 0) {
/* Free EM pool */
for (dir = 0; dir < TF_DIR_MAX; dir++)
- tf_free_em_pool(tfs, dir);
+ tf_free_em_pool(tfs, (enum tf_dir)dir);
tfp_free(tfp->session->core_data);
tfp_free(tfp->session);
return rc_close;
}
+int
+tf_close_session_new(struct tf *tfp)
+{
+ int rc;
+ struct tf_session_close_session_parms cparms = { 0 };
+ union tf_session_id session_id = { 0 };
+ uint8_t ref_count;
+
+ TF_CHECK_PARMS1(tfp);
+
+ cparms.ref_count = &ref_count;
+ cparms.session_id = &session_id;
+ rc = tf_session_close_session(tfp,
+ &cparms);
+ /* Logging handled by tf_session_close_session */
+ if (rc)
+ return rc;
+
+ TFP_DRV_LOG(INFO,
+ "Closed session, session_id:%d, ref_count:%d\n",
+ cparms.session_id->id,
+ *cparms.ref_count);
+
+ TFP_DRV_LOG(INFO,
+ "domain:%d, bus:%d, device:%d, fw_session_id:%d\n",
+ cparms.session_id->internal.domain,
+ cparms.session_id->internal.bus,
+ cparms.session_id->internal.device,
+ cparms.session_id->internal.fw_session_id);
+
+ return rc;
+}
+
/** insert EM hash entry API
*
* returns:
return 0;
}
-/** free identifier resource
- *
- * Returns success or failure code.
- */
+int
+tf_alloc_identifier_new(struct tf *tfp,
+ struct tf_alloc_identifier_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_ident_alloc_parms aparms;
+ uint16_t id;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Can't do static initialization due to UT enum check */
+ memset(&aparms, 0, sizeof(struct tf_ident_alloc_parms));
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_alloc_ident == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return -EOPNOTSUPP;
+ }
+
+ aparms.dir = parms->dir;
+ aparms.ident_type = parms->ident_type;
+ aparms.id = &id;
+ rc = dev->ops->tf_dev_alloc_ident(tfp, &aparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Identifier allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ parms->id = id;
+
+ return 0;
+}
+
int tf_free_identifier(struct tf *tfp,
struct tf_free_identifier_parms *parms)
{
return 0;
}
+int
+tf_free_identifier_new(struct tf *tfp,
+ struct tf_free_identifier_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_ident_free_parms fparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Can't do static initialization due to UT enum check */
+ memset(&fparms, 0, sizeof(struct tf_ident_free_parms));
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_free_ident == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return -EOPNOTSUPP;
+ }
+
+ fparms.dir = parms->dir;
+ fparms.ident_type = parms->ident_type;
+ fparms.id = parms->id;
+ rc = dev->ops->tf_dev_free_ident(tfp, &fparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Identifier allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ return 0;
+}
+
int
tf_alloc_tcam_entry(struct tf *tfp,
struct tf_alloc_tcam_entry_parms *parms)
* The number of identifier resources requested for the session.
* The index used is tf_identifier_type.
*/
- uint16_t identifer_cnt[TF_DIR_MAX][TF_IDENT_TYPE_MAX];
+ uint16_t identifier_cnt[TF_IDENT_TYPE_MAX][TF_DIR_MAX];
/** [in] Requested Index Table resource counts
*
* The number of index table resources requested for the session.
int tf_open_session(struct tf *tfp,
struct tf_open_session_parms *parms);
+int tf_open_session_new(struct tf *tfp,
+ struct tf_open_session_parms *parms);
+
struct tf_attach_session_parms {
/** [in] ctrl_chan_name
*
*/
int tf_attach_session(struct tf *tfp,
struct tf_attach_session_parms *parms);
+int tf_attach_session_new(struct tf *tfp,
+ struct tf_attach_session_parms *parms);
/**
* Closes an existing session. Cleans up all hardware and firmware
* Returns success or failure code.
*/
int tf_close_session(struct tf *tfp);
+int tf_close_session_new(struct tf *tfp);
/**
* @page ident Identity Management
*/
int tf_alloc_identifier(struct tf *tfp,
struct tf_alloc_identifier_parms *parms);
+int tf_alloc_identifier_new(struct tf *tfp,
+ struct tf_alloc_identifier_parms *parms);
/** free identifier resource
*
*/
int tf_free_identifier(struct tf *tfp,
struct tf_free_identifier_parms *parms);
+int tf_free_identifier_new(struct tf *tfp,
+ struct tf_free_identifier_parms *parms);
/**
* @page dram_table DRAM Table Scope Interface
#include "tf_device.h"
#include "tf_device_p4.h"
#include "tfp.h"
-#include "bnxt.h"
struct tf;
+/* Forward declarations */
+static int dev_unbind_p4(struct tf *tfp);
+
/**
- * Device specific bind function
+ * Device specific bind function, WH+
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] shadow_copy
+ * Flag controlling shadow copy DB creation
+ *
+ * [in] resources
+ * Pointer to resource allocation information
+ *
+ * [out] dev_handle
+ * Device handle
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on parameter or internal failure.
*/
static int
-dev_bind_p4(struct tf *tfp __rte_unused,
- struct tf_session_resources *resources __rte_unused,
- struct tf_dev_info *dev_info)
+dev_bind_p4(struct tf *tfp,
+ bool shadow_copy,
+ struct tf_session_resources *resources,
+ struct tf_dev_info *dev_handle)
{
+ int rc;
+ int frc;
+ struct tf_ident_cfg_parms ident_cfg;
+ struct tf_tbl_cfg_parms tbl_cfg;
+ struct tf_tcam_cfg_parms tcam_cfg;
+
/* Initialize the modules */
- dev_info->ops = &tf_dev_ops_p4;
+ ident_cfg.num_elements = TF_IDENT_TYPE_MAX;
+ ident_cfg.cfg = tf_ident_p4;
+ ident_cfg.shadow_copy = shadow_copy;
+ ident_cfg.resources = resources;
+ rc = tf_ident_bind(tfp, &ident_cfg);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Identifier initialization failure\n");
+ goto fail;
+ }
+
+ tbl_cfg.num_elements = TF_TBL_TYPE_MAX;
+ tbl_cfg.cfg = tf_tbl_p4;
+ tbl_cfg.shadow_copy = shadow_copy;
+ tbl_cfg.resources = resources;
+ rc = tf_tbl_bind(tfp, &tbl_cfg);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Table initialization failure\n");
+ goto fail;
+ }
+
+ tcam_cfg.num_elements = TF_TCAM_TBL_TYPE_MAX;
+ tcam_cfg.cfg = tf_tcam_p4;
+ tcam_cfg.shadow_copy = shadow_copy;
+ tcam_cfg.resources = resources;
+ rc = tf_tcam_bind(tfp, &tcam_cfg);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "TCAM initialization failure\n");
+ goto fail;
+ }
+
+ dev_handle->type = TF_DEVICE_TYPE_WH;
+ dev_handle->ops = &tf_dev_ops_p4;
+
return 0;
+
+ fail:
+ /* Cleanup of already created modules */
+ frc = dev_unbind_p4(tfp);
+ if (frc)
+ return frc;
+
+ return rc;
+}
+
+/**
+ * Device specific unbind function, WH+
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+static int
+dev_unbind_p4(struct tf *tfp)
+{
+ int rc = 0;
+ bool fail = false;
+
+ /* Unbind all the support modules. As this is only done on
+ * close we only report errors as everything has to be cleaned
+ * up regardless.
+ */
+ rc = tf_ident_unbind(tfp);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Device unbind failed, Identifier\n");
+ fail = true;
+ }
+
+ rc = tf_tbl_unbind(tfp);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Device unbind failed, Table Type\n");
+ fail = true;
+ }
+
+ rc = tf_tcam_unbind(tfp);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Device unbind failed, TCAM\n");
+ fail = true;
+ }
+
+ if (fail)
+ return -1;
+
+ return rc;
}
int
dev_bind(struct tf *tfp __rte_unused,
enum tf_device_type type,
+ bool shadow_copy,
struct tf_session_resources *resources,
- struct tf_dev_info *dev_info)
+ struct tf_dev_info *dev_handle)
{
switch (type) {
case TF_DEVICE_TYPE_WH:
return dev_bind_p4(tfp,
+ shadow_copy,
resources,
- dev_info);
+ dev_handle);
default:
TFP_DRV_LOG(ERR,
- "Device type not supported\n");
- return -ENOTSUP;
+ "No such device\n");
+ return -ENODEV;
}
}
int
-dev_unbind(struct tf *tfp __rte_unused,
- struct tf_dev_info *dev_handle __rte_unused)
+dev_unbind(struct tf *tfp,
+ struct tf_dev_info *dev_handle)
{
- return 0;
+ switch (dev_handle->type) {
+ case TF_DEVICE_TYPE_WH:
+ return dev_unbind_p4(tfp);
+ default:
+ TFP_DRV_LOG(ERR,
+ "No such device\n");
+ return -ENODEV;
+ }
}
* TF device information
*/
struct tf_dev_info {
+ enum tf_device_type type;
const struct tf_dev_ops *ops;
};
*
* Returns
* - (0) if successful.
- * - (-EINVAL) on failure.
+ * - (-EINVAL) parameter failure.
+ * - (-ENODEV) no such device supported.
*/
int dev_bind(struct tf *tfp,
enum tf_device_type type,
+ bool shadow_copy,
struct tf_session_resources *resources,
struct tf_dev_info *dev_handle);
*
* [in] dev_handle
* Device handle
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) parameter failure.
+ * - (-ENODEV) no such device supported.
*/
int dev_unbind(struct tf *tfp,
struct tf_dev_info *dev_handle);
* different device variants.
*/
struct tf_dev_ops {
+ /**
+ * Retrieves the MAX number of resource types that the device
+ * supports.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] max_types
+ * Pointer to MAX number of types the device supports
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_get_max_types)(struct tf *tfp,
+ uint16_t *max_types);
+
+ /**
+ * Retrieves the WC TCAM slice information that the device
+ * supports.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] slice_size
+ * Pointer to slice size the device supports
+ *
+ * [out] num_slices_per_row
+ * Pointer to number of slices per row the device supports
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+ int (*tf_dev_get_wc_tcam_slices)(struct tf *tfp,
+ uint16_t *slice_size,
+ uint16_t *num_slices_per_row);
+
/**
* Allocation of an identifier element.
*
* Pointer to TF handle
*
* [in] parms
- * Pointer to table type allocation parameters
+ * Pointer to table allocation parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
- int (*tf_dev_alloc_tbl_type)(struct tf *tfp,
- struct tf_tbl_type_alloc_parms *parms);
+ int (*tf_dev_alloc_tbl)(struct tf *tfp,
+ struct tf_tbl_alloc_parms *parms);
/**
* Free of a table type element.
* Pointer to TF handle
*
* [in] parms
- * Pointer to table type free parameters
+ * Pointer to table free parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
- int (*tf_dev_free_tbl_type)(struct tf *tfp,
- struct tf_tbl_type_free_parms *parms);
+ int (*tf_dev_free_tbl)(struct tf *tfp,
+ struct tf_tbl_free_parms *parms);
/**
* Searches for the specified table type element in a shadow DB.
* Pointer to TF handle
*
* [in] parms
- * Pointer to table type allocation and search parameters
+ * Pointer to table allocation and search parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
- int (*tf_dev_alloc_search_tbl_type)
- (struct tf *tfp,
- struct tf_tbl_type_alloc_search_parms *parms);
+ int (*tf_dev_alloc_search_tbl)(struct tf *tfp,
+ struct tf_tbl_alloc_search_parms *parms);
/**
* Sets the specified table type element.
* Pointer to TF handle
*
* [in] parms
- * Pointer to table type set parameters
+ * Pointer to table set parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
- int (*tf_dev_set_tbl_type)(struct tf *tfp,
- struct tf_tbl_type_set_parms *parms);
+ int (*tf_dev_set_tbl)(struct tf *tfp,
+ struct tf_tbl_set_parms *parms);
/**
* Retrieves the specified table type element.
* Pointer to TF handle
*
* [in] parms
- * Pointer to table type get parameters
+ * Pointer to table get parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
- int (*tf_dev_get_tbl_type)(struct tf *tfp,
- struct tf_tbl_type_get_parms *parms);
+ int (*tf_dev_get_tbl)(struct tf *tfp,
+ struct tf_tbl_get_parms *parms);
/**
* Allocation of a tcam element.
* All rights reserved.
*/
+#include <rte_common.h>
+#include <cfa_resource_types.h>
+
#include "tf_device.h"
#include "tf_identifier.h"
#include "tf_tbl_type.h"
#include "tf_tcam.h"
+/**
+ * Device specific function that retrieves the MAX number of HCAPI
+ * types the device supports.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] max_types
+ * Pointer to the MAX number of HCAPI types supported
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+static int
+tf_dev_p4_get_max_types(struct tf *tfp __rte_unused,
+ uint16_t *max_types)
+{
+ if (max_types == NULL)
+ return -EINVAL;
+
+ *max_types = CFA_RESOURCE_TYPE_P4_LAST + 1;
+
+ return 0;
+}
+
+/**
+ * Device specific function that retrieves the WC TCAM slices the
+ * device supports.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] slice_size
+ * Pointer to the WC TCAM slice size
+ *
+ * [out] num_slices_per_row
+ * Pointer to the WC TCAM row slice configuration
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+static int
+tf_dev_p4_get_wc_tcam_slices(struct tf *tfp __rte_unused,
+ uint16_t *slice_size,
+ uint16_t *num_slices_per_row)
+{
+#define CFA_P4_WC_TCAM_SLICE_SIZE 12
+#define CFA_P4_WC_TCAM_SLICES_PER_ROW 2
+
+ if (slice_size == NULL || num_slices_per_row == NULL)
+ return -EINVAL;
+
+ *slice_size = CFA_P4_WC_TCAM_SLICE_SIZE;
+ *num_slices_per_row = CFA_P4_WC_TCAM_SLICES_PER_ROW;
+
+ return 0;
+}
+
+/**
+ * Truflow P4 device specific functions
+ */
const struct tf_dev_ops tf_dev_ops_p4 = {
+ .tf_dev_get_max_types = tf_dev_p4_get_max_types,
+ .tf_dev_get_wc_tcam_slices = tf_dev_p4_get_wc_tcam_slices,
.tf_dev_alloc_ident = tf_ident_alloc,
.tf_dev_free_ident = tf_ident_free,
- .tf_dev_alloc_tbl_type = tf_tbl_type_alloc,
- .tf_dev_free_tbl_type = tf_tbl_type_free,
- .tf_dev_alloc_search_tbl_type = tf_tbl_type_alloc_search,
- .tf_dev_set_tbl_type = tf_tbl_type_set,
- .tf_dev_get_tbl_type = tf_tbl_type_get,
+ .tf_dev_alloc_tbl = tf_tbl_alloc,
+ .tf_dev_free_tbl = tf_tbl_free,
+ .tf_dev_alloc_search_tbl = tf_tbl_alloc_search,
+ .tf_dev_set_tbl = tf_tbl_set,
+ .tf_dev_get_tbl = tf_tbl_get,
.tf_dev_alloc_tcam = tf_tcam_alloc,
.tf_dev_free_tcam = tf_tcam_free,
.tf_dev_alloc_search_tcam = tf_tcam_alloc_search,
#include "tf_rm_new.h"
struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = {
- { TF_RM_ELEM_CFG_PRIVATE, 0 /* CFA_RESOURCE_TYPE_P4_INVALID */ },
+ { TF_RM_ELEM_CFG_PRIVATE, CFA_RESOURCE_TYPE_INVALID },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_PROF_FUNC },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_PROF_ID },
- { TF_RM_ELEM_CFG_NULL, 0 /* CFA_RESOURCE_TYPE_P4_L2_FUNC */ }
+ /* CFA_RESOURCE_TYPE_P4_L2_FUNC */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
};
struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_PROF_TCAM },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_WC_TCAM },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_TCAM },
- { TF_RM_ELEM_CFG_NULL, 0 /* CFA_RESOURCE_TYPE_P4_CT_RULE_TCAM */ },
- { TF_RM_ELEM_CFG_NULL, 0 /* CFA_RESOURCE_TYPE_P4_VEB_TCAM */ }
+ /* CFA_RESOURCE_TYPE_P4_CT_RULE_TCAM */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_VEB_TCAM */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
};
struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_FULL_ACTION },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_MCG },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_8B },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_16B },
- { TF_RM_ELEM_CFG_NULL, 0, /* CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_32B */ },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_64B },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_SP_MAC },
- { TF_RM_ELEM_CFG_NULL, 0 /* CFA_RESOURCE_TYPE_P4_SRAM_SP_SMAC_IPV4 */ },
- { TF_RM_ELEM_CFG_NULL, 0 /* CFA_RESOURCE_TYPE_P4_SRAM_SP_SMAC_IPV6 */ },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_COUNTER_64B },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_NAT_SPORT },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_NAT_DPORT },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_NAT_S_IPV4 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_NAT_D_IPV4 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_NAT_S_IPV6 },
- { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SRAM_NAT_D_IPV6 },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_FULL_ACTION },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_MCG },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_8B },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_16B },
+ /* CFA_RESOURCE_TYPE_P4_SRAM_ENCAP_32B */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_ENCAP_64B },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_SP_MAC },
+ /* CFA_RESOURCE_TYPE_P4_SRAM_SP_SMAC_IPV4 */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_SRAM_SP_SMAC_IPV6 */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_COUNTER_64B },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_SPORT },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_DPORT },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_S_IPV4 },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_D_IPV4 },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_S_IPV6 },
+ { TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_NAT_D_IPV6 },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_METER_PROF },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_METER },
{ TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_MIRROR },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_UPAR */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_EPOC */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_METADATA */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_CT_STATE */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_RANGE_PROF */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_RANGE_ENTRY */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_LAG */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_VNIC_SVIF */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_EM_FBK */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_WC_FKB */ },
- { TF_RM_ELEM_CFG_NULL, /* CFA_RESOURCE_TYPE_P4_EXT */ }
+ /* CFA_RESOURCE_TYPE_P4_UPAR */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_EPOC */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_METADATA */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_CT_STATE */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_RANGE_PROF */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_RANGE_ENTRY */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_LAG */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_VNIC_SVIF */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_EM_FBK */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_WC_FKB */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID },
+ /* CFA_RESOURCE_TYPE_P4_EXT */
+ { TF_RM_ELEM_CFG_NULL, CFA_RESOURCE_TYPE_INVALID }
};
#endif /* _TF_DEVICE_P4_H_ */
#include <rte_common.h>
#include "tf_identifier.h"
+#include "tf_common.h"
+#include "tf_rm_new.h"
+#include "tf_util.h"
+#include "tfp.h"
struct tf;
/**
* Identifier DBs.
*/
-/* static void *ident_db[TF_DIR_MAX]; */
+static void *ident_db[TF_DIR_MAX];
/**
* Init flag, set on bind and cleared on unbind
*/
-/* static uint8_t init; */
+static uint8_t init;
int
-tf_ident_bind(struct tf *tfp __rte_unused,
- struct tf_ident_cfg *parms __rte_unused)
+tf_ident_bind(struct tf *tfp,
+ struct tf_ident_cfg_parms *parms)
{
+ int rc;
+ int i;
+ struct tf_rm_create_db_parms db_cfg = { 0 };
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (init) {
+ TFP_DRV_LOG(ERR,
+ "Identifier already initialized\n");
+ return -EINVAL;
+ }
+
+ db_cfg.num_elements = parms->num_elements;
+
+ for (i = 0; i < TF_DIR_MAX; i++) {
+ db_cfg.dir = i;
+ db_cfg.num_elements = parms->num_elements;
+ db_cfg.cfg = parms->cfg;
+ db_cfg.alloc_num = parms->resources->identifier_cnt[i];
+ db_cfg.rm_db = ident_db[i];
+ rc = tf_rm_create_db(tfp, &db_cfg);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Identifier DB creation failed\n",
+ tf_dir_2_str(i));
+ return rc;
+ }
+ }
+
+ init = 1;
+
return 0;
}
int
tf_ident_unbind(struct tf *tfp __rte_unused)
{
+ int rc;
+ int i;
+ struct tf_rm_free_db_parms fparms = { 0 };
+
+ TF_CHECK_PARMS1(tfp);
+
+	/* Bail silently if nothing has been initialized; this
+	 * allows cleanup during session creation.
+	 */
+ if (!init)
+ return -EINVAL;
+
+ for (i = 0; i < TF_DIR_MAX; i++) {
+ fparms.dir = i;
+ fparms.rm_db = ident_db[i];
+ rc = tf_rm_free_db(tfp, &fparms);
+ if (rc)
+ return rc;
+
+ ident_db[i] = NULL;
+ }
+
+ init = 0;
+
return 0;
}
int
tf_ident_alloc(struct tf *tfp __rte_unused,
- struct tf_ident_alloc_parms *parms __rte_unused)
+ struct tf_ident_alloc_parms *parms)
{
+ int rc;
+ struct tf_rm_allocate_parms aparms = { 0 };
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!init) {
+ TFP_DRV_LOG(ERR,
+ "%s: No Identifier DBs created\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ /* Allocate requested element */
+ aparms.rm_db = ident_db[parms->dir];
+ aparms.db_index = parms->ident_type;
+ aparms.index = (uint32_t *)&parms->id;
+ rc = tf_rm_allocate(&aparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed allocate, type:%d\n",
+ tf_dir_2_str(parms->dir),
+ parms->ident_type);
+ return rc;
+ }
+
return 0;
}
int
tf_ident_free(struct tf *tfp __rte_unused,
- struct tf_ident_free_parms *parms __rte_unused)
+ struct tf_ident_free_parms *parms)
{
+ int rc;
+ struct tf_rm_is_allocated_parms aparms = { 0 };
+ struct tf_rm_free_parms fparms = { 0 };
+ int allocated = 0;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!init) {
+ TFP_DRV_LOG(ERR,
+ "%s: No Identifier DBs created\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ /* Check if element is in use */
+ aparms.rm_db = ident_db[parms->dir];
+ aparms.db_index = parms->ident_type;
+ aparms.index = parms->id;
+ aparms.allocated = &allocated;
+ rc = tf_rm_is_allocated(&aparms);
+ if (rc)
+ return rc;
+
+ if (!allocated) {
+ TFP_DRV_LOG(ERR,
+ "%s: Entry already free, type:%d, index:%d\n",
+ tf_dir_2_str(parms->dir),
+ parms->ident_type,
+ parms->id);
+ return rc;
+ }
+
+ /* Free requested element */
+ fparms.rm_db = ident_db[parms->dir];
+ fparms.db_index = parms->ident_type;
+ fparms.index = parms->id;
+ rc = tf_rm_free(&fparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Free failed, type:%d, index:%d\n",
+ tf_dir_2_str(parms->dir),
+ parms->ident_type,
+ parms->id);
+ return rc;
+ }
+
return 0;
}
* The Identifier module provides processing of Identifiers.
*/
-struct tf_ident_cfg {
+struct tf_ident_cfg_parms {
/**
- * Number of identifier types in each of the configuration
- * arrays
+ * [in] Number of identifier types in each of the
+ * configuration arrays
*/
uint16_t num_elements;
-
/**
- * TCAM configuration array
+ * [in] Identifier configuration array
+ */
+ struct tf_rm_element_cfg *cfg;
+ /**
+ * [in] Boolean controlling the request shadow copy.
*/
- struct tf_rm_element_cfg *ident_cfg[TF_DIR_MAX];
+ bool shadow_copy;
+ /**
+ * [in] Session resource allocations
+ */
+ struct tf_session_resources *resources;
};
/**
- * Identifier allcoation parameter definition
+ * Identifier allocation parameter definition
*/
struct tf_ident_alloc_parms {
/**
/**
* [out] Identifier allocated
*/
- uint16_t id;
+ uint16_t *id;
};
/**
* - (-EINVAL) on failure.
*/
int tf_ident_bind(struct tf *tfp,
- struct tf_ident_cfg *parms);
+ struct tf_ident_cfg_parms *parms);
/**
* Cleans up the private DBs and releases all the data.
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
-
-#include "bnxt.h"
-#include "tf_core.h"
-#include "tf_session.h"
-#include "tfp.h"
+#include <string.h>
#include "tf_msg_common.h"
#include "tf_msg.h"
-#include "hsi_struct_def_dpdk.h"
+#include "tf_util.h"
+#include "tf_session.h"
+#include "tfp.h"
#include "hwrm_tf.h"
#include "tf_em.h"
return rc;
}
+/**
+ * Allocates a DMA buffer that can be used for message transfer.
+ *
+ * [in] buf
+ * Pointer to DMA buffer structure
+ *
+ * [in] size
+ * Requested size of the buffer in bytes
+ *
+ * Returns:
+ * 0 - Success
+ * -ENOMEM - Unable to allocate buffer, no memory
+ */
+static int
+tf_msg_alloc_dma_buf(struct tf_msg_dma_buf *buf, int size)
+{
+ struct tfp_calloc_parms alloc_parms;
+ int rc;
+
+	/* Allocate the DMA buffer */
+ alloc_parms.nitems = 1;
+ alloc_parms.size = size;
+ alloc_parms.alignment = 4096;
+ rc = tfp_calloc(&alloc_parms);
+ if (rc)
+ return -ENOMEM;
+
+ buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
+ buf->va_addr = alloc_parms.mem_va;
+
+ return 0;
+}
+
+/**
+ * Frees a previously allocated DMA buffer.
+ *
+ * [in] buf
+ * Pointer to DMA buffer structure
+ */
+static void
+tf_msg_free_dma_buf(struct tf_msg_dma_buf *buf)
+{
+ tfp_free(buf->va_addr);
+}
+
/**
* Sends session open request to TF Firmware
*/
struct tfp_send_msg_parms parms = { 0 };
/* Populate the request */
- memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
+ tfp_memcpy(&req.session_name, ctrl_chan_name, TF_SESSION_NAME_MAX);
parms.tf_type = HWRM_TF_SESSION_OPEN;
parms.req_data = (uint32_t *)&req;
return tfp_le_to_cpu_32(parms.tf_resp_code);
}
+int
+tf_msg_session_resc_qcaps(struct tf *tfp,
+ enum tf_dir dir,
+ uint16_t size,
+ struct tf_rm_resc_req_entry *query,
+ enum tf_rm_resc_resv_strategy *resv_strategy)
+{
+ int rc;
+ int i;
+ struct tfp_send_msg_parms parms = { 0 };
+ struct hwrm_tf_session_resc_qcaps_input req = { 0 };
+ struct hwrm_tf_session_resc_qcaps_output resp = { 0 };
+ uint8_t fw_session_id;
+ struct tf_msg_dma_buf qcaps_buf = { 0 };
+ struct tf_rm_resc_req_entry *data;
+ int dma_size;
+
+ if (size == 0 || query == NULL || resv_strategy == NULL) {
+ TFP_DRV_LOG(ERR,
+ "%s: Resource QCAPS parameter error, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-EINVAL));
+ return -EINVAL;
+ }
+
+ rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Unable to lookup FW id, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Prepare DMA buffer */
+ dma_size = size * sizeof(struct tf_rm_resc_req_entry);
+ rc = tf_msg_alloc_dma_buf(&qcaps_buf, dma_size);
+ if (rc)
+ return rc;
+
+ /* Populate the request */
+ req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+ req.flags = tfp_cpu_to_le_16(dir);
+ req.qcaps_size = size;
+ req.qcaps_addr = qcaps_buf.pa_addr;
+
+ parms.tf_type = HWRM_TF_SESSION_RESC_QCAPS;
+ parms.req_data = (uint32_t *)&req;
+ parms.req_size = sizeof(req);
+ parms.resp_data = (uint32_t *)&resp;
+ parms.resp_size = sizeof(resp);
+ parms.mailbox = TF_KONG_MB;
+
+ rc = tfp_send_msg_direct(tfp, &parms);
+ if (rc)
+ return rc;
+
+ /* Process the response
+ * Should always get expected number of entries
+ */
+ if (resp.size != size) {
+ TFP_DRV_LOG(ERR,
+ "%s: QCAPS message error, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-EINVAL));
+ return -EINVAL;
+ }
+
+ /* Post process the response */
+ data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr;
+ for (i = 0; i < size; i++) {
+ query[i].type = tfp_cpu_to_le_32(data[i].type);
+ query[i].min = tfp_le_to_cpu_16(data[i].min);
+ query[i].max = tfp_le_to_cpu_16(data[i].max);
+ }
+
+ *resv_strategy = resp.flags &
+ HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK;
+
+ tf_msg_free_dma_buf(&qcaps_buf);
+
+ return rc;
+}
+
+int
+tf_msg_session_resc_alloc(struct tf *tfp,
+ enum tf_dir dir,
+ uint16_t size,
+ struct tf_rm_resc_req_entry *request,
+ struct tf_rm_resc_entry *resv)
+{
+ int rc;
+ int i;
+ struct tfp_send_msg_parms parms = { 0 };
+ struct hwrm_tf_session_resc_alloc_input req = { 0 };
+ struct hwrm_tf_session_resc_alloc_output resp = { 0 };
+ uint8_t fw_session_id;
+ struct tf_msg_dma_buf req_buf = { 0 };
+ struct tf_msg_dma_buf resv_buf = { 0 };
+ struct tf_rm_resc_req_entry *req_data;
+ struct tf_rm_resc_entry *resv_data;
+ int dma_size;
+
+ rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Unable to lookup FW id, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Prepare DMA buffers */
+ dma_size = size * sizeof(struct tf_rm_resc_req_entry);
+ rc = tf_msg_alloc_dma_buf(&req_buf, dma_size);
+ if (rc)
+ return rc;
+
+ dma_size = size * sizeof(struct tf_rm_resc_entry);
+ rc = tf_msg_alloc_dma_buf(&resv_buf, dma_size);
+ if (rc)
+ return rc;
+
+ /* Populate the request */
+ req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+ req.flags = tfp_cpu_to_le_16(dir);
+ req.req_size = size;
+
+ req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr;
+ for (i = 0; i < size; i++) {
+ req_data[i].type = tfp_cpu_to_le_32(request[i].type);
+ req_data[i].min = tfp_cpu_to_le_16(request[i].min);
+ req_data[i].max = tfp_cpu_to_le_16(request[i].max);
+ }
+
+ req.req_addr = req_buf.pa_addr;
+ req.resp_addr = resv_buf.pa_addr;
+
+ parms.tf_type = HWRM_TF_SESSION_RESC_ALLOC;
+ parms.req_data = (uint32_t *)&req;
+ parms.req_size = sizeof(req);
+ parms.resp_data = (uint32_t *)&resp;
+ parms.resp_size = sizeof(resp);
+ parms.mailbox = TF_KONG_MB;
+
+ rc = tfp_send_msg_direct(tfp, &parms);
+ if (rc)
+ return rc;
+
+ /* Process the response
+ * Should always get expected number of entries
+ */
+ if (resp.size != size) {
+ TFP_DRV_LOG(ERR,
+ "%s: Alloc message error, rc:%s\n",
+ tf_dir_2_str(dir),
+ strerror(-EINVAL));
+ return -EINVAL;
+ }
+
+ /* Post process the response */
+ resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr;
+ for (i = 0; i < size; i++) {
+ resv[i].type = tfp_cpu_to_le_32(resv_data[i].type);
+ resv[i].start = tfp_cpu_to_le_16(resv_data[i].start);
+ resv[i].stride = tfp_cpu_to_le_16(resv_data[i].stride);
+ }
+
+ tf_msg_free_dma_buf(&req_buf);
+ tf_msg_free_dma_buf(&resv_buf);
+
+ return rc;
+}
+
/**
* Sends EM mem register request to Firmware
*/
req.fw_session_id =
tfp_cpu_to_le_32(tfs->session_id.internal.fw_session_id);
- memcpy(req.em_key, em_parms->key, ((em_parms->key_sz_in_bits + 7) / 8));
+ tfp_memcpy(req.em_key,
+ em_parms->key,
+ ((em_parms->key_sz_in_bits + 7) / 8));
flags = (em_parms->dir == TF_DIR_TX ?
HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX :
return tfp_le_to_cpu_32(parms.tf_resp_code);
}
-static int
-tf_msg_alloc_dma_buf(struct tf_msg_dma_buf *buf, int size)
-{
- struct tfp_calloc_parms alloc_parms;
- int rc;
-
- /* Allocate session */
- alloc_parms.nitems = 1;
- alloc_parms.size = size;
- alloc_parms.alignment = 4096;
- rc = tfp_calloc(&alloc_parms);
- if (rc)
- return -ENOMEM;
-
- buf->pa_addr = (uintptr_t)alloc_parms.mem_pa;
- buf->va_addr = alloc_parms.mem_va;
-
- return 0;
-}
-
int
tf_msg_get_bulk_tbl_entry(struct tf *tfp,
struct tf_get_bulk_tbl_entry_parms *params)
if (rc)
goto cleanup;
data = buf.va_addr;
- memcpy(&req.dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr));
+ tfp_memcpy(&req.dev_data[0],
+ &buf.pa_addr,
+ sizeof(buf.pa_addr));
}
- memcpy(&data[0], parms->key, key_bytes);
- memcpy(&data[key_bytes], parms->mask, key_bytes);
- memcpy(&data[req.result_offset], parms->result, result_bytes);
+ tfp_memcpy(&data[0], parms->key, key_bytes);
+ tfp_memcpy(&data[key_bytes], parms->mask, key_bytes);
+ tfp_memcpy(&data[req.result_offset], parms->result, result_bytes);
mparms.tf_type = HWRM_TF_TCAM_SET;
mparms.req_data = (uint32_t *)&req;
goto cleanup;
cleanup:
- if (buf.va_addr != NULL)
- tfp_free(buf.va_addr);
+ tf_msg_free_dma_buf(&buf);
return rc;
}
#ifndef _TF_MSG_H_
#define _TF_MSG_H_
+#include <rte_common.h>
+#include <hsi_struct_def_dpdk.h>
+
#include "tf_tbl.h"
#include "tf_rm.h"
+#include "tf_rm_new.h"
struct tf;
enum tf_dir dir,
struct tf_rm_entry *sram_entry);
+/**
+ * Sends session HW resource query capability request to TF Firmware
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] dir
+ * Receive or Transmit direction
+ *
+ * [in] size
+ * Number of elements in the query. Should be set to the max
+ * elements for the device type
+ *
+ * [out] query
+ * Pointer to an array of query elements
+ *
+ * [out] resv_strategy
+ * Pointer to the reservation strategy
+ *
+ * Returns:
+ * 0 on Success else internal Truflow error
+ */
+int tf_msg_session_resc_qcaps(struct tf *tfp,
+ enum tf_dir dir,
+ uint16_t size,
+ struct tf_rm_resc_req_entry *query,
+ enum tf_rm_resc_resv_strategy *resv_strategy);
+
+/**
+ * Sends session HW resource allocation request to TF Firmware
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] dir
+ * Receive or Transmit direction
+ *
+ * [in] size
+ * Number of elements in the req and resv arrays
+ *
+ * [in] request
+ *   Pointer to an array of request elements
+ *
+ * [out] resv
+ *   Pointer to an array of reserved elements, populated by the firmware
+ *
+ * Returns:
+ * 0 on Success else internal Truflow error
+ */
+int tf_msg_session_resc_alloc(struct tf *tfp,
+ enum tf_dir dir,
+ uint16_t size,
+ struct tf_rm_resc_req_entry *request,
+ struct tf_rm_resc_entry *resv);
+
/**
* Sends EM internal insert request to Firmware
*/
* All rights reserved.
*/
+#include <string.h>
+
#include <rte_common.h>
-#include "tf_rm_new.h"
+#include <cfa_resource_types.h>
-/**
- * Resource query single entry. Used when accessing HCAPI RM on the
- * firmware.
- */
-struct tf_rm_query_entry {
- /** Minimum guaranteed number of elements */
- uint16_t min;
- /** Maximum non-guaranteed number of elements */
- uint16_t max;
-};
+#include "tf_rm_new.h"
+#include "tf_util.h"
+#include "tf_session.h"
+#include "tf_device.h"
+#include "tfp.h"
+#include "tf_msg.h"
/**
* Generic RM Element data type that an RM DB is build upon.
* hcapi_type can be ignored. If Null then the element is not
* valid for the device.
*/
- enum tf_rm_elem_cfg_type type;
+ enum tf_rm_elem_cfg_type cfg_type;
/**
* HCAPI RM Type for the element.
/**
* TF RM DB definition
*/
-struct tf_rm_db {
+struct tf_rm_new_db {
+ /**
+ * Number of elements in the DB
+ */
+ uint16_t num_entries;
+
+ /**
+ * Direction this DB controls.
+ */
+ enum tf_dir dir;
+
/**
* The DB consists of an array of elements
*/
struct tf_rm_element *db;
};
+
+/**
+ * Resource Manager Adjust of base index definitions.
+ */
+enum tf_rm_adjust_type {
+ TF_RM_ADJUST_ADD_BASE, /**< Adds base to the index */
+ TF_RM_ADJUST_RM_BASE /**< Removes base from the index */
+};
+
+/**
+ * Adjust an index according to the allocation information.
+ *
+ * All resources are controlled in a 0 based pool. Some resources, by
+ * design, are not 0 based, i.e. Full Action Records (SRAM) thus they
+ * need to be adjusted before they are handed out.
+ *
+ * [in] db
+ * Pointer to the db, used for the lookup
+ *
+ * [in] action
+ * Adjust action
+ *
+ * [in] db_index
+ * DB index for the element type
+ *
+ * [in] index
+ * Index to convert
+ *
+ * [out] adj_index
+ * Adjusted index
+ *
+ * Returns:
+ * 0 - Success
+ * - EOPNOTSUPP - Operation not supported
+ */
+static int
+tf_rm_adjust_index(struct tf_rm_element *db,
+ enum tf_rm_adjust_type action,
+ uint32_t db_index,
+ uint32_t index,
+ uint32_t *adj_index)
+{
+ int rc = 0;
+ uint32_t base_index;
+
+ base_index = db[db_index].alloc.entry.start;
+
+ switch (action) {
+ case TF_RM_ADJUST_RM_BASE:
+ *adj_index = index - base_index;
+ break;
+ case TF_RM_ADJUST_ADD_BASE:
+ *adj_index = index + base_index;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
int
-tf_rm_create_db(struct tf *tfp __rte_unused,
- struct tf_rm_create_db_parms *parms __rte_unused)
+tf_rm_create_db(struct tf *tfp,
+ struct tf_rm_create_db_parms *parms)
{
+ int rc;
+ int i;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ uint16_t max_types;
+ struct tfp_calloc_parms cparms;
+ struct tf_rm_resc_req_entry *query;
+ enum tf_rm_resc_resv_strategy resv_strategy;
+ struct tf_rm_resc_req_entry *req;
+ struct tf_rm_resc_entry *resv;
+ struct tf_rm_new_db *rm_db;
+ struct tf_rm_element *db;
+ uint32_t pool_size;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ /* Retrieve device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc)
+ return rc;
+
+ /* Need device max number of elements for the RM QCAPS */
+ rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
+ if (rc)
+ return rc;
+
+ cparms.nitems = max_types;
+ cparms.size = sizeof(struct tf_rm_resc_req_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+
+ query = (struct tf_rm_resc_req_entry *)cparms.mem_va;
+
+ /* Get Firmware Capabilities */
+ rc = tf_msg_session_resc_qcaps(tfp,
+ parms->dir,
+ max_types,
+ query,
+ &resv_strategy);
+ if (rc)
+ return rc;
+
+ /* Process capabilities against db requirements */
+
+ /* Alloc request, alignment already set */
+ cparms.nitems = parms->num_elements;
+ cparms.size = sizeof(struct tf_rm_resc_req_entry);
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ req = (struct tf_rm_resc_req_entry *)cparms.mem_va;
+
+ /* Alloc reservation, alignment and nitems already set */
+ cparms.size = sizeof(struct tf_rm_resc_entry);
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ resv = (struct tf_rm_resc_entry *)cparms.mem_va;
+
+ /* Build the request */
+ for (i = 0; i < parms->num_elements; i++) {
+ /* Skip any non HCAPI cfg elements */
+ if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI) {
+ req[i].type = parms->cfg[i].hcapi_type;
+ /* Check that we can get the full amount allocated */
+ if (parms->alloc_num[i] <=
+ query[parms->cfg[i].hcapi_type].max) {
+ req[i].min = parms->alloc_num[i];
+ req[i].max = parms->alloc_num[i];
+ } else {
+ TFP_DRV_LOG(ERR,
+ "%s: Resource failure, type:%d\n",
+ tf_dir_2_str(parms->dir),
+ parms->cfg[i].hcapi_type);
+ TFP_DRV_LOG(ERR,
+ "req:%d, avail:%d\n",
+ parms->alloc_num[i],
+ query[parms->cfg[i].hcapi_type].max);
+ return -EINVAL;
+ }
+ } else {
+ /* Skip the element */
+ req[i].type = CFA_RESOURCE_TYPE_INVALID;
+ }
+ }
+
+ rc = tf_msg_session_resc_alloc(tfp,
+ parms->dir,
+ parms->num_elements,
+ req,
+ resv);
+ if (rc)
+ return rc;
+
+ /* Build the RM DB per the request */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_rm_new_db);
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ rm_db = (void *)cparms.mem_va;
+
+ /* Build the DB within RM DB */
+ cparms.nitems = parms->num_elements;
+ cparms.size = sizeof(struct tf_rm_element);
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ rm_db->db = (struct tf_rm_element *)cparms.mem_va;
+
+ db = rm_db->db;
+ for (i = 0; i < parms->num_elements; i++) {
+ /* If allocation failed for a single entry the DB
+ * creation is considered a failure.
+ */
+ if (parms->alloc_num[i] != resv[i].stride) {
+ TFP_DRV_LOG(ERR,
+ "%s: Alloc failed, type:%d\n",
+ tf_dir_2_str(parms->dir),
+ i);
+ TFP_DRV_LOG(ERR,
+ "req:%d, alloc:%d\n",
+ parms->alloc_num[i],
+ resv[i].stride);
+ goto fail;
+ }
+
+ db[i].cfg_type = parms->cfg[i].cfg_type;
+ db[i].hcapi_type = parms->cfg[i].hcapi_type;
+ db[i].alloc.entry.start = resv[i].start;
+ db[i].alloc.entry.stride = resv[i].stride;
+
+ /* Create pool */
+ pool_size = (BITALLOC_SIZEOF(resv[i].stride) /
+ sizeof(struct bitalloc));
+		/* Alloc pool storage, alignment already set */
+ cparms.nitems = pool_size;
+ cparms.size = sizeof(struct bitalloc);
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ db[i].pool = (struct bitalloc *)cparms.mem_va;
+ }
+
+ rm_db->num_entries = i;
+ rm_db->dir = parms->dir;
+ parms->rm_db = (void *)rm_db;
+
+ tfp_free((void *)req);
+ tfp_free((void *)resv);
+
return 0;
+
+ fail:
+ tfp_free((void *)req);
+ tfp_free((void *)resv);
+ tfp_free((void *)db->pool);
+ tfp_free((void *)db);
+ tfp_free((void *)rm_db);
+ parms->rm_db = NULL;
+
+ return -EINVAL;
}
int
tf_rm_free_db(struct tf *tfp __rte_unused,
- struct tf_rm_free_db_parms *parms __rte_unused)
+ struct tf_rm_free_db_parms *parms)
{
- return 0;
+ int rc = 0;
+ int i;
+ struct tf_rm_new_db *rm_db;
+
+ /* Traverse the DB and clear each pool.
+ * NOTE:
+ * Firmware is not cleared. It will be cleared on close only.
+ */
+ rm_db = (struct tf_rm_new_db *)parms->rm_db;
+ for (i = 0; i < rm_db->num_entries; i++)
+ tfp_free((void *)rm_db->db->pool);
+
+ tfp_free((void *)parms->rm_db);
+
+ return rc;
}
int
-tf_rm_allocate(struct tf_rm_allocate_parms *parms __rte_unused)
+tf_rm_allocate(struct tf_rm_allocate_parms *parms)
{
- return 0;
+ int rc = 0;
+ int id;
+ struct tf_rm_new_db *rm_db;
+ enum tf_rm_elem_cfg_type cfg_type;
+
+ if (parms == NULL || parms->rm_db == NULL)
+ return -EINVAL;
+
+ rm_db = (struct tf_rm_new_db *)parms->rm_db;
+ cfg_type = rm_db->db[parms->db_index].cfg_type;
+
+ /* Bail out if not controlled by RM */
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+ cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ return -ENOTSUP;
+
+ id = ba_alloc(rm_db->db[parms->db_index].pool);
+ if (id == BA_FAIL) {
+ TFP_DRV_LOG(ERR,
+ "%s: Allocation failed, rc:%s\n",
+ tf_dir_2_str(rm_db->dir),
+ strerror(-rc));
+ return -ENOMEM;
+ }
+
+ /* Adjust for any non zero start value */
+ rc = tf_rm_adjust_index(rm_db->db,
+ TF_RM_ADJUST_ADD_BASE,
+ parms->db_index,
+ id,
+ parms->index);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Alloc adjust of base index failed, rc:%s\n",
+ tf_dir_2_str(rm_db->dir),
+ strerror(-rc));
+ return -1;
+ }
+
+ return rc;
}
int
-tf_rm_free(struct tf_rm_free_parms *parms __rte_unused)
+tf_rm_free(struct tf_rm_free_parms *parms)
{
- return 0;
+ int rc = 0;
+ uint32_t adj_index;
+ struct tf_rm_new_db *rm_db;
+ enum tf_rm_elem_cfg_type cfg_type;
+
+ if (parms == NULL || parms->rm_db == NULL)
+ return -EINVAL;
+
+ rm_db = (struct tf_rm_new_db *)parms->rm_db;
+ cfg_type = rm_db->db[parms->db_index].cfg_type;
+
+ /* Bail out if not controlled by RM */
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+ cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ return -ENOTSUP;
+
+ /* Adjust for any non zero start value */
+ rc = tf_rm_adjust_index(rm_db->db,
+ TF_RM_ADJUST_RM_BASE,
+ parms->db_index,
+ parms->index,
+ &adj_index);
+ if (rc)
+ return rc;
+
+ rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
+	/* No logging; the direction required for logging is not available here */
+ if (rc)
+ return rc;
+
+ return rc;
}
int
-tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms __rte_unused)
+tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
{
- return 0;
+ int rc = 0;
+ uint32_t adj_index;
+ struct tf_rm_new_db *rm_db;
+ enum tf_rm_elem_cfg_type cfg_type;
+
+ if (parms == NULL || parms->rm_db == NULL)
+ return -EINVAL;
+
+ rm_db = (struct tf_rm_new_db *)parms->rm_db;
+ cfg_type = rm_db->db[parms->db_index].cfg_type;
+
+ /* Bail out if not controlled by RM */
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+ cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ return -ENOTSUP;
+
+ /* Adjust for any non zero start value */
+ rc = tf_rm_adjust_index(rm_db->db,
+ TF_RM_ADJUST_RM_BASE,
+ parms->db_index,
+ parms->index,
+ &adj_index);
+ if (rc)
+ return rc;
+
+ *parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
+ adj_index);
+
+ return rc;
}
int
-tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms __rte_unused)
+tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
{
- return 0;
+ int rc = 0;
+ struct tf_rm_new_db *rm_db;
+ enum tf_rm_elem_cfg_type cfg_type;
+
+ if (parms == NULL || parms->rm_db == NULL)
+ return -EINVAL;
+
+ rm_db = (struct tf_rm_new_db *)parms->rm_db;
+ cfg_type = rm_db->db[parms->db_index].cfg_type;
+
+ /* Bail out if not controlled by RM */
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+ cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ return -ENOTSUP;
+
+ parms->info = &rm_db->db[parms->db_index].alloc;
+
+ return rc;
}
int
-tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms __rte_unused)
+tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
{
- return 0;
+ int rc = 0;
+ struct tf_rm_new_db *rm_db;
+ enum tf_rm_elem_cfg_type cfg_type;
+
+ if (parms == NULL || parms->rm_db == NULL)
+ return -EINVAL;
+
+ rm_db = (struct tf_rm_new_db *)parms->rm_db;
+ cfg_type = rm_db->db[parms->db_index].cfg_type;
+
+ /* Bail out if not controlled by RM */
+ if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+ cfg_type != TF_RM_ELEM_CFG_PRIVATE)
+ return -ENOTSUP;
+
+ *parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
+
+ return rc;
}
* All rights reserved.
*/
-#ifndef TF_RM_H_
-#define TF_RM_H_
+#ifndef TF_RM_NEW_H_
+#define TF_RM_NEW_H_
#include "tf_core.h"
#include "bitalloc.h"
 * MAX pool size of the Chip needs to be added to the tf_rm_elem_info
* structure and several new APIs would need to be added to allow for
* growth of a single TF resource type.
+ *
+ * The access functions do not check for NULL pointers as it's a
+ * support module, not called directly.
*/
/**
* Resource reservation single entry result. Used when accessing HCAPI
* RM on the firmware.
*/
-struct tf_rm_entry {
+struct tf_rm_new_entry {
/** Starting index of the allocated resource */
uint16_t start;
/** Number of allocated elements */
* ULP layer that is not controlled by HCAPI within the Firmware.
*/
enum tf_rm_elem_cfg_type {
- TF_RM_ELEM_CFG_NULL, /**< No configuration */
- TF_RM_ELEM_CFG_HCAPI, /**< HCAPI 'controlled' */
- TF_RM_ELEM_CFG_PRIVATE, /**< Private thus not HCAPI 'controlled' */
+ /** No configuration */
+ TF_RM_ELEM_CFG_NULL,
+ /** HCAPI 'controlled' */
+ TF_RM_ELEM_CFG_HCAPI,
+ /** Private thus not HCAPI 'controlled' */
+ TF_RM_ELEM_CFG_PRIVATE,
+ /**
+ * Shared element thus it belongs to a shared FW Session and
+ * is not controlled by the Host.
+ */
+ TF_RM_ELEM_CFG_SHARED,
TF_RM_TYPE_MAX
};
+/**
+ * RM Reservation strategy enumeration. Type of strategy comes from
+ * the HCAPI RM QCAPS handshake.
+ */
+enum tf_rm_resc_resv_strategy {
+ TF_RM_RESC_RESV_STATIC_PARTITION,
+ TF_RM_RESC_RESV_STRATEGY_1,
+ TF_RM_RESC_RESV_STRATEGY_2,
+ TF_RM_RESC_RESV_STRATEGY_3,
+ TF_RM_RESC_RESV_MAX
+};
+
/**
* RM Element configuration structure, used by the Device to configure
* how an individual TF type is configured in regard to the HCAPI RM
* RM Element config controls how the DB for that element is
* processed.
*/
- enum tf_rm_elem_cfg_type cfg;
+ enum tf_rm_elem_cfg_type cfg_type;
/* If a HCAPI to TF type conversion is required then TF type
* can be added here.
* In case of dynamic allocation support this would have
* to be changed to linked list of tf_rm_entry instead.
*/
- struct tf_rm_entry entry;
+ struct tf_rm_new_entry entry;
};
/**
*/
enum tf_dir dir;
/**
- * [in] Number of elements in the parameter structure
+ * [in] Number of elements.
*/
uint16_t num_elements;
/**
- * [in] Parameter structure
+ * [in] Parameter structure array. Array size is num_elements.
+ */
+ struct tf_rm_element_cfg *cfg;
+ /**
+ * [in] Allocation number array. Array size is num_elements.
*/
- struct tf_rm_element_cfg *parms;
+ uint16_t *alloc_num;
/**
* [out] RM DB Handle
*/
- void *tf_rm_db;
+ void *rm_db;
};
/**
/**
* [in] RM DB Handle
*/
- void *tf_rm_db;
+ void *rm_db;
};
/**
/**
* [in] RM DB Handle
*/
- void *tf_rm_db;
+ void *rm_db;
/**
* [in] DB Index, indicates which DB entry to perform the
* action on.
/**
* [in] RM DB Handle
*/
- void *tf_rm_db;
+ void *rm_db;
/**
* [in] DB Index, indicates which DB entry to perform the
* action on.
/**
* [in] Index to free
*/
- uint32_t index;
+ uint16_t index;
};
/**
/**
* [in] RM DB Handle
*/
- void *tf_rm_db;
+ void *rm_db;
/**
* [in] DB Index, indicates which DB entry to perform the
* action on.
/**
* [in] Pointer to flag that indicates the state of the query
*/
- uint8_t *allocated;
+ int *allocated;
};
/**
/**
* [in] RM DB Handle
*/
- void *tf_rm_db;
+ void *rm_db;
/**
* [in] DB Index, indicates which DB entry to perform the
* action on.
/**
* [in] RM DB Handle
*/
- void *tf_rm_db;
+ void *rm_db;
/**
* [in] DB Index, indicates which DB entry to perform the
* action on.
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
+ * - (-ENOMEM) if pool is empty
*/
int tf_rm_allocate(struct tf_rm_allocate_parms *parms);
*
* Returns
* - (0) if successful.
- * - (-EpINVAL) on failure.
+ * - (-EINVAL) on failure.
*/
int tf_rm_free(struct tf_rm_free_parms *parms);
*/
int tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms);
-#endif /* TF_RM_H_ */
+#endif /* TF_RM_NEW_H_ */
* All rights reserved.
*/
+#include <string.h>
+
+#include <rte_common.h>
+
+#include "tf_session.h"
+#include "tf_common.h"
+#include "tf_msg.h"
+#include "tfp.h"
+
+/**
+ * Creates the host session together with its FW counterpart.
+ *
+ * On any failure after the FW session has been opened, the host side
+ * allocations are released before returning so nothing is leaked.
+ */
+int
+tf_session_open_session(struct tf *tfp,
+			struct tf_session_open_session_parms *parms)
+{
+	int rc;
+	struct tf_session *session = NULL;
+	struct tfp_calloc_parms cparms;
+	uint8_t fw_session_id;
+	union tf_session_id *session_id;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	/* Open FW session and get a new session_id */
+	rc = tf_msg_session_open(tfp,
+				 parms->open_cfg->ctrl_chan_name,
+				 &fw_session_id);
+	if (rc) {
+		/* Log error */
+		if (rc == -EEXIST)
+			TFP_DRV_LOG(ERR,
+				    "Session is already open, rc:%s\n",
+				    strerror(-rc));
+		else
+			TFP_DRV_LOG(ERR,
+				    "Open message send failed, rc:%s\n",
+				    strerror(-rc));
+
+		parms->open_cfg->session_id.id = TF_FW_SESSION_ID_INVALID;
+		return rc;
+	}
+
+	/* Allocate session */
+	cparms.nitems = 1;
+	cparms.size = sizeof(struct tf_session_info);
+	cparms.alignment = 0;
+	rc = tfp_calloc(&cparms);
+	if (rc) {
+		/* Log error */
+		TFP_DRV_LOG(ERR,
+			    "Failed to allocate session info, rc:%s\n",
+			    strerror(-rc));
+		goto cleanup;
+	}
+	tfp->session = (struct tf_session_info *)cparms.mem_va;
+
+	/* Allocate core data for the session */
+	cparms.nitems = 1;
+	cparms.size = sizeof(struct tf_session);
+	cparms.alignment = 0;
+	rc = tfp_calloc(&cparms);
+	if (rc) {
+		/* Log error */
+		TFP_DRV_LOG(ERR,
+			    "Failed to allocate session data, rc:%s\n",
+			    strerror(-rc));
+		goto cleanup;
+	}
+	tfp->session->core_data = cparms.mem_va;
+
+	/* Initialize Session and Device */
+	session = (struct tf_session *)tfp->session->core_data;
+	session->ver.major = 0;
+	session->ver.minor = 0;
+	session->ver.update = 0;
+
+	session_id = &parms->open_cfg->session_id;
+	session->session_id.internal.domain = session_id->internal.domain;
+	session->session_id.internal.bus = session_id->internal.bus;
+	session->session_id.internal.device = session_id->internal.device;
+	session->session_id.internal.fw_session_id = fw_session_id;
+	/* Return the allocated fw session id */
+	session_id->internal.fw_session_id = fw_session_id;
+
+	session->shadow_copy = parms->open_cfg->shadow_copy;
+
+	tfp_memcpy(session->ctrl_chan_name,
+		   parms->open_cfg->ctrl_chan_name,
+		   TF_SESSION_NAME_MAX);
+
+	/* NOTE(review): session->dev is not set before this call;
+	 * confirm dev_bind allocates/installs the device handle.
+	 */
+	rc = dev_bind(tfp,
+		      parms->open_cfg->device_type,
+		      session->shadow_copy,
+		      &parms->open_cfg->resources,
+		      session->dev);
+	/* Logging handled by dev_bind. Release host allocations on
+	 * failure instead of leaking them.
+	 */
+	if (rc)
+		goto cleanup;
+
+	/* Query for Session Config */
+	rc = tf_msg_session_qcfg(tfp);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "Query config message send failed, rc:%s\n",
+			    strerror(-rc));
+		goto cleanup_close;
+	}
+
+	session->ref_count++;
+
+	return 0;
+
+ cleanup:
+	/* Guard: first calloc failure arrives here with a NULL
+	 * tfp->session.
+	 * NOTE(review): the FW session opened above stays open on this
+	 * path; confirm whether a close message should be sent.
+	 */
+	if (tfp->session != NULL) {
+		tfp_free(tfp->session->core_data);
+		tfp_free(tfp->session);
+		tfp->session = NULL;
+	}
+	return rc;
+
+ cleanup_close:
+	tf_close_session(tfp);
+	return -EINVAL;
+}
+
+/**
+ * Attach to an existing session; currently unimplemented.
+ *
+ * Both parameters are validated by TF_CHECK_PARMS2, so they must not
+ * carry the misleading __rte_unused attribute.
+ */
+int
+tf_session_attach_session(struct tf *tfp,
+			  struct tf_session_attach_session_parms *parms)
+{
+	int rc = -EOPNOTSUPP;
+
+	TF_CHECK_PARMS2(tfp, parms);
+
+	TFP_DRV_LOG(ERR,
+		    "Attach not yet supported, rc:%s\n",
+		    strerror(-rc));
+	return rc;
+}
+
+int
+tf_session_close_session(struct tf *tfp,
+ struct tf_session_close_session_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs = NULL;
+ struct tf_dev_info *tfd = NULL;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Session lookup failed, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ if (tfs->session_id.id == TF_SESSION_ID_INVALID) {
+ rc = -EINVAL;
+ TFP_DRV_LOG(ERR,
+ "Invalid session id, unable to close, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Record the session we're closing so the caller knows the
+ * details.
+ */
+ *parms->session_id = tfs->session_id;
+
+ rc = tf_session_get_device(tfs, &tfd);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Device lookup failed, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* In case we're attached only the session client gets closed */
+ rc = tf_msg_session_close(tfp);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "FW Session close failed, rc:%s\n",
+ strerror(-rc));
+ }
+
+ /* Best-effort: teardown continues even if the FW close failed.
+ * NOTE(review): parms->ref_count is never populated here;
+ * confirm callers do not rely on it.
+ */
+ tfs->ref_count--;
+
+ /* Final cleanup as we're last user of the session */
+ if (tfs->ref_count == 0) {
+ /* Unbind the device */
+ rc = dev_unbind(tfp, tfd);
+ if (rc) {
+ /* Log error */
+ TFP_DRV_LOG(ERR,
+ "Device unbind failed, rc:%s\n",
+ strerror(-rc));
+ }
+
+ tfp_free(tfp->session->core_data);
+ tfp_free(tfp->session);
+ tfp->session = NULL;
+ }
+
+ return 0;
+}
+
int
tf_session_get_session(struct tf *tfp,
- struct tf_session *tfs)
+ struct tf_session **tfs)
{
+ int rc;
+
if (tfp->session == NULL || tfp->session->core_data == NULL) {
- TFP_DRV_LOG(ERR, "Session not created\n");
- return -EINVAL;
+ rc = -EINVAL;
+ TFP_DRV_LOG(ERR,
+ "Session not created, rc:%s\n",
+ strerror(-rc));
+ return rc;
}
+ /* Write through the double pointer; the old single-pointer
+ * signature only updated a local copy.
+ */
- tfs = (struct tf_session *)(tfp->session->core_data);
+ *tfs = (struct tf_session *)(tfp->session->core_data);
return 0;
}
int
tf_session_get_device(struct tf_session *tfs,
- struct tf_device *tfd)
+ struct tf_dev_info **tfd)
{
+ int rc;
+
if (tfs->dev == NULL) {
- TFP_DRV_LOG(ERR, "Device not created\n");
- return -EINVAL;
+ rc = -EINVAL;
+ TFP_DRV_LOG(ERR,
+ "Device not created, rc:%s\n",
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Write through the double pointer; the old single-pointer
+ * signature only updated a local copy.
+ */
+ *tfd = tfs->dev;
+
+ return 0;
+}
+
+/* Fetch the FW session id assigned when the session was opened. */
+int
+tf_session_get_fw_session_id(struct tf *tfp,
+ uint8_t *fw_session_id)
+{
+ int rc;
+ struct tf_session *tfs = NULL;
+
+ /* NOTE(review): this NULL check duplicates the one inside
+ * tf_session_get_session; harmless but redundant.
+ */
+ if (tfp->session == NULL) {
+ rc = -EINVAL;
+ TFP_DRV_LOG(ERR,
+ "Session not created, rc:%s\n",
+ strerror(-rc));
+ return rc;
}
- tfd = tfs->dev;
+
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ *fw_session_id = tfs->session_id.internal.fw_session_id;
return 0;
}
*/
struct tf_session_version ver;
- /** Device type, provided by tf_open_session().
- */
- enum tf_device_type device_type;
-
- /** Session ID, allocated by FW on tf_open_session().
- */
+ /** Session ID, allocated by FW on tf_open_session() */
union tf_session_id session_id;
/**
*/
uint8_t ref_count;
- /** Device */
+ /** Device handle */
struct tf_dev_info *dev;
/** Session HW and SRAM resources */
struct stack em_pool[TF_DIR_MAX];
};
+/**
+ * Session open parameter definition
+ */
+struct tf_session_open_session_parms {
+ /**
+ * [in] Pointer to the TF open session configuration
+ */
+ struct tf_open_session_parms *open_cfg;
+};
+
+/**
+ * Session attach parameter definition
+ */
+struct tf_session_attach_session_parms {
+ /**
+ * [in] Pointer to the TF attach session configuration
+ */
+ struct tf_attach_session_parms *attach_cfg;
+};
+
+/**
+ * Session close parameter definition
+ */
+struct tf_session_close_session_parms {
+ /**
+ * [out] Remaining session reference count.
+ * NOTE(review): not populated by tf_session_close_session;
+ * confirm intended use.
+ */
+ uint8_t *ref_count;
+ /**
+ * [out] Session id of the session being closed.
+ */
+ union tf_session_id *session_id;
+};
+
/**
* @page session Session Management
*
+ * @ref tf_session_open_session
+ *
+ * @ref tf_session_attach_session
+ *
+ * @ref tf_session_close_session
+ *
* @ref tf_session_get_session
*
* @ref tf_session_get_device
+ *
+ * @ref tf_session_get_fw_session_id
+ */
+
+/**
+ * Creates a host session with a corresponding firmware session.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to the session open parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
*/
+int tf_session_open_session(struct tf *tfp,
+ struct tf_session_open_session_parms *parms);
+
+/**
+ * Attaches a previous created session.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in] parms
+ * Pointer to the session attach parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_session_attach_session(struct tf *tfp,
+ struct tf_session_attach_session_parms *parms);
+
+/**
+ * Closes a previous created session.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [in/out] parms
+ * Pointer to the session close parameters.
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_session_close_session(struct tf *tfp,
+ struct tf_session_close_session_parms *parms);
/**
* Looks up the private session information from the TF session info.
* Pointer to TF handle
*
* [out] tfs
- * Pointer to the session
+ * Pointer to pointer to the session
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
int tf_session_get_session(struct tf *tfp,
- struct tf_session *tfs);
+ struct tf_session **tfs);
/**
* Looks up the device information from the TF Session.
* Pointer to TF handle
*
* [out] tfd
- * Pointer to the device
+ * Pointer to pointer to the device
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
int tf_session_get_device(struct tf_session *tfs,
- struct tf_dev_info *tfd);
+ struct tf_dev_info **tfd);
+
+/**
+ * Looks up the FW session id of the firmware connection for the
+ * requested TF handle.
+ *
+ * [in] tfp
+ * Pointer to TF handle
+ *
+ * [out] session_id
+ * Pointer to the session_id
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-EINVAL) on failure.
+ */
+int tf_session_get_fw_session_id(struct tf *tfp,
+ uint8_t *fw_session_id);
#endif /* _TF_SESSION_H_ */
#define _TF_TBL_H_
#include <stdint.h>
+
+#include "tf_core.h"
#include "stack.h"
+struct tf_session;
+
enum tf_pg_tbl_lvl {
PT_LVL_0,
PT_LVL_1,
struct tf;
/**
- * Table Type DBs.
+ * Table DBs.
*/
/* static void *tbl_db[TF_DIR_MAX]; */
/**
- * Table Type Shadow DBs
+ * Table Shadow DBs
*/
/* static void *shadow_tbl_db[TF_DIR_MAX]; */
/* static uint8_t shadow_init; */
int
-tf_tbl_type_bind(struct tf *tfp __rte_unused,
- struct tf_tbl_type_cfg_parms *parms __rte_unused)
+tf_tbl_bind(struct tf *tfp __rte_unused,
+ struct tf_tbl_cfg_parms *parms __rte_unused)
{
+ /* No-op stub; always reports success. */
return 0;
}
int
-tf_tbl_type_unbind(struct tf *tfp __rte_unused)
+tf_tbl_unbind(struct tf *tfp __rte_unused)
{
+ /* No-op stub; always reports success. */
return 0;
}
int
-tf_tbl_type_alloc(struct tf *tfp __rte_unused,
- struct tf_tbl_type_alloc_parms *parms __rte_unused)
+tf_tbl_alloc(struct tf *tfp __rte_unused,
+ struct tf_tbl_alloc_parms *parms __rte_unused)
{
+ /* No-op stub; always reports success. */
return 0;
}
int
-tf_tbl_type_free(struct tf *tfp __rte_unused,
- struct tf_tbl_type_free_parms *parms __rte_unused)
+tf_tbl_free(struct tf *tfp __rte_unused,
+ struct tf_tbl_free_parms *parms __rte_unused)
{
+ /* No-op stub; always reports success. */
return 0;
}
int
-tf_tbl_type_alloc_search(struct tf *tfp __rte_unused,
- struct tf_tbl_type_alloc_search_parms *parms __rte_unused)
+tf_tbl_alloc_search(struct tf *tfp __rte_unused,
+ struct tf_tbl_alloc_search_parms *parms __rte_unused)
{
+ /* No-op stub; always reports success. */
return 0;
}
int
-tf_tbl_type_set(struct tf *tfp __rte_unused,
- struct tf_tbl_type_set_parms *parms __rte_unused)
+tf_tbl_set(struct tf *tfp __rte_unused,
+ struct tf_tbl_set_parms *parms __rte_unused)
{
+ /* No-op stub; always reports success. */
return 0;
}
int
-tf_tbl_type_get(struct tf *tfp __rte_unused,
- struct tf_tbl_type_get_parms *parms __rte_unused)
+tf_tbl_get(struct tf *tfp __rte_unused,
+ struct tf_tbl_get_parms *parms __rte_unused)
{
+ /* No-op stub; always reports success. */
return 0;
}
struct tf;
/**
- * The Table Type module provides processing of Internal TF table types.
+ * The Table module provides processing of Internal TF table types.
*/
/**
- * Table Type configuration parameters
+ * Table configuration parameters
*/
-struct tf_tbl_type_cfg_parms {
+struct tf_tbl_cfg_parms {
/**
* Number of table types in each of the configuration arrays
*/
uint16_t num_elements;
-
/**
* Table Type element configuration array
*/
- struct tf_rm_element_cfg *tbl_cfg[TF_DIR_MAX];
-
+ struct tf_rm_element_cfg *cfg;
/**
* Shadow table type configuration array
*/
- struct tf_shadow_tbl_type_cfg *tbl_shadow_cfg[TF_DIR_MAX];
+ struct tf_shadow_tbl_cfg *shadow_cfg;
+ /**
+ * Boolean controlling the request shadow copy.
+ */
+ bool shadow_copy;
+ /**
+ * Session resource allocations
+ */
+ struct tf_session_resources *resources;
};
/**
- * Table Type allocation parameters
+ * Table allocation parameters
*/
-struct tf_tbl_type_alloc_parms {
+struct tf_tbl_alloc_parms {
/**
* [in] Receive or transmit direction
*/
};
/**
- * Table Type free parameters
+ * Table free parameters
*/
-struct tf_tbl_type_free_parms {
+struct tf_tbl_free_parms {
/**
* [in] Receive or transmit direction
*/
uint16_t ref_cnt;
};
-struct tf_tbl_type_alloc_search_parms {
+/**
+ * Table allocate search parameters
+ */
+struct tf_tbl_alloc_search_parms {
/**
* [in] Receive or transmit direction
*/
};
/**
- * Table Type set parameters
+ * Table set parameters
*/
-struct tf_tbl_type_set_parms {
+struct tf_tbl_set_parms {
/**
* [in] Receive or transmit direction
*/
};
/**
- * Table Type get parameters
+ * Table get parameters
*/
-struct tf_tbl_type_get_parms {
+struct tf_tbl_get_parms {
/**
* [in] Receive or transmit direction
*/
};
/**
- * @page tbl_type Table Type
+ * @page tbl Table
*
- * @ref tf_tbl_type_bind
+ * @ref tf_tbl_bind
*
- * @ref tf_tbl_type_unbind
+ * @ref tf_tbl_unbind
*
- * @ref tf_tbl_type_alloc
+ * @ref tf_tbl_alloc
*
- * @ref tf_tbl_type_free
+ * @ref tf_tbl_free
*
- * @ref tf_tbl_type_alloc_search
+ * @ref tf_tbl_alloc_search
*
- * @ref tf_tbl_type_set
+ * @ref tf_tbl_set
*
- * @ref tf_tbl_type_get
+ * @ref tf_tbl_get
*/
/**
- * Initializes the Table Type module with the requested DBs. Must be
+ * Initializes the Table module with the requested DBs. Must be
* invoked as the first thing before any of the access functions.
*
* [in] tfp
* Pointer to TF handle, used for HCAPI communication
*
* [in] parms
- * Pointer to parameters
+ * Pointer to Table configuration parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
-int tf_tbl_type_bind(struct tf *tfp,
- struct tf_tbl_type_cfg_parms *parms);
+int tf_tbl_bind(struct tf *tfp,
+ struct tf_tbl_cfg_parms *parms);
/**
* Cleans up the private DBs and releases all the data.
* - (0) if successful.
* - (-EINVAL) on failure.
*/
-int tf_tbl_type_unbind(struct tf *tfp);
+int tf_tbl_unbind(struct tf *tfp);
/**
* Allocates the requested table type from the internal RM DB.
* Pointer to TF handle, used for HCAPI communication
*
* [in] parms
- * Pointer to parameters
+ * Pointer to Table allocation parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
-int tf_tbl_type_alloc(struct tf *tfp,
- struct tf_tbl_type_alloc_parms *parms);
+int tf_tbl_alloc(struct tf *tfp,
+ struct tf_tbl_alloc_parms *parms);
/**
* Free's the requested table type and returns it to the DB. If shadow
* Pointer to TF handle, used for HCAPI communication
*
* [in] parms
- * Pointer to parameters
+ * Pointer to Table free parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
-int tf_tbl_type_free(struct tf *tfp,
- struct tf_tbl_type_free_parms *parms);
+int tf_tbl_free(struct tf *tfp,
+ struct tf_tbl_free_parms *parms);
/**
* Supported if Shadow DB is configured. Searches the Shadow DB for
* - (0) if successful.
* - (-EINVAL) on failure.
*/
-int tf_tbl_type_alloc_search(struct tf *tfp,
- struct tf_tbl_type_alloc_search_parms *parms);
+int tf_tbl_alloc_search(struct tf *tfp,
+ struct tf_tbl_alloc_search_parms *parms);
/**
* Configures the requested element by sending a firmware request which
* Pointer to TF handle, used for HCAPI communication
*
* [in] parms
- * Pointer to parameters
+ * Pointer to Table set parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
-int tf_tbl_type_set(struct tf *tfp,
- struct tf_tbl_type_set_parms *parms);
+int tf_tbl_set(struct tf *tfp,
+ struct tf_tbl_set_parms *parms);
/**
* Retrieves the requested element by sending a firmware request to get
* Pointer to TF handle, used for HCAPI communication
*
* [in] parms
- * Pointer to parameters
+ * Pointer to Table get parameters
*
* Returns
* - (0) if successful.
* - (-EINVAL) on failure.
*/
-int tf_tbl_type_get(struct tf *tfp,
- struct tf_tbl_type_get_parms *parms);
+int tf_tbl_get(struct tf *tfp,
+ struct tf_tbl_get_parms *parms);
#endif /* TF_TBL_TYPE_H */
* Number of tcam types in each of the configuration arrays
*/
uint16_t num_elements;
-
/**
* TCAM configuration array
*/
- struct tf_rm_element_cfg *tcam_cfg[TF_DIR_MAX];
-
+ struct tf_rm_element_cfg *cfg;
/**
* Shadow table type configuration array
*/
- struct tf_shadow_tcam_cfg *tcam_shadow_cfg[TF_DIR_MAX];
+ struct tf_shadow_tcam_cfg *shadow_cfg;
+ /**
+ * Boolean controlling the request shadow copy.
+ */
+ bool shadow_copy;
+ /**
+ * Session resource allocations
+ */
+ struct tf_session_resources *resources;
};
/**