#include <rte_string_fns.h>
#include <rte_acl.h>
#include <rte_tailq.h>
+#include <rte_vect.h>
#include "acl.h"
};
EAL_REGISTER_TAILQ(rte_acl_tailq)
+#ifndef CC_AVX512_SUPPORT
+/*
+ * If the compiler doesn't support AVX512 instructions,
+ * then the dummy one would be used instead for AVX512 classify method.
+ */
+/*
+ * Stub for the 16-lane AVX512 classify method: keeps the symbol defined
+ * so the classify-method table links, but always reports -ENOTSUP.
+ * All parameters are unused by design.
+ */
+int
+rte_acl_classify_avx512x16(__rte_unused const struct rte_acl_ctx *ctx,
+ __rte_unused const uint8_t **data,
+ __rte_unused uint32_t *results,
+ __rte_unused uint32_t num,
+ __rte_unused uint32_t categories)
+{
+	return -ENOTSUP;
+}
+
+/*
+ * Stub for the 32-lane AVX512 classify method: same role as the
+ * x16 stub above — always returns -ENOTSUP when the toolchain
+ * cannot emit AVX512 code.
+ */
+int
+rte_acl_classify_avx512x32(__rte_unused const struct rte_acl_ctx *ctx,
+ __rte_unused const uint8_t **data,
+ __rte_unused uint32_t *results,
+ __rte_unused uint32_t num,
+ __rte_unused uint32_t categories)
+{
+	return -ENOTSUP;
+}
+#endif
+
#ifndef CC_AVX2_SUPPORT
/*
* If the compiler doesn't support AVX2 instructions,
[RTE_ACL_CLASSIFY_AVX2] = rte_acl_classify_avx2,
[RTE_ACL_CLASSIFY_NEON] = rte_acl_classify_neon,
[RTE_ACL_CLASSIFY_ALTIVEC] = rte_acl_classify_altivec,
+ [RTE_ACL_CLASSIFY_AVX512X16] = rte_acl_classify_avx512x16,
+ [RTE_ACL_CLASSIFY_AVX512X32] = rte_acl_classify_avx512x32,
};
/*
{
if (alg == RTE_ACL_CLASSIFY_NEON) {
#if defined(RTE_ARCH_ARM64)
- return 0;
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
+ return 0;
#elif defined(RTE_ARCH_ARM)
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
return 0;
- return -ENOTSUP;
-#else
- return -ENOTSUP;
#endif
+ return -ENOTSUP;
}
return -EINVAL;
{
if (alg == RTE_ACL_CLASSIFY_ALTIVEC) {
#if defined(RTE_ARCH_PPC_64)
- return 0;
-#else
- return -ENOTSUP;
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
+ return 0;
#endif
+ return -ENOTSUP;
}
return -EINVAL;
}
+#ifdef CC_AVX512_SUPPORT
+/*
+ * Check that the running CPU exposes every AVX512 ISA subset the
+ * AVX512 classify implementations use: F, VL, CD and BW.
+ * Returns non-zero when all four flags are enabled, zero otherwise.
+ */
+static int
+acl_check_avx512_cpu_flags(void)
+{
+	return (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
+		rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512VL) &&
+		rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512CD) &&
+		rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW));
+}
+#endif
+
/*
* Helper function for acl_check_alg.
* Check support for x86 specific classify methods.
static int
acl_check_alg_x86(enum rte_acl_classify_alg alg)
{
+ if (alg == RTE_ACL_CLASSIFY_AVX512X32) {
+#ifdef CC_AVX512_SUPPORT
+ if (acl_check_avx512_cpu_flags() != 0 &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
+ return 0;
+#endif
+ return -ENOTSUP;
+ }
+
+ if (alg == RTE_ACL_CLASSIFY_AVX512X16) {
+#ifdef CC_AVX512_SUPPORT
+ if (acl_check_avx512_cpu_flags() != 0 &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+ return 0;
+#endif
+ return -ENOTSUP;
+ }
+
if (alg == RTE_ACL_CLASSIFY_AVX2) {
#ifdef CC_AVX2_SUPPORT
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
return 0;
#endif
return -ENOTSUP;
if (alg == RTE_ACL_CLASSIFY_SSE) {
#ifdef RTE_ARCH_X86
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
return 0;
#endif
return -ENOTSUP;
return acl_check_alg_arm(alg);
case RTE_ACL_CLASSIFY_ALTIVEC:
return acl_check_alg_ppc(alg);
+ case RTE_ACL_CLASSIFY_AVX512X32:
+ case RTE_ACL_CLASSIFY_AVX512X16:
case RTE_ACL_CLASSIFY_AVX2:
case RTE_ACL_CLASSIFY_SSE:
return acl_check_alg_x86(alg);
#elif defined(RTE_ARCH_PPC_64)
RTE_ACL_CLASSIFY_ALTIVEC,
#elif defined(RTE_ARCH_X86)
+ RTE_ACL_CLASSIFY_AVX512X32,
+ RTE_ACL_CLASSIFY_AVX512X16,
RTE_ACL_CLASSIFY_AVX2,
RTE_ACL_CLASSIFY_SSE,
#endif
printf("acl context <%s>@%p\n", ctx->name, ctx);
printf(" socket_id=%"PRId32"\n", ctx->socket_id);
printf(" alg=%"PRId32"\n", ctx->alg);
+ printf(" first_load_sz=%"PRIu32"\n", ctx->first_load_sz);
printf(" max_rules=%"PRIu32"\n", ctx->max_rules);
printf(" rule_size=%"PRIu32"\n", ctx->rule_sz);
printf(" num_rules=%"PRIu32"\n", ctx->num_rules);