From b3509fa3653e940902a88044e6e1f24cb7bdea8d Mon Sep 17 00:00:00 2001
From: Vladimir Medvedkin
Date: Tue, 27 Oct 2020 15:11:26 +0000
Subject: [PATCH] fib: add AVX512 lookup

Add a new lookup implementation for the DIR24_8 algorithm using the
AVX512 instruction set.

Signed-off-by: Vladimir Medvedkin
Acked-by: Konstantin Ananyev
---
 doc/guides/rel_notes/release_20_11.rst |   4 +
 lib/librte_fib/dir24_8.c               |  39 ++++++
 lib/librte_fib/dir24_8_avx512.c        | 165 +++++++++++++++++++++++++
 lib/librte_fib/dir24_8_avx512.h        |  24 ++++
 lib/librte_fib/meson.build             |  34 +++++
 lib/librte_fib/rte_fib.c               |   2 +-
 lib/librte_fib/rte_fib.h               |   4 +
 7 files changed, 271 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_fib/dir24_8_avx512.c
 create mode 100644 lib/librte_fib/dir24_8_avx512.h

diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index dca8d41eb6..bf0a1ce83a 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -325,6 +325,10 @@ New Features
   * Added new ``RTE_ACL_CLASSIFY_AVX512X32`` vector implementation,
     which can process up to 32 flows in parallel. Requires AVX512 support.
 
+* **Added AVX512 lookup implementation for FIB.**
+
+  Added an AVX512 lookup implementation to the FIB library.
+
 * **Added support to update subport bandwidth dynamically.**
 
   * Added new API ``rte_sched_port_subport_profile_add`` to add new
diff --git a/lib/librte_fib/dir24_8.c b/lib/librte_fib/dir24_8.c
index 87400fcf0f..bb3bc9753b 100644
--- a/lib/librte_fib/dir24_8.c
+++ b/lib/librte_fib/dir24_8.c
@@ -13,11 +13,18 @@
 #include <rte_debug.h>
 #include <rte_fib.h>
 #include <rte_rib.h>
+#include <rte_vect.h>
 
 #include "dir24_8.h"
 
+#ifdef CC_DIR24_8_AVX512_SUPPORT
+
+#include "dir24_8_avx512.h"
+
+#endif /* CC_DIR24_8_AVX512_SUPPORT */
+
 #define DIR24_8_NAMESIZE	64
 
 #define ROUNDUP(x, y)	RTE_ALIGN_CEIL(x, (1 << (32 - y)))
@@ -56,10 +63,37 @@ get_scalar_fn_inlined(enum rte_fib_dir24_8_nh_sz nh_sz)
 	}
 }
 
+static inline rte_fib_lookup_fn_t
+get_vector_fn(enum rte_fib_dir24_8_nh_sz nh_sz)
+{
+#ifdef CC_DIR24_8_AVX512_SUPPORT
+	if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) ||
+			(rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512))
+		return NULL;
+
+	switch (nh_sz) {
+	case RTE_FIB_DIR24_8_1B:
+		return rte_dir24_8_vec_lookup_bulk_1b;
+	case RTE_FIB_DIR24_8_2B:
+		return rte_dir24_8_vec_lookup_bulk_2b;
+	case RTE_FIB_DIR24_8_4B:
+		return rte_dir24_8_vec_lookup_bulk_4b;
+	case RTE_FIB_DIR24_8_8B:
+		return rte_dir24_8_vec_lookup_bulk_8b;
+	default:
+		return NULL;
+	}
+#else
+	RTE_SET_USED(nh_sz);
+#endif
+	return NULL;
+}
+
 rte_fib_lookup_fn_t
 dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type)
 {
 	enum rte_fib_dir24_8_nh_sz nh_sz;
+	rte_fib_lookup_fn_t ret_fn;
 	struct dir24_8_tbl *dp = p;
 
 	if (dp == NULL)
@@ -74,6 +108,11 @@ dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type)
 		return get_scalar_fn_inlined(nh_sz);
 	case RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI:
 		return dir24_8_lookup_bulk_uni;
+	case RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512:
+		return get_vector_fn(nh_sz);
+	case RTE_FIB_LOOKUP_DEFAULT:
+		ret_fn = get_vector_fn(nh_sz);
+		return (ret_fn != NULL) ? ret_fn : get_scalar_fn(nh_sz);
 	default:
 		return NULL;
 	}
diff --git a/lib/librte_fib/dir24_8_avx512.c b/lib/librte_fib/dir24_8_avx512.c
new file mode 100644
index 0000000000..43dba28cfb
--- /dev/null
+++ b/lib/librte_fib/dir24_8_avx512.c
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_vect.h>
+#include <rte_fib.h>
+
+#include "dir24_8.h"
+#include "dir24_8_avx512.h"
+
+static __rte_always_inline void
+dir24_8_vec_lookup_x16(void *p, const uint32_t *ips,
+	uint64_t *next_hops, int size)
+{
+	struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;
+	__mmask16 msk_ext;
+	__mmask16 exp_msk = 0x5555;
+	__m512i ip_vec, idxes, res, bytes;
+	const __m512i zero = _mm512_set1_epi32(0);
+	const __m512i lsb = _mm512_set1_epi32(1);
+	const __m512i lsbyte_msk = _mm512_set1_epi32(0xff);
+	__m512i tmp1, tmp2, res_msk;
+	__m256i tmp256;
+	/* used to mask gather values if size is 1/2 (8/16 bit next hops) */
+	if (size == sizeof(uint8_t))
+		res_msk = _mm512_set1_epi32(UINT8_MAX);
+	else if (size == sizeof(uint16_t))
+		res_msk = _mm512_set1_epi32(UINT16_MAX);
+
+	ip_vec = _mm512_loadu_si512(ips);
+	/* mask 24 most significant bits */
+	idxes = _mm512_srli_epi32(ip_vec, 8);
+
+	/**
+	 * lookup in tbl24
+	 * Put it inside branch to make compiler happy with -O0
+	 */
+	if (size == sizeof(uint8_t)) {
+		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 1);
+		res = _mm512_and_epi32(res, res_msk);
+	} else if (size == sizeof(uint16_t)) {
+		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 2);
+		res = _mm512_and_epi32(res, res_msk);
+	} else
+		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 4);
+
+	/* get extended entries indexes */
+	msk_ext = _mm512_test_epi32_mask(res, lsb);
+
+	if (msk_ext != 0) {
+		idxes = _mm512_srli_epi32(res, 1);
+		idxes = _mm512_slli_epi32(idxes, 8);
+		bytes = _mm512_and_epi32(ip_vec, lsbyte_msk);
+		idxes = _mm512_maskz_add_epi32(msk_ext, idxes, bytes);
+		if (size == sizeof(uint8_t)) {
+			idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+				idxes, (const int *)dp->tbl8, 1);
+			idxes = _mm512_and_epi32(idxes, res_msk);
+		} else if (size == sizeof(uint16_t)) {
+			idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+				idxes, (const int *)dp->tbl8, 2);
+			idxes = _mm512_and_epi32(idxes, res_msk);
+		} else
+			idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+				idxes, (const int *)dp->tbl8, 4);
+
+		res = _mm512_mask_blend_epi32(msk_ext, res, idxes);
+	}
+
+	res = _mm512_srli_epi32(res, 1);
+	tmp1 = _mm512_maskz_expand_epi32(exp_msk, res);
+	tmp256 = _mm512_extracti32x8_epi32(res, 1);
+	tmp2 = _mm512_maskz_expand_epi32(exp_msk,
+		_mm512_castsi256_si512(tmp256));
+	_mm512_storeu_si512(next_hops, tmp1);
+	_mm512_storeu_si512(next_hops + 8, tmp2);
+}
+
+static __rte_always_inline void
+dir24_8_vec_lookup_x8_8b(void *p, const uint32_t *ips,
+	uint64_t *next_hops)
+{
+	struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;
+	const __m512i zero = _mm512_set1_epi32(0);
+	const __m512i lsbyte_msk = _mm512_set1_epi64(0xff);
+	const __m512i lsb = _mm512_set1_epi64(1);
+	__m512i res, idxes, bytes;
+	__m256i idxes_256, ip_vec;
+	__mmask8 msk_ext;
+
+	ip_vec = _mm256_loadu_si256((const void *)ips);
+	/* mask 24 most significant bits */
+	idxes_256 = _mm256_srli_epi32(ip_vec, 8);
+
+	/* lookup in tbl24 */
+	res = _mm512_i32gather_epi64(idxes_256, (const void *)dp->tbl24, 8);
+
+	/* get extended entries indexes */
+	msk_ext = _mm512_test_epi64_mask(res, lsb);
+
+	if (msk_ext != 0) {
+		bytes = _mm512_cvtepi32_epi64(ip_vec);
+		idxes = _mm512_srli_epi64(res, 1);
+		idxes = _mm512_slli_epi64(idxes, 8);
+		bytes = _mm512_and_epi64(bytes, lsbyte_msk);
+		idxes = _mm512_maskz_add_epi64(msk_ext, idxes, bytes);
+		idxes = _mm512_mask_i64gather_epi64(zero, msk_ext, idxes,
+			(const void *)dp->tbl8, 8);
+
+		res = _mm512_mask_blend_epi64(msk_ext, res, idxes);
+	}
+
+	res = _mm512_srli_epi64(res, 1);
+	_mm512_storeu_si512(next_hops, res);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_1b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 16); i++)
+		dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+			sizeof(uint8_t));
+
+	dir24_8_lookup_bulk_1b(p, ips + i * 16, next_hops + i * 16,
+		n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_2b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 16); i++)
+		dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+			sizeof(uint16_t));
+
+	dir24_8_lookup_bulk_2b(p, ips + i * 16, next_hops + i * 16,
+		n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_4b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 16); i++)
+		dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+			sizeof(uint32_t));
+
+	dir24_8_lookup_bulk_4b(p, ips + i * 16, next_hops + i * 16,
+		n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_8b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n)
+{
+	uint32_t i;
+	for (i = 0; i < (n / 8); i++)
+		dir24_8_vec_lookup_x8_8b(p, ips + i * 8, next_hops + i * 8);
+
+	dir24_8_lookup_bulk_8b(p, ips + i * 8, next_hops + i * 8, n - i * 8);
+}
diff --git a/lib/librte_fib/dir24_8_avx512.h b/lib/librte_fib/dir24_8_avx512.h
new file mode 100644
index 0000000000..1d3c2b9317
--- /dev/null
+++ b/lib/librte_fib/dir24_8_avx512.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _DIR248_AVX512_H_
+#define _DIR248_AVX512_H_
+
+void
+rte_dir24_8_vec_lookup_bulk_1b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+void
+rte_dir24_8_vec_lookup_bulk_2b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+void
+rte_dir24_8_vec_lookup_bulk_4b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+void
+rte_dir24_8_vec_lookup_bulk_8b(void *p, const uint32_t *ips,
+	uint64_t *next_hops, const unsigned int n);
+
+#endif /* _DIR248_AVX512_H_ */
diff --git a/lib/librte_fib/meson.build b/lib/librte_fib/meson.build
index 771828fbed..87de5542eb 100644
--- a/lib/librte_fib/meson.build
+++ b/lib/librte_fib/meson.build
@@ -5,3 +5,37 @@
 sources = files('rte_fib.c', 'rte_fib6.c', 'dir24_8.c', 'trie.c')
 headers = files('rte_fib.h', 'rte_fib6.h')
 deps += ['rib']
+
+# compile AVX512 version if:
+# we are building a 64-bit binary AND binutils can generate proper code
+if dpdk_conf.has('RTE_ARCH_X86_64') and binutils_ok.returncode() == 0
+	# compile AVX512 version if either:
+	# a. AVX512F is supported in the minimum instruction set baseline
+	# b. it's not in the minimum instruction set, but the compiler supports it
+	#
+	# in the former case, just add the avx512 C file to the sources list;
+	# in the latter case, compile the C file to a static lib using the
+	# correct compiler flags, and then have the .o file from that static
+	# lib linked into the main lib.
+
+	# check if all required flags are already enabled (variant a)
+	acl_avx512_flags = ['__AVX512F__','__AVX512DQ__']
+	acl_avx512_on = true
+	foreach f:acl_avx512_flags
+		if cc.get_define(f, args: machine_args) == ''
+			acl_avx512_on = false
+		endif
+	endforeach
+
+	if acl_avx512_on == true
+		cflags += ['-DCC_DIR24_8_AVX512_SUPPORT']
+		sources += files('dir24_8_avx512.c')
+	elif cc.has_multi_arguments('-mavx512f', '-mavx512dq')
+		dir24_8_avx512_tmp = static_library('dir24_8_avx512_tmp',
+				'dir24_8_avx512.c',
+				dependencies: static_rte_eal,
+				c_args: cflags + ['-mavx512f', '-mavx512dq'])
+		objs += dir24_8_avx512_tmp.extract_objects('dir24_8_avx512.c')
+		cflags += ['-DCC_DIR24_8_AVX512_SUPPORT']
+	endif
+endif
diff --git a/lib/librte_fib/rte_fib.c b/lib/librte_fib/rte_fib.c
index f2f343beeb..b354d4bfd0 100644
--- a/lib/librte_fib/rte_fib.c
+++ b/lib/librte_fib/rte_fib.c
@@ -108,7 +108,7 @@ init_dataplane(struct rte_fib *fib, __rte_unused int socket_id,
 		if (fib->dp == NULL)
 			return -rte_errno;
 		fib->lookup = dir24_8_get_lookup_fn(fib->dp,
-			RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO);
+			RTE_FIB_LOOKUP_DEFAULT);
 		fib->modify = dir24_8_modify;
 		return 0;
 	default:
diff --git a/lib/librte_fib/rte_fib.h b/lib/librte_fib/rte_fib.h
index ddb992c201..fef0749525 100644
--- a/lib/librte_fib/rte_fib.h
+++ b/lib/librte_fib/rte_fib.h
@@ -59,6 +59,8 @@ enum rte_fib_dir24_8_nh_sz {
 
 /** Type of lookup function implementation */
 enum rte_fib_lookup_type {
+	RTE_FIB_LOOKUP_DEFAULT,
+	/**< Selects the best implementation based on the max simd bitwidth */
 	RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO,
 	/**< Macro based lookup function */
 	RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE,
@@ -70,6 +72,8 @@ enum rte_fib_lookup_type {
 	/**<
 	 * Unified lookup function for all next hop sizes
 	 */
+	RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512
+	/**< Vector implementation using AVX512 */
 };
 
 /** FIB configuration structure */
-- 
2.20.1
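
Note on the lookup logic: the vector code above encodes each tbl24/tbl8
entry with the next hop shifted left by one bit and the least significant
bit flagging an "extended" entry that requires a second lookup in tbl8.
The scalar sketch below is an illustration only, not part of the patch;
the function name and the standalone table pointers are invented for this
note (in the library the tables live in struct dir24_8_tbl as dp->tbl24
and dp->tbl8). It mirrors what the gathers compute for a single address
in the 4-byte next-hop case:

#include <stdint.h>

/*
 * Illustrative scalar equivalent of one DIR24_8 lookup (4-byte next
 * hops). Not part of the patch; names are hypothetical.
 */
static inline uint64_t
dir24_8_scalar_lookup_4b(const uint32_t *tbl24, const uint32_t *tbl8,
	uint32_t ip)
{
	/* index tbl24 with the 24 most significant bits of the address */
	uint32_t entry = tbl24[ip >> 8];

	/* LSB set: entry holds a tbl8 group index, not a next hop;
	 * the last address byte selects within the 256-entry group
	 */
	if (entry & 1)
		entry = tbl8[((entry >> 1) << 8) + (ip & 0xff)];

	/* drop the flag bit to recover the next hop */
	return entry >> 1;
}

The AVX512 functions perform these steps for 16 addresses at once (8 for
the 8-byte next-hop case), using the mask from _mm512_test_epi32_mask()
so that only lanes with the flag bit set perform the second gather into
tbl8. With RTE_FIB_LOOKUP_DEFAULT, dir24_8_get_lookup_fn() returns this
vector path only when the CPU reports AVX512F and
rte_vect_get_max_simd_bitwidth() permits 512-bit operations; otherwise it
falls back to the scalar function.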