#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_memory.h>
+#include <rte_vect.h>
#include <rte_rib.h>
#include <rte_fib.h>
#include "dir24_8.h"
+#ifdef CC_DIR24_8_AVX512_SUPPORT
+
+#include "dir24_8_avx512.h"
+
+#endif /* CC_DIR24_8_AVX512_SUPPORT */
+
#define DIR24_8_NAMESIZE 64
#define ROUNDUP(x, y) RTE_ALIGN_CEIL(x, (1 << (32 - y)))
}
}
+static inline rte_fib_lookup_fn_t
+get_vector_fn(enum rte_fib_dir24_8_nh_sz nh_sz)
+{
+#ifdef CC_DIR24_8_AVX512_SUPPORT
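+	/* use AVX512 only when the CPU has AVX512F and EAL allows 512-bit SIMD */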
+ if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) ||
+ (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512))
+ return NULL;
+
+ switch (nh_sz) {
+ case RTE_FIB_DIR24_8_1B:
+ return rte_dir24_8_vec_lookup_bulk_1b;
+ case RTE_FIB_DIR24_8_2B:
+ return rte_dir24_8_vec_lookup_bulk_2b;
+ case RTE_FIB_DIR24_8_4B:
+ return rte_dir24_8_vec_lookup_bulk_4b;
+ case RTE_FIB_DIR24_8_8B:
+ return rte_dir24_8_vec_lookup_bulk_8b;
+ default:
+ return NULL;
+ }
+#else
+ RTE_SET_USED(nh_sz);
+#endif
+ return NULL;
+}
+
rte_fib_lookup_fn_t
dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type)
{
enum rte_fib_dir24_8_nh_sz nh_sz;
+ rte_fib_lookup_fn_t ret_fn;
struct dir24_8_tbl *dp = p;
	if (dp == NULL)
		return NULL;

	nh_sz = dp->nh_sz;

	switch (type) {
	case RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO:
		return get_scalar_fn(nh_sz);
	case RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE:
		return get_scalar_fn_inlined(nh_sz);
case RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI:
return dir24_8_lookup_bulk_uni;
+ case RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512:
+ return get_vector_fn(nh_sz);
+ case RTE_FIB_LOOKUP_DEFAULT:
+ ret_fn = get_vector_fn(nh_sz);
+ return (ret_fn != NULL) ? ret_fn : get_scalar_fn(nh_sz);
default:
return NULL;
}
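
For context, not part of the diff: a minimal sketch of how an application
opts in to the AVX512 path through the public API, assuming the rte_fib.h
wrappers as of DPDK 20.11 (BURST and ips are placeholder application state):

	struct rte_fib_conf conf = {
		.type = RTE_FIB_DIR24_8,
		.default_nh = 0,
		.max_routes = 1 << 16,
		.dir24_8 = {
			.nh_sz = RTE_FIB_DIR24_8_4B,
			.num_tbl8 = 1 << 15,
		},
	};
	struct rte_fib *fib = rte_fib_create("example_fib", SOCKET_ID_ANY, &conf);

	/* request AVX512 explicitly; fall back to the default selection */
	if (rte_fib_select_lookup(fib, RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512) < 0)
		rte_fib_select_lookup(fib, RTE_FIB_LOOKUP_DEFAULT);

	uint64_t next_hops[BURST];
	rte_fib_lookup_bulk(fib, ips, next_hops, BURST);
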
--- /dev/null
+++ b/lib/librte_fib/dir24_8_avx512.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_vect.h>
+#include <rte_fib.h>
+
+#include "dir24_8.h"
+#include "dir24_8_avx512.h"
+
+static __rte_always_inline void
+dir24_8_vec_lookup_x16(void *p, const uint32_t *ips,
+ uint64_t *next_hops, int size)
+{
+ struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;
+ __mmask16 msk_ext;
+ __mmask16 exp_msk = 0x5555;
+ __m512i ip_vec, idxes, res, bytes;
+ const __m512i zero = _mm512_set1_epi32(0);
+ const __m512i lsb = _mm512_set1_epi32(1);
+ const __m512i lsbyte_msk = _mm512_set1_epi32(0xff);
+ __m512i tmp1, tmp2, res_msk;
+ __m256i tmp256;
+ /* used to mask gather values if size is 1/2 (8/16 bit next hops) */
+ if (size == sizeof(uint8_t))
+ res_msk = _mm512_set1_epi32(UINT8_MAX);
+ else if (size == sizeof(uint16_t))
+ res_msk = _mm512_set1_epi32(UINT16_MAX);
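+	/* res_msk stays unused for 4-byte entries: the gather loads full dwords */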
+
+ ip_vec = _mm512_loadu_si512(ips);
+	/* use the 24 most significant bits of each IP as the tbl24 index */
+ idxes = _mm512_srli_epi32(ip_vec, 8);
+
+	/**
+	 * lookup in tbl24
+	 * kept inside branches to keep the compiler happy at -O0
+	 */
+ if (size == sizeof(uint8_t)) {
+ res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 1);
+ res = _mm512_and_epi32(res, res_msk);
+ } else if (size == sizeof(uint16_t)) {
+ res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 2);
+ res = _mm512_and_epi32(res, res_msk);
+ } else
+ res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 4);
+
+ /* get extended entries indexes */
+ msk_ext = _mm512_test_epi32_mask(res, lsb);
+
+ if (msk_ext != 0) {
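+		/* tbl8 index = (tbl24 entry >> 1) * 256 + least significant IP byte */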
+ idxes = _mm512_srli_epi32(res, 1);
+ idxes = _mm512_slli_epi32(idxes, 8);
+ bytes = _mm512_and_epi32(ip_vec, lsbyte_msk);
+ idxes = _mm512_maskz_add_epi32(msk_ext, idxes, bytes);
+ if (size == sizeof(uint8_t)) {
+ idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+ idxes, (const int *)dp->tbl8, 1);
+ idxes = _mm512_and_epi32(idxes, res_msk);
+ } else if (size == sizeof(uint16_t)) {
+ idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+ idxes, (const int *)dp->tbl8, 2);
+ idxes = _mm512_and_epi32(idxes, res_msk);
+ } else
+ idxes = _mm512_mask_i32gather_epi32(zero, msk_ext,
+ idxes, (const int *)dp->tbl8, 4);
+
+ res = _mm512_mask_blend_epi32(msk_ext, res, idxes);
+ }
+
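+	/* entries carry the next hop shifted left by one; drop the flag bit */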
+ res = _mm512_srli_epi32(res, 1);
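+	/*
+	 * zero-extend the 32-bit results to 64 bits: mask 0x5555 expands
+	 * them into the even dword lanes and zeroes the odd lanes
+	 */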
+ tmp1 = _mm512_maskz_expand_epi32(exp_msk, res);
+ tmp256 = _mm512_extracti32x8_epi32(res, 1);
+ tmp2 = _mm512_maskz_expand_epi32(exp_msk,
+ _mm512_castsi256_si512(tmp256));
+ _mm512_storeu_si512(next_hops, tmp1);
+ _mm512_storeu_si512(next_hops + 8, tmp2);
+}
+
+static __rte_always_inline void
+dir24_8_vec_lookup_x8_8b(void *p, const uint32_t *ips,
+ uint64_t *next_hops)
+{
+ struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;
+ const __m512i zero = _mm512_set1_epi32(0);
+ const __m512i lsbyte_msk = _mm512_set1_epi64(0xff);
+ const __m512i lsb = _mm512_set1_epi64(1);
+ __m512i res, idxes, bytes;
+ __m256i idxes_256, ip_vec;
+ __mmask8 msk_ext;
+
+ ip_vec = _mm256_loadu_si256((const void *)ips);
+	/* use the 24 most significant bits of each IP as the tbl24 index */
+ idxes_256 = _mm256_srli_epi32(ip_vec, 8);
+
+ /* lookup in tbl24 */
+ res = _mm512_i32gather_epi64(idxes_256, (const void *)dp->tbl24, 8);
+
+ /* get extended entries indexes */
+ msk_ext = _mm512_test_epi64_mask(res, lsb);
+
+ if (msk_ext != 0) {
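+		/* tbl8 index = (tbl24 entry >> 1) * 256 + least significant IP byte */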
+ bytes = _mm512_cvtepi32_epi64(ip_vec);
+ idxes = _mm512_srli_epi64(res, 1);
+ idxes = _mm512_slli_epi64(idxes, 8);
+ bytes = _mm512_and_epi64(bytes, lsbyte_msk);
+ idxes = _mm512_maskz_add_epi64(msk_ext, idxes, bytes);
+ idxes = _mm512_mask_i64gather_epi64(zero, msk_ext, idxes,
+ (const void *)dp->tbl8, 8);
+
+ res = _mm512_mask_blend_epi64(msk_ext, res, idxes);
+ }
+
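+	/* drop the extended-entry flag bit to recover the 64-bit next hops */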
+ res = _mm512_srli_epi64(res, 1);
+ _mm512_storeu_si512(next_hops, res);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_1b(void *p, const uint32_t *ips,
+ uint64_t *next_hops, const unsigned int n)
+{
+ uint32_t i;
+ for (i = 0; i < (n / 16); i++)
+ dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+ sizeof(uint8_t));
+
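+	/* process the n % 16 tail with the scalar bulk lookup */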
+ dir24_8_lookup_bulk_1b(p, ips + i * 16, next_hops + i * 16,
+ n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_2b(void *p, const uint32_t *ips,
+ uint64_t *next_hops, const unsigned int n)
+{
+ uint32_t i;
+ for (i = 0; i < (n / 16); i++)
+ dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+ sizeof(uint16_t));
+
+ dir24_8_lookup_bulk_2b(p, ips + i * 16, next_hops + i * 16,
+ n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_4b(void *p, const uint32_t *ips,
+ uint64_t *next_hops, const unsigned int n)
+{
+ uint32_t i;
+ for (i = 0; i < (n / 16); i++)
+ dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16,
+ sizeof(uint32_t));
+
+ dir24_8_lookup_bulk_4b(p, ips + i * 16, next_hops + i * 16,
+ n - i * 16);
+}
+
+void
+rte_dir24_8_vec_lookup_bulk_8b(void *p, const uint32_t *ips,
+ uint64_t *next_hops, const unsigned int n)
+{
+ uint32_t i;
+ for (i = 0; i < (n / 8); i++)
+ dir24_8_vec_lookup_x8_8b(p, ips + i * 8, next_hops + i * 8);
+
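+	/* process the n % 8 tail with the scalar bulk lookup */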
+ dir24_8_lookup_bulk_8b(p, ips + i * 8, next_hops + i * 8, n - i * 8);
+}
sources = files('rte_fib.c', 'rte_fib6.c', 'dir24_8.c', 'trie.c')
headers = files('rte_fib.h', 'rte_fib6.h')
deps += ['rib']
+
+# compile AVX512 version if:
+# we are building 64-bit binary AND binutils can generate proper code
+if dpdk_conf.has('RTE_ARCH_X86_64') and binutils_ok.returncode() == 0
+ # compile AVX512 version if either:
+ # a. we have AVX512F supported in minimum instruction set baseline
+ # b. it's not minimum instruction set, but supported by compiler
+ #
+ # in former case, just add avx512 C file to files list
+ # in latter case, compile c file to static lib, using correct
+ # compiler flags, and then have the .o file from static lib
+ # linked into main lib.
+
+ # check if all required flags already enabled (variant a).
+	fib_avx512_flags = ['__AVX512F__','__AVX512DQ__']
+	fib_avx512_on = true
+	foreach f:fib_avx512_flags
+		if cc.get_define(f, args: machine_args) == ''
+			fib_avx512_on = false
+		endif
+	endforeach
+
+	if fib_avx512_on == true
+ cflags += ['-DCC_DIR24_8_AVX512_SUPPORT']
+ sources += files('dir24_8_avx512.c')
+ elif cc.has_multi_arguments('-mavx512f', '-mavx512dq')
+ dir24_8_avx512_tmp = static_library('dir24_8_avx512_tmp',
+ 'dir24_8_avx512.c',
+ dependencies: static_rte_eal,
+ c_args: cflags + ['-mavx512f', '-mavx512dq'])
+ objs += dir24_8_avx512_tmp.extract_objects('dir24_8_avx512.c')
+ cflags += ['-DCC_DIR24_8_AVX512_SUPPORT']
+ endif
+endif