From: Ashwin Sekhar T K
Date: Thu, 27 Apr 2017 12:44:18 +0000 (-0700)
Subject: efd: support lookup using NEON intrinsics
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=3f98dd87d2262430de2bd992a790e86b833e9191;p=dpdk.git

efd: support lookup using NEON intrinsics

* Added file lib/librte_efd/rte_efd_arm64.h to hold arm64 specific
  definitions
* Verified the changes with efd_autotest unit test case

Signed-off-by: Ashwin Sekhar T K
---

diff --git a/MAINTAINERS b/MAINTAINERS
index 00351ff9a0..c27359b4f2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -147,6 +147,7 @@ F: lib/librte_eal/common/include/arch/arm/*_64.h
 F: lib/librte_acl/acl_run_neon.*
 F: lib/librte_lpm/rte_lpm_neon.h
 F: lib/librte_hash/rte*_arm64.h
+F: lib/librte_efd/rte*_arm64.h
 F: drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
 F: drivers/net/i40e/i40e_rxtx_vec_neon.c
 F: drivers/net/virtio/virtio_rxtx_simple_neon.c
diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
index f601d62e32..4d9a088769 100644
--- a/lib/librte_efd/rte_efd.c
+++ b/lib/librte_efd/rte_efd.c
@@ -53,6 +53,8 @@
 #include "rte_efd.h"
 #if defined(RTE_ARCH_X86)
 #include "rte_efd_x86.h"
+#elif defined(RTE_ARCH_ARM64)
+#include "rte_efd_arm64.h"
 #endif
 
 #define EFD_KEY(key_idx, table) (table->keys + ((key_idx) * table->key_len))
@@ -103,6 +105,7 @@ allocated memory
 enum efd_lookup_internal_function {
 	EFD_LOOKUP_SCALAR = 0,
 	EFD_LOOKUP_AVX2,
+	EFD_LOOKUP_NEON,
 	EFD_LOOKUP_NUM
 };
 
@@ -673,6 +676,16 @@ rte_efd_create(const char *name, uint32_t max_num_rules, uint32_t key_len,
 	if (RTE_EFD_VALUE_NUM_BITS > 3 && rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
 		table->lookup_fn = EFD_LOOKUP_AVX2;
 	else
+#endif
+#if defined(RTE_ARCH_ARM64)
+	/*
+	 * For less than or equal to 16 bits, scalar function performs better
+	 * than vectorised version
+	 */
+	if (RTE_EFD_VALUE_NUM_BITS > 16 &&
+		rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+		table->lookup_fn = EFD_LOOKUP_NEON;
+	else
 #endif
 	table->lookup_fn = EFD_LOOKUP_SCALAR;
 
@@ -1271,6 +1284,15 @@ efd_lookup_internal(const struct efd_online_group_entry * const group,
 					group->lookup_table,
 					hash_val_a,
 					hash_val_b);
+		break;
+#endif
+#if defined(RTE_ARCH_ARM64)
+	case EFD_LOOKUP_NEON:
+		return efd_lookup_internal_neon(group->hash_idx,
+				group->lookup_table,
+				hash_val_a,
+				hash_val_b);
+		break;
 #endif
 	case EFD_LOOKUP_SCALAR:
 	/* Fall-through */
diff --git a/lib/librte_efd/rte_efd_arm64.h b/lib/librte_efd/rte_efd_arm64.h
new file mode 100644
index 0000000000..cc9341130e
--- /dev/null
+++ b/lib/librte_efd/rte_efd_arm64.h
@@ -0,0 +1,76 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2017.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * rte_efd_arm64.h
+ * This file holds all arm64 specific EFD functions
+ */
+
+#ifndef __RTE_EFD_ARM64_H__
+#define __RTE_EFD_ARM64_H__
+
+#include <rte_vect.h>
+
+static inline efd_value_t
+efd_lookup_internal_neon(const efd_hashfunc_t *group_hash_idx,
+		const efd_lookuptbl_t *group_lookup_table,
+		const uint32_t hash_val_a, const uint32_t hash_val_b)
+{
+	efd_value_t value = 0;
+	uint32_t i = 0;
+	uint32x4_t vhash_val_a = vmovq_n_u32(hash_val_a);
+	uint32x4_t vhash_val_b = vmovq_n_u32(hash_val_b);
+	int32x4_t vshift = {0, 1, 2, 3};
+	uint32x4_t vmask = vdupq_n_u32(0x1);
+	int32x4_t vincr = vdupq_n_s32(4);
+
+	for (; i < RTE_EFD_VALUE_NUM_BITS; i += 4) {
+		uint32x4_t vhash_idx = vshll_n_u16(
+			vld1_u16((uint16_t const *)&group_hash_idx[i]), 0);
+		uint32x4_t vlookup_table = vshll_n_u16(
+			vld1_u16((uint16_t const *)&group_lookup_table[i]), 0);
+		uint32x4_t vhash = vaddq_u32(vhash_val_a,
+				vmulq_u32(vhash_idx, vhash_val_b));
+		int32x4_t vbucket_idx = vnegq_s32(vreinterpretq_s32_u32(
+				vshrq_n_u32(vhash, EFD_LOOKUPTBL_SHIFT)));
+		uint32x4_t vresult = vshlq_u32(vlookup_table, vbucket_idx);
+
+		vresult = vandq_u32(vresult, vmask);
+		vresult = vshlq_u32(vresult, vshift);
+		value |= vaddvq_u32(vresult);
+		vshift = vaddq_s32(vshift, vincr);
+	}
+
+	return value;
+}
+
+#endif /* __RTE_EFD_ARM64_H__ */
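
For reference, the per-bit computation that the NEON loop above vectorises
four lanes at a time can be sketched in scalar C as below. The typedefs,
RTE_EFD_VALUE_NUM_BITS and EFD_LOOKUPTBL_SHIFT in the sketch are stand-ins
that mirror the internal definitions in rte_efd.h/rte_efd.c only to keep it
self-contained; their widths and values are assumptions, and the sketch is
not part of the patch. Note that the horizontal add (vaddvq_u32) in the NEON
loop is equivalent to the bitwise OR used here, because each lane contributes
a distinct bit position of the value.

#include <stdint.h>

/* Stand-ins mirroring the EFD internals; widths and values are assumptions. */
typedef uint32_t efd_value_t;
typedef uint16_t efd_hashfunc_t;
typedef uint16_t efd_lookuptbl_t;
#define RTE_EFD_VALUE_NUM_BITS 24       /* multiple of 4, > 16 so the NEON path applies */
#define EFD_LOOKUPTBL_SHIFT (32 - 4)    /* top 4 hash bits index a bit of the 16-bit table entry */

/*
 * Scalar sketch of what efd_lookup_internal_neon() computes: for each output
 * bit i, re-hash with group_hash_idx[i], use the top hash bits to select one
 * bit of group_lookup_table[i], and place that bit at position i of the value.
 */
static inline efd_value_t
efd_lookup_sketch_scalar(const efd_hashfunc_t *group_hash_idx,
		const efd_lookuptbl_t *group_lookup_table,
		uint32_t hash_val_a, uint32_t hash_val_b)
{
	efd_value_t value = 0;
	uint32_t i;

	for (i = 0; i < RTE_EFD_VALUE_NUM_BITS; i++) {
		/* hash = a + idx * b, as in the vmulq/vaddq step */
		uint32_t hash = hash_val_a +
				(uint32_t)group_hash_idx[i] * hash_val_b;
		/* shift the table entry right by the bucket index (vshlq with
		 * a negated shift) and keep the low bit (vandq with 0x1)
		 */
		uint32_t bit = (group_lookup_table[i] >>
				(hash >> EFD_LOOKUPTBL_SHIFT)) & 0x1;

		value |= (efd_value_t)bit << i;
	}

	return value;
}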