X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.h;h=21550444d1493f36c3b6b47e4e1f438f488534ea;hb=763e450ae24d88b87f1eb86643a2897b141d9c07;hp=c2b429f956215663e6fab04f095c93e878d3717f;hpb=f1f7261838b38f57fb1b3e62d1f97164439d017d;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index c2b429f956..21550444d1 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #ifndef _RTE_LPM_H_
@@ -45,6 +16,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -93,12 +65,14 @@ extern "C" {
 
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 /** @internal Tbl24 entry structure. */
+__extension__
 struct rte_lpm_tbl_entry_v20 {
 	/**
 	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
 	 * a group index pointing to a tbl8 structure (tbl24 only, when
 	 * valid_group is set)
 	 */
+	RTE_STD_C11
 	union {
 		uint8_t next_hop;
 		uint8_t group_idx;
@@ -116,6 +90,7 @@ struct rte_lpm_tbl_entry_v20 {
 	uint8_t depth :6; /**< Rule depth. */
 };
 
+__extension__
 struct rte_lpm_tbl_entry {
 	/**
 	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
@@ -137,6 +112,7 @@ struct rte_lpm_tbl_entry {
 };
 
 #else
+__extension__
 struct rte_lpm_tbl_entry_v20 {
 	uint8_t depth :6;
 	uint8_t valid_group :1;
@@ -147,6 +123,7 @@ struct rte_lpm_tbl_entry_v20 {
 	};
 };
 
+__extension__
 struct rte_lpm_tbl_entry {
 	uint32_t depth :6;
 	uint32_t valid_group :1;
@@ -193,7 +170,7 @@ struct rte_lpm_v20 {
 			__rte_cache_aligned; /**< LPM tbl24 table. */
 	struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
 			__rte_cache_aligned; /**< LPM tbl8 table. */
-	struct rte_lpm_rule_v20 rules_tbl[0] \
+	struct rte_lpm_rule_v20 rules_tbl[]
 			__rte_cache_aligned; /**< LPM rules. */
 };
 
@@ -475,103 +452,16 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
  *   if lookup would fail.
  */
 static inline void
-rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint32_t hop[4],
-	uint32_t defv)
-{
-	__m128i i24;
-	rte_xmm_t i8;
-	uint32_t tbl[4];
-	uint64_t idx, pt, pt2;
-	const uint32_t *ptbl;
-
-	const __m128i mask8 =
-		_mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
-
-	/*
-	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
-	 * as one 64-bit value (0x0300000003000000).
-	 */
-	const uint64_t mask_xv =
-		((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
-		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);
-
-	/*
-	 * RTE_LPM_LOOKUP_SUCCESS for 2 LPM entries
-	 * as one 64-bit value (0x0100000001000000).
-	 */
-	const uint64_t mask_v =
-		((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
-		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);
-
-	/* get 4 indexes for tbl24[]. */
-	i24 = _mm_srli_epi32(ip, CHAR_BIT);
-
-	/* extract values from tbl24[] */
-	idx = _mm_cvtsi128_si64(i24);
-	i24 = _mm_srli_si128(i24, sizeof(uint64_t));
-
-	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
-	tbl[0] = *ptbl;
-	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
-	tbl[1] = *ptbl;
-
-	idx = _mm_cvtsi128_si64(i24);
-
-	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
-	tbl[2] = *ptbl;
-	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
-	tbl[3] = *ptbl;
-
-	/* get 4 indexes for tbl8[]. */
-	i8.x = _mm_and_si128(ip, mask8);
-
-	pt = (uint64_t)tbl[0] |
-		(uint64_t)tbl[1] << 32;
-	pt2 = (uint64_t)tbl[2] |
-		(uint64_t)tbl[3] << 32;
-
-	/* search successfully finished for all 4 IP addresses. */
-	if (likely((pt & mask_xv) == mask_v) &&
-			likely((pt2 & mask_xv) == mask_v)) {
-		*(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
-		*(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
-		return;
-	}
+rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
+	uint32_t defv);
 
-	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
-			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
-		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
-		tbl[0] = *ptbl;
-	}
-	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
-			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
-		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
-		tbl[1] = *ptbl;
-	}
-	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
-			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
-		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
-		tbl[2] = *ptbl;
-	}
-	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
-			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
-		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
-		tbl[3] = *ptbl;
-	}
-
-	hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[0] & 0x00FFFFFF : defv;
-	hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[1] & 0x00FFFFFF : defv;
-	hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[2] & 0x00FFFFFF : defv;
-	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[3] & 0x00FFFFFF : defv;
-}
+#if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
+#include "rte_lpm_neon.h"
+#elif defined(RTE_ARCH_PPC_64)
+#include "rte_lpm_altivec.h"
+#else
+#include "rte_lpm_sse.h"
+#endif
 
 #ifdef __cplusplus
 }
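
Usage note (not part of the patch above): with this change rte_lpm_lookupx4() keeps one generic prototype on every architecture, taking the xmm_t type from rte_vect.h instead of the SSE-only __m128i, while the matching implementation is pulled in by the per-arch #include block at the end of the last hunk. A minimal caller sketch follows; the helper name lookup_four() and the UINT32_MAX default next hop are illustrative only, and the lpm table is assumed to have been created with rte_lpm_create() and populated with rte_lpm_add().

#include <stdint.h>

#include <rte_lpm.h>
#include <rte_vect.h>

/* Look up four IPv4 addresses (host byte order) with one vector call. */
static void
lookup_four(const struct rte_lpm *lpm, const uint32_t ip[4], uint32_t hop[4])
{
	rte_xmm_t ipx4;	/* rte_xmm_t/xmm_t come from rte_vect.h on every arch */

	/* Pack the four addresses into one vector register. */
	ipx4.u32[0] = ip[0];
	ipx4.u32[1] = ip[1];
	ipx4.u32[2] = ip[2];
	ipx4.u32[3] = ip[3];

	/* hop[i] receives the next hop for ip[i], or the default on a miss. */
	rte_lpm_lookupx4(lpm, ipx4.x, hop, UINT32_MAX);
}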