X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fl3fwd%2Fl3fwd_em_hlm.h;h=278707c18c5071ed167e200f54b5bd6fc59cf491;hb=ded0d2c0aaca537e282f3ab9b0691148b87c1c26;hp=520672d51d3528e138f0652484a748a54375c44c;hpb=5df1c51033506513a2e7a318bb2aec641ff0b53a;p=dpdk.git diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h index 520672d51d..278707c18c 100644 --- a/examples/l3fwd/l3fwd_em_hlm.h +++ b/examples/l3fwd/l3fwd_em_hlm.h @@ -1,35 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * Copyright(c) 2017, Linaro Limited - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2018 Intel Corporation. + * Copyright(c) 2017-2018 Linaro Limited. */ #ifndef __L3FWD_EM_HLM_H__ @@ -38,7 +9,7 @@ #if defined RTE_ARCH_X86 #include "l3fwd_sse.h" #include "l3fwd_em_hlm_sse.h" -#elif defined RTE_MACHINE_CPUFLAG_NEON +#elif defined __ARM_NEON #include "l3fwd_neon.h" #include "l3fwd_em_hlm_neon.h" #endif @@ -52,7 +23,7 @@ static __rte_always_inline void em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[], - uint8_t portid, uint16_t dst_port[]) + uint16_t portid, uint16_t dst_port[]) { int i; int32_t ret[EM_HASH_LOOKUP_COUNT]; @@ -68,7 +39,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[], EM_HASH_LOOKUP_COUNT, ret); for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { - dst_port[i] = (uint8_t) ((ret[i] < 0) ? + dst_port[i] = ((ret[i] < 0) ? 
portid : ipv4_l3fwd_out_if[ret[i]]); if (dst_port[i] >= RTE_MAX_ETHPORTS || @@ -79,7 +50,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[], static __rte_always_inline void em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[], - uint8_t portid, uint16_t dst_port[]) + uint16_t portid, uint16_t dst_port[]) { int i; int32_t ret[EM_HASH_LOOKUP_COUNT]; @@ -95,7 +66,7 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[], EM_HASH_LOOKUP_COUNT, ret); for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { - dst_port[i] = (uint8_t) ((ret[i] < 0) ? + dst_port[i] = ((ret[i] < 0) ? portid : ipv6_l3fwd_out_if[ret[i]]); if (dst_port[i] >= RTE_MAX_ETHPORTS || @@ -104,13 +75,67 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[], } } +static __rte_always_inline void +em_get_dst_port_ipv4xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[], + uint16_t dst_port[]) +{ + int i; + int32_t ret[EM_HASH_LOOKUP_COUNT]; + union ipv4_5tuple_host key[EM_HASH_LOOKUP_COUNT]; + const void *key_array[EM_HASH_LOOKUP_COUNT]; + + for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { + get_ipv4_5tuple(m[i], mask0.x, &key[i]); + key_array[i] = &key[i]; + } + + rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0], + EM_HASH_LOOKUP_COUNT, ret); + + for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { + dst_port[i] = ((ret[i] < 0) ? + m[i]->port : ipv4_l3fwd_out_if[ret[i]]); + + if (dst_port[i] >= RTE_MAX_ETHPORTS || + (enabled_port_mask & 1 << dst_port[i]) == 0) + dst_port[i] = m[i]->port; + } +} + +static __rte_always_inline void +em_get_dst_port_ipv6xN_events(struct lcore_conf *qconf, struct rte_mbuf *m[], + uint16_t dst_port[]) +{ + int i; + int32_t ret[EM_HASH_LOOKUP_COUNT]; + union ipv6_5tuple_host key[EM_HASH_LOOKUP_COUNT]; + const void *key_array[EM_HASH_LOOKUP_COUNT]; + + for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { + get_ipv6_5tuple(m[i], mask1.x, mask2.x, &key[i]); + key_array[i] = &key[i]; + } + + rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], + EM_HASH_LOOKUP_COUNT, ret); + + for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { + dst_port[i] = ((ret[i] < 0) ? 
+ m[i]->port : ipv6_l3fwd_out_if[ret[i]]); + + if (dst_port[i] >= RTE_MAX_ETHPORTS || + (enabled_port_mask & 1 << dst_port[i]) == 0) + dst_port[i] = m[i]->port; + } +} + static __rte_always_inline uint16_t em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, - uint8_t portid) + uint16_t portid) { - uint8_t next_hop; - struct ipv4_hdr *ipv4_hdr; - struct ipv6_hdr *ipv6_hdr; + uint16_t next_hop; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; uint32_t tcp_or_udp; uint32_t l3_ptypes; @@ -120,8 +145,8 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) { /* Handle IPv4 headers.*/ - ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *, - sizeof(struct ether_hdr)); + ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *, + sizeof(struct rte_ether_hdr)); next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid, qconf->ipv4_lookup_struct); @@ -135,8 +160,8 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, } else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) { /* Handle IPv6 headers.*/ - ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *, - sizeof(struct ether_hdr)); + ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *, + sizeof(struct rte_ether_hdr)); next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct); @@ -158,7 +183,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, */ static inline void l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, - uint8_t portid, struct lcore_conf *qconf) + uint16_t portid, struct lcore_conf *qconf) { int32_t i, j, pos; uint16_t dst_port[MAX_PKT_BURST]; @@ -171,7 +196,7 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) { rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], - struct ether_hdr *) + 1); + struct rte_ether_hdr *) + 1); } for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) { @@ -188,8 +213,9 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT; i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) { - rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[pos], - struct ether_hdr *) + 1); + rte_prefetch0(rte_pktmbuf_mtod( + pkts_burst[pos], + struct rte_ether_hdr *) + 1); } if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) { @@ -215,4 +241,81 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, send_packets_multi(qconf, pkts_burst, dst_port, nb_rx); } + +/* + * Buffer optimized handling of events, invoked + * from main_loop. + */ +static inline void +l3fwd_em_process_events(int nb_rx, struct rte_event **ev, + struct lcore_conf *qconf) +{ + int32_t i, j, pos; + uint16_t dst_port[MAX_PKT_BURST]; + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + + /* + * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets + * in groups of EM_HASH_LOOKUP_COUNT. 
+ */ + int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT); + + for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) { + pkts_burst[j] = ev[j]->mbuf; + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], + struct rte_ether_hdr *) + 1); + } + + for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) { + + uint32_t pkt_type = RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP; + uint32_t l3_type, tcp_or_udp; + + for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) + pkt_type &= pkts_burst[j + i]->packet_type; + + l3_type = pkt_type & RTE_PTYPE_L3_MASK; + tcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP); + + for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT; + i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) { + rte_prefetch0(rte_pktmbuf_mtod( + pkts_burst[pos], + struct rte_ether_hdr *) + 1); + } + + if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) { + + em_get_dst_port_ipv4xN_events(qconf, &pkts_burst[j], + &dst_port[j]); + + } else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) { + + em_get_dst_port_ipv6xN_events(qconf, &pkts_burst[j], + &dst_port[j]); + + } else { + for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { + pkts_burst[j + i]->port = em_get_dst_port(qconf, + pkts_burst[j + i], + pkts_burst[j + i]->port); + process_packet(pkts_burst[j + i], + &pkts_burst[j + i]->port); + } + continue; + } + processx4_step3(&pkts_burst[j], &dst_port[j]); + + for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) + pkts_burst[j + i]->port = dst_port[j + i]; + + } + + for (; j < nb_rx; j++) { + pkts_burst[j]->port = em_get_dst_port(qconf, pkts_burst[j], + pkts_burst[j]->port); + process_packet(pkts_burst[j], &pkts_burst[j]->port); + } +} #endif /* __L3FWD_EM_HLM_H__ */
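
The hunks above all lean on one pattern: gather EM_HASH_LOOKUP_COUNT 5-tuple
keys into an array of pointers, resolve them with a single
rte_hash_lookup_bulk() call, and treat a negative ret[i] as a miss that falls
back to the input port. A minimal standalone sketch of that rte_hash
convention follows; the table parameters, key type, and group size are
illustrative assumptions rather than the values l3fwd configures, and
rte_eal_init() is assumed to have already run.

    #include <stdint.h>
    #include <rte_hash.h>
    #include <rte_jhash.h>

    #define DEMO_BULK 8     /* stand-in for EM_HASH_LOOKUP_COUNT */

    static void
    demo_bulk_lookup(void)
    {
            struct rte_hash_parameters params = {
                    .name = "demo_hash",            /* illustrative */
                    .entries = 1024,
                    .key_len = sizeof(uint32_t),
                    .hash_func = rte_jhash,
                    .socket_id = 0,     /* rte_socket_id() in real code */
            };
            struct rte_hash *h = rte_hash_create(&params);
            uint32_t keys[DEMO_BULK];
            const void *key_array[DEMO_BULK];
            int32_t ret[DEMO_BULK];
            int i;

            if (h == NULL)
                    return;

            for (i = 0; i < DEMO_BULK; i++) {
                    keys[i] = i;
                    key_array[i] = &keys[i];
            }

            /* Insert one key so the bulk lookup sees a hit as well as
             * misses; rte_hash_add_key() returns the entry index (>= 0),
             * which l3fwd uses to index ipv4_l3fwd_out_if[]. */
            rte_hash_add_key(h, &keys[0]);

            /* One call resolves all DEMO_BULK keys: ret[0] >= 0 (hit),
             * ret[1..7] < 0 (miss) -- the same (ret[i] < 0) test the
             * patch uses to fall back to the input port. */
            rte_hash_lookup_bulk(h, key_array, DEMO_BULK, ret);

            rte_hash_free(h);
    }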
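Before a group may take that bulk path, both loops AND the packet_type of
every packet in the group together, so a single test decides whether all of
them are IPv4 (or IPv6) carrying TCP/UDP. A short sketch of that check; the
helper name and the plain uint32_t array standing in for the mbufs'
packet_type fields are assumptions for illustration:

    #include <stdint.h>
    #include <rte_mbuf_ptype.h>

    /* types[] stands in for pkts_burst[j .. j+group-1]->packet_type. */
    static int
    demo_group_is_ipv4_tcp_udp(const uint32_t types[], int group)
    {
            uint32_t pkt_type = RTE_PTYPE_L3_MASK |
                                RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;
            int i;

            /* AND-ing keeps only the type bits common to every packet,
             * so one comparison covers the whole group. */
            for (i = 0; i < group; i++)
                    pkt_type &= types[i];

            return (pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)) &&
                   (pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4;
    }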
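Finally, l3fwd_em_send_packets() and the new l3fwd_em_process_events() split
the burst the same way: RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT) rounds
the burst size down to a whole number of groups, full groups go through the
bulk lookup above, and the trailing loop handles the remainder one packet at
a time via em_get_dst_port(). A small sketch of that arithmetic, with an
assumed group size of 8 (RTE_ALIGN_FLOOR requires a power-of-two alignment):

    #include <stdint.h>
    #include <rte_common.h>         /* RTE_ALIGN_FLOOR */

    static int32_t
    demo_grouping(int32_t nb_rx)
    {
            const int32_t group = 8;        /* assumed EM_HASH_LOOKUP_COUNT */
            int32_t n = RTE_ALIGN_FLOOR(nb_rx, group); /* nb_rx - nb_rx % group */
            int32_t j, bulk_calls = 0, tail_pkts = 0;

            for (j = 0; j < n; j += group)
                    bulk_calls++;   /* one bulk lookup covers 'group' packets */

            for (; j < nb_rx; j++)
                    tail_pkts++;    /* leftovers take the scalar path */

            /* e.g. nb_rx = 29 -> n = 24: bulk_calls = 3, tail_pkts = 5. */
            return bulk_calls * group + tail_pkts;  /* always equals nb_rx */
    }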