/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *   Copyright(c) 2017, Linaro Limited
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
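
/*
 * Helpers for the exact-match (EM) path that process packets in groups of
 * EM_HASH_LOOKUP_COUNT, so that the 5-tuple hash lookups can be issued
 * through rte_hash_lookup_bulk() instead of one lookup per packet.
 */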
#ifndef __L3FWD_EM_HLM_H__
#define __L3FWD_EM_HLM_H__

#if defined RTE_ARCH_X86
#include "l3fwd_sse.h"
#include "l3fwd_em_hlm_sse.h"
#elif defined RTE_MACHINE_CPUFLAG_NEON
#include "l3fwd_neon.h"
#include "l3fwd_em_hlm_neon.h"
#endif
#ifdef RTE_ARCH_ARM64
#define EM_HASH_LOOKUP_COUNT 16
#else
#define EM_HASH_LOOKUP_COUNT 8
#endif
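
/*
 * Resolve the output port for a group of EM_HASH_LOOKUP_COUNT IPv4 packets
 * with a single bulk hash lookup. Packets that miss the hash, or whose
 * destination port is not enabled, are sent back on the RX port.
 */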
static __rte_always_inline void
em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
		uint8_t portid, uint16_t dst_port[])
{
	int i;
	int32_t ret[EM_HASH_LOOKUP_COUNT];
	union ipv4_5tuple_host key[EM_HASH_LOOKUP_COUNT];
	const void *key_array[EM_HASH_LOOKUP_COUNT];

	/* Extract the masked IPv4 5-tuple of every packet in the group. */
	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		get_ipv4_5tuple(m[i], mask0.x, &key[i]);
		key_array[i] = &key[i];
	}

	/* One bulk hash lookup for the whole group. */
	rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0],
			EM_HASH_LOOKUP_COUNT, ret);

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		dst_port[i] = (uint8_t) ((ret[i] < 0) ?
				portid : ipv4_l3fwd_out_if[ret[i]]);

		if (dst_port[i] >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port[i]) == 0)
			dst_port[i] = portid;
	}
}
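
/*
 * IPv6 counterpart of em_get_dst_port_ipv4xN(): one bulk hash lookup for a
 * group of EM_HASH_LOOKUP_COUNT packets, falling back to the RX port on a
 * hash miss or a disabled destination port.
 */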
static __rte_always_inline void
em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
		uint8_t portid, uint16_t dst_port[])
{
	int i;
	int32_t ret[EM_HASH_LOOKUP_COUNT];
	union ipv6_5tuple_host key[EM_HASH_LOOKUP_COUNT];
	const void *key_array[EM_HASH_LOOKUP_COUNT];

	/* Extract the masked IPv6 5-tuple of every packet in the group. */
	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		get_ipv6_5tuple(m[i], mask1.x, mask2.x, &key[i]);
		key_array[i] = &key[i];
	}

	/* One bulk hash lookup for the whole group. */
	rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0],
			EM_HASH_LOOKUP_COUNT, ret);

	for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
		dst_port[i] = (uint8_t) ((ret[i] < 0) ?
				portid : ipv6_l3fwd_out_if[ret[i]]);

		if (dst_port[i] >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << dst_port[i]) == 0)
			dst_port[i] = portid;
	}
}
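
/*
 * Single-packet lookup, used for groups with mixed packet types and for the
 * tail of the burst: classify one IPv4 or IPv6 TCP/UDP packet and return its
 * output port, or the RX port for anything else.
 */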
static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint8_t portid)
{
	uint8_t next_hop;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	uint32_t tcp_or_udp, l3_ptypes;

	tcp_or_udp = pkt->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
	l3_ptypes = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {

		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
				sizeof(struct ether_hdr));
		next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
				qconf->ipv4_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;

	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {

		/* Handle IPv6 headers. */
		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
				sizeof(struct ether_hdr));
		next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
				qconf->ipv6_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;
	}

	/* Non-IP or non-TCP/UDP traffic is sent back on the RX port. */
	return portid;
}
/*
 * Buffer optimized handling of packets, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint8_t portid, struct lcore_conf *qconf)
{
	int32_t i, j, pos;
	uint16_t dst_port[MAX_PKT_BURST];

	/*
	 * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
	 * in groups of EM_HASH_LOOKUP_COUNT.
	 */
	int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);

	for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
				struct ether_hdr *) + 1);
	}

	for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {

		uint32_t pkt_type = RTE_PTYPE_L3_MASK |
				RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP;
		uint32_t l3_type, tcp_or_udp;

		/* A type bit survives the AND only if every packet in the
		 * group has it set.
		 */
		for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
			pkt_type &= pkts_burst[j + i]->packet_type;

		l3_type = pkt_type & RTE_PTYPE_L3_MASK;
		tcp_or_udp = pkt_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);

		/* Prefetch the next group while this one is processed. */
		for (i = 0, pos = j + EM_HASH_LOOKUP_COUNT;
		     i < EM_HASH_LOOKUP_COUNT && pos < nb_rx; i++, pos++) {
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[pos],
					struct ether_hdr *) + 1);
		}

		if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV4)) {

			em_get_dst_port_ipv4xN(qconf, &pkts_burst[j], portid,
					&dst_port[j]);

		} else if (tcp_or_udp && (l3_type == RTE_PTYPE_L3_IPV6)) {

			em_get_dst_port_ipv6xN(qconf, &pkts_burst[j], portid,
					&dst_port[j]);

		} else {
			/* Mixed group: fall back to per-packet lookups. */
			for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
				dst_port[j + i] = em_get_dst_port(qconf,
						pkts_burst[j + i], portid);
		}
	}

	/* Remaining nb_rx % EM_HASH_LOOKUP_COUNT packets, one at a time. */
	for (; j < nb_rx; j++)
		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);

	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
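
/*
 * Example caller (a minimal sketch, not part of this header): the EM main
 * loop is assumed to hand each received burst straight to
 * l3fwd_em_send_packets(), e.g.
 *
 *	nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);
 *	if (nb_rx > 0)
 *		l3fwd_em_send_packets(nb_rx, pkts_burst, portid, qconf);
 */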
#endif /* __L3FWD_EM_HLM_H__ */