/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __L3FWD_EM_HLM_SSE_H__
#define __L3FWD_EM_HLM_SSE_H__
/*
 * This is an optional implementation of packet classification in the
 * Exact-Match path, using the rte_hash_lookup_multi (bulk lookup) method
 * from the previous implementation. Because sequential classification
 * appears to be faster, this path is disabled by default and can be
 * enabled at compile time with the HASH_LOOKUP_MULTI define.
 */
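/*
 * A minimal sketch of enabling this path, assuming the example is built
 * with the DPDK example makefiles and that extra compiler flags are passed
 * through EXTRA_CFLAGS (as the standard mk/ build system allows):
 *
 *   make EXTRA_CFLAGS="-DHASH_LOOKUP_MULTI"
 */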
#include <rte_hash.h>

#include "l3fwd_sse.h"
static inline __attribute__((always_inline)) void
em_get_dst_port_ipv4x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
		uint8_t portid, uint32_t dst_port[8])
{
	int32_t ret[8];
	union ipv4_5tuple_host key[8];
	__m128i data[8];
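	/*
	 * Each 16-byte load below starts at the IPv4 TTL field, so a single
	 * xmm register covers TTL, protocol, checksum, source/destination
	 * addresses and the first four L4 bytes (source/destination ports).
	 * mask0 then clears the bytes that are not part of the 5-tuple key.
	 */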
	data[0] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[0], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));
	data[1] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[1], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));
	data[2] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[2], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));
	data[3] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[3], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));
	data[4] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[4], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));
	data[5] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[5], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));
	data[6] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[6], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));
	data[7] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[7], __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv4_hdr, time_to_live)));

	key[0].xmm = _mm_and_si128(data[0], mask0.x);
	key[1].xmm = _mm_and_si128(data[1], mask0.x);
	key[2].xmm = _mm_and_si128(data[2], mask0.x);
	key[3].xmm = _mm_and_si128(data[3], mask0.x);
	key[4].xmm = _mm_and_si128(data[4], mask0.x);
	key[5].xmm = _mm_and_si128(data[5], mask0.x);
	key[6].xmm = _mm_and_si128(data[6], mask0.x);
	key[7].xmm = _mm_and_si128(data[7], mask0.x);
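	/*
	 * Look up all eight keys with a single bulk call; a negative return
	 * code means the flow was not found, in which case the packet is
	 * sent back out on the input port.
	 */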
	const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
			&key[4], &key[5], &key[6], &key[7]};

	rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
	dst_port[0] = (uint8_t) ((ret[0] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[0]]);
	dst_port[1] = (uint8_t) ((ret[1] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[1]]);
	dst_port[2] = (uint8_t) ((ret[2] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[2]]);
	dst_port[3] = (uint8_t) ((ret[3] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[3]]);
	dst_port[4] = (uint8_t) ((ret[4] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[4]]);
	dst_port[5] = (uint8_t) ((ret[5] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[5]]);
	dst_port[6] = (uint8_t) ((ret[6] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[6]]);
	dst_port[7] = (uint8_t) ((ret[7] < 0) ?
			portid : ipv4_l3fwd_out_if[ret[7]]);
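	/*
	 * Verify each resolved port is within range and enabled in
	 * enabled_port_mask; otherwise fall back to the input port.
	 */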
	if (dst_port[0] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[0]) == 0)
		dst_port[0] = portid;

	if (dst_port[1] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[1]) == 0)
		dst_port[1] = portid;

	if (dst_port[2] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[2]) == 0)
		dst_port[2] = portid;

	if (dst_port[3] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[3]) == 0)
		dst_port[3] = portid;

	if (dst_port[4] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[4]) == 0)
		dst_port[4] = portid;

	if (dst_port[5] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[5]) == 0)
		dst_port[5] = portid;

	if (dst_port[6] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[6]) == 0)
		dst_port[6] = portid;

	if (dst_port[7] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[7]) == 0)
		dst_port[7] = portid;
}

static inline void
get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,
		__m128i mask1, union ipv6_5tuple_host *key)
{
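	/*
	 * The IPv6 5-tuple does not fit in one xmm register: the three loads
	 * below read 48 bytes starting at the payload length field. The
	 * middle register is used unmasked because it is filled entirely by
	 * the source/destination addresses.
	 */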
	__m128i tmpdata0 = _mm_loadu_si128(
			rte_pktmbuf_mtod_offset(m0, __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv6_hdr, payload_len)));

	__m128i tmpdata1 = _mm_loadu_si128(
			rte_pktmbuf_mtod_offset(m0, __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv6_hdr, payload_len) +
				sizeof(__m128i)));

	__m128i tmpdata2 = _mm_loadu_si128(
			rte_pktmbuf_mtod_offset(m0, __m128i *,
				sizeof(struct ether_hdr) +
				offsetof(struct ipv6_hdr, payload_len) +
				sizeof(__m128i) + sizeof(__m128i)));
	key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
	key->xmm[1] = tmpdata1;
	key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
}

static inline __attribute__((always_inline)) void
em_get_dst_port_ipv6x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
		uint8_t portid, uint32_t dst_port[8])
{
	int32_t ret[8];
	union ipv6_5tuple_host key[8];
	get_ipv6_5tuple(m[0], mask1.x, mask2.x, &key[0]);
	get_ipv6_5tuple(m[1], mask1.x, mask2.x, &key[1]);
	get_ipv6_5tuple(m[2], mask1.x, mask2.x, &key[2]);
	get_ipv6_5tuple(m[3], mask1.x, mask2.x, &key[3]);
	get_ipv6_5tuple(m[4], mask1.x, mask2.x, &key[4]);
	get_ipv6_5tuple(m[5], mask1.x, mask2.x, &key[5]);
	get_ipv6_5tuple(m[6], mask1.x, mask2.x, &key[6]);
	get_ipv6_5tuple(m[7], mask1.x, mask2.x, &key[7]);
	const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
			&key[4], &key[5], &key[6], &key[7]};

	rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
	dst_port[0] = (uint8_t) ((ret[0] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[0]]);
	dst_port[1] = (uint8_t) ((ret[1] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[1]]);
	dst_port[2] = (uint8_t) ((ret[2] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[2]]);
	dst_port[3] = (uint8_t) ((ret[3] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[3]]);
	dst_port[4] = (uint8_t) ((ret[4] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[4]]);
	dst_port[5] = (uint8_t) ((ret[5] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[5]]);
	dst_port[6] = (uint8_t) ((ret[6] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[6]]);
	dst_port[7] = (uint8_t) ((ret[7] < 0) ?
			portid : ipv6_l3fwd_out_if[ret[7]]);
	if (dst_port[0] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[0]) == 0)
		dst_port[0] = portid;

	if (dst_port[1] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[1]) == 0)
		dst_port[1] = portid;

	if (dst_port[2] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[2]) == 0)
		dst_port[2] = portid;

	if (dst_port[3] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[3]) == 0)
		dst_port[3] = portid;

	if (dst_port[4] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[4]) == 0)
		dst_port[4] = portid;

	if (dst_port[5] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[5]) == 0)
		dst_port[5] = portid;

	if (dst_port[6] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[6]) == 0)
		dst_port[6] = portid;

	if (dst_port[7] >= RTE_MAX_ETHPORTS ||
			(enabled_port_mask & 1 << dst_port[7]) == 0)
		dst_port[7] = portid;
}

static inline __attribute__((always_inline)) uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint8_t portid)
{
	uint8_t next_hop;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
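	/*
	 * Classify a single packet: branch on the mbuf packet type and do a
	 * scalar exact-match lookup. This is the fallback used below for the
	 * tail of a burst and for groups of eight that mix L3 types.
	 */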
	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {

		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
				sizeof(struct ether_hdr));

		next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
				qconf->ipv4_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;

	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
		/* Handle IPv6 headers. */
		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
				sizeof(struct ether_hdr));

		next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
				qconf->ipv6_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;

	}

	return portid;
}
/*
 * Buffer optimized handling of packets, invoked
 * from main_loop.
 */
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint8_t portid, struct lcore_conf *qconf)
{
	int32_t j;
	uint32_t dst_port[MAX_PKT_BURST];
	/*
	 * Send nb_rx - nb_rx%8 packets
	 * in groups of 8.
	 */
	int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
	for (j = 0; j < n; j += 8) {
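		/*
		 * ANDing the packet_type of all eight mbufs gives a cheap
		 * test for "every packet in this group is IPv4" (or IPv6);
		 * only then can the vectorized x8 lookup be used.
		 */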
		uint32_t pkt_type =
			pkts_burst[j]->packet_type &
			pkts_burst[j+1]->packet_type &
			pkts_burst[j+2]->packet_type &
			pkts_burst[j+3]->packet_type &
			pkts_burst[j+4]->packet_type &
			pkts_burst[j+5]->packet_type &
			pkts_burst[j+6]->packet_type &
			pkts_burst[j+7]->packet_type;
		if (pkt_type & RTE_PTYPE_L3_IPV4) {

			em_get_dst_port_ipv4x8(qconf, &pkts_burst[j],
						portid, &dst_port[j]);

		} else if (pkt_type & RTE_PTYPE_L3_IPV6) {

			em_get_dst_port_ipv6x8(qconf, &pkts_burst[j],
						portid, &dst_port[j]);
		} else {
			dst_port[j]   = em_get_dst_port(qconf, pkts_burst[j], portid);
			dst_port[j+1] = em_get_dst_port(qconf, pkts_burst[j+1], portid);
			dst_port[j+2] = em_get_dst_port(qconf, pkts_burst[j+2], portid);
			dst_port[j+3] = em_get_dst_port(qconf, pkts_burst[j+3], portid);
			dst_port[j+4] = em_get_dst_port(qconf, pkts_burst[j+4], portid);
			dst_port[j+5] = em_get_dst_port(qconf, pkts_burst[j+5], portid);
			dst_port[j+6] = em_get_dst_port(qconf, pkts_burst[j+6], portid);
			dst_port[j+7] = em_get_dst_port(qconf, pkts_burst[j+7], portid);
		}
	}
	/* Classify any remaining packets one at a time. */
	for (; j < nb_rx; j++)
		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
#endif /* __L3FWD_EM_HLM_SSE_H__ */