4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef __L3FWD_EM_SSE_H__
35 #define __L3FWD_EM_SSE_H__
/*
 * Bit masks for the per-burst packet validity byte used by
 * simple_ipv4_fwd_8pkts(): bit i set means packet i of the 8-packet
 * burst is still valid.  MASK_ALL_PKTS has all eight bits set;
 * EXCLUDE_nTH_PKT is ANDed into the mask to clear bit n-1 when the
 * corresponding packet fails the RFC 1812 sanity check.
 */
37 #define MASK_ALL_PKTS 0xff
38 #define EXCLUDE_1ST_PKT 0xfe
39 #define EXCLUDE_2ND_PKT 0xfd
40 #define EXCLUDE_3RD_PKT 0xfb
41 #define EXCLUDE_4TH_PKT 0xf7
42 #define EXCLUDE_5TH_PKT 0xef
43 #define EXCLUDE_6TH_PKT 0xdf
44 #define EXCLUDE_7TH_PKT 0xbf
45 #define EXCLUDE_8TH_PKT 0x7f
/*
 * Forward a burst of exactly eight IPv4 packets via the exact-match (EM)
 * hash table.  For each mbuf the 5-tuple key is built with SSE loads/masks,
 * all eight keys are resolved in a single rte_hash_lookup_multi() call,
 * the Ethernet addresses are rewritten, and each packet is queued for TX
 * with send_single_packet().  A failed lookup (ret[i] < 0) or a disabled /
 * out-of-range destination port makes the packet bounce back out of the
 * receiving port `portid`.
 *
 * NOTE(review): the declarations of `data`, `ret`, `dst_port` and the key
 * mask `mask0`, as well as several closing braces, fall on lines elided
 * from this chunk — confirm against the full file.
 */
48 simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid,
49 struct lcore_conf *qconf)
51 struct ether_hdr *eth_hdr[8];
52 struct ipv4_hdr *ipv4_hdr[8];
55 union ipv4_5tuple_host key[8];
/* Locate the Ethernet header of each of the eight mbufs. */
58 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
59 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
60 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
61 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
62 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
63 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
64 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
65 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
67 /* Handle IPv4 headers.*/
68 ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
69 sizeof(struct ether_hdr));
70 ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
71 sizeof(struct ether_hdr));
72 ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
73 sizeof(struct ether_hdr));
74 ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
75 sizeof(struct ether_hdr));
76 ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
77 sizeof(struct ether_hdr));
78 ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
79 sizeof(struct ether_hdr));
80 ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
81 sizeof(struct ether_hdr));
82 ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
83 sizeof(struct ether_hdr));
85 #ifdef DO_RFC_1812_CHECKS
86 /* Check to make sure the packet is valid (RFC1812) */
87 uint8_t valid_mask = MASK_ALL_PKTS;
/*
 * Any packet failing the check is freed immediately and its bit is
 * cleared from valid_mask; the fast 8-wide path below is only taken
 * when all eight bits survive.
 */
89 if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
90 rte_pktmbuf_free(m[0]);
91 valid_mask &= EXCLUDE_1ST_PKT;
93 if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
94 rte_pktmbuf_free(m[1]);
95 valid_mask &= EXCLUDE_2ND_PKT;
97 if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
98 rte_pktmbuf_free(m[2]);
99 valid_mask &= EXCLUDE_3RD_PKT;
101 if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
102 rte_pktmbuf_free(m[3]);
103 valid_mask &= EXCLUDE_4TH_PKT;
105 if (is_valid_ipv4_pkt(ipv4_hdr[4], m[4]->pkt_len) < 0) {
106 rte_pktmbuf_free(m[4]);
107 valid_mask &= EXCLUDE_5TH_PKT;
109 if (is_valid_ipv4_pkt(ipv4_hdr[5], m[5]->pkt_len) < 0) {
110 rte_pktmbuf_free(m[5]);
111 valid_mask &= EXCLUDE_6TH_PKT;
113 if (is_valid_ipv4_pkt(ipv4_hdr[6], m[6]->pkt_len) < 0) {
114 rte_pktmbuf_free(m[6]);
115 valid_mask &= EXCLUDE_7TH_PKT;
117 if (is_valid_ipv4_pkt(ipv4_hdr[7], m[7]->pkt_len) < 0) {
118 rte_pktmbuf_free(m[7]);
119 valid_mask &= EXCLUDE_8TH_PKT;
/*
 * Slow path: at least one packet was dropped, so the remaining valid
 * packets are forwarded one at a time via l3fwd_em_simple_forward().
 * NOTE(review): the early `return` for valid_mask == 0 and the
 * enclosing braces sit on lines elided from this chunk.
 */
121 if (unlikely(valid_mask != MASK_ALL_PKTS)) {
122 if (valid_mask == 0) {
127 for (i = 0; i < 8; i++) {
128 if ((0x1 << i) & valid_mask) {
129 l3fwd_em_simple_forward(m[i],
136 #endif /* End of #ifdef DO_RFC_1812_CHECKS */
/*
 * Load 16 bytes starting at the TTL field of each IPv4 header; that
 * span covers ttl, protocol, checksum, src/dst addresses and the first
 * four bytes of the L4 header (the ports) — the raw 5-tuple material.
 */
138 data[0] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[0], __m128i *,
139 sizeof(struct ether_hdr) +
140 offsetof(struct ipv4_hdr, time_to_live)));
141 data[1] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[1], __m128i *,
142 sizeof(struct ether_hdr) +
143 offsetof(struct ipv4_hdr, time_to_live)));
144 data[2] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[2], __m128i *,
145 sizeof(struct ether_hdr) +
146 offsetof(struct ipv4_hdr, time_to_live)));
147 data[3] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[3], __m128i *,
148 sizeof(struct ether_hdr) +
149 offsetof(struct ipv4_hdr, time_to_live)));
150 data[4] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[4], __m128i *,
151 sizeof(struct ether_hdr) +
152 offsetof(struct ipv4_hdr, time_to_live)));
153 data[5] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[5], __m128i *,
154 sizeof(struct ether_hdr) +
155 offsetof(struct ipv4_hdr, time_to_live)));
156 data[6] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[6], __m128i *,
157 sizeof(struct ether_hdr) +
158 offsetof(struct ipv4_hdr, time_to_live)));
159 data[7] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[7], __m128i *,
160 sizeof(struct ether_hdr) +
161 offsetof(struct ipv4_hdr, time_to_live)));
/*
 * Mask off the non-key bytes (presumably ttl/checksum — mask0 is
 * defined outside this chunk; verify against its initializer).
 */
163 key[0].xmm = _mm_and_si128(data[0], mask0);
164 key[1].xmm = _mm_and_si128(data[1], mask0);
165 key[2].xmm = _mm_and_si128(data[2], mask0);
166 key[3].xmm = _mm_and_si128(data[3], mask0);
167 key[4].xmm = _mm_and_si128(data[4], mask0);
168 key[5].xmm = _mm_and_si128(data[5], mask0);
169 key[6].xmm = _mm_and_si128(data[6], mask0);
170 key[7].xmm = _mm_and_si128(data[7], mask0);
/* Resolve all eight keys with one bulk hash lookup. */
172 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
173 &key[4], &key[5], &key[6], &key[7]};
175 rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
/* Lookup miss (ret[i] < 0) falls back to the input port. */
176 dst_port[0] = (uint8_t) ((ret[0] < 0) ?
177 portid : ipv4_l3fwd_out_if[ret[0]]);
178 dst_port[1] = (uint8_t) ((ret[1] < 0) ?
179 portid : ipv4_l3fwd_out_if[ret[1]]);
180 dst_port[2] = (uint8_t) ((ret[2] < 0) ?
181 portid : ipv4_l3fwd_out_if[ret[2]]);
182 dst_port[3] = (uint8_t) ((ret[3] < 0) ?
183 portid : ipv4_l3fwd_out_if[ret[3]]);
184 dst_port[4] = (uint8_t) ((ret[4] < 0) ?
185 portid : ipv4_l3fwd_out_if[ret[4]]);
186 dst_port[5] = (uint8_t) ((ret[5] < 0) ?
187 portid : ipv4_l3fwd_out_if[ret[5]]);
188 dst_port[6] = (uint8_t) ((ret[6] < 0) ?
189 portid : ipv4_l3fwd_out_if[ret[6]]);
190 dst_port[7] = (uint8_t) ((ret[7] < 0) ?
191 portid : ipv4_l3fwd_out_if[ret[7]]);
/* An out-of-range or disabled destination port also bounces back. */
193 if (dst_port[0] >= RTE_MAX_ETHPORTS ||
194 (enabled_port_mask & 1 << dst_port[0]) == 0)
195 dst_port[0] = portid;
197 if (dst_port[1] >= RTE_MAX_ETHPORTS ||
198 (enabled_port_mask & 1 << dst_port[1]) == 0)
199 dst_port[1] = portid;
201 if (dst_port[2] >= RTE_MAX_ETHPORTS ||
202 (enabled_port_mask & 1 << dst_port[2]) == 0)
203 dst_port[2] = portid;
205 if (dst_port[3] >= RTE_MAX_ETHPORTS ||
206 (enabled_port_mask & 1 << dst_port[3]) == 0)
207 dst_port[3] = portid;
209 if (dst_port[4] >= RTE_MAX_ETHPORTS ||
210 (enabled_port_mask & 1 << dst_port[4]) == 0)
211 dst_port[4] = portid;
213 if (dst_port[5] >= RTE_MAX_ETHPORTS ||
214 (enabled_port_mask & 1 << dst_port[5]) == 0)
215 dst_port[5] = portid;
217 if (dst_port[6] >= RTE_MAX_ETHPORTS ||
218 (enabled_port_mask & 1 << dst_port[6]) == 0)
219 dst_port[6] = portid;
221 if (dst_port[7] >= RTE_MAX_ETHPORTS ||
222 (enabled_port_mask & 1 << dst_port[7]) == 0)
223 dst_port[7] = portid;
225 #ifdef DO_RFC_1812_CHECKS
226 /* Update time to live and header checksum */
/*
 * Incremental checksum adjustment: decrementing TTL by one is
 * compensated by incrementing the (one's-complement) header checksum,
 * avoiding a full recomputation.
 */
227 --(ipv4_hdr[0]->time_to_live);
228 --(ipv4_hdr[1]->time_to_live);
229 --(ipv4_hdr[2]->time_to_live);
230 --(ipv4_hdr[3]->time_to_live);
231 ++(ipv4_hdr[0]->hdr_checksum);
232 ++(ipv4_hdr[1]->hdr_checksum);
233 ++(ipv4_hdr[2]->hdr_checksum);
234 ++(ipv4_hdr[3]->hdr_checksum);
235 --(ipv4_hdr[4]->time_to_live);
236 --(ipv4_hdr[5]->time_to_live);
237 --(ipv4_hdr[6]->time_to_live);
238 --(ipv4_hdr[7]->time_to_live);
239 ++(ipv4_hdr[4]->hdr_checksum);
240 ++(ipv4_hdr[5]->hdr_checksum);
241 ++(ipv4_hdr[6]->hdr_checksum);
242 ++(ipv4_hdr[7]->hdr_checksum);
/*
 * Write the precomputed 64-bit destination MAC value.  The 8-byte
 * store spills two bytes past the 6-byte d_addr into s_addr, which is
 * immediately overwritten by the ether_addr_copy() calls below.
 */
246 *(uint64_t *)&eth_hdr[0]->d_addr = dest_eth_addr[dst_port[0]];
247 *(uint64_t *)&eth_hdr[1]->d_addr = dest_eth_addr[dst_port[1]];
248 *(uint64_t *)&eth_hdr[2]->d_addr = dest_eth_addr[dst_port[2]];
249 *(uint64_t *)&eth_hdr[3]->d_addr = dest_eth_addr[dst_port[3]];
250 *(uint64_t *)&eth_hdr[4]->d_addr = dest_eth_addr[dst_port[4]];
251 *(uint64_t *)&eth_hdr[5]->d_addr = dest_eth_addr[dst_port[5]];
252 *(uint64_t *)&eth_hdr[6]->d_addr = dest_eth_addr[dst_port[6]];
253 *(uint64_t *)&eth_hdr[7]->d_addr = dest_eth_addr[dst_port[7]];
/* Source MAC becomes that of the egress port. */
256 ether_addr_copy(&ports_eth_addr[dst_port[0]], &eth_hdr[0]->s_addr);
257 ether_addr_copy(&ports_eth_addr[dst_port[1]], &eth_hdr[1]->s_addr);
258 ether_addr_copy(&ports_eth_addr[dst_port[2]], &eth_hdr[2]->s_addr);
259 ether_addr_copy(&ports_eth_addr[dst_port[3]], &eth_hdr[3]->s_addr);
260 ether_addr_copy(&ports_eth_addr[dst_port[4]], &eth_hdr[4]->s_addr);
261 ether_addr_copy(&ports_eth_addr[dst_port[5]], &eth_hdr[5]->s_addr);
262 ether_addr_copy(&ports_eth_addr[dst_port[6]], &eth_hdr[6]->s_addr);
263 ether_addr_copy(&ports_eth_addr[dst_port[7]], &eth_hdr[7]->s_addr);
/* Queue each packet for transmission on its resolved port. */
265 send_single_packet(qconf, m[0], (uint8_t)dst_port[0]);
266 send_single_packet(qconf, m[1], (uint8_t)dst_port[1]);
267 send_single_packet(qconf, m[2], (uint8_t)dst_port[2]);
268 send_single_packet(qconf, m[3], (uint8_t)dst_port[3]);
269 send_single_packet(qconf, m[4], (uint8_t)dst_port[4]);
270 send_single_packet(qconf, m[5], (uint8_t)dst_port[5]);
271 send_single_packet(qconf, m[6], (uint8_t)dst_port[6]);
272 send_single_packet(qconf, m[7], (uint8_t)dst_port[7]);
/*
 * Build the IPv6 5-tuple lookup key for one mbuf.  Three consecutive
 * 16-byte unaligned loads start at the payload_len field of the IPv6
 * header; word 0 is masked with mask0 and word 2 with mask1 to strip
 * the non-key bytes (the mask contents are defined outside this chunk),
 * while word 1 — lying entirely inside the 32-byte src/dst address
 * region — is taken verbatim.
 *
 * NOTE(review): the tail of the tmpdata1 load (the `sizeof(__m128i)`
 * offset term and closing parens) falls on a line elided from this
 * chunk — confirm against the full file.
 */
276 get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,
277 __m128i mask1, union ipv6_5tuple_host *key)
279 __m128i tmpdata0 = _mm_loadu_si128(
280 rte_pktmbuf_mtod_offset(m0, __m128i *,
281 sizeof(struct ether_hdr) +
282 offsetof(struct ipv6_hdr, payload_len)));
284 __m128i tmpdata1 = _mm_loadu_si128(
285 rte_pktmbuf_mtod_offset(m0, __m128i *,
286 sizeof(struct ether_hdr) +
287 offsetof(struct ipv6_hdr, payload_len) +
290 __m128i tmpdata2 = _mm_loadu_si128(
291 rte_pktmbuf_mtod_offset(m0, __m128i *,
292 sizeof(struct ether_hdr) +
293 offsetof(struct ipv6_hdr, payload_len) +
294 sizeof(__m128i) + sizeof(__m128i)));
296 key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
297 key->xmm[1] = tmpdata1;
298 key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
/*
 * Forward a burst of exactly eight IPv6 packets via the exact-match (EM)
 * hash table.  Mirrors simple_ipv4_fwd_8pkts(): keys are built with
 * get_ipv6_5tuple(), resolved in one rte_hash_lookup_multi() call, MACs
 * are rewritten, and packets are queued with send_single_packet().  A
 * lookup miss or a disabled/out-of-range destination port bounces the
 * packet back out of the input port `portid`.  No TTL/hop-limit update
 * is performed on this path.
 *
 * NOTE(review): the declarations of `ret`, `dst_port` and the key masks
 * `mask1`/`mask2` are on lines elided from this chunk — confirm against
 * the full file.
 */
302 simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid,
303 struct lcore_conf *qconf)
305 struct ether_hdr *eth_hdr[8];
306 __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8];
309 union ipv6_5tuple_host key[8];
/* Locate the Ethernet header of each of the eight mbufs. */
311 eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *);
312 eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *);
313 eth_hdr[2] = rte_pktmbuf_mtod(m[2], struct ether_hdr *);
314 eth_hdr[3] = rte_pktmbuf_mtod(m[3], struct ether_hdr *);
315 eth_hdr[4] = rte_pktmbuf_mtod(m[4], struct ether_hdr *);
316 eth_hdr[5] = rte_pktmbuf_mtod(m[5], struct ether_hdr *);
317 eth_hdr[6] = rte_pktmbuf_mtod(m[6], struct ether_hdr *);
318 eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct ether_hdr *);
320 /* Handle IPv6 headers.*/
321 ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
322 sizeof(struct ether_hdr));
323 ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
324 sizeof(struct ether_hdr));
325 ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
326 sizeof(struct ether_hdr));
327 ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
328 sizeof(struct ether_hdr));
329 ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
330 sizeof(struct ether_hdr));
331 ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
332 sizeof(struct ether_hdr));
333 ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
334 sizeof(struct ether_hdr));
335 ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
336 sizeof(struct ether_hdr));
/* Extract the 48-byte 5-tuple key from each packet. */
338 get_ipv6_5tuple(m[0], mask1, mask2, &key[0]);
339 get_ipv6_5tuple(m[1], mask1, mask2, &key[1]);
340 get_ipv6_5tuple(m[2], mask1, mask2, &key[2]);
341 get_ipv6_5tuple(m[3], mask1, mask2, &key[3]);
342 get_ipv6_5tuple(m[4], mask1, mask2, &key[4]);
343 get_ipv6_5tuple(m[5], mask1, mask2, &key[5]);
344 get_ipv6_5tuple(m[6], mask1, mask2, &key[6]);
345 get_ipv6_5tuple(m[7], mask1, mask2, &key[7]);
/* Resolve all eight keys with one bulk hash lookup. */
347 const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
348 &key[4], &key[5], &key[6], &key[7]};
350 rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
/* Lookup miss (ret[i] < 0) falls back to the input port. */
351 dst_port[0] = (uint8_t) ((ret[0] < 0) ?
352 portid : ipv6_l3fwd_out_if[ret[0]]);
353 dst_port[1] = (uint8_t) ((ret[1] < 0) ?
354 portid : ipv6_l3fwd_out_if[ret[1]]);
355 dst_port[2] = (uint8_t) ((ret[2] < 0) ?
356 portid : ipv6_l3fwd_out_if[ret[2]]);
357 dst_port[3] = (uint8_t) ((ret[3] < 0) ?
358 portid : ipv6_l3fwd_out_if[ret[3]]);
359 dst_port[4] = (uint8_t) ((ret[4] < 0) ?
360 portid : ipv6_l3fwd_out_if[ret[4]]);
361 dst_port[5] = (uint8_t) ((ret[5] < 0) ?
362 portid : ipv6_l3fwd_out_if[ret[5]]);
363 dst_port[6] = (uint8_t) ((ret[6] < 0) ?
364 portid : ipv6_l3fwd_out_if[ret[6]]);
365 dst_port[7] = (uint8_t) ((ret[7] < 0) ?
366 portid : ipv6_l3fwd_out_if[ret[7]]);
/* An out-of-range or disabled destination port also bounces back. */
368 if (dst_port[0] >= RTE_MAX_ETHPORTS ||
369 (enabled_port_mask & 1 << dst_port[0]) == 0)
370 dst_port[0] = portid;
372 if (dst_port[1] >= RTE_MAX_ETHPORTS ||
373 (enabled_port_mask & 1 << dst_port[1]) == 0)
374 dst_port[1] = portid;
376 if (dst_port[2] >= RTE_MAX_ETHPORTS ||
377 (enabled_port_mask & 1 << dst_port[2]) == 0)
378 dst_port[2] = portid;
380 if (dst_port[3] >= RTE_MAX_ETHPORTS ||
381 (enabled_port_mask & 1 << dst_port[3]) == 0)
382 dst_port[3] = portid;
384 if (dst_port[4] >= RTE_MAX_ETHPORTS ||
385 (enabled_port_mask & 1 << dst_port[4]) == 0)
386 dst_port[4] = portid;
388 if (dst_port[5] >= RTE_MAX_ETHPORTS ||
389 (enabled_port_mask & 1 << dst_port[5]) == 0)
390 dst_port[5] = portid;
392 if (dst_port[6] >= RTE_MAX_ETHPORTS ||
393 (enabled_port_mask & 1 << dst_port[6]) == 0)
394 dst_port[6] = portid;
396 if (dst_port[7] >= RTE_MAX_ETHPORTS ||
397 (enabled_port_mask & 1 << dst_port[7]) == 0)
398 dst_port[7] = portid;
/*
 * Write the precomputed 64-bit destination MAC value; the store spills
 * two bytes into s_addr, which the ether_addr_copy() below rewrites.
 */
401 *(uint64_t *)&eth_hdr[0]->d_addr = dest_eth_addr[dst_port[0]];
402 *(uint64_t *)&eth_hdr[1]->d_addr = dest_eth_addr[dst_port[1]];
403 *(uint64_t *)&eth_hdr[2]->d_addr = dest_eth_addr[dst_port[2]];
404 *(uint64_t *)&eth_hdr[3]->d_addr = dest_eth_addr[dst_port[3]];
405 *(uint64_t *)&eth_hdr[4]->d_addr = dest_eth_addr[dst_port[4]];
406 *(uint64_t *)&eth_hdr[5]->d_addr = dest_eth_addr[dst_port[5]];
407 *(uint64_t *)&eth_hdr[6]->d_addr = dest_eth_addr[dst_port[6]];
408 *(uint64_t *)&eth_hdr[7]->d_addr = dest_eth_addr[dst_port[7]];
/* Source MAC becomes that of the egress port. */
411 ether_addr_copy(&ports_eth_addr[dst_port[0]], &eth_hdr[0]->s_addr);
412 ether_addr_copy(&ports_eth_addr[dst_port[1]], &eth_hdr[1]->s_addr);
413 ether_addr_copy(&ports_eth_addr[dst_port[2]], &eth_hdr[2]->s_addr);
414 ether_addr_copy(&ports_eth_addr[dst_port[3]], &eth_hdr[3]->s_addr);
415 ether_addr_copy(&ports_eth_addr[dst_port[4]], &eth_hdr[4]->s_addr);
416 ether_addr_copy(&ports_eth_addr[dst_port[5]], &eth_hdr[5]->s_addr);
417 ether_addr_copy(&ports_eth_addr[dst_port[6]], &eth_hdr[6]->s_addr);
418 ether_addr_copy(&ports_eth_addr[dst_port[7]], &eth_hdr[7]->s_addr);
/* Queue each packet for transmission on its resolved port. */
420 send_single_packet(qconf, m[0], (uint8_t)dst_port[0]);
421 send_single_packet(qconf, m[1], (uint8_t)dst_port[1]);
422 send_single_packet(qconf, m[2], (uint8_t)dst_port[2]);
423 send_single_packet(qconf, m[3], (uint8_t)dst_port[3]);
424 send_single_packet(qconf, m[4], (uint8_t)dst_port[4]);
425 send_single_packet(qconf, m[5], (uint8_t)dst_port[5]);
426 send_single_packet(qconf, m[6], (uint8_t)dst_port[6]);
427 send_single_packet(qconf, m[7], (uint8_t)dst_port[7]);
431 * Buffer optimized handling of packets, invoked
/*
 * Process a received burst: packets are handled eight at a time.  If all
 * eight packets in a group share the IPv4 (or IPv6) L3 type — determined
 * by ANDing their packet_type fields — the SIMD 8-packet path is taken;
 * otherwise each of the eight is forwarded individually.  The trailing
 * nb_rx % 8 packets always go through the scalar path.
 *
 * NOTE(review): the declaration of `j`, the `pkt_type` assignment head,
 * the trailing arguments of the simple_ipv6_fwd_8pkts() call, the `else`
 * line and several braces fall on lines elided from this chunk — confirm
 * against the full file.
 */
435 l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
436 uint8_t portid, struct lcore_conf *qconf)
441 * Send nb_rx - nb_rx%8 packets
444 int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
446 for (j = 0; j < n; j += 8) {
/* AND of the eight packet_type words: a bit survives only if set in all. */
449 pkts_burst[j]->packet_type &
450 pkts_burst[j+1]->packet_type &
451 pkts_burst[j+2]->packet_type &
452 pkts_burst[j+3]->packet_type &
453 pkts_burst[j+4]->packet_type &
454 pkts_burst[j+5]->packet_type &
455 pkts_burst[j+6]->packet_type &
456 pkts_burst[j+7]->packet_type;
458 if (pkt_type & RTE_PTYPE_L3_IPV4) {
459 simple_ipv4_fwd_8pkts(
460 &pkts_burst[j], portid, qconf);
461 } else if (pkt_type & RTE_PTYPE_L3_IPV6) {
462 simple_ipv6_fwd_8pkts(&pkts_burst[j],
/* Mixed group: fall back to per-packet forwarding. */
465 l3fwd_em_simple_forward(pkts_burst[j], portid, qconf);
466 l3fwd_em_simple_forward(pkts_burst[j+1], portid, qconf);
467 l3fwd_em_simple_forward(pkts_burst[j+2], portid, qconf);
468 l3fwd_em_simple_forward(pkts_burst[j+3], portid, qconf);
469 l3fwd_em_simple_forward(pkts_burst[j+4], portid, qconf);
470 l3fwd_em_simple_forward(pkts_burst[j+5], portid, qconf);
471 l3fwd_em_simple_forward(pkts_burst[j+6], portid, qconf);
472 l3fwd_em_simple_forward(pkts_burst[j+7], portid, qconf);
/* Handle the remaining nb_rx % 8 packets one by one. */
475 for (; j < nb_rx ; j++)
476 l3fwd_em_simple_forward(pkts_burst[j], portid, qconf);
479 #endif /* __L3FWD_EM_SSE_H__ */