/*-
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __L3FWD_EM_SSE_H__
#define __L3FWD_EM_SSE_H__
/**
 * @file
 * This is an optional implementation of packet classification in the
 * Exact-Match path, using a sequential packet classification method.
 * While multi hash lookup tends to provide better performance, this
 * sequential path is disabled by default and can be enabled with the
 * NO_HASH_LOOKUP_MULTI global define at compilation time.
 */
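/*
 * Illustrative sketch only (not part of this header): one way a compile-time
 * define such as the one described above typically selects between the
 * sequential and the multi-lookup paths in the dispatching source file.
 * The dispatching file name and the alternative header
 * (l3fwd_em_hlm_sse.h) are assumptions here, shown for illustration:
 *
 *	#if defined(NO_HASH_LOOKUP_MULTI)
 *	#include "l3fwd_em_sse.h"	// sequential, one packet at a time
 *	#else
 *	#include "l3fwd_em_hlm_sse.h"	// hash-lookup-multi (burst) path
 *	#endif
 *
 * Under that assumption, passing -DNO_HASH_LOOKUP_MULTI in the build
 * flags (e.g. EXTRA_CFLAGS) would enable this sequential path.
 */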
#include "l3fwd_sse.h"
static inline __attribute__((always_inline)) uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint8_t portid)
{
	uint16_t next_hop;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	uint32_t tcp_or_udp;
	uint32_t l3_ptypes;

	tcp_or_udp = pkt->packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP);
	l3_ptypes = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
		/* Handle IPv4 headers. */
		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
				sizeof(struct ether_hdr));

		next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
				qconf->ipv4_lookup_struct);

		/* Fall back to the input port on an invalid or disabled port. */
		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;
	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
		/* Handle IPv6 headers. */
		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
				sizeof(struct ether_hdr));

		next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
				qconf->ipv6_lookup_struct);

		if (next_hop >= RTE_MAX_ETHPORTS ||
				(enabled_port_mask & 1 << next_hop) == 0)
			next_hop = portid;

		return next_hop;
	}
	/* Non-IP or non-TCP/UDP traffic is sent back on the input port. */
	return portid;
}
/* Buffer-optimized handling of packets, invoked from main_loop. */
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint8_t portid, struct lcore_conf *qconf)
{
	int32_t j;
	uint16_t dst_port[MAX_PKT_BURST];
	/* Resolve the destination port for each packet, then send in bulk. */
	for (j = 0; j < nb_rx; j++)
		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);

	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
#endif /* __L3FWD_EM_SSE_H__ */