/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation.
 * Copyright(c) 2017-2018 Linaro Limited.
 */

#ifndef __L3FWD_LPM_NEON_H__
#define __L3FWD_LPM_NEON_H__

#include <arm_neon.h>

#include "l3fwd_neon.h"
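
/*
 * NEON variant of the LPM forwarding path: the destination addresses of
 * four packets at a time are packed into one 128-bit vector and resolved
 * with a single rte_lpm_lookupx4() call.
 */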

/*
 * Read packet_type and destination IPV4 addresses from 4 mbufs.
 */
static inline void
processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
		int32x4_t *dip,
		uint32_t *ipv4_flag)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ether_hdr *eth_hdr;
	int32_t dst[FWDSTEP];

	eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	dst[0] = ipv4_hdr->dst_addr;
	ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
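
	/* ANDing the packet_type of the remaining mbufs leaves ipv4_flag[0]
	 * non-zero only if RTE_PTYPE_L3_IPV4 is set for all four packets. */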
	eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	dst[1] = ipv4_hdr->dst_addr;
	ipv4_flag[0] &= pkt[1]->packet_type;

	eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	dst[2] = ipv4_hdr->dst_addr;
	ipv4_flag[0] &= pkt[2]->packet_type;

	eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
	dst[3] = ipv4_hdr->dst_addr;
	ipv4_flag[0] &= pkt[3]->packet_type;
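
	/* Pack the four destination addresses into one 128-bit vector. */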
	dip[0] = vld1q_s32(dst);
}

/*
 * Lookup into LPM for destination port.
 * If lookup fails, use incoming port (portid) as destination port.
 */
static inline void
processx4_step2(const struct lcore_conf *qconf,
		int32x4_t dip,
		uint32_t ipv4_flag,
		uint16_t portid,
		struct rte_mbuf *pkt[FWDSTEP],
		uint16_t dprt[FWDSTEP])
{
	rte_xmm_t dst;
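
	/* dst_addr was read in network byte order; vrev32q_u8() byte-swaps
	 * each 32-bit lane so the lookup sees host-order addresses. */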
	dip = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(dip)));

	/* if all 4 packets are IPV4. */
	if (likely(ipv4_flag)) {
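		/* One vectorized lookup resolves all four addresses; lanes
		 * that miss in the LPM table get portid back. */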
		rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dst.u32,
				 portid);
		/* get rid of unused upper 16 bit for each dport. */
		vst1_s16((int16_t *)dprt, vqmovn_s32(dst.x));
	} else {
		dst.x = dip;
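		/* Mixed packet types: resolve each destination port with a
		 * per-packet lookup that also handles non-IPv4 packets. */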
		dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0],
						     dst.u32[0], portid);
		dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1],
						     dst.u32[1], portid);
		dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2],
						     dst.u32[2], portid);
		dprt[3] = lpm_get_dst_port_with_ipv4(qconf, pkt[3],
						     dst.u32[3], portid);
	}
}

/*
 * Buffer optimized handling of packets, invoked
 * from main_loop.
 */
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
			uint16_t portid, struct lcore_conf *qconf)
{
	int32_t i = 0, j = 0;
	uint16_t dst_port[MAX_PKT_BURST];
	int32x4_t dip;
	uint32_t ipv4_flag;
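	/* k: packets in complete FWDSTEP-sized groups; m: 0..3 leftovers. */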
	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
	const int32_t m = nb_rx % FWDSTEP;

	if (k) {
		for (i = 0; i < FWDSTEP; i++) {
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
						struct ether_hdr *) + 1);
		}
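
		/* Software-pipelined main loop: while group j is being
		 * classified, the headers of group j + FWDSTEP are
		 * prefetched. */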
		for (j = 0; j != k - FWDSTEP; j += FWDSTEP) {
			for (i = 0; i < FWDSTEP; i++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j + i + FWDSTEP],
						struct ether_hdr *) + 1);
			}

			processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
			processx4_step2(qconf, dip, ipv4_flag, portid,
					&pkts_burst[j], &dst_port[j]);
		}

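		/* The last complete group was already prefetched inside the
		 * loop; classify it without issuing further prefetches. */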
		processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
		processx4_step2(qconf, dip, ipv4_flag, portid, &pkts_burst[j],
				&dst_port[j]);

		j += FWDSTEP;
	}

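	/* Handle up to 3 packets that did not fill a complete group. */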
	if (m) {
		/* Prefetch last up to 3 packets one by one */
		switch (m) {
		case 3:
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
						struct ether_hdr *) + 1);
			j++;
			/* fall-through */
		case 2:
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
						struct ether_hdr *) + 1);
			j++;
			/* fall-through */
		case 1:
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
						struct ether_hdr *) + 1);
			j++;
		}

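		/* The prefetch switch advanced j by m; rewind before
		 * classifying the same packets below. */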
		j -= m;
		/* Classify last up to 3 packets one by one */
		switch (m) {
		case 3:
			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
						       portid);
			j++;
			/* fall-through */
		case 2:
			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
						       portid);
			j++;
			/* fall-through */
		case 1:
			dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
						       portid);
			j++;
		}
	}

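	/* send_packets_multi() batches consecutive packets that share a
	 * destination port into single TX bursts. */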
	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}

#endif /* __L3FWD_LPM_NEON_H__ */