4 * Copyright(c) 2016 Intel Corporation. All rights reserved.
5 * Copyright(c) 2017, Linaro Limited
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #ifndef _L3FWD_NEON_H_
37 #define _L3FWD_NEON_H_
40 #include "l3fwd_common.h"
43 * Update source and destination MAC addresses in the ethernet header.
44 * Perform RFC1812 checks and updates for IPV4 packets.
47 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
/* te[i]: original first 16 bytes of packet i (Ethernet header + 2 bytes). */
49 uint32x4_t te[FWDSTEP];
/* ve[i]: replacement header vector built from val_eth[dst_port[i]]. */
50 uint32x4_t ve[FWDSTEP];
/*
 * Raw 32-bit pointers to the start of each packet's data (the
 * Ethernet header), used for the 16-byte vector load/store below.
 */
53 p[0] = rte_pktmbuf_mtod(pkt[0], uint32_t *);
54 p[1] = rte_pktmbuf_mtod(pkt[1], uint32_t *);
55 p[2] = rte_pktmbuf_mtod(pkt[2], uint32_t *);
56 p[3] = rte_pktmbuf_mtod(pkt[3], uint32_t *);
/*
 * Fetch the precomputed dst/src MAC pair for each destination port
 * (val_eth[] is presumably the per-port MAC table from l3fwd — confirm),
 * and load the packet's current header for merging.
 */
58 ve[0] = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
59 te[0] = vld1q_u32(p[0]);
61 ve[1] = vreinterpretq_u32_s32(val_eth[dst_port[1]]);
62 te[1] = vld1q_u32(p[1]);
64 ve[2] = vreinterpretq_u32_s32(val_eth[dst_port[2]]);
65 te[2] = vld1q_u32(p[2]);
67 ve[3] = vreinterpretq_u32_s32(val_eth[dst_port[3]]);
68 te[3] = vld1q_u32(p[3]);
/*
 * Update last 4 bytes: keep lane 3 (bytes 12-15, i.e. ether_type plus
 * the first two payload bytes) from the original header so only the
 * 12 MAC-address bytes are overwritten.
 */
70 /* Update last 4 bytes */
71 ve[0] = vsetq_lane_u32(vgetq_lane_u32(te[0], 3), ve[0], 3);
72 ve[1] = vsetq_lane_u32(vgetq_lane_u32(te[1], 3), ve[1], 3);
73 ve[2] = vsetq_lane_u32(vgetq_lane_u32(te[2], 3), ve[2], 3);
74 ve[3] = vsetq_lane_u32(vgetq_lane_u32(te[3], 3), ve[3], 3);
/* Write the merged headers back to the packets. */
76 vst1q_u32(p[0], ve[0]);
77 vst1q_u32(p[1], ve[1]);
78 vst1q_u32(p[2], ve[2]);
79 vst1q_u32(p[3], ve[3]);
/*
 * RFC1812 checks/updates on the IPv4 header that follows each Ethernet
 * header; may rewrite dst_port[i] to BAD_PORT on invalid packets
 * (see rfc1812_process in l3fwd_common.h — behavior defined there).
 */
81 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
82 &dst_port[0], pkt[0]->packet_type);
83 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
84 &dst_port[1], pkt[1]->packet_type);
85 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
86 &dst_port[2], pkt[2]->packet_type);
87 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
88 &dst_port[3], pkt[3]->packet_type);
92 * Group consecutive packets with the same destination port in bursts of 4.
93 * Suppose we have an array of destination ports:
94 * dst_port[] = {a, b, c, d, e, ... }
95 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
96 * We do 4 comparisons at once; the result is a 4-bit mask.
97 * This mask is used as an index into a prebuilt array of pnum values.
99 static inline uint16_t *
100 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, uint16x8_t dp1,
/* Overlay view of pn[]: accessed both as a u64 and as u16 lanes. */
104 uint16_t u16[FWDSTEP + 1];
106 } *pnum = (void *)pn;
/* Per-lane bit weights: lane i contributes bit (1 << i) to the mask. */
109 uint16x8_t mask = {1, 2, 4, 8, 0, 0, 0, 0};
/*
 * Lane-wise compare dp1 vs dp2 (all-ones where equal), then AND with
 * the weights so the equal lanes form a 4-bit index (computed into v
 * on a line outside this excerpt) into the gptbl[] lookup table.
 */
111 dp1 = vceqq_u16(dp1, dp2);
112 dp1 = vandq_u16(dp1, mask);
115 /* update last port counter. */
116 lp[0] += gptbl[v].lpv;
118 /* if dest port value has changed. */
/*
 * Start a new group: seed the group counters from the prebuilt table,
 * mark the next slot with 1, and point lp at the counter of the group
 * currently being extended (gptbl[] is declared elsewhere — see
 * l3fwd_common.h).
 */
120 pnum->u64 = gptbl[v].pnum;
121 pnum->u16[FWDSTEP] = 1;
122 lp = pnum->u16 + gptbl[v].idx;
129 * Process one packet:
130 * Update source and destination MAC addresses in the ethernet header.
131 * Perform RFC1812 checks and updates for IPV4 packets.
134 process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
136 struct ether_hdr *eth_hdr;
/* Locate the Ethernet header at the start of the packet data. */
139 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
/* te: current first 16 bytes; ve: replacement MACs for the dest port. */
141 te = vld1q_u32((uint32_t *)eth_hdr);
142 ve = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
/*
 * RFC1812 checks/updates on the following IPv4 header; may set
 * *dst_port to BAD_PORT (see rfc1812_process in l3fwd_common.h).
 */
145 rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
/*
 * Preserve lane 3 (ether_type + first 2 payload bytes) from the
 * original header, then store the merged header back.
 */
148 ve = vcopyq_laneq_u32(ve, 3, te, 3);
149 vst1q_u32((uint32_t *)eth_hdr, ve);
153 * Send packets burst from pkts_burst to the ports in dst_port array
155 static __rte_always_inline void
156 send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
157 uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
/* pnum[i]: length of the consecutive-same-port group starting at i. */
163 uint16_t pnum[MAX_PKT_BURST + 1];
166 * Finish packet processing and group consecutive
167 * packets with the same destination port.
/* k: largest multiple of FWDSTEP (4) not exceeding nb_rx. */
169 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
/* Rewrite MACs / run RFC1812 on the first 4-packet group. */
176 processx4_step3(pkts_burst, dst_port);
178 /* dp1: <d[0], d[1], d[2], d[3], ... > */
179 dp1 = vld1q_u16(dst_port);
/* Process remaining full groups of 4, grouping as we go. */
181 for (j = FWDSTEP; j != k; j += FWDSTEP) {
182 processx4_step3(&pkts_burst[j], &dst_port[j]);
186 * <d[j-3], d[j-2], d[j-1], d[j], ... >
188 dp2 = vld1q_u16(&dst_port[j - FWDSTEP + 1]);
189 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
193 * <d[j], d[j+1], d[j+2], d[j+3], ... >
/* Rotate dp1 so its lanes line up with the next iteration's dp2. */
195 dp1 = vextq_u16(dp1, dp1, FWDSTEP - 1);
199 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
/*
 * Final group: duplicate the last port into the extra lane so the
 * 4-wide compare has a well-defined value past the end.
 */
201 dp2 = vextq_u16(dp1, dp1, 1);
202 dp2 = vsetq_lane_u16(vgetq_lane_u16(dp2, 2), dp2, 3);
203 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
206 * remove values added by the last repeated
/* dlp tracks the port of the previous packet for the scalar tail. */
210 dlp = dst_port[j - 1];
212 /* set dlp and lp to the never used values. */
/* No full group was processed; park lp on an unused sentinel slot. */
214 lp = pnum + MAX_PKT_BURST;
217 /* Process up to last 3 packets one by one. */
/* Cases fall through intentionally: 3 -> 2 -> 1 remaining packets. */
218 switch (nb_rx % FWDSTEP) {
220 process_packet(pkts_burst[j], dst_port + j);
221 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
225 process_packet(pkts_burst[j], dst_port + j);
226 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
230 process_packet(pkts_burst[j], dst_port + j);
231 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
236 * Send packets out, through destination port.
237 * Consecutive packets with the same destination port
238 * are already grouped together.
239 * If destination port for the packet equals BAD_PORT,
240 * then free the packet without sending it out.
/* k here is re-used as the current group's length (pnum[j]). */
242 for (j = 0; j < nb_rx; j += k) {
250 if (likely(pn != BAD_PORT))
251 send_packetsx4(qconf, pn, pkts_burst + j, k);
/* BAD_PORT group: drop every packet in it. */
253 for (m = j; m != j + k; m++)
254 rte_pktmbuf_free(pkts_burst[m]);
259 #endif /* _L3FWD_NEON_H_ */