/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright(c) 2017-2018 Linaro Limited.
 */

#ifndef _L3FWD_NEON_H_
#define _L3FWD_NEON_H_

#include "l3fwd.h"
#include "l3fwd_common.h"
/*
 * Update source and destination MAC addresses in the ethernet header.
 * Perform RFC1812 checks and updates for IPV4 packets.
 */
static inline void
processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
{
	uint32x4_t te[FWDSTEP];
	uint32x4_t ve[FWDSTEP];
	uint32_t *p[FWDSTEP];

	p[0] = rte_pktmbuf_mtod(pkt[0], uint32_t *);
	p[1] = rte_pktmbuf_mtod(pkt[1], uint32_t *);
	p[2] = rte_pktmbuf_mtod(pkt[2], uint32_t *);
	p[3] = rte_pktmbuf_mtod(pkt[3], uint32_t *);

	ve[0] = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
	te[0] = vld1q_u32(p[0]);

	ve[1] = vreinterpretq_u32_s32(val_eth[dst_port[1]]);
	te[1] = vld1q_u32(p[1]);

	ve[2] = vreinterpretq_u32_s32(val_eth[dst_port[2]]);
	te[2] = vld1q_u32(p[2]);

	ve[3] = vreinterpretq_u32_s32(val_eth[dst_port[3]]);
	te[3] = vld1q_u32(p[3]);

	/* Update last 4 bytes: keep original EtherType + IP header start. */
	ve[0] = vsetq_lane_u32(vgetq_lane_u32(te[0], 3), ve[0], 3);
	ve[1] = vsetq_lane_u32(vgetq_lane_u32(te[1], 3), ve[1], 3);
	ve[2] = vsetq_lane_u32(vgetq_lane_u32(te[2], 3), ve[2], 3);
	ve[3] = vsetq_lane_u32(vgetq_lane_u32(te[3], 3), ve[3], 3);

	vst1q_u32(p[0], ve[0]);
	vst1q_u32(p[1], ve[1]);
	vst1q_u32(p[2], ve[2]);
	vst1q_u32(p[3], ve[3]);

	rfc1812_process((struct rte_ipv4_hdr *)
			((struct rte_ether_hdr *)p[0] + 1),
			&dst_port[0], pkt[0]->packet_type);
	rfc1812_process((struct rte_ipv4_hdr *)
			((struct rte_ether_hdr *)p[1] + 1),
			&dst_port[1], pkt[1]->packet_type);
	rfc1812_process((struct rte_ipv4_hdr *)
			((struct rte_ether_hdr *)p[2] + 1),
			&dst_port[2], pkt[2]->packet_type);
	rfc1812_process((struct rte_ipv4_hdr *)
			((struct rte_ether_hdr *)p[3] + 1),
			&dst_port[3], pkt[3]->packet_type);
}
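
/*
 * Illustrative sketch (not part of the upstream file): how a caller might
 * apply processx4_step3() across a burst whose length is already a multiple
 * of FWDSTEP. The helper name example_processx4_burst() is hypothetical and
 * only shows the calling pattern used by send_packets_multi() below.
 */
static inline void
example_processx4_burst(struct rte_mbuf **pkts, uint16_t *dst_port,
		int nb_pkts)
{
	int i;

	/* nb_pkts is assumed to be rounded down to a multiple of FWDSTEP. */
	for (i = 0; i != nb_pkts; i += FWDSTEP)
		processx4_step3(&pkts[i], &dst_port[i]);
}
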
/*
 * Group consecutive packets with the same destination port in bursts of 4.
 * Suppose we have an array of destination ports:
 * dst_port[] = {a, b, c, d, e, ... }
 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
 * We do 4 comparisons at once and the result is a 4-bit mask.
 * This mask is used as an index into the prebuilt array of pnum values.
 */
static inline uint16_t *
port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, uint16x8_t dp1,
	     uint16x8_t dp2)
{
	union {
		uint16_t u16[FWDSTEP + 1];
		uint64_t u64;
	} *pnum = (void *)pn;

	int32_t v;
	uint16x8_t mask = {1, 2, 4, 8, 0, 0, 0, 0};

	dp1 = vceqq_u16(dp1, dp2);
	dp1 = vandq_u16(dp1, mask);
	v = vaddvq_u16(dp1);

	/* update last port counter. */
	lp[0] += gptbl[v].lpv;
	rte_compiler_barrier();

	/* if dest port value has changed. */
	if (v != GRPMSK) {
		pnum->u64 = gptbl[v].pnum;
		pnum->u16[FWDSTEP] = 1;
		lp = pnum->u16 + gptbl[v].idx;
	}

	return lp;
}
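
/*
 * Worked example (a sketch, not in the upstream file): for destination ports
 * {5, 5, 5, 7, 7, ...} the vectors are dp1 = <5, 5, 5, 7> and
 * dp2 = <5, 5, 7, 7>. vceqq_u16() matches lanes 0, 1 and 3, so masking with
 * {1, 2, 4, 8} and summing yields index 1 + 2 + 8 = 11, the gptbl[] entry
 * that ends the current group after the third packet. The hypothetical
 * helper below isolates how that 4-bit index is computed.
 */
static inline uint16_t
example_port_group_index(uint16x8_t dp1, uint16x8_t dp2)
{
	uint16x8_t mask = {1, 2, 4, 8, 0, 0, 0, 0};

	/* Lane-wise equality, one bit per matching lane, summed into an index. */
	return vaddvq_u16(vandq_u16(vceqq_u16(dp1, dp2), mask));
}
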
/*
 * Process one packet:
 * Update source and destination MAC addresses in the ethernet header.
 * Perform RFC1812 checks and updates for IPV4 packets.
 */
static inline void
process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
{
	struct rte_ether_hdr *eth_hdr;
	uint32x4_t te, ve;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);

	te = vld1q_u32((uint32_t *)eth_hdr);
	ve = vreinterpretq_u32_s32(val_eth[dst_port[0]]);

	rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
			pkt->packet_type);

	ve = vcopyq_laneq_u32(ve, 3, te, 3);
	vst1q_u32((uint32_t *)eth_hdr, ve);
}
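
/*
 * Usage sketch (hypothetical, not part of the upstream file): handling one
 * leftover packet that does not fill a FWDSTEP-sized group. It assumes the
 * send_single_packet() helper from l3fwd.h is in scope; a packet whose
 * destination was set to BAD_PORT by rfc1812_process() is dropped instead.
 */
static inline void
example_process_single(struct lcore_conf *qconf, struct rte_mbuf *pkt,
		uint16_t dst_port)
{
	process_packet(pkt, &dst_port);

	if (dst_port != BAD_PORT)
		send_single_packet(qconf, pkt, dst_port);
	else
		rte_pktmbuf_free(pkt);
}
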
/**
 * Send packets burst from pkts_burst to the ports in dst_port array
 */
static __rte_always_inline void
send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
		uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
{
	int32_t k;
	int j = 0;
	uint16_t dlp;
	uint16_t *lp;
	uint16_t pnum[MAX_PKT_BURST + 1];

	/*
	 * Finish packet processing and group consecutive
	 * packets with the same destination port.
	 */
	k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
	if (k != 0) {
		uint16x8_t dp1, dp2;

		lp = pnum;
		lp[0] = 1;

		processx4_step3(pkts_burst, dst_port);

		/* dp1: <d[0], d[1], d[2], d[3], ... > */
		dp1 = vld1q_u16(dst_port);

		for (j = FWDSTEP; j != k; j += FWDSTEP) {
			processx4_step3(&pkts_burst[j], &dst_port[j]);

			/*
			 * dp2:
			 * <d[j-3], d[j-2], d[j-1], d[j], ... >
			 */
			dp2 = vld1q_u16(&dst_port[j - FWDSTEP + 1]);
			lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);

			/*
			 * dp1:
			 * <d[j], d[j+1], d[j+2], d[j+3], ... >
			 */
			dp1 = vextq_u16(dp2, dp1, FWDSTEP - 1);
		}

		/*
		 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
		 */
		dp2 = vextq_u16(dp1, dp1, 1);
		dp2 = vsetq_lane_u16(vgetq_lane_u16(dp2, 2), dp2, 3);
		lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);

		/*
		 * remove values added by the last repeated
		 * dst port.
		 */
		lp[0]--;
		dlp = dst_port[j - 1];
	} else {
		/* set dlp and lp to the never used values. */
		dlp = BAD_PORT - 1;
		lp = pnum + MAX_PKT_BURST;
	}

	/* Process up to last 3 packets one by one. */
	switch (nb_rx % FWDSTEP) {
	case 3:
		process_packet(pkts_burst[j], dst_port + j);
		GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
		j++;
		/* fall-through */
	case 2:
		process_packet(pkts_burst[j], dst_port + j);
		GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
		j++;
		/* fall-through */
	case 1:
		process_packet(pkts_burst[j], dst_port + j);
		GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
		j++;
	}

	/*
	 * Send packets out, through destination port.
	 * Consecutive packets with the same destination port
	 * are already grouped together.
	 * If destination port for the packet equals BAD_PORT,
	 * then free the packet without sending it out.
	 */
	for (j = 0; j < nb_rx; j += k) {
		int32_t m;
		uint16_t pn;

		pn = dst_port[j];
		k = pnum[j];

		if (likely(pn != BAD_PORT))
			send_packetsx4(qconf, pn, pkts_burst + j, k);
		else
			for (m = j; m != j + k; m++)
				rte_pktmbuf_free(pkts_burst[m]);
	}
}
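
/*
 * Illustrative call site (a sketch, not part of the upstream file): a NEON
 * forwarding loop resolves a destination port for every received packet
 * (l3fwd does this with an LPM or exact-match lookup) and then hands the
 * whole burst to send_packets_multi(). The helper name and the trivial
 * "echo back on the RX port" lookup below are placeholders, not l3fwd code.
 */
static inline void
example_forward_burst(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
		int nb_rx)
{
	uint16_t dst_port[MAX_PKT_BURST];
	int i;

	for (i = 0; i < nb_rx; i++)
		dst_port[i] = pkts_burst[i]->port;

	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
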
#endif /* _L3FWD_NEON_H_ */