1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
10 #include "l3fwd_common.h"
13 * Update source and destination MAC addresses in the ethernet header.
14 * Perform RFC1812 checks and updates for IPV4 packets.
17 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
/*
 * NOTE(review): this excerpt is incomplete -- the return type line, the
 * opening brace and the local declarations of p[], te[] and ve[]
 * (original lines 18-22) are not visible here.
 */
/* p[i]: pointer to the start of packet i, i.e. its ethernet header. */
23 p[0] = rte_pktmbuf_mtod(pkt[0], __m128i *);
24 p[1] = rte_pktmbuf_mtod(pkt[1], __m128i *);
25 p[2] = rte_pktmbuf_mtod(pkt[2], __m128i *);
26 p[3] = rte_pktmbuf_mtod(pkt[3], __m128i *);
/*
 * ve[i]: precomputed MAC-address words for the egress port, looked up
 * in the val_eth table (defined elsewhere in l3fwd).
 * te[i]: current first 16 bytes of the packet's ethernet header
 * (unaligned load).
 */
28 ve[0] = val_eth[dst_port[0]];
29 te[0] = _mm_loadu_si128(p[0]);
31 ve[1] = val_eth[dst_port[1]];
32 te[1] = _mm_loadu_si128(p[1]);
34 ve[2] = val_eth[dst_port[2]];
35 te[2] = _mm_loadu_si128(p[2]);
37 ve[3] = val_eth[dst_port[3]];
38 te[3] = _mm_loadu_si128(p[3]);
40 /* Update first 12 bytes, keep rest bytes intact. */
/*
 * MASK_ETH (defined elsewhere) selects which 16-bit words of the
 * blend come from ve[i] -- presumably the 12 bytes of dst+src MAC,
 * per the comment above.
 */
41 te[0] = _mm_blend_epi16(te[0], ve[0], MASK_ETH);
42 te[1] = _mm_blend_epi16(te[1], ve[1], MASK_ETH);
43 te[2] = _mm_blend_epi16(te[2], ve[2], MASK_ETH);
44 te[3] = _mm_blend_epi16(te[3], ve[3], MASK_ETH);
/* Write the patched headers back in place (unaligned stores). */
46 _mm_storeu_si128(p[0], te[0]);
47 _mm_storeu_si128(p[1], te[1]);
48 _mm_storeu_si128(p[2], te[2]);
49 _mm_storeu_si128(p[3], te[3]);
/*
 * RFC1812 checks/updates on the IPv4 header that immediately follows
 * each ethernet header. Each call receives &dst_port[i], so it may
 * rewrite the destination port (presumably to BAD_PORT for packets
 * that fail validation -- rfc1812_process comes in via
 * l3fwd_common.h; confirm there).
 */
51 rfc1812_process((struct rte_ipv4_hdr *)
52 ((struct rte_ether_hdr *)p[0] + 1),
53 &dst_port[0], pkt[0]->packet_type);
54 rfc1812_process((struct rte_ipv4_hdr *)
55 ((struct rte_ether_hdr *)p[1] + 1),
56 &dst_port[1], pkt[1]->packet_type);
57 rfc1812_process((struct rte_ipv4_hdr *)
58 ((struct rte_ether_hdr *)p[2] + 1),
59 &dst_port[2], pkt[2]->packet_type);
60 rfc1812_process((struct rte_ipv4_hdr *)
61 ((struct rte_ether_hdr *)p[3] + 1),
62 &dst_port[3], pkt[3]->packet_type);
66 * Group consecutive packets with the same destination port in bursts of 4.
67 * Suppose we have an array of destination ports:
68 * dst_port[] = {a, b, c, d, e, ... }
69 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
70 * We do 4 comparisons at once and the result is a 4-bit mask.
71 * This mask is used as an index into a prebuilt array of pnum values.
73 static inline uint16_t *
74 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
/*
 * NOTE(review): this excerpt is incomplete -- the opening brace, the
 * enclosing union/struct declaration around u16[] (original lines
 * 75-82) and the declaration of 'v' are not visible here.
 */
77 uint16_t u16[FWDSTEP + 1];
/*
 * Lane-wise compare dst_port[i] vs dst_port[i+1]; unpacklo widens each
 * 16-bit compare result to 32 bits so _mm_movemask_ps yields a 4-bit
 * equality mask 'v', one bit per packet pair.
 */
83 dp1 = _mm_cmpeq_epi16(dp1, dp2);
84 dp1 = _mm_unpacklo_epi16(dp1, dp1);
85 v = _mm_movemask_ps((__m128)dp1);
87 /* update last port counter. */
/* gptbl (defined elsewhere) maps the mask to precomputed group data. */
88 lp[0] += gptbl[v].lpv;
90 /* if dest port value has changed. */
/*
 * NOTE(review): the 'if' condition guarding these three statements
 * (original line 91) is missing from this excerpt: start a fresh
 * group -- load the precomputed per-group counters, seed the sentinel
 * slot with 1, and point lp at the new group's counter.
 */
92 pnum->u64 = gptbl[v].pnum;
93 pnum->u16[FWDSTEP] = 1;
94 lp = pnum->u16 + gptbl[v].idx;
/* NOTE(review): the closing brace and 'return lp;' (original lines
 * 95-99) are not visible in this excerpt. */
101 * Process one packet:
102 * Update source and destination MAC addresses in the ethernet header.
103 * Perform RFC1812 checks and updates for IPV4 packets.
106 process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
/*
 * NOTE(review): this excerpt is incomplete -- the return-type line,
 * the opening brace and the declarations of te/ve are not visible.
 */
108 struct rte_ether_hdr *eth_hdr;
111 eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
/* Load the current first 16 bytes of the header, and the precomputed
 * MAC words for the egress port from val_eth (defined elsewhere). */
113 te = _mm_loadu_si128((__m128i *)eth_hdr);
114 ve = val_eth[dst_port[0]];
/*
 * RFC1812 checks/updates on the IPv4 header following the ethernet
 * header; may rewrite *dst_port. NOTE(review): the continuation line
 * carrying the pkt->packet_type argument is missing from this excerpt.
 */
116 rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
/* Overwrite only the MAC-address words selected by MASK_ETH, then
 * store the header back in place. */
119 te = _mm_blend_epi16(te, ve, MASK_ETH);
120 _mm_storeu_si128((__m128i *)eth_hdr, te);
124 * Send a burst of packets from pkts_burst to the ports in the dst_port array
126 static __rte_always_inline void
127 send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
128 uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
/*
 * NOTE(review): this excerpt is incomplete -- the opening brace and
 * several local declarations (j, k, m, lp, dlp, dp1, dp2, pn) are not
 * visible here (original lines 129-136).
 */
/* pnum[i]: number of consecutive packets sharing dst_port[i]. */
134 uint16_t pnum[MAX_PKT_BURST + 1];
137 * Finish packet processing and group consecutive
138 * packets with the same destination port.
/* k: largest multiple of FWDSTEP (4) not exceeding nb_rx. */
140 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
/*
 * NOTE(review): the 'if (k != 0)' style guard and the lp/pnum seeding
 * around this first vectorized step (original lines 141-146) are
 * missing from this excerpt.
 */
147 processx4_step3(pkts_burst, dst_port);
149 /* dp1: <d[0], d[1], d[2], d[3], ... > */
150 dp1 = _mm_loadu_si128((__m128i *)dst_port);
/* Main loop: process 4 packets per iteration and group their ports. */
152 for (j = FWDSTEP; j != k; j += FWDSTEP) {
153 processx4_step3(&pkts_burst[j], &dst_port[j]);
/* dp2 starts one element behind dp1 so adjacent ports line up: */
157 * <d[j-3], d[j-2], d[j-1], d[j], ... >
159 dp2 = _mm_loadu_si128((__m128i *)
160 &dst_port[j - FWDSTEP + 1]);
161 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
/* Carry the last compared element into the next iteration's dp1: */
165 * <d[j], d[j+1], d[j+2], d[j+3], ... >
167 dp1 = _mm_srli_si128(dp2, (FWDSTEP - 1) *
168 sizeof(dst_port[0]));
/*
 * Epilogue for the vectorized part: duplicate the last port value so
 * the final group comparison is self-consistent (0xf9 shuffle).
 */
172 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
174 dp2 = _mm_shufflelo_epi16(dp1, 0xf9);
175 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
178 * remove values added by the last repeated
/*
 * NOTE(review): the statements adjusting lp[0]/pnum for that repeated
 * last element (original lines 179-181) are missing from this excerpt.
 */
182 dlp = dst_port[j - 1];
/* NOTE(review): an 'else' branch for the k == 0 case presumably
 * starts here (original lines 183-188) -- confirm. */
184 /* set dlp and lp to the never used values. */
186 lp = pnum + MAX_PKT_BURST;
189 /* Process up to last 3 packets one by one. */
/*
 * NOTE(review): the 'case 3:'/'case 2:'/'case 1:' labels and the j++
 * increments between these fallthrough steps (original lines 191,
 * 194-196, 199-201, 204-205) are missing from this excerpt.
 */
190 switch (nb_rx % FWDSTEP) {
192 process_packet(pkts_burst[j], dst_port + j);
193 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
197 process_packet(pkts_burst[j], dst_port + j);
198 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
202 process_packet(pkts_burst[j], dst_port + j);
203 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
208 * Send packets out, through destination port.
209 * Consecutive packets with the same destination port
210 * are already grouped together.
211 * If destination port for the packet equals BAD_PORT,
212 * then free the packet without sending it out.
/*
 * NOTE(review): the loop body lines reading k = pnum[j] and
 * pn = dst_port[j] (original lines 215-221) are missing from this
 * excerpt; 'k' here is re-used as the current group length.
 */
214 for (j = 0; j < nb_rx; j += k) {
222 if (likely(pn != BAD_PORT))
223 send_packetsx4(qconf, pn, pkts_burst + j, k);
/* NOTE(review): 'else' (original line 224) missing -- BAD_PORT
 * packets are dropped by freeing the whole group. */
225 for (m = j; m != j + k; m++)
226 rte_pktmbuf_free(pkts_burst[m]);
231 #endif /* _L3FWD_SSE_H_ */