 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *   Copyright(c) 2017 IBM Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #ifndef _L3FWD_ALTIVEC_H_
37 #define _L3FWD_ALTIVEC_H_
40 #include "l3fwd_common.h"
43 * Update source and destination MAC addresses in the ethernet header.
44 * Perform RFC1812 checks and updates for IPV4 packets.
47 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
49 vector unsigned int te[FWDSTEP];
50 vector unsigned int ve[FWDSTEP];
51 vector unsigned int *p[FWDSTEP];
53 p[0] = rte_pktmbuf_mtod(pkt[0], vector unsigned int *);
54 p[1] = rte_pktmbuf_mtod(pkt[1], vector unsigned int *);
55 p[2] = rte_pktmbuf_mtod(pkt[2], vector unsigned int *);
56 p[3] = rte_pktmbuf_mtod(pkt[3], vector unsigned int *);
58 ve[0] = (vector unsigned int)val_eth[dst_port[0]];
61 ve[1] = (vector unsigned int)val_eth[dst_port[1]];
64 ve[2] = (vector unsigned int)val_eth[dst_port[2]];
67 ve[3] = (vector unsigned int)val_eth[dst_port[3]];
70 /* Update first 12 bytes, keep rest bytes intact. */
71 te[0] = (vector unsigned int)vec_sel(
72 (vector unsigned short)ve[0],
73 (vector unsigned short)te[0],
74 (vector unsigned short) {0, 0, 0, 0,
75 0, 0, 0xffff, 0xffff});
77 te[1] = (vector unsigned int)vec_sel(
78 (vector unsigned short)ve[1],
79 (vector unsigned short)te[1],
80 (vector unsigned short) {0, 0, 0, 0,
81 0, 0, 0xffff, 0xffff});
83 te[2] = (vector unsigned int)vec_sel(
84 (vector unsigned short)ve[2],
85 (vector unsigned short)te[2],
86 (vector unsigned short) {0, 0, 0, 0, 0,
89 te[3] = (vector unsigned int)vec_sel(
90 (vector unsigned short)ve[3],
91 (vector unsigned short)te[3],
92 (vector unsigned short) {0, 0, 0, 0,
93 0, 0, 0xffff, 0xffff});
100 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
101 &dst_port[0], pkt[0]->packet_type);
102 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
103 &dst_port[1], pkt[1]->packet_type);
104 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
105 &dst_port[2], pkt[2]->packet_type);
106 rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
107 &dst_port[3], pkt[3]->packet_type);
111 * Group consecutive packets with the same destination port in bursts of 4.
112 * Suppose we have array of destination ports:
113 * dst_port[] = {a, b, c, d,, e, ... }
114 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
115 * We doing 4 comparisons at once and the result is 4 bit mask.
116 * This mask is used as an index into prebuild array of pnum values.
118 static inline uint16_t *
119 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, vector unsigned short dp1,
120 vector unsigned short dp2)
123 uint16_t u16[FWDSTEP + 1];
125 } *pnum = (void *)pn;
129 v = vec_any_eq(dp1, dp2);
132 /* update last port counter. */
133 lp[0] += gptbl[v].lpv;
135 /* if dest port value has changed. */
137 pnum->u64 = gptbl[v].pnum;
138 pnum->u16[FWDSTEP] = 1;
139 lp = pnum->u16 + gptbl[v].idx;
146 * Process one packet:
147 * Update source and destination MAC addresses in the ethernet header.
148 * Perform RFC1812 checks and updates for IPV4 packets.
151 process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
153 struct ether_hdr *eth_hdr;
154 vector unsigned int te, ve;
156 eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
158 te = *(vector unsigned int *)eth_hdr;
159 ve = (vector unsigned int)val_eth[dst_port[0]];
161 rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
164 /* dynamically vec_sel te and ve for MASK_ETH (0x3f) */
165 te = (vector unsigned int)vec_sel(
166 (vector unsigned short)ve,
167 (vector unsigned short)te,
168 (vector unsigned short){0, 0, 0, 0,
169 0, 0, 0xffff, 0xffff});
171 *(vector unsigned int *)eth_hdr = te;
175 * Send packets burst from pkts_burst to the ports in dst_port array
177 static __rte_always_inline void
178 send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
179 uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
185 uint16_t pnum[MAX_PKT_BURST + 1];
188 * Finish packet processing and group consecutive
189 * packets with the same destination port.
191 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
193 vector unsigned short dp1, dp2;
198 processx4_step3(pkts_burst, dst_port);
200 /* dp1: <d[0], d[1], d[2], d[3], ... > */
201 dp1 = *(vector unsigned short *)dst_port;
203 for (j = FWDSTEP; j != k; j += FWDSTEP) {
204 processx4_step3(&pkts_burst[j], &dst_port[j]);
208 * <d[j-3], d[j-2], d[j-1], d[j], ... >
210 dp2 = *((vector unsigned short *)
211 &dst_port[j - FWDSTEP + 1]);
212 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
216 * <d[j], d[j+1], d[j+2], d[j+3], ... >
218 dp1 = vec_sro(dp2, (vector unsigned char) {
219 0, 0, 0, 0, 0, 0, 0, 0,
220 0, 0, 0, (FWDSTEP - 1) * sizeof(dst_port[0])});
224 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
226 dp2 = vec_perm(dp1, (vector unsigned short){},
227 (vector unsigned char){0xf9});
228 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
231 * remove values added by the last repeated
235 dlp = dst_port[j - 1];
237 /* set dlp and lp to the never used values. */
239 lp = pnum + MAX_PKT_BURST;
242 /* Process up to last 3 packets one by one. */
243 switch (nb_rx % FWDSTEP) {
245 process_packet(pkts_burst[j], dst_port + j);
246 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
250 process_packet(pkts_burst[j], dst_port + j);
251 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
255 process_packet(pkts_burst[j], dst_port + j);
256 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
261 * Send packets out, through destination port.
262 * Consecutive packets with the same destination port
263 * are already grouped together.
264 * If destination port for the packet equals BAD_PORT,
265 * then free the packet without sending it out.
267 for (j = 0; j < nb_rx; j += k) {
275 if (likely(pn != BAD_PORT))
276 send_packetsx4(qconf, pn, pkts_burst + j, k);
278 for (m = j; m != j + k; m++)
279 rte_pktmbuf_free(pkts_burst[m]);
284 #endif /* _L3FWD_ALTIVEC_H_ */