/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation.
 * Copyright(c) 2017 IBM Corporation.
 */
7 #ifndef _L3FWD_ALTIVEC_H_
8 #define _L3FWD_ALTIVEC_H_
11 #include "l3fwd_common.h"
14 * Update source and destination MAC addresses in the ethernet header.
15 * Perform RFC1812 checks and updates for IPV4 packets.
18 processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
20 __vector unsigned int te[FWDSTEP];
21 __vector unsigned int ve[FWDSTEP];
22 __vector unsigned int *p[FWDSTEP];
24 p[0] = rte_pktmbuf_mtod(pkt[0], __vector unsigned int *);
25 p[1] = rte_pktmbuf_mtod(pkt[1], __vector unsigned int *);
26 p[2] = rte_pktmbuf_mtod(pkt[2], __vector unsigned int *);
27 p[3] = rte_pktmbuf_mtod(pkt[3], __vector unsigned int *);
29 ve[0] = (__vector unsigned int)val_eth[dst_port[0]];
32 ve[1] = (__vector unsigned int)val_eth[dst_port[1]];
35 ve[2] = (__vector unsigned int)val_eth[dst_port[2]];
38 ve[3] = (__vector unsigned int)val_eth[dst_port[3]];
41 /* Update first 12 bytes, keep rest bytes intact. */
42 te[0] = (__vector unsigned int)vec_sel(
43 (__vector unsigned short)ve[0],
44 (__vector unsigned short)te[0],
45 (__vector unsigned short) {0, 0, 0, 0,
46 0, 0, 0xffff, 0xffff});
48 te[1] = (__vector unsigned int)vec_sel(
49 (__vector unsigned short)ve[1],
50 (__vector unsigned short)te[1],
51 (__vector unsigned short) {0, 0, 0, 0,
52 0, 0, 0xffff, 0xffff});
54 te[2] = (__vector unsigned int)vec_sel(
55 (__vector unsigned short)ve[2],
56 (__vector unsigned short)te[2],
57 (__vector unsigned short) {0, 0, 0, 0, 0,
60 te[3] = (__vector unsigned int)vec_sel(
61 (__vector unsigned short)ve[3],
62 (__vector unsigned short)te[3],
63 (__vector unsigned short) {0, 0, 0, 0,
64 0, 0, 0xffff, 0xffff});
71 rfc1812_process((struct rte_ipv4_hdr *)
72 ((struct rte_ether_hdr *)p[0] + 1),
73 &dst_port[0], pkt[0]->packet_type);
74 rfc1812_process((struct rte_ipv4_hdr *)
75 ((struct rte_ether_hdr *)p[1] + 1),
76 &dst_port[1], pkt[1]->packet_type);
77 rfc1812_process((struct rte_ipv4_hdr *)
78 ((struct rte_ether_hdr *)p[2] + 1),
79 &dst_port[2], pkt[2]->packet_type);
80 rfc1812_process((struct rte_ipv4_hdr *)
81 ((struct rte_ether_hdr *)p[3] + 1),
82 &dst_port[3], pkt[3]->packet_type);
86 * Group consecutive packets with the same destination port in bursts of 4.
87 * Suppose we have array of destination ports:
88 * dst_port[] = {a, b, c, d,, e, ... }
89 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
90 * We doing 4 comparisons at once and the result is 4 bit mask.
91 * This mask is used as an index into prebuild array of pnum values.
93 static inline uint16_t *
94 port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp,
95 __vector unsigned short dp1,
96 __vector unsigned short dp2)
99 uint16_t u16[FWDSTEP + 1];
101 } *pnum = (void *)pn;
105 v = vec_any_eq(dp1, dp2);
108 /* update last port counter. */
109 lp[0] += gptbl[v].lpv;
111 /* if dest port value has changed. */
113 pnum->u64 = gptbl[v].pnum;
114 pnum->u16[FWDSTEP] = 1;
115 lp = pnum->u16 + gptbl[v].idx;
122 * Process one packet:
123 * Update source and destination MAC addresses in the ethernet header.
124 * Perform RFC1812 checks and updates for IPV4 packets.
127 process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
129 struct rte_ether_hdr *eth_hdr;
130 __vector unsigned int te, ve;
132 eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
134 te = *(__vector unsigned int *)eth_hdr;
135 ve = (__vector unsigned int)val_eth[dst_port[0]];
137 rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
140 /* dynamically vec_sel te and ve for MASK_ETH (0x3f) */
141 te = (__vector unsigned int)vec_sel(
142 (__vector unsigned short)ve,
143 (__vector unsigned short)te,
144 (__vector unsigned short){0, 0, 0, 0,
145 0, 0, 0xffff, 0xffff});
147 *(__vector unsigned int *)eth_hdr = te;
151 * Send packets burst from pkts_burst to the ports in dst_port array
153 static __rte_always_inline void
154 send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
155 uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
161 uint16_t pnum[MAX_PKT_BURST + 1];
164 * Finish packet processing and group consecutive
165 * packets with the same destination port.
167 k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
169 __vector unsigned short dp1, dp2;
174 processx4_step3(pkts_burst, dst_port);
176 /* dp1: <d[0], d[1], d[2], d[3], ... > */
177 dp1 = *(__vector unsigned short *)dst_port;
179 for (j = FWDSTEP; j != k; j += FWDSTEP) {
180 processx4_step3(&pkts_burst[j], &dst_port[j]);
184 * <d[j-3], d[j-2], d[j-1], d[j], ... >
186 dp2 = *((__vector unsigned short *)
187 &dst_port[j - FWDSTEP + 1]);
188 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
192 * <d[j], d[j+1], d[j+2], d[j+3], ... >
194 dp1 = vec_sro(dp2, (__vector unsigned char) {
195 0, 0, 0, 0, 0, 0, 0, 0,
196 0, 0, 0, (FWDSTEP - 1) * sizeof(dst_port[0])});
200 * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
202 dp2 = vec_perm(dp1, (__vector unsigned short){},
203 (__vector unsigned char){0xf9});
204 lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
207 * remove values added by the last repeated
211 dlp = dst_port[j - 1];
213 /* set dlp and lp to the never used values. */
215 lp = pnum + MAX_PKT_BURST;
218 /* Process up to last 3 packets one by one. */
219 switch (nb_rx % FWDSTEP) {
221 process_packet(pkts_burst[j], dst_port + j);
222 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
226 process_packet(pkts_burst[j], dst_port + j);
227 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
231 process_packet(pkts_burst[j], dst_port + j);
232 GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
237 * Send packets out, through destination port.
238 * Consecutive packets with the same destination port
239 * are already grouped together.
240 * If destination port for the packet equals BAD_PORT,
241 * then free the packet without sending it out.
243 for (j = 0; j < nb_rx; j += k) {
251 if (likely(pn != BAD_PORT))
252 send_packetsx4(qconf, pn, pkts_burst + j, k);
254 for (m = j; m != j + k; m++)
255 rte_pktmbuf_free(pkts_burst[m]);
260 #endif /* _L3FWD_ALTIVEC_H_ */