/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#ifndef __INCLUDE_IP4_LOOKUP_NEON_H__
#define __INCLUDE_IP4_LOOKUP_NEON_H__
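
/*
 * Note: this header is not self-contained. It is meant to be included from
 * the ip4_lookup node implementation, which is expected to provide
 * IP4_LOOKUP_NODE_LPM(), IP4_LOOKUP_NODE_PRIV1_OFF(), node_mbuf_priv1() and
 * OBJS_PER_CLINE before this point.
 */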

/* ARM64 NEON */
static uint16_t
ip4_lookup_node_process_vec(struct rte_graph *graph, struct rte_node *node,
			    void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	struct rte_lpm *lpm = IP4_LOOKUP_NODE_LPM(node->ctx);
	const int dyn = IP4_LOOKUP_NODE_PRIV1_OFF(node->ctx);
	struct rte_ipv4_hdr *ipv4_hdr;
	void **to_next, **from;
	uint16_t last_spec = 0;
	rte_edge_t next_index;
	uint16_t n_left_from;
	uint16_t held = 0;
	uint32_t drop_nh;
	rte_xmm_t result;
	rte_xmm_t priv01;
	rte_xmm_t priv23;
	int32x4_t dip;
	int rc, i;

	/* Speculative next */
	next_index = RTE_NODE_IP4_LOOKUP_NEXT_REWRITE;
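	/*
	 * A lookup result packs the rewrite index in its lower 16 bits and
	 * the next edge id in its upper 16 bits (see the ">> 16" in the
	 * scalar path below), so the drop "next hop" is just the pkt_drop
	 * edge shifted into the upper half.
	 */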
	drop_nh = ((uint32_t)RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP) << 16;

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;
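
	/* Pull the remaining cache lines of the object pointer array in */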
	for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
		rte_prefetch0(&objs[i]);
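
	/* Prefetch the headers of the first four packets */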
	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[i], void *,
						sizeof(struct rte_ether_hdr)));

	dip = vdupq_n_s32(0);
	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
	while (n_left_from >= 4) {
#if RTE_GRAPH_BURST_SIZE > 64
		/* Prefetch next-next mbufs */
		if (likely(n_left_from > 11)) {
			rte_prefetch0(pkts[8]);
			rte_prefetch0(pkts[9]);
			rte_prefetch0(pkts[10]);
			rte_prefetch0(pkts[11]);
		}
#endif
		/* Prefetch next mbuf data */
		if (likely(n_left_from > 7)) {
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[4], void *,
						sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[5], void *,
						sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[6], void *,
						sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[7], void *,
						sizeof(struct rte_ether_hdr)));
		}

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];

		pkts += 4;
		n_left_from -= 4;
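
		/*
		 * priv01/priv23 stage the per-mbuf private data for two mbufs
		 * each: nh in u16[0]/u16[4], ttl in u16[1]/u16[5] and the
		 * header checksum in u32[1]/u32[3], so each mbuf's fields can
		 * be written back below as a single u64 store.
		 */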
		/* Extract DIP of mbuf0 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf0, struct rte_ipv4_hdr *,
						sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 0);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv01.u16[1] = ipv4_hdr->time_to_live;
		priv01.u32[1] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf1 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv4_hdr *,
						sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 1);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv01.u16[5] = ipv4_hdr->time_to_live;
		priv01.u32[3] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf2 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv4_hdr *,
						sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 2);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv23.u16[1] = ipv4_hdr->time_to_live;
		priv23.u32[1] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf3 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv4_hdr *,
						sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 3);
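
		/*
		 * dst_addr lanes are big-endian as read from the packet;
		 * byte-swap each 32-bit lane to host order before the LPM
		 * lookup (the scalar path uses rte_be_to_cpu_32() instead).
		 */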
		dip = vreinterpretq_s32_u8(
			vrev32q_u8(vreinterpretq_u8_s32(dip)));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv23.u16[5] = ipv4_hdr->time_to_live;
		priv23.u32[3] = ipv4_hdr->hdr_checksum;

		/* Perform LPM lookup to get NH and next node */
		rte_lpm_lookupx4(lpm, dip, result.u32, drop_nh);
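		/*
		 * Each 32-bit result is (edge << 16) | nh: the even u16
		 * lanes hold the next-hop index, the odd lanes the edge id.
		 */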
		priv01.u16[0] = result.u16[0];
		priv01.u16[4] = result.u16[2];
		priv23.u16[0] = result.u16[4];
		priv23.u16[4] = result.u16[6];

		node_mbuf_priv1(mbuf0, dyn)->u = priv01.u64[0];
		node_mbuf_priv1(mbuf1, dyn)->u = priv01.u64[1];
		node_mbuf_priv1(mbuf2, dyn)->u = priv23.u64[0];
		node_mbuf_priv1(mbuf3, dyn)->u = priv23.u64[1];

		/* Enqueue four to next node */
		rte_edge_t fix_spec = ((next_index == result.u16[1]) &&
				       (result.u16[1] == result.u16[3]) &&
				       (result.u16[3] == result.u16[5]) &&
				       (result.u16[5] == result.u16[7]));
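
		/*
		 * fix_spec is non-zero only when all four packets take the
		 * speculated edge; the speculation needs fixing when it is 0.
		 */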
		if (unlikely(fix_spec == 0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* Next0 */
			if (next_index == result.u16[1]) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[1],
						    from[0]);
			}

			/* Next1 */
			if (next_index == result.u16[3]) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[3],
						    from[1]);
			}

			/* Next2 */
			if (next_index == result.u16[5]) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[5],
						    from[2]);
			}

			/* Next3 */
			if (next_index == result.u16[7]) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[7],
						    from[3]);
			}

			from += 4;
		} else {
			last_spec += 4;
		}
	}
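
	/* Scalar tail: process the remaining (nb_objs % 4) packets one by one */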
	while (n_left_from > 0) {
		uint32_t next_hop;
		uint16_t next0;

		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		/* Extract DIP of mbuf0 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf0, struct rte_ipv4_hdr *,
						sizeof(struct rte_ether_hdr));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		node_mbuf_priv1(mbuf0, dyn)->cksum = ipv4_hdr->hdr_checksum;
		node_mbuf_priv1(mbuf0, dyn)->ttl = ipv4_hdr->time_to_live;

		rc = rte_lpm_lookup(lpm, rte_be_to_cpu_32(ipv4_hdr->dst_addr),
				    &next_hop);
		next_hop = (rc == 0) ? next_hop : drop_nh;

		node_mbuf_priv1(mbuf0, dyn)->nh = (uint16_t)next_hop;
		next_hop = next_hop >> 16;
		next0 = (uint16_t)next_hop;
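
		/* The XOR is non-zero only when the looked-up edge differs
		 * from the speculated one.
		 */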
		if (unlikely(next_index ^ next0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node, next0, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!! */
	if (likely(last_spec == nb_objs)) {
		/* Every packet took the speculated edge; move the whole
		 * stream to the next node without any copy.
		 */
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}
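
	/* Partial speculation: copy out the trailing speculated run, then
	 * commit everything held for the speculated next node.
	 */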
	held += last_spec;
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

	return nb_objs;
}

#endif /* __INCLUDE_IP4_LOOKUP_NEON_H__ */