/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#ifndef __INCLUDE_IP4_LOOKUP_NEON_H__
#define __INCLUDE_IP4_LOOKUP_NEON_H__
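
/*
 * ARM64 NEON version of the ip4_lookup node process callback.
 *
 * The main loop speculates that every packet takes the rewrite edge:
 * packets are buffered into the speculated next-node stream and are only
 * re-sorted packet by packet when a lookup disagrees with the
 * speculation. Each 32-bit LPM result carries the next edge in its upper
 * 16 bits and the next-hop id in its lower 16 bits.
 *
 * This header is intended to be included from the node implementation
 * (ip4_lookup.c), which supplies the DPDK headers and the
 * node_mbuf_priv1() helper used below.
 */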
static uint16_t
ip4_lookup_node_process(struct rte_graph *graph, struct rte_node *node,
			void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	struct rte_ipv4_hdr *ipv4_hdr;
	void **to_next, **from;
	uint16_t last_spec = 0;
	rte_edge_t next_index;
	uint16_t n_left_from;
	struct rte_lpm *lpm;
	uint16_t held = 0;
	uint32_t drop_nh;
	rte_xmm_t result;
	rte_xmm_t priv01;
	rte_xmm_t priv23;
	int32x4_t dip;
	int rc, i;

	/* Speculative next */
	next_index = RTE_NODE_IP4_LOOKUP_NEXT_REWRITE;
	/* Drop node */
	drop_nh = ((uint32_t)RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP) << 16;

	/* Get socket specific LPM from ctx */
	lpm = *((struct rte_lpm **)node->ctx);
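
	/*
	 * node->ctx is expected to hold the per-socket LPM pointer set up
	 * by the node's init path. A control plane would populate routes
	 * via the public API, e.g. (sketch):
	 *
	 *	rte_node_ip4_route_add(RTE_IPV4(10, 0, 0, 0), 24, nh_id,
	 *			       RTE_NODE_IP4_LOOKUP_NEXT_REWRITE);
	 */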

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;

	for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
		rte_prefetch0(&objs[i]);
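
	/*
	 * The loop above warms the object pointer array one cache line at
	 * a time; the loop below warms the IPv4 headers of the first four
	 * packets so the first iteration does not stall on memory.
	 */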
	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[i], void *,
				sizeof(struct rte_ether_hdr)));

	/* Zero-initialize the DIP vector before the per-lane inserts */
	dip = vdupq_n_s32(0);

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
	while (n_left_from >= 4) {
#if RTE_GRAPH_BURST_SIZE > 64
		/* Prefetch next-next mbufs */
		if (likely(n_left_from > 11)) {
			rte_prefetch0(pkts[8]);
			rte_prefetch0(pkts[9]);
			rte_prefetch0(pkts[10]);
			rte_prefetch0(pkts[11]);
		}
#endif
		/* Prefetch next mbuf data */
		if (likely(n_left_from > 7)) {
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[4], void *,
					sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[5], void *,
					sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[6], void *,
					sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[7], void *,
					sizeof(struct rte_ether_hdr)));
		}

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];

		pkts += 4;
		n_left_from -= 4;
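
		/*
		 * priv01/priv23 stage per-packet metadata as two 64-bit
		 * halves each: u16[0] = nh, u16[1] = ttl, u32[1] = cksum
		 * for the even packet, and the mirrored u16[4]/u16[5]/
		 * u32[3] slots for the odd one, matching the
		 * node_mbuf_priv1 layout stored after the lookup.
		 */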

		/* Extract DIP of mbuf0 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf0, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 0);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv01.u16[1] = ipv4_hdr->time_to_live;
		priv01.u32[1] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf1 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 1);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv01.u16[5] = ipv4_hdr->time_to_live;
		priv01.u32[3] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf2 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 2);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv23.u16[1] = ipv4_hdr->time_to_live;
		priv23.u32[1] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf3 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 3);
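
		/*
		 * dst_addr was inserted in network byte order; vrev32q_u8
		 * byte-swaps each 32-bit lane so rte_lpm_lookupx4 sees
		 * host-order addresses (the vector counterpart of the
		 * rte_be_to_cpu_32() call in the scalar tail below).
		 */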
		dip = vreinterpretq_s32_u8(
			vrev32q_u8(vreinterpretq_u8_s32(dip)));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv23.u16[5] = ipv4_hdr->time_to_live;
		priv23.u32[3] = ipv4_hdr->hdr_checksum;

		/* Perform LPM lookup to get NH and next node */
		rte_lpm_lookupx4(lpm, dip, result.u32, drop_nh);
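		/*
		 * Each 32-bit result word holds the next-hop id in its low
		 * half and the next edge in its high half; the even u16
		 * slots picked below are the per-packet next-hop ids.
		 */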
		priv01.u16[0] = result.u16[0];
		priv01.u16[4] = result.u16[2];
		priv23.u16[0] = result.u16[4];
		priv23.u16[4] = result.u16[6];
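
		/*
		 * One 64-bit store per mbuf publishes nh, ttl and cksum to
		 * the mbuf private area for the downstream node (the
		 * speculated edge here is the rewrite node).
		 */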
		node_mbuf_priv1(mbuf0)->u = priv01.u64[0];
		node_mbuf_priv1(mbuf1)->u = priv01.u64[1];
		node_mbuf_priv1(mbuf2)->u = priv23.u64[0];
		node_mbuf_priv1(mbuf3)->u = priv23.u64[1];

		/* Enqueue four to next node */
		rte_edge_t fix_spec = ((next_index == result.u16[1]) &&
				       (result.u16[1] == result.u16[3]) &&
				       (result.u16[3] == result.u16[5]) &&
				       (result.u16[5] == result.u16[7]));
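
		/*
		 * fix_spec is 1 only when all four next edges (the odd u16
		 * slots of the result) equal the speculated edge; any
		 * mismatch falls into the slow path below, which re-sorts
		 * the quad packet by packet.
		 */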
		if (unlikely(fix_spec == 0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* Next0 */
			if (next_index == result.u16[1]) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[1],
						    from[0]);
			}

			/* Next1 */
			if (next_index == result.u16[3]) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[3],
						    from[1]);
			}

			/* Next2 */
			if (next_index == result.u16[5]) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[5],
						    from[2]);
			}

			/* Next3 */
			if (next_index == result.u16[7]) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[7],
						    from[3]);
			}

			from += 4;
		} else {
			/* Speculation held for all four packets */
			last_spec += 4;
		}
	}

	/* Scalar tail: process the remaining (nb_objs % 4) packets */
	while (n_left_from > 0) {
		uint32_t next_hop;
		uint16_t next0;

		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		/* Extract DIP of mbuf0 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf0, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		node_mbuf_priv1(mbuf0)->cksum = ipv4_hdr->hdr_checksum;
		node_mbuf_priv1(mbuf0)->ttl = ipv4_hdr->time_to_live;

		rc = rte_lpm_lookup(lpm, rte_be_to_cpu_32(ipv4_hdr->dst_addr),
				    &next_hop);
		next_hop = (rc == 0) ? next_hop : drop_nh;
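
		/* Low 16 bits: next-hop id; high 16 bits: next edge */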
		node_mbuf_priv1(mbuf0)->nh = (uint16_t)next_hop;
		next_hop = next_hop >> 16;
		next0 = (uint16_t)next_hop;

		if (unlikely(next_index ^ next0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node, next0, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}
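
	/*
	 * "Home run": every packet matched the speculated edge, so the
	 * whole burst can be handed to the next node with a single stream
	 * move instead of copying the pointers one by one.
	 */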
	/* !!! Home run !!! */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	/* Copy things successfully speculated till now */
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

	return nb_objs;
}

#endif /* __INCLUDE_IP4_LOOKUP_NEON_H__ */