/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#ifndef __INCLUDE_IP4_LOOKUP_NEON_H__
#define __INCLUDE_IP4_LOOKUP_NEON_H__
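
/*
 * NEON-accelerated process() callback for the ip4_lookup graph node: packets
 * are handled four at a time, with one vectorized LPM lookup per quad and a
 * speculative enqueue toward the rewrite node. This header is intended to be
 * included from the ip4_lookup node source (which supplies the rte_graph,
 * rte_lpm, mbuf and NEON headers), not to be compiled standalone.
 */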

/* ARM64 NEON */
static uint16_t
ip4_lookup_node_process(struct rte_graph *graph, struct rte_node *node,
			void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	struct rte_ipv4_hdr *ipv4_hdr;
	void **to_next, **from;
	uint16_t last_spec = 0;
	rte_edge_t next_index;
	uint16_t n_left_from;
	struct rte_lpm *lpm;
	uint16_t held = 0;
	uint32_t drop_nh;
	rte_xmm_t result;
	rte_xmm_t priv01;
	rte_xmm_t priv23;
	int32x4_t dip;
	int rc, i;

	/* Speculative next */
	next_index = RTE_NODE_IP4_LOOKUP_NEXT_REWRITE;
	/* Drop node */
	drop_nh = ((uint32_t)RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP) << 16;

	/* Get socket specific LPM from ctx */
	lpm = *((struct rte_lpm **)node->ctx);

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;

#define OBJS_PER_CLINE (RTE_CACHE_LINE_SIZE / sizeof(void *))
	/* Prefetch the rest of the objs[] array, one cache line at a time */
	for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
		rte_prefetch0(&objs[i]);

	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[i], void *,
				sizeof(struct rte_ether_hdr)));

	dip = vdupq_n_s32(0);
	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
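	/*
	 * Speculation bookkeeping: to_next points into the stream reserved
	 * for the speculated edge, last_spec counts packets that matched the
	 * speculation since the last fixup, and held counts packets already
	 * committed to the stream. If every packet matches, no per-packet
	 * copy is needed at all (see the "home run" path below).
	 */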
	while (n_left_from >= 4) {
#if RTE_GRAPH_BURST_SIZE > 64
		/* Prefetch next-next mbufs */
		if (likely(n_left_from > 11)) {
			rte_prefetch0(pkts[8]);
			rte_prefetch0(pkts[9]);
			rte_prefetch0(pkts[10]);
			rte_prefetch0(pkts[11]);
		}
#endif
		/* Prefetch next mbuf data */
		if (likely(n_left_from > 7)) {
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[4], void *,
					sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[5], void *,
					sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[6], void *,
					sizeof(struct rte_ether_hdr)));
			rte_prefetch0(rte_pktmbuf_mtod_offset(pkts[7], void *,
					sizeof(struct rte_ether_hdr)));
		}

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];

		pkts += 4;
		n_left_from -= 4;
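
		/*
		 * Gather the four destination addresses into one NEON vector
		 * for a single vectorized LPM lookup, and stage each packet's
		 * (nh, ttl, cksum) tuple in priv01/priv23 so it can later be
		 * written to the mbuf private area as one 64-bit store.
		 */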
		/* Extract DIP of mbuf0 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf0, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 0);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv01.u16[1] = ipv4_hdr->time_to_live;
		priv01.u32[1] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf1 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf1, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 1);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv01.u16[5] = ipv4_hdr->time_to_live;
		priv01.u32[3] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf2 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf2, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 2);
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv23.u16[1] = ipv4_hdr->time_to_live;
		priv23.u32[1] = ipv4_hdr->hdr_checksum;

		/* Extract DIP of mbuf3 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf3, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		dip = vsetq_lane_s32(ipv4_hdr->dst_addr, dip, 3);

		/* Byte-swap all four big-endian addresses to host order */
		dip = vreinterpretq_s32_u8(
			vrev32q_u8(vreinterpretq_u8_s32(dip)));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		priv23.u16[5] = ipv4_hdr->time_to_live;
		priv23.u32[3] = ipv4_hdr->hdr_checksum;
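
		/*
		 * rte_lpm_lookupx4() fills result.u32[0..3]; entries with no
		 * matching route receive the default value drop_nh. Each
		 * 32-bit result packs the next edge in the upper 16 bits and
		 * the next-hop id in the lower 16 bits.
		 */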
		/* Perform LPM lookup to get NH and next node */
		rte_lpm_lookupx4(lpm, dip, result.u32, drop_nh);
		priv01.u16[0] = result.u16[0];
		priv01.u16[4] = result.u16[2];
		priv23.u16[0] = result.u16[4];
		priv23.u16[4] = result.u16[6];

		node_mbuf_priv1(mbuf0)->u = priv01.u64[0];
		node_mbuf_priv1(mbuf1)->u = priv01.u64[1];
		node_mbuf_priv1(mbuf2)->u = priv23.u64[0];
		node_mbuf_priv1(mbuf3)->u = priv23.u64[1];

		/* Enqueue four to next node */
		rte_edge_t fix_spec = ((next_index == result.u16[1]) &&
				       (result.u16[1] == result.u16[3]) &&
				       (result.u16[3] == result.u16[5]) &&
				       (result.u16[5] == result.u16[7]));
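		/*
		 * fix_spec is nonzero only when all four lookups agree with
		 * the speculated edge; any mismatch takes the fixup path,
		 * which enqueues each packet to its actual edge.
		 */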
		if (unlikely(fix_spec == 0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* Next0 */
			if (next_index == result.u16[1]) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[1],
						    from[0]);
			}

			/* Next1 */
			if (next_index == result.u16[3]) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[3],
						    from[1]);
			}

			/* Next2 */
			if (next_index == result.u16[5]) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[5],
						    from[2]);
			}

			/* Next3 */
			if (next_index == result.u16[7]) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, result.u16[7],
						    from[3]);
			}

			from += 4;
		} else {
			/* All four hit the speculated edge */
			last_spec += 4;
		}
	}

	/* Handle the remaining (< 4) packets one at a time */
	while (n_left_from > 0) {
		uint32_t next_hop;
		uint16_t next0;

		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		/* Extract DIP of mbuf0 */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf0, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		node_mbuf_priv1(mbuf0)->cksum = ipv4_hdr->hdr_checksum;
		node_mbuf_priv1(mbuf0)->ttl = ipv4_hdr->time_to_live;
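
		/*
		 * rte_lpm_lookup() returns 0 on a hit and fills next_hop with
		 * the user value stored for the route; on a miss, substitute
		 * drop_nh. As in the vector path, the value packs
		 * (next edge << 16) | next-hop id.
		 */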
		rc = rte_lpm_lookup(lpm, rte_be_to_cpu_32(ipv4_hdr->dst_addr),
				    &next_hop);
		next_hop = (rc == 0) ? next_hop : drop_nh;

		node_mbuf_priv1(mbuf0)->nh = (uint16_t)next_hop;
		next_hop = next_hop >> 16;
		next0 = (uint16_t)next_hop;

		if (unlikely(next_index ^ next0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node, next0, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!! */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

	return nb_objs;
}

#endif /* __INCLUDE_IP4_LOOKUP_NEON_H__ */