/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

#include "rte_node_ip4_api.h"

#include "ip4_rewrite_priv.h"
#include "node_private.h"

static struct ip4_rewrite_node_main *ip4_rewrite_nm;

static uint16_t
ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
			 void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	struct ip4_rewrite_nh_header *nh = ip4_rewrite_nm->nh;
	uint16_t next0, next1, next2, next3, next_index;
	struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
	uint16_t n_left_from, held = 0, last_spec = 0;
	void *d0, *d1, *d2, *d3;
	void **to_next, **from;
	rte_xmm_t priv01;
	rte_xmm_t priv23;
	int i;

	/* Speculative next as last next */
	next_index = *(uint16_t *)node->ctx;
	rte_prefetch0(nh);
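
	/*
	 * The edge used on the previous invocation is speculated to be
	 * used again: packets are accumulated into that edge's stream up
	 * front and only mispredicted packets are enqueued one by one.
	 */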

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;

	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(pkts[i]);

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
	/* Update Ethernet header of pkts */
	while (n_left_from >= 4) {
		if (likely(n_left_from > 7)) {
			/* Prefetch only next-mbuf struct and priv area.
			 * Data need not be prefetched as we only write.
			 */
			rte_prefetch0(pkts[4]);
			rte_prefetch0(pkts[5]);
			rte_prefetch0(pkts[6]);
			rte_prefetch0(pkts[7]);
		}

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];

		pkts += 4;
		n_left_from -= 4;

		priv01.u64[0] = node_mbuf_priv1(mbuf0)->u;
		priv01.u64[1] = node_mbuf_priv1(mbuf1)->u;
		priv23.u64[0] = node_mbuf_priv1(mbuf2)->u;
		priv23.u64[1] = node_mbuf_priv1(mbuf3)->u;

		/* Increment checksum by one. */
		priv01.u32[1] += rte_cpu_to_be_16(0x0100);
		priv01.u32[3] += rte_cpu_to_be_16(0x0100);
		priv23.u32[1] += rte_cpu_to_be_16(0x0100);
		priv23.u32[3] += rte_cpu_to_be_16(0x0100);
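
		/*
		 * Incremental checksum update (RFC 1624): decrementing the
		 * TTL changes one header word by a known constant, so the
		 * checksum stored in the mbuf private area is adjusted by
		 * the same constant rather than recomputed. The carry, if
		 * any, is folded back when u16[2] and u16[3] are summed
		 * below.
		 */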

		/* Update TTL and checksum, rewrite Ethernet header of mbuf0 */
		d0 = rte_pktmbuf_mtod(mbuf0, void *);
		rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
			   nh[priv01.u16[0]].rewrite_len);

		next0 = nh[priv01.u16[0]].tx_node;
		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
					      sizeof(struct rte_ether_hdr));
		ip0->time_to_live = priv01.u16[1] - 1;
		ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];

		/* Update TTL and checksum, rewrite Ethernet header of mbuf1 */
		d1 = rte_pktmbuf_mtod(mbuf1, void *);
		rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
			   nh[priv01.u16[4]].rewrite_len);

		next1 = nh[priv01.u16[4]].tx_node;
		ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
					      sizeof(struct rte_ether_hdr));
		ip1->time_to_live = priv01.u16[5] - 1;
		ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];

		/* Update TTL and checksum, rewrite Ethernet header of mbuf2 */
		d2 = rte_pktmbuf_mtod(mbuf2, void *);
		rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
			   nh[priv23.u16[0]].rewrite_len);

		next2 = nh[priv23.u16[0]].tx_node;
		ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
					      sizeof(struct rte_ether_hdr));
		ip2->time_to_live = priv23.u16[1] - 1;
		ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];

		/* Update TTL and checksum, rewrite Ethernet header of mbuf3 */
		d3 = rte_pktmbuf_mtod(mbuf3, void *);
		rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
			   nh[priv23.u16[4]].rewrite_len);

		next3 = nh[priv23.u16[4]].tx_node;
		ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
					      sizeof(struct rte_ether_hdr));
		ip3->time_to_live = priv23.u16[5] - 1;
		ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];

		/* Enqueue four to next node */
		rte_edge_t fix_spec =
			((next_index == next0) && (next0 == next1) &&
			 (next1 == next2) && (next2 == next3));
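
		/* fix_spec is non-zero only when all four packets took the
		 * speculated edge; otherwise they are redistributed below.
		 */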
		if (unlikely(fix_spec == 0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* next0 */
			if (next_index == next0) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next0,
						    from[0]);
			}

			/* next1 */
			if (next_index == next1) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next1,
						    from[1]);
			}

			/* next2 */
			if (next_index == next2) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next2,
						    from[2]);
			}

			/* next3 */
			if (next_index == next3) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next3,
						    from[3]);
			}

			from += 4;

			/* Change speculation if last two are same: bursts
			 * tend to share a next hop, so follow the edge the
			 * tail of the burst agrees on.
			 */
			if ((next_index != next3) && (next2 == next3)) {
				/* Put the current speculated node */
				rte_node_next_stream_put(graph, node,
							 next_index, held);
				held = 0;

				/* Get next speculated stream */
				next_index = next3;
				to_next = rte_node_next_stream_get(
					graph, node, next_index, nb_objs);
			}
		} else {
			last_spec += 4;
		}
	}

	while (n_left_from > 0) {
		uint16_t chksum;

		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		d0 = rte_pktmbuf_mtod(mbuf0, void *);
		rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0)->nh].rewrite_data,
			   nh[node_mbuf_priv1(mbuf0)->nh].rewrite_len);

		next0 = nh[node_mbuf_priv1(mbuf0)->nh].tx_node;
		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
					      sizeof(struct rte_ether_hdr));
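		/* Scalar tail: adjust the checksum for the TTL decrement
		 * and fold the one's complement carry back in.
		 */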
		chksum = node_mbuf_priv1(mbuf0)->cksum +
			 rte_cpu_to_be_16(0x0100);
		chksum += chksum >= 0xffff;
		ip0->hdr_checksum = chksum;
		ip0->time_to_live = node_mbuf_priv1(mbuf0)->ttl - 1;

		if (unlikely(next_index ^ next0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node, next0, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}
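
	/* If every packet followed the speculated edge, hand the whole
	 * stream to the next node in one move instead of copying it.
	 */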
	/* !!! Home run !!! */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);
	/* Save the last next used */
	*(uint16_t *)node->ctx = next_index;

	return nb_objs;
}

static int
ip4_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	RTE_SET_USED(graph);
	RTE_SET_USED(node);
	node_dbg("ip4_rewrite", "Initialized ip4_rewrite node");

	return 0;
}

int
ip4_rewrite_set_next(uint16_t port_id, uint16_t next_index)
{
	if (ip4_rewrite_nm == NULL) {
		ip4_rewrite_nm = rte_zmalloc(
			"ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
			RTE_CACHE_LINE_SIZE);
		if (ip4_rewrite_nm == NULL)
			return -ENOMEM;
	}
	ip4_rewrite_nm->next_index[port_id] = next_index;

	return 0;
}

int
rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
			 uint8_t rewrite_len, uint16_t dst_port)
{
	struct ip4_rewrite_nh_header *nh;

	if (next_hop >= RTE_GRAPH_IP4_REWRITE_MAX_NH)
		return -EINVAL;

	if (rewrite_len > RTE_GRAPH_IP4_REWRITE_MAX_LEN)
		return -EINVAL;

	if (ip4_rewrite_nm == NULL) {
		ip4_rewrite_nm = rte_zmalloc(
			"ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
			RTE_CACHE_LINE_SIZE);
		if (ip4_rewrite_nm == NULL)
			return -ENOMEM;
	}

	/* Check that the dst port has been registered as an edge */
	if (!ip4_rewrite_nm->next_index[dst_port])
		return -EINVAL;

	/* Update next hop */
	nh = &ip4_rewrite_nm->nh[next_hop];

	memcpy(nh->rewrite_data, rewrite_data, rewrite_len);
	nh->tx_node = ip4_rewrite_nm->next_index[dst_port];
	nh->rewrite_len = rewrite_len;
	nh->enabled = true;

	return 0;
}
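
/*
 * Usage sketch (illustrative only; nh_id, port_id and port_tx_edge are
 * assumed to come from the application, which must register the port's
 * Tx node as an edge via ip4_rewrite_set_next() first):
 *
 *	uint8_t rewrite[RTE_GRAPH_IP4_REWRITE_MAX_LEN];
 *
 *	... fill rewrite[] with the outgoing Ethernet header
 *	    (dst MAC, src MAC, EtherType 0x0800) ...
 *
 *	ip4_rewrite_set_next(port_id, port_tx_edge);
 *	rte_node_ip4_rewrite_add(nh_id, rewrite,
 *				 sizeof(struct rte_ether_hdr), port_id);
 */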

static struct rte_node_register ip4_rewrite_node = {
	.process = ip4_rewrite_node_process,
	.name = "ip4_rewrite",
	/* Default edge i.e '0' is pkt drop */
	.nb_edges = 1,
	.next_nodes = {
		[0] = "pkt_drop",
	},
	.init = ip4_rewrite_node_init,
};

struct rte_node_register *
ip4_rewrite_node_get(void)
{
	return &ip4_rewrite_node;
}

RTE_NODE_REGISTER(ip4_rewrite_node);