1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2020 Marvell International Ltd.
5 #include <rte_ethdev.h>
8 #include <rte_graph_worker.h>
10 #include <rte_malloc.h>
13 #include "rte_node_ip4_api.h"
15 #include "ip4_rewrite_priv.h"
16 #include "node_private.h"
/* Per-node context stored inline in rte_node->ctx. Caches the mbuf
 * priv1 dynfield offset and the last speculated next edge so the
 * process callback avoids global lookups per burst.
 * NOTE(review): member declarations (orig. lines 20, 22-24) are not
 * visible in this excerpt.
 */
18 struct ip4_rewrite_node_ctx {
19 /* Dynamic offset to mbuf priv1 */
21 /* Cached next index */
/* Process-wide next-hop table (rewrite data + per-port edge map);
 * lazily rte_zmalloc'd by ip4_rewrite_set_next() or
 * rte_node_ip4_rewrite_add(), whichever runs first. */
25 static struct ip4_rewrite_node_main *ip4_rewrite_nm;
/* Accessor for the cached "last used" next edge in node->ctx;
 * used to speculate the whole next burst onto one stream. */
27 #define IP4_REWRITE_NODE_LAST_NEXT(ctx) \
28 (((struct ip4_rewrite_node_ctx *)ctx)->next_index)
/* Accessor for the mbuf priv1 dynfield offset cached in node->ctx
 * at init time (see ip4_rewrite_node_init). */
30 #define IP4_REWRITE_NODE_PRIV1_OFF(ctx) \
31 (((struct ip4_rewrite_node_ctx *)ctx)->mbuf_priv1_off)
/*
 * Process callback of the ip4_rewrite node.
 *
 * For each mbuf: copy the next hop's precomputed rewrite data (the new
 * L2 header) over the packet head, decrement the IPv4 TTL, apply the
 * incrementally-updated header checksum cached in the mbuf priv1
 * dynfield, and enqueue the packet on the edge of that next hop's tx
 * node. All packets of a burst are speculated onto the edge used last
 * time; a misprediction flushes what was speculated so far and enqueues
 * the diverging packet(s) individually.
 *
 * NOTE(review): this excerpt elides several original lines (mbuf0..3
 * assignments at orig. 72-80, loop tails, closing braces); the comments
 * below cover only the visible statements.
 */
34 ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
35 void **objs, uint16_t nb_objs)
37 struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
38 struct ip4_rewrite_nh_header *nh = ip4_rewrite_nm->nh;
/* Dynfield offset where per-mbuf priv1 (nh index, ttl, cksum) lives */
39 const int dyn = IP4_REWRITE_NODE_PRIV1_OFF(node->ctx);
40 uint16_t next0, next1, next2, next3, next_index;
41 struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
42 uint16_t n_left_from, held = 0, last_spec = 0;
43 void *d0, *d1, *d2, *d3;
44 void **to_next, **from;
49 /* Speculative next as last next */
50 next_index = IP4_REWRITE_NODE_LAST_NEXT(node->ctx);
53 pkts = (struct rte_mbuf **)objs;
55 n_left_from = nb_objs;
/* Warm up the first (up to) four mbuf headers before the main loop */
57 for (i = 0; i < 4 && i < n_left_from; i++)
58 rte_prefetch0(pkts[i]);
60 /* Get stream for the speculated next node */
61 to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
62 /* Update Ethernet header of pkts */
63 while (n_left_from >= 4) {
64 if (likely(n_left_from > 7)) {
65 /* Prefetch only next-mbuf struct and priv area.
66 * Data need not be prefetched as we only write.
68 rte_prefetch0(pkts[4]);
69 rte_prefetch0(pkts[5]);
70 rte_prefetch0(pkts[6]);
71 rte_prefetch0(pkts[7]);
/* Gather the four packets' priv1 words (nh, ttl, cksum) in one shot.
 * NOTE(review): priv01/priv23 union declarations are elided here —
 * presumably u64/u32/u16 views over two priv1 entries; confirm. */
81 priv01.u64[0] = node_mbuf_priv1(mbuf0, dyn)->u;
82 priv01.u64[1] = node_mbuf_priv1(mbuf1, dyn)->u;
83 priv23.u64[0] = node_mbuf_priv1(mbuf2, dyn)->u;
84 priv23.u64[1] = node_mbuf_priv1(mbuf3, dyn)->u;
86 /* Increment checksum by one. */
/* (Decrementing TTL by 1 raises the one's-complement header checksum
 * by 0x0100 in network byte order — RFC 1141 incremental update.) */
87 priv01.u32[1] += rte_cpu_to_be_16(0x0100);
88 priv01.u32[3] += rte_cpu_to_be_16(0x0100);
89 priv23.u32[1] += rte_cpu_to_be_16(0x0100);
90 priv23.u32[3] += rte_cpu_to_be_16(0x0100);
92 /* Update ttl,cksum rewrite ethernet hdr on mbuf0 */
93 d0 = rte_pktmbuf_mtod(mbuf0, void *);
94 rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
95 nh[priv01.u16[0]].rewrite_len);
97 next0 = nh[priv01.u16[0]].tx_node;
98 ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
99 sizeof(struct rte_ether_hdr));
100 ip0->time_to_live = priv01.u16[1] - 1;
/* Summing the two u16 halves of the incremented u32 folds the carry
 * back into 16 bits (end-around carry of the one's-complement sum). */
101 ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];
103 /* Update ttl,cksum rewrite ethernet hdr on mbuf1 */
104 d1 = rte_pktmbuf_mtod(mbuf1, void *);
105 rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
106 nh[priv01.u16[4]].rewrite_len);
108 next1 = nh[priv01.u16[4]].tx_node;
109 ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
110 sizeof(struct rte_ether_hdr));
111 ip1->time_to_live = priv01.u16[5] - 1;
112 ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];
114 /* Update ttl,cksum rewrite ethernet hdr on mbuf2 */
115 d2 = rte_pktmbuf_mtod(mbuf2, void *);
116 rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
117 nh[priv23.u16[0]].rewrite_len);
118 next2 = nh[priv23.u16[0]].tx_node;
119 ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
120 sizeof(struct rte_ether_hdr));
121 ip2->time_to_live = priv23.u16[1] - 1;
122 ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];
124 /* Update ttl,cksum rewrite ethernet hdr on mbuf3 */
125 d3 = rte_pktmbuf_mtod(mbuf3, void *);
126 rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
127 nh[priv23.u16[4]].rewrite_len);
129 next3 = nh[priv23.u16[4]].tx_node;
130 ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
131 sizeof(struct rte_ether_hdr));
132 ip3->time_to_live = priv23.u16[5] - 1;
133 ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];
135 /* Enqueue four to next node */
/* fix_spec is 1 only when all four packets take the speculated edge */
136 rte_edge_t fix_spec =
137 ((next_index == next0) && (next0 == next1) &&
138 (next1 == next2) && (next2 == next3));
140 if (unlikely(fix_spec == 0)) {
141 /* Copy things successfully speculated till now */
142 rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
144 to_next += last_spec;
/* Per-packet: keep it in the speculated stream if it matches,
 * otherwise enqueue it individually on its own edge. */
149 if (next_index == next0) {
150 to_next[0] = from[0];
154 rte_node_enqueue_x1(graph, node, next0,
159 if (next_index == next1) {
160 to_next[0] = from[1];
164 rte_node_enqueue_x1(graph, node, next1,
169 if (next_index == next2) {
170 to_next[0] = from[2];
174 rte_node_enqueue_x1(graph, node, next2,
179 if (next_index == next3) {
180 to_next[0] = from[3];
184 rte_node_enqueue_x1(graph, node, next3,
190 /* Change speculation if last two are same */
191 if ((next_index != next3) && (next2 == next3)) {
192 /* Put the current speculated node */
193 rte_node_next_stream_put(graph, node,
197 /* Get next speculated stream */
199 to_next = rte_node_next_stream_get(
200 graph, node, next_index, nb_objs);
/* Scalar tail: handle remaining (< 4) packets one at a time */
207 while (n_left_from > 0) {
215 d0 = rte_pktmbuf_mtod(mbuf0, void *);
216 rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_data,
217 nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_len);
219 next0 = nh[node_mbuf_priv1(mbuf0, dyn)->nh].tx_node;
220 ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
221 sizeof(struct rte_ether_hdr));
222 chksum = node_mbuf_priv1(mbuf0, dyn)->cksum +
223 rte_cpu_to_be_16(0x0100);
/* Fold the end-around carry of the one's-complement sum */
224 chksum += chksum >= 0xffff;
225 ip0->hdr_checksum = chksum;
226 ip0->time_to_live = node_mbuf_priv1(mbuf0, dyn)->ttl - 1;
228 if (unlikely(next_index ^ next0)) {
229 /* Copy things successfully speculated till now */
230 rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
232 to_next += last_spec;
236 rte_node_enqueue_x1(graph, node, next0, from[0]);
243 /* !!! Home run !!! */
/* Every packet took the speculated edge: move the whole stream over
 * without copying individual pointers. */
244 if (likely(last_spec == nb_objs)) {
245 rte_node_next_stream_move(graph, node, next_index);
250 rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
251 rte_node_next_stream_put(graph, node, next_index, held);
252 /* Save the last next used */
253 IP4_REWRITE_NODE_LAST_NEXT(node->ctx) = next_index;
/*
 * Node init callback: registers the priv1 mbuf dynfield (guarded by
 * init_once — guard logic elided in this excerpt) and caches its
 * offset in the per-node context for the fast path.
 */
259 ip4_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
261 static bool init_once;
/* The per-node context must fit in the space rte_graph reserves */
264 RTE_BUILD_BUG_ON(sizeof(struct ip4_rewrite_node_ctx) > RTE_NODE_CTX_SZ);
267 node_mbuf_priv1_dynfield_offset = rte_mbuf_dynfield_register(
268 &node_mbuf_priv1_dynfield_desc);
269 if (node_mbuf_priv1_dynfield_offset < 0)
273 IP4_REWRITE_NODE_PRIV1_OFF(node->ctx) = node_mbuf_priv1_dynfield_offset;
/* NOTE(review): message wording is duplicated ("Initialized ...
 * initialized") — runtime string, left as-is; consider fixing upstream. */
275 node_dbg("ip4_rewrite", "Initialized ip4_rewrite node initialized");
/*
 * Record which node edge (next_index) serves the given ethdev port.
 * Lazily allocates the global next-hop table on first use.
 * NOTE(review): port_id is not bounds-checked against next_index[]
 * in the visible code — confirm callers validate it.
 */
281 ip4_rewrite_set_next(uint16_t port_id, uint16_t next_index)
283 if (ip4_rewrite_nm == NULL) {
284 ip4_rewrite_nm = rte_zmalloc(
285 "ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
286 RTE_CACHE_LINE_SIZE);
287 if (ip4_rewrite_nm == NULL)
290 ip4_rewrite_nm->next_index[port_id] = next_index;
/*
 * Public API: install rewrite data for a next-hop index.
 * Validates the next-hop index and rewrite length, lazily allocates the
 * global table, and requires dst_port to have an edge registered via
 * ip4_rewrite_set_next() first (edge 0 is the drop edge, so a zero
 * next_index[] entry means "not registered").
 * NOTE(review): rewrite_data is not NULL-checked before the memcpy in
 * the visible code — confirm the API contract requires non-NULL.
 */
296 rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
297 uint8_t rewrite_len, uint16_t dst_port)
299 struct ip4_rewrite_nh_header *nh;
301 if (next_hop >= RTE_GRAPH_IP4_REWRITE_MAX_NH)
304 if (rewrite_len > RTE_GRAPH_IP4_REWRITE_MAX_LEN)
307 if (ip4_rewrite_nm == NULL) {
308 ip4_rewrite_nm = rte_zmalloc(
309 "ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
310 RTE_CACHE_LINE_SIZE);
311 if (ip4_rewrite_nm == NULL)
315 /* Check if dst port doesn't exist as edge */
316 if (!ip4_rewrite_nm->next_index[dst_port])
319 /* Update next hop */
320 nh = &ip4_rewrite_nm->nh[next_hop];
322 memcpy(nh->rewrite_data, rewrite_data, rewrite_len);
323 nh->tx_node = ip4_rewrite_nm->next_index[dst_port];
324 nh->rewrite_len = rewrite_len;
/* Node registration descriptor: wires the process and init callbacks.
 * NOTE(review): the edge list / nb_edges fields (orig. lines 334-337)
 * are elided in this excerpt. */
330 static struct rte_node_register ip4_rewrite_node = {
331 .process = ip4_rewrite_node_process,
332 .name = "ip4_rewrite",
333 /* Default edge i.e '0' is pkt drop */
338 .init = ip4_rewrite_node_init,
/* Accessor so other compilation units (e.g. pkt_cls/lookup setup) can
 * reference this node's registration without an extern variable. */
341 struct rte_node_register *
342 ip4_rewrite_node_get(void)
344 return &ip4_rewrite_node;
/* Register the node with the graph library at load time */
347 RTE_NODE_REGISTER(ip4_rewrite_node);