/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_vect.h>

#include "rte_node_ip4_api.h"

#include "ip4_rewrite_priv.h"
#include "node_private.h"

struct ip4_rewrite_node_ctx {
	/* Dynamic offset to mbuf priv1 */
	int mbuf_priv1_off;
	/* Cached next index */
	uint16_t next_index;
};

static struct ip4_rewrite_node_main *ip4_rewrite_nm;

#define IP4_REWRITE_NODE_LAST_NEXT(ctx) \
	(((struct ip4_rewrite_node_ctx *)ctx)->next_index)

#define IP4_REWRITE_NODE_PRIV1_OFF(ctx) \
	(((struct ip4_rewrite_node_ctx *)ctx)->mbuf_priv1_off)
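
/*
 * Note: the per-packet next-hop id, TTL and checksum are read from the mbuf
 * dynamic field (node_mbuf_priv1), which an upstream node such as ip4_lookup
 * is expected to have filled in. The node speculates that all packets in a
 * burst take the last used next edge and patches up only the ones that miss.
 */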

static uint16_t
ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
			 void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	struct ip4_rewrite_nh_header *nh = ip4_rewrite_nm->nh;
	const int dyn = IP4_REWRITE_NODE_PRIV1_OFF(node->ctx);
	uint16_t next0, next1, next2, next3, next_index;
	struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
	uint16_t n_left_from, held = 0, last_spec = 0;
	void *d0, *d1, *d2, *d3;
	void **to_next, **from;
	rte_xmm_t priv01;
	rte_xmm_t priv23;
	int i;

	/* Speculative next as last next */
	next_index = IP4_REWRITE_NODE_LAST_NEXT(node->ctx);
	rte_prefetch0(nh);

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;

	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(pkts[i]);

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
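
	/*
	 * Packets that match the speculated edge stay in place and are only
	 * counted (last_spec); they are copied into to_next in bulk when a
	 * miss occurs or at the end of the burst, so the common case touches
	 * each pointer once.
	 */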
	/* Update Ethernet header of pkts */
	while (n_left_from >= 4) {
		if (likely(n_left_from > 7)) {
			/* Prefetch only next-mbuf struct and priv area.
			 * Data need not be prefetched as we only write.
			 */
			rte_prefetch0(pkts[4]);
			rte_prefetch0(pkts[5]);
			rte_prefetch0(pkts[6]);
			rte_prefetch0(pkts[7]);
		}

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];

		pkts += 4;
		n_left_from -= 4;
		priv01.u64[0] = node_mbuf_priv1(mbuf0, dyn)->u;
		priv01.u64[1] = node_mbuf_priv1(mbuf1, dyn)->u;
		priv23.u64[0] = node_mbuf_priv1(mbuf2, dyn)->u;
		priv23.u64[1] = node_mbuf_priv1(mbuf3, dyn)->u;

		/* Increment checksum by one. */
		priv01.u32[1] += rte_cpu_to_be_16(0x0100);
		priv01.u32[3] += rte_cpu_to_be_16(0x0100);
		priv23.u32[1] += rte_cpu_to_be_16(0x0100);
		priv23.u32[3] += rte_cpu_to_be_16(0x0100);
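
		/*
		 * Decrementing TTL decreases the 16-bit header word holding
		 * it by 0x0100 (TTL is the high byte), so the ones'-complement
		 * checksum must be incremented by 0x0100 in network byte
		 * order. The adds above accumulate any carry in the upper
		 * half of each 32-bit lane; it is folded below when the two
		 * 16-bit halves are summed into hdr_checksum.
		 */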

		/* Update TTL and checksum, rewrite Ethernet header of mbuf0 */
		d0 = rte_pktmbuf_mtod(mbuf0, void *);
		rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
			   nh[priv01.u16[0]].rewrite_len);

		next0 = nh[priv01.u16[0]].tx_node;
		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
					      sizeof(struct rte_ether_hdr));
		ip0->time_to_live = priv01.u16[1] - 1;
		ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];

		/* Update TTL and checksum, rewrite Ethernet header of mbuf1 */
		d1 = rte_pktmbuf_mtod(mbuf1, void *);
		rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
			   nh[priv01.u16[4]].rewrite_len);

		next1 = nh[priv01.u16[4]].tx_node;
		ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
					      sizeof(struct rte_ether_hdr));
		ip1->time_to_live = priv01.u16[5] - 1;
		ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];

		/* Update TTL and checksum, rewrite Ethernet header of mbuf2 */
		d2 = rte_pktmbuf_mtod(mbuf2, void *);
		rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
			   nh[priv23.u16[0]].rewrite_len);
		next2 = nh[priv23.u16[0]].tx_node;
		ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
					      sizeof(struct rte_ether_hdr));
		ip2->time_to_live = priv23.u16[1] - 1;
		ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];

		/* Update TTL and checksum, rewrite Ethernet header of mbuf3 */
		d3 = rte_pktmbuf_mtod(mbuf3, void *);
		rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
			   nh[priv23.u16[4]].rewrite_len);

		next3 = nh[priv23.u16[4]].tx_node;
		ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
					      sizeof(struct rte_ether_hdr));
		ip3->time_to_live = priv23.u16[5] - 1;
		ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];

		/* Enqueue four to next node */
		rte_edge_t fix_spec =
			((next_index == next0) && (next0 == next1) &&
			 (next1 == next2) && (next2 == next3));

		if (unlikely(fix_spec == 0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* next0 */
			if (next_index == next0) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next0,
						    from[0]);
			}

			/* next1 */
			if (next_index == next1) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next1,
						    from[1]);
			}

			/* next2 */
			if (next_index == next2) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next2,
						    from[2]);
			}

			/* next3 */
			if (next_index == next3) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node, next3,
						    from[3]);
			}

			from += 4;

			/* Change speculation if last two are same */
			if ((next_index != next3) && (next2 == next3)) {
				/* Put the current speculated node */
				rte_node_next_stream_put(graph, node,
							 next_index, held);
				held = 0;

				/* Get next speculated stream */
				next_index = next3;
				to_next = rte_node_next_stream_get(
					graph, node, next_index, nb_objs);
			}
		} else {
			last_spec += 4;
		}
	}
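
	/* Handle the remaining (fewer than four) packets one at a time; the
	 * checksum carry is folded explicitly here instead of via the packed
	 * 64-bit loads used above.
	 */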
	while (n_left_from > 0) {
		uint16_t chksum;

		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		d0 = rte_pktmbuf_mtod(mbuf0, void *);
		rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_data,
			   nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_len);

		next0 = nh[node_mbuf_priv1(mbuf0, dyn)->nh].tx_node;
		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
					      sizeof(struct rte_ether_hdr));
		chksum = node_mbuf_priv1(mbuf0, dyn)->cksum +
			 rte_cpu_to_be_16(0x0100);
		/* Fold carry for ones'-complement addition */
		chksum += chksum >= 0xffff;
		ip0->hdr_checksum = chksum;
		ip0->time_to_live = node_mbuf_priv1(mbuf0, dyn)->ttl - 1;

		if (unlikely(next_index ^ next0)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node, next0, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!! All packets took the speculated edge. */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);
	/* Save the last next used */
	IP4_REWRITE_NODE_LAST_NEXT(node->ctx) = next_index;

	return nb_objs;
}

static int
ip4_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	static bool init_once;

	RTE_SET_USED(graph);
	RTE_BUILD_BUG_ON(sizeof(struct ip4_rewrite_node_ctx) > RTE_NODE_CTX_SZ);

	if (!init_once) {
		node_mbuf_priv1_dynfield_offset = rte_mbuf_dynfield_register(
				&node_mbuf_priv1_dynfield_desc);
		if (node_mbuf_priv1_dynfield_offset < 0)
			return -rte_errno;
		init_once = true;
	}
	IP4_REWRITE_NODE_PRIV1_OFF(node->ctx) = node_mbuf_priv1_dynfield_offset;

	node_dbg("ip4_rewrite", "Initialized ip4_rewrite node");

	return 0;
}
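
/*
 * The mbuf dynamic field is registered once per process (guarded by
 * init_once above); each node instance or clone then caches the offset in
 * its own context.
 */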

int
ip4_rewrite_set_next(uint16_t port_id, uint16_t next_index)
{
	if (ip4_rewrite_nm == NULL) {
		ip4_rewrite_nm = rte_zmalloc(
			"ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
			RTE_CACHE_LINE_SIZE);
		if (ip4_rewrite_nm == NULL)
			return -ENOMEM;
	}
	ip4_rewrite_nm->next_index[port_id] = next_index;

	return 0;
}
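
/*
 * ip4_rewrite_set_next() records the Tx edge for a port; it is typically
 * wired up by the ethdev control path when a port's Tx node is attached to
 * this node at graph setup time.
 */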

int
rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
			 uint8_t rewrite_len, uint16_t dst_port)
{
	struct ip4_rewrite_nh_header *nh;

	if (next_hop >= RTE_GRAPH_IP4_REWRITE_MAX_NH)
		return -EINVAL;

	if (rewrite_len > RTE_GRAPH_IP4_REWRITE_MAX_LEN)
		return -EINVAL;

	if (ip4_rewrite_nm == NULL) {
		ip4_rewrite_nm = rte_zmalloc(
			"ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
			RTE_CACHE_LINE_SIZE);
		if (ip4_rewrite_nm == NULL)
			return -ENOMEM;
	}

	/* Check that dst_port has been registered as an edge */
	if (!ip4_rewrite_nm->next_index[dst_port])
		return -EINVAL;

	/* Update next hop */
	nh = &ip4_rewrite_nm->nh[next_hop];

	memcpy(nh->rewrite_data, rewrite_data, rewrite_len);
	nh->tx_node = ip4_rewrite_nm->next_index[dst_port];
	nh->rewrite_len = rewrite_len;
	nh->enabled = true;

	return 0;
}
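
/*
 * Usage sketch (illustrative only): build an Ethernet header for the next
 * hop and install it as the rewrite data. peer_mac, port_mac, nh_id and
 * port_id are assumed application variables, not part of this file.
 *
 *	uint8_t rewrite[RTE_ETHER_HDR_LEN];
 *	struct rte_ether_hdr *eth = (struct rte_ether_hdr *)rewrite;
 *
 *	rte_ether_addr_copy(&peer_mac, &eth->d_addr);
 *	rte_ether_addr_copy(&port_mac, &eth->s_addr);
 *	eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 *	rte_node_ip4_rewrite_add(nh_id, rewrite, RTE_ETHER_HDR_LEN, port_id);
 */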

static struct rte_node_register ip4_rewrite_node = {
	.process = ip4_rewrite_node_process,
	.name = "ip4_rewrite",
	/* Default edge, i.e. '0', is pkt_drop */
	.nb_edges = 1,
	.next_nodes = {
		[0] = "pkt_drop",
	},
	.init = ip4_rewrite_node_init,
};

struct rte_node_register *
ip4_rewrite_node_get(void)
{
	return &ip4_rewrite_node;
}

RTE_NODE_REGISTER(ip4_rewrite_node);