/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "testpmd.h"
#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
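
/*
 * Illustrative sketch, not used by the forwarding path below: IP_VHL_DEF
 * packs the IPv4 version nibble (0x40) and the default 5-word header length
 * (0x05) into the single version_ihl byte of struct ipv4_hdr.
 * build_ipv4_hdr_template() is a hypothetical helper shown only to make the
 * macro usage concrete.
 */
static inline void
build_ipv4_hdr_template(struct ipv4_hdr *hdr, uint8_t proto, uint16_t total_len)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->version_ihl   = IP_VHL_DEF;   /* 0x45: IPv4, 20-byte header */
	hdr->time_to_live  = IP_DEFTTL;
	hdr->next_proto_id = proto;
	hdr->total_length  = rte_cpu_to_be_16(total_len);
}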
static inline uint16_t
get_16b_sum(uint16_t *ptr16, uint32_t nr)
{
	uint32_t sum = 0;

	while (nr > 1) {
		sum += *ptr16++;
		nr -= sizeof(uint16_t);
		if (sum > UINT16_MAX)
			sum -= UINT16_MAX;
	}
	/* If length is in odd bytes */
	if (nr)
		sum += *((uint8_t *)ptr16);

	sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}
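
/*
 * Worked example of the ones-complement carry wrap used above:
 * 0xffff + 0x0003 = 0x10002; wrapping the carry back in
 * (0x0001 + 0x0002) gives 0x0003, the ones-complement sum.
 */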
static inline uint16_t
get_ipv4_cksum(struct ipv4_hdr *ipv4_hdr)
{
	uint16_t cksum;

	cksum = get_16b_sum((uint16_t *)ipv4_hdr, sizeof(struct ipv4_hdr));
	return (uint16_t)((cksum == 0xffff) ? cksum : ~cksum);
}
static inline uint16_t
get_ipv4_psd_sum(struct ipv4_hdr *ip_hdr)
{
	/* Pseudo header for IPv4/UDP/TCP checksum */
	union ipv4_psd_header {
		struct {
			uint32_t src_addr; /* IP address of source host. */
			uint32_t dst_addr; /* IP address of destination host(s). */
			uint8_t  zero;     /* zero. */
			uint8_t  proto;    /* L4 protocol type. */
			uint16_t len;      /* L4 length. */
		} __attribute__((__packed__));
		uint16_t u16_arr[0]; /* allow use as 16-bit values with safe aliasing */
	} psd_hdr;

	psd_hdr.src_addr = ip_hdr->src_addr;
	psd_hdr.dst_addr = ip_hdr->dst_addr;
	psd_hdr.zero     = 0;
	psd_hdr.proto    = ip_hdr->next_proto_id;
	psd_hdr.len      = rte_cpu_to_be_16((uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length)
				- sizeof(struct ipv4_hdr)));
	return get_16b_sum(psd_hdr.u16_arr, sizeof(psd_hdr));
}
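
/*
 * Note on usage: in the hardware-offload branches of the forwarding loop
 * below, this pseudo-header sum is written into the UDP/TCP checksum field
 * before transmission so that the NIC can complete the checksum over the
 * L4 data; in the software branches the full checksum is computed instead.
 */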
static inline uint16_t
get_ipv6_psd_sum(struct ipv6_hdr *ip_hdr)
{
	/* Pseudo header for IPv6/UDP/TCP checksum */
	union ipv6_psd_header {
		struct {
			uint8_t  src_addr[16]; /* IP address of source host. */
			uint8_t  dst_addr[16]; /* IP address of destination host(s). */
			uint32_t len;          /* L4 length. */
			uint32_t proto;        /* L4 protocol - top 3 bytes must be zero */
		} __attribute__((__packed__));
		uint16_t u16_arr[0]; /* allow use as 16-bit values with safe aliasing */
	} psd_hdr;

	rte_memcpy(&psd_hdr.src_addr, ip_hdr->src_addr,
			sizeof(ip_hdr->src_addr) + sizeof(ip_hdr->dst_addr));
	psd_hdr.len   = ip_hdr->payload_len;
	psd_hdr.proto = (ip_hdr->proto << 24);

	return get_16b_sum(psd_hdr.u16_arr, sizeof(psd_hdr));
}
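
/*
 * Note: the 16-bit word sum does not depend on where a word sits inside the
 * pseudo header, so copying payload_len (already in network order) into the
 * 32-bit len field, and shifting proto into the most-significant byte so it
 * lands in the last byte on a little-endian host, both give the correct
 * contribution to the sum.
 */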
static inline uint16_t
get_ipv4_udptcp_checksum(struct ipv4_hdr *ipv4_hdr, uint16_t *l4_hdr)
{
	uint32_t cksum;
	uint32_t l4_len;

	l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - sizeof(struct ipv4_hdr);

	cksum  = get_16b_sum(l4_hdr, l4_len);
	cksum += get_ipv4_psd_sum(ipv4_hdr);

	cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
	cksum = (~cksum) & 0xffff;
	if (cksum == 0)
		cksum = 0xffff;
	return (uint16_t)cksum;
}
static inline uint16_t
get_ipv6_udptcp_checksum(struct ipv6_hdr *ipv6_hdr, uint16_t *l4_hdr)
{
	uint32_t cksum;
	uint32_t l4_len;

	l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);

	cksum  = get_16b_sum(l4_hdr, l4_len);
	cksum += get_ipv6_psd_sum(ipv6_hdr);

	cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
	cksum = (~cksum) & 0xffff;
	if (cksum == 0)
		cksum = 0xffff;
	return (uint16_t)cksum;
}
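
/*
 * Illustrative sketch, not used by the forwarding path: verifying a received
 * IPv4 UDP/TCP checksum with the helpers above. For a valid segment the
 * ones-complement sum of the pseudo header and of the L4 data (with the
 * transmitted checksum left in place) folds to 0xffff.
 * is_ipv4_udptcp_cksum_ok() is a hypothetical name, not part of testpmd.
 */
static inline int
is_ipv4_udptcp_cksum_ok(struct ipv4_hdr *ipv4_hdr, uint16_t *l4_hdr)
{
	uint32_t sum;
	uint32_t l4_len;

	l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - sizeof(struct ipv4_hdr);
	sum  = get_16b_sum(l4_hdr, l4_len);
	sum += get_ipv4_psd_sum(ipv4_hdr);
	sum  = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
	return sum == 0xffff;
}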
/*
 * Forwarding of packets. Change the checksum field with HW or SW methods.
 * The HW/SW method selection depends on the ol_flags on every packet.
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
	struct rte_port  *txp;
	struct rte_mbuf  *mb;
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr  *ipv4_hdr;
	struct ipv6_hdr  *ipv6_hdr;
	struct udp_hdr   *udp_hdr;
	struct tcp_hdr   *tcp_hdr;
	struct sctp_hdr  *sctp_hdr;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint16_t ol_flags;
	uint16_t pkt_ol_flags;
	uint16_t tx_ol_flags;
	uint16_t l4_proto;
	uint16_t eth_type;
	uint8_t  l2_len;
	uint8_t  l3_len;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif
	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	txp = &ports[fs->tx_port];
	tx_ol_flags = txp->tx_ol_flags;
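
	/*
	 * tx_ol_flags is the per-port checksum offload mask used below:
	 * bit 0 = IP header, bit 1 = UDP, bit 2 = TCP, bit 3 = SCTP.
	 * A set bit requests HW checksum insertion; a clear bit selects the
	 * software fallback implemented below.
	 */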
	for (i = 0; i < nb_rx; i++) {

		mb = pkts_burst[i];
		l2_len = sizeof(struct ether_hdr);
		pkt_ol_flags = mb->ol_flags;
		ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));

		eth_hdr = (struct ether_hdr *) mb->pkt.data;
		eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
		if (eth_type == ETHER_TYPE_VLAN) {
			/* Only allow a single VLAN label here */
			l2_len += sizeof(struct vlan_hdr);
			eth_type = rte_be_to_cpu_16(*(uint16_t *)
				((uintptr_t)&eth_hdr->ether_type +
				 sizeof(struct vlan_hdr)));
		}
		/* Update the L3/L4 checksum error packet count */
		rx_bad_ip_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
		rx_bad_l4_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);

		/*
		 * Try to figure out the L3 packet type by SW.
		 */
		if ((pkt_ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT |
				PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) == 0) {
			if (eth_type == ETHER_TYPE_IPv4)
				pkt_ol_flags |= PKT_RX_IPV4_HDR;
			else if (eth_type == ETHER_TYPE_IPv6)
				pkt_ol_flags |= PKT_RX_IPV6_HDR;
		}
		/*
		 * Simplify the protocol parsing.
		 * Assume the incoming packets are formatted as
		 *      Ethernet2 + optional single VLAN
		 *      + ipv4 or ipv6
		 *      + udp or tcp or sctp or others
		 */
		if (pkt_ol_flags & PKT_RX_IPV4_HDR) {

			/* Do not support the ipv4 option field */
			l3_len = sizeof(struct ipv4_hdr);

			ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
					unsigned char *) + l2_len);

			l4_proto = ipv4_hdr->next_proto_id;

			/* Do not delete, this is required by HW */
			ipv4_hdr->hdr_checksum = 0;

			if (tx_ol_flags & 0x1) {
				/* HW checksum */
				ol_flags |= PKT_TX_IP_CKSUM;
			}
			else {
				/* SW checksum calculation */
				ipv4_hdr->src_addr++;
				ipv4_hdr->hdr_checksum = get_ipv4_cksum(ipv4_hdr);
			}
			if (l4_proto == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x2) {
					/* HW offload */
					ol_flags |= PKT_TX_UDP_CKSUM;
					/* Pseudo header sum needs to be set properly */
					udp_hdr->dgram_cksum = get_ipv4_psd_sum(ipv4_hdr);
				}
				else {
					/* SW implementation, clear checksum field first */
					udp_hdr->dgram_cksum = 0;
					udp_hdr->dgram_cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
							(uint16_t *)udp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_TCP) {
				tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x4) {
					ol_flags |= PKT_TX_TCP_CKSUM;
					tcp_hdr->cksum = get_ipv4_psd_sum(ipv4_hdr);
				}
				else {
					tcp_hdr->cksum = 0;
					tcp_hdr->cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
							(uint16_t *)tcp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_SCTP) {
				sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);

				if (tx_ol_flags & 0x8) {
					ol_flags |= PKT_TX_SCTP_CKSUM;
					sctp_hdr->cksum = 0;

					/* Sanity check, only a multiple of 4 bytes supported */
					if ((rte_be_to_cpu_16(ipv4_hdr->total_length) % 4) != 0)
						printf("sctp payload must be a multiple "
							"of 4 bytes for checksum offload");
				}
				else {
					sctp_hdr->cksum = 0;
					/* CRC32c sample code is available in RFC3309;
					 * an illustrative sketch follows this function. */
				}
			}
			/* End of L4 handling */
		}
		else if (pkt_ol_flags & PKT_RX_IPV6_HDR) {

			ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
					unsigned char *) + l2_len);
			l3_len = sizeof(struct ipv6_hdr);
			l4_proto = ipv6_hdr->proto;

			if (l4_proto == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x2) {
					/* HW offload */
					ol_flags |= PKT_TX_UDP_CKSUM;
					udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr);
				}
				else {
					/* SW implementation:
					 * the checksum field needs to be cleared first */
					udp_hdr->dgram_cksum = 0;
					udp_hdr->dgram_cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
							(uint16_t *)udp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_TCP) {
				tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x4) {
					ol_flags |= PKT_TX_TCP_CKSUM;
					tcp_hdr->cksum = get_ipv6_psd_sum(ipv6_hdr);
				}
				else {
					tcp_hdr->cksum = 0;
					tcp_hdr->cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
							(uint16_t *)tcp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_SCTP) {
				sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);

				if (tx_ol_flags & 0x8) {
					ol_flags |= PKT_TX_SCTP_CKSUM;
					sctp_hdr->cksum = 0;
					/* Sanity check, only a multiple of 4 bytes supported by HW */
					if ((rte_be_to_cpu_16(ipv6_hdr->payload_len) % 4) != 0)
						printf("sctp payload must be a multiple "
							"of 4 bytes for checksum offload");
				}
				else {
					sctp_hdr->cksum = 0;
					/* CRC32c sample code is available in RFC3309;
					 * an illustrative sketch follows this function. */
				}
			}
			else {
				printf("Test flow control for 1G PMD\n");
			}
			/* End of L4 handling */
		}
		else {
			l3_len = 0;
			printf("Unhandled packet type: %#hx\n", eth_type);
		}
		/* Combine the packet header writes. VLAN is not considered here */
		mb->pkt.vlan_macip.f.l2_len = l2_len;
		mb->pkt.vlan_macip.f.l3_len = l3_len;
		mb->ol_flags = ol_flags;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
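
/*
 * Illustrative sketch, not called by the forwarding loop above: a plain
 * bitwise CRC32c (polynomial 0x1EDC6F41, reflected 0x82F63B78) as described
 * in RFC 3309, which the software SCTP branches above leave unimplemented.
 * crc32c_sw() is a hypothetical helper; a table-driven or SSE4.2 CRC32
 * implementation would normally be preferred, and the byte ordering of the
 * value actually stored in the SCTP header is not addressed here.
 */
static inline uint32_t
crc32c_sw(const uint8_t *data, uint32_t len)
{
	uint32_t crc = 0xffffffff;
	uint32_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++) {
			if (crc & 1)
				crc = (crc >> 1) ^ 0x82f63b78;
			else
				crc >>= 1;
		}
	}
	return ~crc;
}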
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};