/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_string_fns.h>

#include "testpmd.h"

#define UDP_SRC_PORT 1024
#define UDP_DST_PORT 1024

#define IP_SRC_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 1)
#define IP_DST_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 2)

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
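
/*
 * IP_VHL_DEF packs the 4-bit version and 4-bit IHL into the first header
 * byte: 0x40 | 0x05 == 0x45, i.e. IPv4 with a 20-byte header.
 */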

static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
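
/*
 * Both template headers are filled in once by tx_only_begin() and copied
 * verbatim into every transmitted packet; only the IP source address is
 * rewritten per packet when txonly_multi_flow is enabled.
 */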

static void
copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
                     unsigned offset)
{
        struct rte_mbuf *seg;
        void *seg_buf;
        unsigned copy_len;

        /* Skip the segments that lie entirely before the offset. */
        seg = pkt;
        while (offset >= seg->data_len) {
                offset -= seg->data_len;
                seg = seg->next;
        }
        copy_len = seg->data_len - offset;
        seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
        while (len > copy_len) {
                rte_memcpy(seg_buf, buf, (size_t) copy_len);
                len -= copy_len;
                buf = ((char *) buf + copy_len);
                seg = seg->next;
                seg_buf = rte_pktmbuf_mtod(seg, char *);
                copy_len = seg->data_len;
        }
        rte_memcpy(seg_buf, buf, (size_t) len);
}

static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
        if (offset + len <= pkt->data_len) {
                rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
                           buf, (size_t) len);
                return;
        }
        copy_buf_to_pkt_segs(buf, len, pkt, offset);
}
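
/*
 * Illustrative example (segment sizes are hypothetical, not testpmd
 * defaults): with tx_pkt_seg_lengths = {16, 48}, copying the 20-byte IPv4
 * header at offset 14 does not fit in the 16-byte first segment
 * (14 + 20 > 16), so copy_buf_to_pkt() falls back to the segment-aware
 * copy_buf_to_pkt_segs() path above.
 */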

static void
setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
                         struct udp_hdr *udp_hdr,
                         uint16_t pkt_data_len)
{
        uint16_t *ptr16;
        uint32_t ip_cksum;
        uint16_t pkt_len;

        /*
         * Initialize UDP header.
         */
        pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
        udp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT);
        udp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT);
        udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
        udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

        /*
         * Initialize IP header.
         */
        pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
        ip_hdr->version_ihl = IP_VHL_DEF;
        ip_hdr->type_of_service = 0;
        ip_hdr->fragment_offset = 0;
        ip_hdr->time_to_live = IP_DEFTTL;
        ip_hdr->next_proto_id = IPPROTO_UDP;
        ip_hdr->packet_id = 0;
        ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
        ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR);
        ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR);

        /*
         * Compute IP header checksum.
         */
        ptr16 = (unaligned_uint16_t *) ip_hdr;
        ip_cksum = 0;
        ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
        ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
        ip_cksum += ptr16[4];
        /* ptr16[5] is the checksum field itself and is skipped. */
        ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
        ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

        /*
         * Reduce 32 bit checksum to 16 bits and complement it.
         */
        ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
                (ip_cksum & 0x0000FFFF);
        if (ip_cksum > 65535)
                ip_cksum -= 65535;
        ip_cksum = (~ip_cksum) & 0x0000FFFF;
        if (ip_cksum == 0)
                ip_cksum = 0xFFFF;
        ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
}
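
/*
 * Note: the fold-and-complement above is the standard ones'-complement
 * IPv4 header checksum; rte_ip.h offers rte_ipv4_cksum() for the same
 * computation, left unused here since the template is built only once.
 */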

/*
 * Transmit a burst of multi-segment packets.
 */
static void
pkt_burst_transmit(struct fwd_stream *fs)
{
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        struct rte_port *txp;
        struct rte_mbuf *pkt;
        struct rte_mbuf *pkt_seg;
        struct rte_mempool *mbp;
        struct ether_hdr eth_hdr;
        uint16_t nb_tx;
        uint16_t nb_pkt;
        uint16_t vlan_tci, vlan_tci_outer;
        uint32_t retry;
        uint64_t ol_flags = 0;
        uint8_t ip_var = RTE_PER_LCORE(_ip_var);
        uint8_t i;
        uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
        uint64_t start_tsc;
        uint64_t end_tsc;
        uint64_t core_cycles;
#endif
        uint32_t nb_segs, pkt_len;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
        start_tsc = rte_rdtsc();
#endif

        mbp = current_fwd_lcore()->mbp;
        txp = &ports[fs->tx_port];
        tx_offloads = txp->dev_conf.txmode.offloads;
        vlan_tci = txp->tx_vlan_id;
        vlan_tci_outer = txp->tx_vlan_id_outer;
        if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
                ol_flags = PKT_TX_VLAN_PKT;
        if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
                ol_flags |= PKT_TX_QINQ_PKT;
        if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
                ol_flags |= PKT_TX_MACSEC;
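        /*
         * Note: these ol_flags are stamped on every mbuf built below;
         * with PKT_TX_VLAN_PKT/PKT_TX_QINQ_PKT set, the PMD inserts the
         * tags taken from pkt->vlan_tci and pkt->vlan_tci_outer.
         */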
        for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
                pkt = rte_mbuf_raw_alloc(mbp);
                if (pkt == NULL) {
                nomore_mbuf:
                        if (verbose_level > 0)
                                printf("%s: No more mbufs in mempool %s\n",
                                       __func__, mbp->name);
                        break;
                }

                /*
                 * Using raw alloc avoids per-field initialization and so
                 * improves performance, but some consumers may have used
                 * the headroom and decremented data_off, so make sure it
                 * is reset to its default value.
                 */
                rte_pktmbuf_reset_headroom(pkt);
                pkt->data_len = tx_pkt_seg_lengths[0];
                pkt_seg = pkt;
                if (tx_pkt_split == TX_PKT_SPLIT_RND)
                        nb_segs = random() % tx_pkt_nb_segs + 1;
                else
                        nb_segs = tx_pkt_nb_segs;
                pkt_len = pkt->data_len;
                for (i = 1; i < nb_segs; i++) {
                        pkt_seg->next = rte_mbuf_raw_alloc(mbp);
                        if (pkt_seg->next == NULL) {
                                pkt->nb_segs = i;
                                rte_pktmbuf_free(pkt);
                                goto nomore_mbuf;
                        }
                        pkt_seg = pkt_seg->next;
                        pkt_seg->data_len = tx_pkt_seg_lengths[i];
                        pkt_len += pkt_seg->data_len;
                }
                pkt_seg->next = NULL; /* Last segment of packet. */

                /*
                 * Initialize Ethernet header.
                 */
                ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
                ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
                eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

                /*
                 * Copy headers in first packet segment(s).
                 */
                copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
                copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
                                sizeof(struct ether_hdr));
                if (txonly_multi_flow) {
                        struct ipv4_hdr *ip_hdr;
                        uint32_t addr;

                        ip_hdr = rte_pktmbuf_mtod_offset(pkt,
                                        struct ipv4_hdr *,
                                        sizeof(struct ether_hdr));
                        /*
                         * Generate multiple flows by varying the IP source
                         * address, so that packets are well distributed by
                         * RSS on the receiving side, if any, and txonly
                         * mode can serve as a decent packet generator for
                         * quick performance regression tests.
                         */
                        addr = (IP_DST_ADDR | (ip_var++ << 8)) + rte_lcore_id();
                        ip_hdr->src_addr = rte_cpu_to_be_32(addr);
                }
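                /*
                 * The generated source addresses thus walk
                 * 192.168.<ip_var>.x: the per-lcore ip_var sets the third
                 * octet and rte_lcore_id() is added into the low bits, so
                 * each lcore and each packet yields a distinct flow key.
                 */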
                copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
                                sizeof(struct ether_hdr) +
                                sizeof(struct ipv4_hdr));

                /*
                 * Complete first mbuf of packet and append it to the
                 * burst of packets to be transmitted.
                 */
                pkt->nb_segs = nb_segs;
                pkt->pkt_len = pkt_len;
                pkt->ol_flags = ol_flags;
                pkt->vlan_tci = vlan_tci;
                pkt->vlan_tci_outer = vlan_tci_outer;
                pkt->l2_len = sizeof(struct ether_hdr);
                pkt->l3_len = sizeof(struct ipv4_hdr);
                pkts_burst[nb_pkt] = pkt;
        }

        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
        /*
         * Retry if necessary.
         */
        if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
                retry = 0;
                while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
                        rte_delay_us(burst_tx_delay_time);
                        nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
                                        &pkts_burst[nb_tx], nb_pkt - nb_tx);
                }
        }
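        /*
         * burst_tx_delay_time and burst_tx_retry_num are testpmd-global
         * retry knobs; the delay gives the TX ring time to drain before
         * the unsent tail of the burst is offered to the PMD again.
         */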
        fs->tx_packets += nb_tx;

        if (txonly_multi_flow)
                RTE_PER_LCORE(_ip_var) += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
        fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
        if (unlikely(nb_tx < nb_pkt)) {
                if (verbose_level > 0 && fs->fwd_dropped == 0)
                        printf("port %d tx_queue %d - drop "
                               "(nb_pkt:%u - nb_tx:%u)=%u packets\n",
                               fs->tx_port, fs->tx_queue,
                               (unsigned) nb_pkt, (unsigned) nb_tx,
                               (unsigned) (nb_pkt - nb_tx));
                fs->fwd_dropped += (nb_pkt - nb_tx);
                do {
                        rte_pktmbuf_free(pkts_burst[nb_tx]);
                } while (++nb_tx < nb_pkt);
        }

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
        end_tsc = rte_rdtsc();
        core_cycles = (end_tsc - start_tsc);
        fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

static void
tx_only_begin(__attribute__((unused)) portid_t pi)
{
        uint16_t pkt_data_len;

        pkt_data_len = (uint16_t) (tx_pkt_length - (sizeof(struct ether_hdr) +
                                                    sizeof(struct ipv4_hdr) +
                                                    sizeof(struct udp_hdr)));
        setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
}
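
/*
 * For example, with testpmd's default 64-byte tx_pkt_length this leaves
 * 64 - 14 (Ethernet) - 20 (IPv4) - 8 (UDP) = 22 bytes of UDP payload.
 */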

struct fwd_engine tx_only_engine = {
        .fwd_mode_name  = "txonly",
        .port_fwd_begin = tx_only_begin,
        .port_fwd_end   = NULL,
        .packet_fwd     = pkt_burst_transmit,
};