/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_string_fns.h>

#include "testpmd.h"

/* use RFC863 Discard Protocol */
uint16_t tx_udp_src_port = 9;
uint16_t tx_udp_dst_port = 9;

/* use RFC5735 / RFC2544 reserved network test addresses: 198.18.0.0/15 */
uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
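
/*
 * IP_VHL_DEF packs version and header length into the single
 * version_ihl byte: 0x40 | 0x05 = 0x45, i.e. IPv4 with a 5-word
 * (20-byte) header and therefore no IP options.
 */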

static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
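
/*
 * _ip_var is per-lcore so that, in multi-flow mode, each TX lcore can
 * vary the IP source address independently without synchronization.
 */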

static void
copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
		     unsigned offset)
{
	struct rte_mbuf *seg = pkt;
	void *seg_buf;
	unsigned copy_len;

	/* Skip the segments entirely covered by the offset. */
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char *) buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, char *);
		copy_len = seg->data_len;
	}
	rte_memcpy(seg_buf, buf, (size_t) len);
}
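
/*
 * Illustration (hypothetical segment sizes): with tx_pkt_seg_lengths =
 * {16, 32}, copying a 20-byte IPv4 header at offset 14 writes bytes
 * 14..15 of segment 0 and bytes 0..17 of segment 1.
 */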

static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
			   buf, (size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}

static void
setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
			 struct rte_udp_hdr *udp_hdr,
			 uint16_t pkt_data_len)
{
	uint16_t *ptr16;
	uint32_t ip_cksum;
	uint16_t pkt_len;

	/*
	 * Initialize UDP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
	udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
	udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
	ip_hdr->version_ihl = IP_VHL_DEF;
	ip_hdr->type_of_service = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_UDP;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(tx_ip_dst_addr);

	/*
	 * Compute IP header checksum: sum the header as 16-bit words,
	 * skipping the checksum field itself (ptr16[5]).
	 */
	ptr16 = (unaligned_uint16_t *) ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce 32 bit checksum to 16 bits and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	if (ip_cksum > 65535)
		ip_cksum -= 65535;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 65535;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
}
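
/*
 * Worked example of the fold above: a word sum of 0x0002BBCD reduces to
 * 0x0002 + 0xBBCD = 0xBBCF, which is below 65535, so the final checksum
 * is ~0xBBCF & 0xFFFF = 0x4430.
 */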

static inline bool
pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
		const uint16_t vlan_tci_outer, const uint64_t ol_flags)
{
	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
	uint8_t ip_var = RTE_PER_LCORE(_ip_var);
	struct rte_mbuf *pkt_seg;
	uint32_t nb_segs, pkt_len;
	uint8_t i;

	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
		nb_segs = random() % tx_pkt_nb_segs + 1;
	else
		nb_segs = tx_pkt_nb_segs;

	if (nb_segs > 1) {
		if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1))
			return false;
	}

	rte_pktmbuf_reset_headroom(pkt);
	pkt->data_len = tx_pkt_seg_lengths[0];
	pkt->ol_flags = ol_flags;
	pkt->vlan_tci = vlan_tci;
	pkt->vlan_tci_outer = vlan_tci_outer;
	pkt->l2_len = sizeof(struct rte_ether_hdr);
	pkt->l3_len = sizeof(struct rte_ipv4_hdr);

	pkt_len = pkt->data_len;
	pkt_seg = pkt;
	for (i = 1; i < nb_segs; i++) {
		pkt_seg->next = pkt_segs[i - 1];
		pkt_seg = pkt_seg->next;
		pkt_seg->data_len = tx_pkt_seg_lengths[i];
		pkt_len += pkt_seg->data_len;
	}
	pkt_seg->next = NULL; /* Last segment of packet. */
	/*
	 * Copy headers in first packet segment(s).
	 */
	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
			sizeof(struct rte_ether_hdr));
	if (txonly_multi_flow) {
		struct rte_ipv4_hdr *ip_hdr;
		uint32_t addr;

		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		/*
		 * Generate multiple flows by varying the IP source
		 * address. This spreads packets across RSS queues on
		 * the receiving side, if any, so that txonly mode can
		 * serve as a decent packet generator for quick
		 * performance regression tests.
		 */
		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
	}
	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
			sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr));
	/*
	 * Complete first mbuf of packet and append it to the
	 * burst of packets to be transmitted.
	 */
	pkt->nb_segs = nb_segs;
	pkt->pkt_len = pkt_len;

	return true;
}
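
/*
 * Note: if the bulk get for the extra (nb_segs - 1) segments fails,
 * pkt_burst_prepare() returns false without touching the caller's mbuf;
 * pkt_burst_transmit() below then returns it to the pool or frees it.
 */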

/*
 * Transmit a burst of multi-segment packets.
 */
static void
pkt_burst_transmit(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *pkt;
	struct rte_mempool *mbp;
	struct rte_ether_hdr eth_hdr;
	uint16_t nb_tx;
	uint16_t nb_pkt;
	uint16_t vlan_tci, vlan_tci_outer;
	uint32_t retry;
	uint64_t ol_flags = 0;
	uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	mbp = current_fwd_lcore()->mbp;
	txp = &ports[fs->tx_port];
	tx_offloads = txp->dev_conf.txmode.offloads;
	vlan_tci = txp->tx_vlan_id;
	vlan_tci_outer = txp->tx_vlan_id_outer;
	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		ol_flags = PKT_TX_VLAN_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ol_flags |= PKT_TX_QINQ_PKT;
	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
		ol_flags |= PKT_TX_MACSEC;

	/*
	 * Initialize Ethernet header.
	 */
	rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
	rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
	eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
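
	/*
	 * Two allocation strategies: try one bulk get for the whole burst
	 * first; if the pool cannot serve it in a single call, fall back
	 * to allocating mbufs one at a time.
	 */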
	if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
				nb_pkt_per_burst) == 0) {
		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
			if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
					&eth_hdr, vlan_tci, vlan_tci_outer,
					ol_flags))) {
				rte_mempool_put_bulk(mbp,
						(void **)&pkts_burst[nb_pkt],
						nb_pkt_per_burst - nb_pkt);
				break;
			}
		}
	} else {
		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
			pkt = rte_mbuf_raw_alloc(mbp);
			if (pkt == NULL)
				break;
			if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
					vlan_tci, vlan_tci_outer,
					ol_flags))) {
				rte_pktmbuf_free(pkt);
				break;
			}
			pkts_burst[nb_pkt] = pkt;
		}
	}

	if (nb_pkt == 0)
		return;

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
	/*
	 * Retry if necessary.
	 */
	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_pkt - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

	if (txonly_multi_flow)
		RTE_PER_LCORE(_ip_var) += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_pkt)) {
		if (verbose_level > 0 && fs->fwd_dropped == 0)
			printf("port %d tx_queue %d - drop "
			       "(nb_pkt:%u - nb_tx:%u)=%u packets\n",
			       fs->tx_port, fs->tx_queue,
			       (unsigned) nb_pkt, (unsigned) nb_tx,
			       (unsigned) (nb_pkt - nb_tx));
		fs->fwd_dropped += (nb_pkt - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_pkt);
	}

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
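
/*
 * RTE_TEST_PMD_RECORD_CORE_CYCLES and RTE_TEST_PMD_RECORD_BURST_STATS
 * are compile-time testpmd options; when they are not defined, the TSC
 * sampling and per-burst statistics above compile out entirely.
 */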

static void
tx_only_begin(__attribute__((unused)) portid_t pi)
{
	uint16_t pkt_data_len;

	pkt_data_len = (uint16_t) (tx_pkt_length - (
					sizeof(struct rte_ether_hdr) +
					sizeof(struct rte_ipv4_hdr) +
					sizeof(struct rte_udp_hdr)));
	setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
}

struct fwd_engine tx_only_engine = {
	.fwd_mode_name  = "txonly",
	.port_fwd_begin = tx_only_begin,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_transmit,
};
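
/*
 * Usage sketch (flags are illustrative; exact EAL options depend on the
 * environment):
 *
 *   testpmd -l 0-1 -n 4 -- --forward-mode=txonly --txonly-multi-flow
 *
 * or interactively:
 *
 *   testpmd> set fwd txonly
 *   testpmd> start
 */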