X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Ftxonly.c;h=c16b6b47e7716c208dd249e6e9349380952c8b77;hb=a7c528e5d71ff3f569898d268f9de129fdfc152b;hp=f8027f17af66b95c7c684b561bc0c10a7f5b19c1;hpb=82be8d544253a4b5c49b778babf717e5e63f3dc1;p=dpdk.git

diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index f8027f17af..c16b6b47e7 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include 
@@ -49,18 +20,14 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include 
-#include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -69,33 +36,27 @@
 #include 
 #include 
 #include 
+#include 
 #include "testpmd.h"
 
-#define UDP_SRC_PORT 1024
-#define UDP_DST_PORT 1024
+/* use RFC863 Discard Protocol */
+uint16_t tx_udp_src_port = 9;
+uint16_t tx_udp_dst_port = 9;
 
-#define IP_SRC_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 1)
-#define IP_DST_ADDR ((192U << 24) | (168 << 16) | (0 << 8) | 2)
+/* use RFC5735 / RFC2544 reserved network test addresses */
+uint32_t tx_ip_src_addr = (192U << 24) | (18 << 16) | (0 << 8) | 1;
+uint32_t tx_ip_dst_addr = (192U << 24) | (18 << 16) | (0 << 8) | 2;
 
 #define IP_DEFTTL 64 /* from RFC 1340. */
 #define IP_VERSION 0x40
 #define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
 #define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
-static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
 static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets.
  */
 
-static inline struct rte_mbuf *
-tx_mbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return (m);
-}
-
 static void
 copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
 		unsigned offset)
@@ -117,6 +78,7 @@ copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
 		buf = ((char*) buf + copy_len);
 		seg = seg->next;
 		seg_buf = rte_pktmbuf_mtod(seg, char *);
+		copy_len = seg->data_len;
 	}
 	rte_memcpy(seg_buf, buf, (size_t) len);
 }
@@ -133,7 +95,7 @@ copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 }
 
 static void
-setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
+setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
 			struct udp_hdr *udp_hdr,
 			uint16_t pkt_data_len)
 {
@@ -145,15 +107,15 @@ setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
 	 * Initialize UDP header.
 	 */
 	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
-	udp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT);
-	udp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT);
+	udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
+	udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
 	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
 	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
 
 	/*
 	 * Initialize IP header.
 	 */
-	pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
+	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
 	ip_hdr->version_ihl = IP_VHL_DEF;
 	ip_hdr->type_of_service = 0;
 	ip_hdr->fragment_offset = 0;
@@ -161,8 +123,8 @@ setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
 	ip_hdr->next_proto_id = IPPROTO_UDP;
 	ip_hdr->packet_id = 0;
 	ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
-	ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR);
-	ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR);
+	ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr);
+	ip_hdr->dst_addr = rte_cpu_to_be_32(tx_ip_dst_addr);
 
 	/*
 	 * Compute IP header checksum.
@@ -188,6 +150,80 @@ setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
 	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
 }
 
+static inline bool
+pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
+		const uint16_t vlan_tci_outer, const uint64_t ol_flags)
+{
+	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
+	uint8_t ip_var = RTE_PER_LCORE(_ip_var);
+	struct rte_mbuf *pkt_seg;
+	uint32_t nb_segs, pkt_len;
+	uint8_t i;
+
+	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
+		nb_segs = random() % tx_pkt_nb_segs + 1;
+	else
+		nb_segs = tx_pkt_nb_segs;
+
+	if (nb_segs > 1) {
+		if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1))
+			return false;
+	}
+
+	rte_pktmbuf_reset_headroom(pkt);
+	pkt->data_len = tx_pkt_seg_lengths[0];
+	pkt->ol_flags = ol_flags;
+	pkt->vlan_tci = vlan_tci;
+	pkt->vlan_tci_outer = vlan_tci_outer;
+	pkt->l2_len = sizeof(struct rte_ether_hdr);
+	pkt->l3_len = sizeof(struct rte_ipv4_hdr);
+
+	pkt_len = pkt->data_len;
+	pkt_seg = pkt;
+	for (i = 1; i < nb_segs; i++) {
+		pkt_seg->next = pkt_segs[i - 1];
+		pkt_seg = pkt_seg->next;
+		pkt_seg->data_len = tx_pkt_seg_lengths[i];
+		pkt_len += pkt_seg->data_len;
+	}
+	pkt_seg->next = NULL; /* Last segment of packet. */
+	/*
+	 * Copy headers in first packet segment(s).
+	 */
+	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
+	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
+			sizeof(struct rte_ether_hdr));
+	if (txonly_multi_flow) {
+		struct rte_ipv4_hdr *ip_hdr;
+		uint32_t addr;
+
+		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+				struct rte_ipv4_hdr *,
+				sizeof(struct rte_ether_hdr));
+		/*
+		 * Generate multiple flows by varying the IP source address.
+		 * This spreads the packets across RX queues via RSS on the
+		 * receiving side, if it is enabled, so that txonly mode can
+		 * serve as a decent packet generator for a developer's quick
+		 * performance regression test.
+		 */
+		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
+		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+	}
+	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
+			sizeof(struct rte_ether_hdr) +
+			sizeof(struct rte_ipv4_hdr));
+	/*
+	 * Complete first mbuf of packet and append it to the
+	 * burst of packets to be transmitted.
+	 */
+	pkt->nb_segs = nb_segs;
+	pkt->pkt_len = pkt_len;
+
+	return true;
+}
+
 /*
  * Transmit a burst of multi-segments packets.
  */
@@ -197,14 +233,14 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	struct rte_port *txp;
 	struct rte_mbuf *pkt;
-	struct rte_mbuf *pkt_seg;
 	struct rte_mempool *mbp;
-	struct ether_hdr eth_hdr;
+	struct rte_ether_hdr eth_hdr;
 	uint16_t nb_tx;
 	uint16_t nb_pkt;
-	uint16_t vlan_tci;
+	uint16_t vlan_tci, vlan_tci_outer;
+	uint32_t retry;
 	uint64_t ol_flags = 0;
-	uint8_t i;
+	uint64_t tx_offloads;
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	uint64_t start_tsc;
 	uint64_t end_tsc;
@@ -217,63 +253,72 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	mbp = current_fwd_lcore()->mbp;
 	txp = &ports[fs->tx_port];
+	tx_offloads = txp->dev_conf.txmode.offloads;
 	vlan_tci = txp->tx_vlan_id;
-	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
+	vlan_tci_outer = txp->tx_vlan_id_outer;
+	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
 		ol_flags = PKT_TX_VLAN_PKT;
-	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
-		pkt = tx_mbuf_alloc(mbp);
-		if (pkt == NULL) {
-		nomore_mbuf:
-			if (nb_pkt == 0)
-				return;
-			break;
+	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+		ol_flags |= PKT_TX_QINQ_PKT;
+	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
+		ol_flags |= PKT_TX_MACSEC;
+
+	/*
+	 * Initialize Ethernet header.
+	 */
+	rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
+	rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
+	eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
+
+	if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
+				nb_pkt_per_burst) == 0) {
+		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+			if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
+							&eth_hdr, vlan_tci,
+							vlan_tci_outer,
+							ol_flags))) {
+				rte_mempool_put_bulk(mbp,
+						(void **)&pkts_burst[nb_pkt],
+						nb_pkt_per_burst - nb_pkt);
+				break;
+			}
 		}
-		pkt->data_len = tx_pkt_seg_lengths[0];
-		pkt_seg = pkt;
-		for (i = 1; i < tx_pkt_nb_segs; i++) {
-			pkt_seg->next = tx_mbuf_alloc(mbp);
-			if (pkt_seg->next == NULL) {
-				pkt->nb_segs = i;
+	} else {
+		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+			pkt = rte_mbuf_raw_alloc(mbp);
+			if (pkt == NULL)
+				break;
+			if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
+							vlan_tci,
+							vlan_tci_outer,
+							ol_flags))) {
 				rte_pktmbuf_free(pkt);
-				goto nomore_mbuf;
+				break;
 			}
-			pkt_seg = pkt_seg->next;
-			pkt_seg->data_len = tx_pkt_seg_lengths[i];
+			pkts_burst[nb_pkt] = pkt;
 		}
-		pkt_seg->next = NULL; /* Last segment of packet. */
-
-		/*
-		 * Initialize Ethernet header.
-		 */
-		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],&eth_hdr.d_addr);
-		ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
-		eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	}
 
-		/*
-		 * Copy headers in first packet segment(s).
-		 */
-		copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
-		copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
-				sizeof(struct ether_hdr));
-		copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
-				sizeof(struct ether_hdr) +
-				sizeof(struct ipv4_hdr));
+	if (nb_pkt == 0)
+		return;
 
-		/*
-		 * Complete first mbuf of packet and append it to the
-		 * burst of packets to be transmitted.
-		 */
-		pkt->nb_segs = tx_pkt_nb_segs;
-		pkt->pkt_len = tx_pkt_length;
-		pkt->ol_flags = ol_flags;
-		pkt->vlan_tci = vlan_tci;
-		pkt->l2_len = sizeof(struct ether_hdr);
-		pkt->l3_len = sizeof(struct ipv4_hdr);
-		pkts_burst[nb_pkt] = pkt;
-	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+	/*
+	 * Retry if necessary
+	 */
+	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
+		retry = 0;
+		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
+			rte_delay_us(burst_tx_delay_time);
+			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+					&pkts_burst[nb_tx], nb_pkt - nb_tx);
+		}
+	}
 	fs->tx_packets += nb_tx;
+	if (txonly_multi_flow)
+		RTE_PER_LCORE(_ip_var) += nb_tx;
+
 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
 	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
 #endif
@@ -302,9 +347,10 @@ tx_only_begin(__attribute__((unused)) portid_t pi)
 {
 	uint16_t pkt_data_len;
 
-	pkt_data_len = (uint16_t) (tx_pkt_length - (sizeof(struct ether_hdr) +
-					    sizeof(struct ipv4_hdr) +
-					    sizeof(struct udp_hdr)));
+	pkt_data_len = (uint16_t) (tx_pkt_length - (
+					sizeof(struct rte_ether_hdr) +
+					sizeof(struct rte_ipv4_hdr) +
+					sizeof(struct udp_hdr)));
 
 	setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
 }
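A note on the multi-flow change in pkt_burst_prepare(): every generated source address is derived from tx_ip_dst_addr, the per-lcore _ip_var counter and the sending lcore id, and the counter is only advanced by nb_tx after each burst. The sketch below is not part of the patch; it is a standalone, plain-C illustration of the same address arithmetic, where ip_var and lcore_id are local stand-ins for RTE_PER_LCORE(_ip_var) and rte_lcore_id(), and each loop iteration stands for one increment of that counter.

/*
 * Standalone sketch of the txonly multi-flow address derivation.
 * ip_var and lcore_id are stand-ins for RTE_PER_LCORE(_ip_var) and
 * rte_lcore_id(); base mirrors tx_ip_dst_addr from the patch.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t base = (192U << 24) | (18 << 16) | (0 << 8) | 2;
	const unsigned int lcore_id = 1;	/* hypothetical sending lcore */
	uint8_t ip_var;

	for (ip_var = 0; ip_var < 4; ip_var++) {
		/* same expression as in pkt_burst_prepare() */
		uint32_t addr = (base | ((uint32_t)ip_var << 8)) + lcore_id;

		printf("src %u.%u.%u.%u\n",
		       (unsigned int)(addr >> 24),
		       (unsigned int)((addr >> 16) & 0xff),
		       (unsigned int)((addr >> 8) & 0xff),
		       (unsigned int)(addr & 0xff));
	}
	return 0;
}

With these assumed values the program prints 192.18.0.3, 192.18.1.3, 192.18.2.3 and 192.18.3.3: the third octet changes as the counter advances and the last octet differs per lcore, which is enough to change the RSS hash and spread the flows across the receiver's queues.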