#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_memcpy.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
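+/*
+ * Both templates are filled in once when the tx-only engine starts
+ * and are then copied into the head of every generated packet.
+ */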
-static inline struct rte_mbuf *
-tx_mbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return (m);
-}
-
static void
copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
unsigned offset)
@@ ... @@ pkt_burst_transmit(struct fwd_stream *fs)
uint16_t nb_tx;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
+ uint32_t retry;
uint64_t ol_flags = 0;
uint8_t i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
uint64_t core_cycles;
#endif
+ uint32_t nb_segs, pkt_len;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
start_tsc = rte_rdtsc();
#endif
@@ ... @@
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
ol_flags = PKT_TX_VLAN_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
ol_flags |= PKT_TX_QINQ_PKT;
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+ ol_flags |= PKT_TX_MACSEC;
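+ /*
+ * These flags only request the offloads; the PMD does the actual
+ * work, taking the inner tag from pkt->vlan_tci and, for QinQ,
+ * the outer tag from pkt->vlan_tci_outer (both set per packet
+ * below).
+ */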
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
- pkt = tx_mbuf_alloc(mbp);
+ pkt = rte_mbuf_raw_alloc(mbp);
if (pkt == NULL) {
nomore_mbuf:
if (nb_pkt == 0)
return;
break;
}
+
+ /*
+ * Raw allocation is fast because it skips mbuf reinitialization,
+ * but a previous user of the mbuf may have consumed headroom and
+ * decremented data_off, so reset it to the default value here.
+ */
+ rte_pktmbuf_reset_headroom(pkt);
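+ /*
+ * For reference, rte_pktmbuf_reset_headroom() amounts to
+ * (assuming the standard mbuf layout):
+ *   m->data_off = (uint16_t)RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
+ */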
pkt->data_len = tx_pkt_seg_lengths[0];
pkt_seg = pkt;
- for (i = 1; i < tx_pkt_nb_segs; i++) {
- pkt_seg->next = tx_mbuf_alloc(mbp);
+ if (tx_pkt_split == TX_PKT_SPLIT_RND)
+ nb_segs = random() % tx_pkt_nb_segs + 1;
+ else
+ nb_segs = tx_pkt_nb_segs;
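+ /*
+ * TX_PKT_SPLIT_RND picks nb_segs pseudo-randomly from
+ * [1, tx_pkt_nb_segs]; otherwise every packet uses the fixed
+ * segment count configured via the testpmd "set txpkts" command.
+ */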
+ pkt_len = pkt->data_len;
+ for (i = 1; i < nb_segs; i++) {
+ pkt_seg->next = rte_mbuf_raw_alloc(mbp);
if (pkt_seg->next == NULL) {
pkt->nb_segs = i;
rte_pktmbuf_free(pkt);
goto nomore_mbuf;
}
pkt_seg = pkt_seg->next;
pkt_seg->data_len = tx_pkt_seg_lengths[i];
+ pkt_len += pkt_seg->data_len;
}
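+ /*
+ * pkt_len now equals the sum of data_len over all segments;
+ * storing it as the packet-level length below preserves the mbuf
+ * invariant pkt_len == sum(data_len) that PMDs rely on.
+ */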
pkt_seg->next = NULL; /* Last segment of packet. */
@@ ... @@
/*
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
- pkt->nb_segs = tx_pkt_nb_segs;
- pkt->pkt_len = tx_pkt_length;
+ pkt->nb_segs = nb_segs;
+ pkt->pkt_len = pkt_len;
pkt->ol_flags = ol_flags;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+ /*
+ * Retry if the PMD queued fewer packets than requested, e.g.
+ * because the TX descriptor ring is full: wait briefly, then
+ * resubmit the leftover mbufs until they are all sent or the
+ * retry budget is exhausted.
+ */
+ if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_pkt - nb_tx);
+ }
+ }
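+ /*
+ * burst_tx_delay_time and burst_tx_retry_num are global testpmd
+ * knobs, adjustable at runtime from the testpmd CLI. Note that
+ * rte_delay_us() busy-waits, so retrying trades lcore cycles for
+ * fewer dropped packets.
+ */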
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS