A new function ``rte_pktmbuf_alloc_bulk()`` has been added to allow the user
to allocate mbufs in bulk with a single call.
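
As a minimal usage sketch (the mempool ``mp``, the helper name and the burst
size below are illustrative, not part of this release):

.. code-block:: c

    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    /* Allocate BURST_SIZE mbufs from the pool in one call instead of
     * looping over rte_pktmbuf_alloc(). The function returns 0 on
     * success; on failure no mbuf is allocated, so nothing needs to
     * be freed. */
    static int
    fill_tx_burst(struct rte_mempool *mp, struct rte_mbuf **pkts)
    {
        if (rte_pktmbuf_alloc_bulk(mp, pkts, BURST_SIZE) != 0)
            return -1;
        return 0;
    }
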
+* **Restored vmxnet3 Tx data ring.**
+
+ The Tx data ring has been shown to improve small packet forwarding
+ performance in a vSphere environment.
+
* **Virtio 1.0.**

  Enabled virtio 1.0 support for the virtio PMD driver.
uint32_t first2fill, avail, dw2;
struct rte_mbuf *txm = tx_pkts[nb_tx];
struct rte_mbuf *m_seg = txm;
+ int copy_size = 0;
/* Is this packet excessively fragmented? If so, drop it. */
if (unlikely(txm->nb_segs > VMXNET3_MAX_TXD_PER_PKT)) {
break;
}
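+ /* Small single-segment packets are copied into the Tx data ring slot
+  * that corresponds to this descriptor, so the device can fetch the
+  * packet from the data ring instead of doing a separate DMA read of
+  * the mbuf. */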
+ if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ struct Vmxnet3_TxDataDesc *tdd;
+
+ tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
+ copy_size = rte_pktmbuf_pkt_len(txm);
+ rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
+ }
+
/* use the previous gen bit for the SOP desc */
dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
first2fill = txq->cmd_ring.next2fill;
transmit buffer size (16K) is greater than
the maximum size of an mbuf segment. */
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
- gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
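+ /* If the packet was copied into the data ring above, point the
+  * descriptor at the matching Vmxnet3_TxDataDesc entry rather than
+  * at the mbuf's data buffer. */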
+ if (copy_size)
+ gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
+ txq->cmd_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc));
+ else
+ gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+
gdesc->dword[2] = dw2 | m_seg->data_len;
gdesc->dword[3] = 0;