#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
+#include <rte_net.h>
#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
return 0;
}
-static uint16_t
-get_psd_sum(void *l3_hdr, uint64_t ol_flags)
-{
- if (ol_flags & PKT_TX_IPV4)
- return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
- else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
- return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
-}
-
static void virtio_tx_offload(struct rte_mbuf *m)
{
+ struct rte_net_hdr_lens hdr_lens;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
+ uint32_t ptype;
void *l3_hdr;
- struct rte_ipv4_hdr *ipv4_hdr = NULL;
- struct rte_tcp_hdr *tcp_hdr = NULL;
- struct rte_ether_hdr *eth_hdr =
- rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- l3_hdr = (char *)eth_hdr + m->l2_len;
+ ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+ m->l2_len = hdr_lens.l2_len;
+ m->l3_len = hdr_lens.l3_len;
+ m->l4_len = hdr_lens.l4_len;
- if (m->ol_flags & PKT_TX_IPV4) {
+ l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
+ tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
+ m->l2_len + m->l3_len);
+
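+ /* Called only for packets flagged PKT_RX_LRO: request TSO and fill in the TCP pseudo-header checksum. */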
+ m->ol_flags |= PKT_TX_TCP_SEG;
+ if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
+ m->ol_flags |= PKT_TX_IPV4;
+ m->ol_flags |= PKT_TX_IP_CKSUM;
ipv4_hdr = l3_hdr;
ipv4_hdr->hdr_checksum = 0;
- m->ol_flags |= PKT_TX_IP_CKSUM;
+ tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
+ } else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
+ m->ol_flags |= PKT_TX_IPV6;
+ tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
}
-
- tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len);
- tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
static __rte_always_inline void
m->vlan_tci = vlan_tag;
}
- if (m->ol_flags & PKT_TX_TCP_SEG)
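+ /* vhost now flags guest GSO packets with PKT_RX_LRO; prepare TSO so they are resegmented on transmit. */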
+ if (m->ol_flags & PKT_RX_LRO)
virtio_tx_offload(m);
tx_q->m_table[tx_q->len++] = m;
int ret, i;
uint16_t portid;
static pthread_t tid;
- uint64_t flags = 0;
+ uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
signal(SIGINT, sigint_handler);
#include <rte_mbuf.h>
#include <rte_memcpy.h>
+#include <rte_net.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
}
static __rte_always_inline void
-vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
uint16_t l4_proto = 0;
void *l4_hdr = NULL;
struct rte_tcp_hdr *tcp_hdr = NULL;
- if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
- return;
-
parse_ethernet(m, &l4_proto, &l4_hdr);
if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
if (hdr->csum_start == (m->l2_len + m->l3_len)) {
}
}
+static __rte_always_inline void
+vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
+ bool legacy_ol_flags)
+{
+ struct rte_net_hdr_lens hdr_lens;
+ int l4_supported = 0;
+ uint32_t ptype;
+
+ if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+ return;
+
+ if (legacy_ol_flags) {
+ vhost_dequeue_offload_legacy(hdr, m);
+ return;
+ }
+
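+ /* vhost does not verify the IP checksum of guest packets, so report its status as unknown. */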
+ m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+
+ ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+ m->packet_type = ptype;
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
+ (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
+ (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
+ l4_supported = 1;
+
+ /* According to the Virtio 1.1 spec, the device only needs to look at
+ * VIRTIO_NET_HDR_F_NEEDS_CSUM in the packet transmission path.
+ * This differs from the path that processes incoming packets, where
+ * the driver can rely on the VIRTIO_NET_HDR_F_DATA_VALID flag set by
+ * the device.
+ *
+ * 5.1.6.2.1 Driver Requirements: Packet Transmission
+ * The driver MUST NOT set the VIRTIO_NET_HDR_F_DATA_VALID and
+ * VIRTIO_NET_HDR_F_RSC_INFO bits in flags.
+ *
+ * 5.1.6.2.2 Device Requirements: Packet Transmission
+ * The device MUST ignore flag bits that it does not recognize.
+ */
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ uint32_t hdrlen;
+
+ hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
+ if (hdr->csum_start <= hdrlen && l4_supported != 0) {
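+ /* The guest requested L4 checksum offload: the data is trusted but the checksum field is not yet computed. */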
+ m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ } else {
+ /* Unknown proto or tunnel, do sw cksum. We can assume
+ * the cksum field is in the first segment since the
+ * buffers we provided to the host are large enough.
+ * In case of SCTP, this will be wrong since it's a CRC
+ * but there's nothing we can do.
+ */
+ uint16_t csum = 0, off;
+
+ if (rte_raw_cksum_mbuf(m, hdr->csum_start,
+ rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0)
+ return;
+ if (likely(csum != 0xffff))
+ csum = ~csum;
+ off = hdr->csum_offset + hdr->csum_start;
+ if (rte_pktmbuf_data_len(m) >= off + 1)
+ *rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
+ }
+ }
+
+ if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ if (hdr->gso_size == 0)
+ return;
+
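+ /* Report guest GSO packets as LRO and pass the guest-provided segment size in tso_segsz. */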
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
+ break;
+ m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->tso_segsz = hdr->gso_size;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
+ break;
+ m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->tso_segsz = hdr->gso_size;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static __rte_noinline void
copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
struct buf_vector *buf_vec)
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec, uint16_t nr_vec,
- struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
+ struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
+ bool legacy_ol_flags)
{
uint32_t buf_avail, buf_offset;
uint64_t buf_addr, buf_len;
m->pkt_len += mbuf_offset;
if (hdr)
- vhost_dequeue_offload(hdr, m);
+ vhost_dequeue_offload(hdr, m, legacy_ol_flags);
out:
return pkt;
}
-static __rte_noinline uint16_t
+__rte_always_inline
+static uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
+ bool legacy_ol_flags)
{
uint16_t i;
uint16_t free_entries;
}
err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
- mbuf_pool);
+ mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
rte_pktmbuf_free(pkts[i]);
if (!allocerr_warned) {
return (i - dropped);
}
+__rte_noinline
+static uint16_t
+virtio_dev_tx_split_legacy(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
+}
+
+__rte_noinline
+static uint16_t
+virtio_dev_tx_split_compliant(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
+}
+
static __rte_always_inline int
vhost_reserve_avail_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
static __rte_always_inline int
virtio_dev_tx_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
- struct rte_mbuf **pkts)
+ struct rte_mbuf **pkts,
+ bool legacy_ol_flags)
{
uint16_t avail_idx = vq->last_avail_idx;
uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
if (virtio_net_with_host_offload(dev)) {
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i]);
+ vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
}
}
struct rte_mempool *mbuf_pool,
struct rte_mbuf *pkts,
uint16_t *buf_id,
- uint16_t *desc_count)
+ uint16_t *desc_count,
+ bool legacy_ol_flags)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint32_t buf_len;
}
err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
- mbuf_pool);
+ mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
if (!allocerr_warned) {
VHOST_LOG_DATA(ERR,
virtio_dev_tx_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool,
- struct rte_mbuf *pkts)
+ struct rte_mbuf *pkts,
+ bool legacy_ol_flags)
{
uint16_t buf_id, desc_count = 0;
int ret;
ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
- &desc_count);
+ &desc_count, legacy_ol_flags);
if (likely(desc_count > 0)) {
if (virtio_net_is_inorder(dev))
return ret;
}
-static __rte_noinline uint16_t
+__rte_always_inline
+static uint16_t
virtio_dev_tx_packed(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq,
struct rte_mempool *mbuf_pool,
struct rte_mbuf **__rte_restrict pkts,
- uint32_t count)
+ uint32_t count,
+ bool legacy_ol_flags)
{
uint32_t pkt_idx = 0;
if (count - pkt_idx >= PACKED_BATCH_SIZE) {
if (!virtio_dev_tx_batch_packed(dev, vq,
- &pkts[pkt_idx])) {
+ &pkts[pkt_idx],
+ legacy_ol_flags)) {
pkt_idx += PACKED_BATCH_SIZE;
continue;
}
}
if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
- pkts[pkt_idx]))
+ pkts[pkt_idx],
+ legacy_ol_flags))
break;
pkt_idx++;
} while (pkt_idx < count);
return pkt_idx;
}
+__rte_noinline
+static uint16_t
+virtio_dev_tx_packed_legacy(struct virtio_net *dev,
+ struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+{
+ return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
+}
+
+__rte_noinline
+static uint16_t
+virtio_dev_tx_packed_compliant(struct virtio_net *dev,
+ struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **__rte_restrict pkts, uint32_t count)
+{
+ return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
+}
+
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
count -= 1;
}
- if (vq_is_packed(dev))
- count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
- else
- count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
+ if (vq_is_packed(dev)) {
+ if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
+ count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
+ else
+ count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
+ } else {
+ if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
+ count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
+ else
+ count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
+ }
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))