+ return 0;
+}
+
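+/*
+ * Scatter the locally built virtio-net header into the guest buffers
+ * described by buf_vec when it does not fit in the first buffer,
+ * logging each written chunk for dirty-page tracking.
+ */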
+static __rte_noinline void
+copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct buf_vector *buf_vec,
+ struct virtio_net_hdr_mrg_rxbuf *hdr)
+{
+ uint64_t len;
+ uint64_t remain = dev->vhost_hlen;
+ uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
+ uint64_t iova = buf_vec->buf_iova;
+
+ while (remain) {
+ len = RTE_MIN(remain,
+ buf_vec->buf_len);
+ dst = buf_vec->buf_addr;
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src,
+ len);
+
+ PRINT_PACKET(dev, (uintptr_t)dst,
+ (uint32_t)len, 0);
+ vhost_log_cache_write_iova(dev, vq,
+ iova, len);
+
+ remain -= len;
+ iova += len;
+ src += len;
+ buf_vec++;
+ }
+}
+
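+/*
+ * Copy an mbuf chain into the guest buffers described by buf_vec,
+ * prepending the virtio-net header. Returns 0 on success, -1 on error.
+ */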
+static __rte_always_inline int
+copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, struct buf_vector *buf_vec,
+ uint16_t nr_vec, uint16_t num_buffers)
+{
+ uint32_t vec_idx = 0;
+ uint32_t mbuf_offset, mbuf_avail;
+ uint32_t buf_offset, buf_avail;
+ uint64_t buf_addr, buf_iova, buf_len;
+ uint32_t cpy_len;
+ uint64_t hdr_addr;
+ struct rte_mbuf *hdr_mbuf;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
+ int error = 0;
+
+ if (unlikely(m == NULL)) {
+ error = -1;
+ goto out;
+ }
+
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+
+ if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
+ error = -1;
+ goto out;
+ }
+
+ hdr_mbuf = m;
+ hdr_addr = buf_addr;
+ if (unlikely(buf_len < dev->vhost_hlen))
+ hdr = &tmp_hdr;
+ else
+ hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
+ dev->vid, num_buffers);
+
+ if (unlikely(buf_len < dev->vhost_hlen)) {
+ buf_offset = dev->vhost_hlen - buf_len;
+ vec_idx++;
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+ buf_avail = buf_len - buf_offset;
+ } else {
+ buf_offset = dev->vhost_hlen;
+ buf_avail = buf_len - dev->vhost_hlen;
+ }
+
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_offset = 0;
+ while (mbuf_avail != 0 || m->next != NULL) {
+ /* done with current buf, get the next one */
+ if (buf_avail == 0) {
+ vec_idx++;
+ if (unlikely(vec_idx >= nr_vec)) {
+ error = -1;
+ goto out;
+ }
+
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+
+ buf_offset = 0;
+ buf_avail = buf_len;
+ }
+
+ /* done with current mbuf, get the next one */
+ if (mbuf_avail == 0) {
+ m = m->next;
+
+ mbuf_offset = 0;
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ }
+
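+ /* Write the virtio-net header once, into the head of the buffer chain */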
+ if (hdr_addr) {
+ virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
+ if (rxvq_is_mergeable(dev))
+ ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
+ num_buffers);
+
+ if (unlikely(hdr == &tmp_hdr)) {
+ copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
+ } else {
+ PRINT_PACKET(dev, (uintptr_t)hdr_addr,
+ dev->vhost_hlen, 0);
+ vhost_log_cache_write_iova(dev, vq,
+ buf_vec[0].buf_iova,
+ dev->vhost_hlen);
+ }
+
+ hdr_addr = 0;
+ }
+
+ cpy_len = RTE_MIN(buf_avail, mbuf_avail);
+
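+ /* Copy large chunks immediately; small ones are queued in the
+ * batch copy array and flushed later by do_data_copy_enqueue().
+ */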
+ if (likely(cpy_len > MAX_BATCH_LEN ||
+ vq->batch_copy_nb_elems >= vq->size)) {
+ rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_cache_write_iova(dev, vq,
+ buf_iova + buf_offset,
+ cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
+ cpy_len, 0);
+ } else {
+ batch_copy[vq->batch_copy_nb_elems].dst =
+ (void *)((uintptr_t)(buf_addr + buf_offset));
+ batch_copy[vq->batch_copy_nb_elems].src =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ batch_copy[vq->batch_copy_nb_elems].log_addr =
+ buf_iova + buf_offset;
+ batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
+ vq->batch_copy_nb_elems++;
+ }
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ buf_avail -= cpy_len;
+ buf_offset += cpy_len;
+ }
+
+out:
+
+ return error;
+}
+
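+/*
+ * Reserve enough descriptors from the packed ring for one packet,
+ * copy the packet in, and record the used entries in the shadow ring.
+ */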
+static __rte_always_inline int
+vhost_enqueue_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf *pkt,
+ struct buf_vector *buf_vec,
+ uint16_t *nr_descs)
+{
+ uint16_t nr_vec = 0;
+ uint16_t avail_idx = vq->last_avail_idx;
+ uint16_t max_tries, tries = 0;
+ uint16_t buf_id = 0;
+ uint32_t len = 0;
+ uint16_t desc_count;
+ uint32_t size = pkt->pkt_len + dev->vhost_hlen;
+ uint16_t num_buffers = 0;
+ uint32_t buffer_len[vq->size];
+ uint16_t buffer_buf_id[vq->size];
+ uint16_t buffer_desc_count[vq->size];
+
+ if (rxvq_is_mergeable(dev))
+ max_tries = vq->size - 1;
+ else
+ max_tries = 1;
+
+ while (size > 0) {
+ /*
+ * If we have tried all available ring items and still
+ * can't get enough buffers, something abnormal has
+ * happened.
+ */
+ if (unlikely(++tries > max_tries))
+ return -1;
+
+ if (unlikely(fill_vec_buf_packed(dev, vq,
+ avail_idx, &desc_count,
+ buf_vec, &nr_vec,
+ &buf_id, &len,
+ VHOST_ACCESS_RW) < 0))
+ return -1;
+
+ len = RTE_MIN(len, size);
+ size -= len;
+
+ buffer_len[num_buffers] = len;
+ buffer_buf_id[num_buffers] = buf_id;
+ buffer_desc_count[num_buffers] = desc_count;
+ num_buffers += 1;
+
+ *nr_descs += desc_count;
+ avail_idx += desc_count;
+ if (avail_idx >= vq->size)
+ avail_idx -= vq->size;
+ }
+
+ if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
+ return -1;
+
+ vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
+ buffer_desc_count, num_buffers);
+
+ return 0;
+}
+
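+/*
+ * Enqueue a burst of packets on a split virtqueue.
+ */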
+static __rte_noinline uint32_t
+virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ uint32_t pkt_idx = 0;
+ uint16_t num_buffers;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t avail_head;
+
+ /*
+ * The ordering between avail index and
+ * desc reads needs to be enforced.
+ */
+ avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
+
+ rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+ uint16_t nr_vec = 0;
+
+ if (unlikely(reserve_avail_buf_split(dev, vq,
+ pkt_len, buf_vec, &num_buffers,
+ avail_head, &nr_vec) < 0)) {
+ VHOST_LOG_DATA(DEBUG,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + num_buffers);
+
+ if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+ buf_vec, nr_vec,
+ num_buffers) < 0) {
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ vq->last_avail_idx += num_buffers;
+ }
+
+ do_data_copy_enqueue(dev, vq);
+
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
+ }
+
+ return pkt_idx;
+}
+
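+/*
+ * Fast enqueue path handling PACKED_BATCH_SIZE single-segment packets
+ * at once. Returns -1 if any precondition fails, so the caller can fall
+ * back to the single-packet path.
+ */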
+static __rte_always_inline int
+virtio_dev_rx_batch_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts)
+{
+ bool wrap_counter = vq->avail_wrap_counter;
+ struct vring_packed_desc *descs = vq->desc_packed;
+ uint16_t avail_idx = vq->last_avail_idx;
+ uint64_t desc_addrs[PACKED_BATCH_SIZE];
+ struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
+ uint32_t buf_offset = dev->vhost_hlen;
+ uint64_t lens[PACKED_BATCH_SIZE];
+ uint16_t ids[PACKED_BATCH_SIZE];
+ uint16_t i;
+
+ if (unlikely(avail_idx & PACKED_BATCH_MASK))
+ return -1;
+
+ if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
+ return -1;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(pkts[i]->next != NULL))
+ return -1;
+ if (unlikely(!desc_is_avail(&descs[avail_idx + i],
+ wrap_counter)))
+ return -1;
+ }
+
+ rte_smp_rmb();
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ lens[i] = descs[avail_idx + i].len;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
+ return -1;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ desc_addrs[i] = vhost_iova_to_vva(dev, vq,
+ descs[avail_idx + i].addr,
+ &lens[i],
+ VHOST_ACCESS_RW);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(!desc_addrs[i]))
+ return -1;
+ if (unlikely(lens[i] != descs[avail_idx + i].len))
+ return -1;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
+ hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
+ (uintptr_t)desc_addrs[i];
+ lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
+
+ vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
+ rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
+ pkts[i]->pkt_len);
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
+ lens[i]);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ ids[i] = descs[avail_idx + i].id;
+
+ vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
+
+ return 0;
+}
+
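+/*
+ * Enqueue a single packet on a packed virtqueue.
+ */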
+static __rte_always_inline int16_t
+virtio_dev_rx_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf *pkt)
+{
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t nr_descs = 0;
+
+ rte_smp_rmb();
+ if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
+ &nr_descs) < 0)) {
+ VHOST_LOG_DATA(DEBUG,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ return -1;
+ }
+
+ VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + nr_descs);
+
+ vq_inc_last_avail_packed(vq, nr_descs);
+
+ return 0;
+}
+
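+/*
+ * Enqueue loop for packed virtqueues: try the batched path first and
+ * fall back to the single-packet path.
+ */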
+static __rte_noinline uint32_t
+virtio_dev_rx_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts,
+ uint32_t count)
+{
+ uint32_t pkt_idx = 0;
+ uint32_t remained = count;
+
+ do {
+ rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
+
+ if (remained >= PACKED_BATCH_SIZE) {
+ if (!virtio_dev_rx_batch_packed(dev, vq,
+ &pkts[pkt_idx])) {
+ pkt_idx += PACKED_BATCH_SIZE;
+ remained -= PACKED_BATCH_SIZE;
+ continue;
+ }
+ }
+
+ if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
+ break;
+ pkt_idx++;
+ remained--;
+
+ } while (pkt_idx < count);
+
+ if (vq->shadow_used_idx) {
+ do_data_copy_enqueue(dev, vq);
+ vhost_flush_enqueue_shadow_packed(dev, vq);
+ }
+
+ if (pkt_idx)
+ vhost_vring_call_packed(dev, vq);
+
+ return pkt_idx;
+}
+
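+/*
+ * Common enqueue entry point: validate the virtqueue, take the access
+ * and IOTLB locks, then dispatch to the packed or split implementation.
+ */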
+static __rte_always_inline uint32_t
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ uint32_t nb_tx = 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->enabled == 0))
+ goto out_access_unlock;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
+ count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+ if (count == 0)
+ goto out;
+
+ if (vq_is_packed(dev))
+ nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
+ else
+ nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
+
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return nb_tx;
+}
+
+uint16_t
+rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return 0;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return 0;
+ }
+
+ return virtio_dev_rx(dev, queue_id, pkts, count);
+}
+
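+/*
+ * Return true if any host offload feature (checksum, TSO, UFO, ECN)
+ * has been negotiated.
+ */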
+static inline bool
+virtio_net_with_host_offload(struct virtio_net *dev)
+{
+ if (dev->features &
+ ((1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO)))
+ return true;
+
+ return false;
+}
+
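+/*
+ * Parse the Ethernet (and optional VLAN) and IP headers to locate the
+ * L4 header, filling in l2_len/l3_len and the IPv4/IPv6 Tx flags.
+ */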
+static void
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
+ void *l3_hdr = NULL;
+ struct rte_ether_hdr *eth_hdr;
+ uint16_t ethertype;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ m->l2_len = sizeof(struct rte_ether_hdr);
+ ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+ if (ethertype == RTE_ETHER_TYPE_VLAN) {
+ struct rte_vlan_hdr *vlan_hdr =
+ (struct rte_vlan_hdr *)(eth_hdr + 1);
+
+ m->l2_len += sizeof(struct rte_vlan_hdr);
+ ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ }
+
+ l3_hdr = (char *)eth_hdr + m->l2_len;
+
+ switch (ethertype) {
+ case RTE_ETHER_TYPE_IPV4:
+ ipv4_hdr = l3_hdr;
+ *l4_proto = ipv4_hdr->next_proto_id;
+ m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+ *l4_hdr = (char *)l3_hdr + m->l3_len;
+ m->ol_flags |= PKT_TX_IPV4;
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ ipv6_hdr = l3_hdr;
+ *l4_proto = ipv6_hdr->proto;
+ m->l3_len = sizeof(struct rte_ipv6_hdr);
+ *l4_hdr = (char *)l3_hdr + m->l3_len;
+ m->ol_flags |= PKT_TX_IPV6;
+ break;
+ default:
+ m->l3_len = 0;
+ *l4_proto = 0;
+ *l4_hdr = NULL;
+ break;
+ }
+}
+
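+/*
+ * Translate the virtio-net header's checksum and GSO requests into the
+ * corresponding mbuf offload flags.
+ */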
+static __rte_always_inline void
+vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+{
+ uint16_t l4_proto = 0;
+ void *l4_hdr = NULL;
+ struct rte_tcp_hdr *tcp_hdr = NULL;
+
+ if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+ return;
+
+ parse_ethernet(m, &l4_proto, &l4_hdr);
+ if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (hdr->csum_start == (m->l2_len + m->l3_len)) {
+ switch (hdr->csum_offset) {
+ case (offsetof(struct rte_tcp_hdr, cksum)):
+ if (l4_proto == IPPROTO_TCP)
+ m->ol_flags |= PKT_TX_TCP_CKSUM;
+ break;
+ case (offsetof(struct rte_udp_hdr, dgram_cksum)):
+ if (l4_proto == IPPROTO_UDP)
+ m->ol_flags |= PKT_TX_UDP_CKSUM;
+ break;
+ case (offsetof(struct rte_sctp_hdr, cksum)):
+ if (l4_proto == IPPROTO_SCTP)
+ m->ol_flags |= PKT_TX_SCTP_CKSUM;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ tcp_hdr = l4_hdr;
+ m->ol_flags |= PKT_TX_TCP_SEG;
+ m->tso_segsz = hdr->gso_size;
+ m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ m->ol_flags |= PKT_TX_UDP_SEG;
+ m->tso_segsz = hdr->gso_size;
+ m->l4_len = sizeof(struct rte_udp_hdr);
+ break;
+ default:
+ VHOST_LOG_DATA(WARNING,
+ "unsupported gso type %u.\n", hdr->gso_type);
+ break;
+ }
+ }
+}
+
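+/*
+ * Gather a virtio-net header that spans several descriptor buffers into
+ * a contiguous local copy.
+ */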
+static __rte_noinline void
+copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
+ struct buf_vector *buf_vec)
+{
+ uint64_t len;
+ uint64_t remain = sizeof(struct virtio_net_hdr);
+ uint64_t src;
+ uint64_t dst = (uint64_t)(uintptr_t)hdr;
+
+ while (remain) {
+ len = RTE_MIN(remain, buf_vec->buf_len);
+ src = buf_vec->buf_addr;
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src, len);
+
+ remain -= len;
+ dst += len;
+ buf_vec++;
+ }
+}
+
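+/*
+ * Copy the descriptor chain described by buf_vec into the mbuf 'm',
+ * allocating extra mbufs from mbuf_pool as needed. In dequeue zero-copy
+ * mode the guest buffer is attached instead of being copied.
+ * Returns 0 on success, -1 on failure.
+ */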
+static __rte_always_inline int
+copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct buf_vector *buf_vec, uint16_t nr_vec,
+ struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
+{
+ uint32_t buf_avail, buf_offset;
+ uint64_t buf_addr, buf_iova, buf_len;
+ uint32_t mbuf_avail, mbuf_offset;
+ uint32_t cpy_len;
+ struct rte_mbuf *cur = m, *prev = m;
+ struct virtio_net_hdr tmp_hdr;
+ struct virtio_net_hdr *hdr = NULL;
+ /* Counter that guards against a looping descriptor chain */
+ uint16_t vec_idx = 0;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ int error = 0;
+
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+
+ if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
+ error = -1;
+ goto out;
+ }
+
+ if (virtio_net_with_host_offload(dev)) {
+ if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
+ /*
+ * No luck, the virtio-net header doesn't fit
+ * in a contiguous virtual area.
+ */
+ copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
+ hdr = &tmp_hdr;
+ } else {
+ hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
+ }
+ }
+
+ /*
+ * A virtio driver normally uses at least 2 desc buffers
+ * for Tx: the first for storing the header, and the
+ * others for storing the data.
+ */
+ if (unlikely(buf_len < dev->vhost_hlen)) {
+ buf_offset = dev->vhost_hlen - buf_len;
+ vec_idx++;
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+ buf_avail = buf_len - buf_offset;
+ } else if (buf_len == dev->vhost_hlen) {
+ if (unlikely(++vec_idx >= nr_vec))
+ goto out;
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+
+ buf_offset = 0;
+ buf_avail = buf_len;
+ } else {
+ buf_offset = dev->vhost_hlen;
+ buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
+ }
+
+ PRINT_PACKET(dev,
+ (uintptr_t)(buf_addr + buf_offset),
+ (uint32_t)buf_avail, 0);
+
+ mbuf_offset = 0;
+ mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+ while (1) {
+ uint64_t hpa;
+
+ cpy_len = RTE_MIN(buf_avail, mbuf_avail);
+
+ /*
+ * A desc buf might span two host physical pages that are
+ * not contiguous. In such a case (gpa_to_hpa returns 0),
+ * data will be copied even though zero copy is enabled.
+ */
+ if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
+ buf_iova + buf_offset, cpy_len)))) {
+ cur->data_len = cpy_len;
+ cur->data_off = 0;
+ cur->buf_addr =
+ (void *)(uintptr_t)(buf_addr + buf_offset);
+ cur->buf_iova = hpa;
+
+ /*
+ * In zero-copy mode, one mbuf can only reference data
+ * from a single desc buffer, or part of one.
+ */
+ mbuf_avail = cpy_len;
+ } else {
+ if (likely(cpy_len > MAX_BATCH_LEN ||
+ vq->batch_copy_nb_elems >= vq->size ||
+ (hdr && cur == m))) {
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset),
+ (void *)((uintptr_t)(buf_addr +
+ buf_offset)),
+ cpy_len);
+ } else {
+ batch_copy[vq->batch_copy_nb_elems].dst =
+ rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset);
+ batch_copy[vq->batch_copy_nb_elems].src =
+ (void *)((uintptr_t)(buf_addr +
+ buf_offset));
+ batch_copy[vq->batch_copy_nb_elems].len =
+ cpy_len;
+ vq->batch_copy_nb_elems++;
+ }
+ }
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ buf_avail -= cpy_len;
+ buf_offset += cpy_len;
+
+ /* This buf has reached its end, get the next one */
+ if (buf_avail == 0) {
+ if (++vec_idx >= nr_vec)
+ break;
+
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+
+ buf_offset = 0;
+ buf_avail = buf_len;
+
+ PRINT_PACKET(dev, (uintptr_t)buf_addr,
+ (uint32_t)buf_avail, 0);
+ }
+
+ /*
+ * This mbuf has reached its end, allocate a new one
+ * to hold more data.
+ */
+ if (mbuf_avail == 0) {
+ cur = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(cur == NULL)) {
+ VHOST_LOG_DATA(ERR, "Failed to "
+ "allocate memory for mbuf.\n");
+ error = -1;
+ goto out;
+ }
+ if (unlikely(dev->dequeue_zero_copy))
+ rte_mbuf_refcnt_update(cur, 1);
+
+ prev->next = cur;
+ prev->data_len = mbuf_offset;
+ m->nb_segs += 1;
+ m->pkt_len += mbuf_offset;
+ prev = cur;
+
+ mbuf_offset = 0;
+ mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+ }
+ }
+
+ prev->data_len = mbuf_offset;
+ m->pkt_len += mbuf_offset;
+
+ if (hdr)
+ vhost_dequeue_offload(hdr, m);
+
+out:
+
+ return error;
+}
+
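+/*
+ * Find a free zero-copy mbuf slot, searching circularly from
+ * last_zmbuf_idx.
+ */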
+static __rte_always_inline struct zcopy_mbuf *
+get_zmbuf(struct vhost_virtqueue *vq)
+{
+ uint16_t i;
+ uint16_t last;
+ int tries = 0;
+
+ /* search [last_zmbuf_idx, zmbuf_size) */
+ i = vq->last_zmbuf_idx;
+ last = vq->zmbuf_size;
+
+again:
+ for (; i < last; i++) {
+ if (vq->zmbufs[i].in_use == 0) {
+ vq->last_zmbuf_idx = i + 1;
+ vq->zmbufs[i].in_use = 1;
+ return &vq->zmbufs[i];
+ }
+ }
+
+ tries++;
+ if (tries == 1) {
+ /* search [0, last_zmbuf_idx) */
+ i = 0;
+ last = vq->last_zmbuf_idx;
+ goto again;
+ }
+
+ return NULL;
+}
+
+static void
+virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
+{
+ rte_free(opaque);
+}
+
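+/*
+ * Allocate an external buffer able to hold 'size' bytes plus headroom
+ * and attach it to 'pkt'.
+ */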
+static int
+virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
+{
+ struct rte_mbuf_ext_shared_info *shinfo = NULL;
+ uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
+ uint16_t buf_len;
+ rte_iova_t iova;
+ void *buf;
+
+ /* Try to use the pkt mbuf's own buffer to store shinfo, to reduce the
+ * amount of memory required; otherwise store shinfo in the newly
+ * allocated buffer.
+ */
+ if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
+ shinfo = rte_pktmbuf_mtod(pkt,
+ struct rte_mbuf_ext_shared_info *);
+ else {
+ total_len += sizeof(*shinfo) + sizeof(uintptr_t);
+ total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
+ }
+
+ if (unlikely(total_len > UINT16_MAX))
+ return -ENOSPC;
+
+ buf_len = total_len;
+ buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
+ if (unlikely(buf == NULL))
+ return -ENOMEM;
+
+ /* Initialize shinfo */
+ if (shinfo) {
+ shinfo->free_cb = virtio_dev_extbuf_free;
+ shinfo->fcb_opaque = buf;
+ rte_mbuf_ext_refcnt_set(shinfo, 1);
+ } else {
+ shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
+ virtio_dev_extbuf_free, buf);
+ if (unlikely(shinfo == NULL)) {
+ rte_free(buf);
+ VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+ return -1;
+ }
+ }
+
+ iova = rte_malloc_virt2iova(buf);
+ rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
+ rte_pktmbuf_reset_headroom(pkt);
+
+ return 0;
+}
+
+/*
+ * Allocate a host-supported pktmbuf.
+ */
+static __rte_always_inline struct rte_mbuf *
+virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
+ uint32_t data_len)
+{
+ struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
+
+ if (unlikely(pkt == NULL)) {
+ VHOST_LOG_DATA(ERR,
+ "Failed to allocate memory for mbuf.\n");
+ return NULL;
+ }
+
+ if (rte_pktmbuf_tailroom(pkt) >= data_len)
+ return pkt;
+
+ /* attach an external buffer if supported */
+ if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+ return pkt;
+
+ /* check if chained buffers are allowed */
+ if (!dev->linearbuf)
+ return pkt;
+
+ /* Data doesn't fit into the buffer and the host supports
+ * only linear buffers.
+ */
+ rte_pktmbuf_free(pkt);
+
+ return NULL;
+}
+
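+/*
+ * Dequeue a burst of packets from a split virtqueue into 'pkts'.
+ */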
+static __rte_noinline uint16_t
+virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+ uint16_t i;
+ uint16_t free_entries;
+ uint16_t dropped = 0;
+ static bool allocerr_warned;
+
+ if (unlikely(dev->dequeue_zero_copy)) {
+ struct zcopy_mbuf *zmbuf, *next;
+
+ for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+ zmbuf != NULL; zmbuf = next) {
+ next = TAILQ_NEXT(zmbuf, next);
+
+ if (mbuf_is_consumed(zmbuf->mbuf)) {
+ update_shadow_used_ring_split(vq,
+ zmbuf->desc_idx, 0);
+ TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+ restore_mbuf(zmbuf->mbuf);
+ rte_pktmbuf_free(zmbuf->mbuf);
+ put_zmbuf(zmbuf);
+ vq->nr_zmbuf -= 1;
+ }
+ }
+
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
+ }
+ }
+
+ /*
+ * The ordering between avail index and
+ * desc reads needs to be enforced.
+ */
+ free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
+ vq->last_avail_idx;
+ if (free_entries == 0)
+ return 0;
+
+ rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+
+ count = RTE_MIN(count, MAX_PKT_BURST);
+ count = RTE_MIN(count, free_entries);
+ VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
+ dev->vid, count);
+
+ for (i = 0; i < count; i++) {
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t head_idx;
+ uint32_t buf_len;
+ uint16_t nr_vec = 0;
+ int err;
+
+ if (unlikely(fill_vec_buf_split(dev, vq,
+ vq->last_avail_idx + i,
+ &nr_vec, buf_vec,
+ &head_idx, &buf_len,
+ VHOST_ACCESS_RO) < 0))
+ break;
+
+ if (likely(dev->dequeue_zero_copy == 0))
+ update_shadow_used_ring_split(vq, head_idx, 0);
+
+ pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
+ if (unlikely(pkts[i] == NULL)) {
+ /*
+ * mbuf allocation fails for jumbo packets when external
+ * buffer allocation is not allowed and a linear buffer
+ * is required. Drop this packet.
+ */
+ if (!allocerr_warned) {
+ VHOST_LOG_DATA(ERR,
+ "Failed mbuf alloc of size %d from %s on %s.\n",
+ buf_len, mbuf_pool->name, dev->ifname);
+ allocerr_warned = true;
+ }
+ dropped += 1;
+ i++;
+ break;
+ }
+
+ err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
+ mbuf_pool);
+ if (unlikely(err)) {
+ rte_pktmbuf_free(pkts[i]);
+ if (!allocerr_warned) {
+ VHOST_LOG_DATA(ERR,
+ "Failed to copy desc to mbuf on %s.\n",
+ dev->ifname);
+ allocerr_warned = true;
+ }
+ dropped += 1;
+ i++;
+ break;