+ if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+ buf_vec, nr_vec, num_buffers,
+ src_iovec, dst_iovec, src_it, dst_it) < 0) {
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
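+ /* bookkeeping slot for this packet; the info ring wraps with the vq size */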
+ slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
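+ /*
+ * A filled source iov iterator means the copy is queued for the
+ * async device; an empty one means the packet goes through the
+ * synchronous CPU copy path and is only tracked for completion.
+ */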
+ if (src_it->count) {
+ async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it);
+ pkt_burst_idx++;
+ pkts_info[slot_idx].descs = num_buffers;
+ pkts_info[slot_idx].segs = src_it->nr_segs;
+ /* count this packet's iov entries before advancing the iterators */
+ segs_await += src_it->nr_segs;
+ src_iovec += src_it->nr_segs;
+ dst_iovec += dst_it->nr_segs;
+ src_it += 2;
+ dst_it += 2;
+ } else {
+ pkts_info[slot_idx].info = num_buffers;
+ vq->async_pkts_inflight_n++;
+ }
+
+ vq->last_avail_idx += num_buffers;
+
+ /*
+ * Conditions that trigger an async device transfer:
+ * - the number of buffered packets reaches the transfer threshold
+ * - this is the last packet of the burst enqueue
+ * - the number of unused async iov entries drops below the
+ * max vhost vector size
+ */
+ if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+ (pkt_idx == count - 1 && pkt_burst_idx) ||
+ (VHOST_MAX_ASYNC_VEC / 2 - segs_await <
+ BUF_VECTOR_MAX)) {
+ n_pkts = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ src_iovec = vec_pool;
+ dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+ src_it = it_pool;
+ dst_it = it_pool + 1;
+ segs_await = 0;
+ vq->async_pkts_inflight_n += pkt_burst_idx;
+
+ if (unlikely(n_pkts < (int)pkt_burst_idx)) {
+ /*
+ * Record the number of failed packets here; the actual
+ * error handling happens when the application polls for
+ * completions.
+ */
+ pkt_err = pkt_burst_idx - n_pkts;
+ pkt_burst_idx = 0;
+ break;
+ }
+
+ pkt_burst_idx = 0;
+ }
+ }
+
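+ /* submit whatever is still buffered once the burst loop has finished */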
+ if (pkt_burst_idx) {
+ n_pkts = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ vq->async_pkts_inflight_n += pkt_burst_idx;
+
+ if (unlikely(n_pkts < (int)pkt_burst_idx))
+ pkt_err = pkt_burst_idx - n_pkts;
+ }
+
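+ /* flush the batched CPU copies of the packets that were not offloaded */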
+ do_data_copy_enqueue(dev, vq);
+
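+ /*
+ * Some packets were rejected by the async device: walk backwards from
+ * the last slot and undo the ring updates of the affected packets.
+ */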
+ while (unlikely(pkt_err && pkt_idx)) {
+ if (pkts_info[slot_idx].segs)
+ pkt_err--;
+ vq->last_avail_idx -= pkts_info[slot_idx].descs;
+ vq->shadow_used_idx -= pkts_info[slot_idx].descs;
+ vq->async_pkts_inflight_n--;
+ slot_idx = (slot_idx - 1) & (vq->size - 1);
+ pkt_idx--;
+ }
+
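+ /* record the enqueued mbufs in the pending ring, wrapping at the ring end */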
+ n_free_slot = vq->size - vq->async_pkts_idx;
+ if (n_free_slot > pkt_idx) {
+ rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
+ pkts, pkt_idx * sizeof(uintptr_t));
+ vq->async_pkts_idx += pkt_idx;
+ } else {
+ rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
+ pkts, n_free_slot * sizeof(uintptr_t));
+ rte_memcpy(&vq->async_pkts_pending[0],
+ &pkts[n_free_slot],
+ (pkt_idx - n_free_slot) * sizeof(uintptr_t));
+ vq->async_pkts_idx = pkt_idx - n_free_slot;
+ }
+
+ if (likely(vq->shadow_used_idx))
+ async_flush_shadow_used_ring_split(dev, vq);
+
+ return pkt_idx;
+}
+
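+ /*
+ * Poll for enqueues previously submitted with
+ * rte_vhost_submit_enqueue_burst() and return the mbufs whose async
+ * copies have completed, so the application can reclaim them, e.g.:
+ *   n = rte_vhost_poll_enqueue_completed(vid, queue_id, pkts, count);
+ *   rte_pktmbuf_free_bulk(pkts, n);
+ */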
+uint16_t
+rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
+ uint16_t start_idx, pkts_idx, vq_size;
+ uint16_t n_inflight;
+ struct async_inflight_info *pkts_info;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_inflight = vq->async_pkts_inflight_n;
+ pkts_idx = vq->async_pkts_idx;
+ pkts_info = vq->async_pkts_info;
+ vq_size = vq->size;
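+ /*
+ * The oldest in-flight packet sits async_pkts_inflight_n slots
+ * behind async_pkts_idx.
+ */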
+ start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
+ vq_size, vq->async_pkts_inflight_n);
+
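+ /*
+ * Ask the async device how many copies finished since the last poll;
+ * completions left over from the previous call are counted in as well.
+ */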
+ if (count > vq->async_last_pkts_n)
+ n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ queue_id, 0, count - vq->async_last_pkts_n);
+ n_pkts_cpl += vq->async_last_pkts_n;
+
+ rte_smp_wmb();
+
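+ /*
+ * Walk the in-flight packets in submission order, consuming one
+ * completion per async-copied packet; CPU-copied packets (segs == 0)
+ * pass straight through. Stop at the first async packet that has no
+ * completion yet.
+ */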
+ while (likely((n_pkts_put < count) && n_inflight)) {
+ uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1);
+ if (n_pkts_cpl && pkts_info[info_idx].segs)
+ n_pkts_cpl--;
+ else if (!n_pkts_cpl && pkts_info[info_idx].segs)
+ break;
+ n_pkts_put++;
+ n_inflight--;
+ n_descs += pkts_info[info_idx].descs;
+ }
+
+ vq->async_last_pkts_n = n_pkts_cpl;
+
+ if (n_pkts_put) {
+ vq->async_pkts_inflight_n = n_inflight;
+ if (likely(vq->enabled && vq->access_ok)) {
+ __atomic_add_fetch(&vq->used->idx,
+ n_descs, __ATOMIC_RELEASE);
+ vhost_vring_call_split(dev, vq);
+ }
+
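+ /* hand the completed mbufs back to the caller, handling ring wrap-around */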
+ if (start_idx + n_pkts_put <= vq_size) {
+ rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
+ n_pkts_put * sizeof(uintptr_t));
+ } else {
+ rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
+ (vq_size - start_idx) * sizeof(uintptr_t));
+ rte_memcpy(&pkts[vq_size - start_idx],
+ vq->async_pkts_pending,
+ (n_pkts_put + start_idx - vq_size) *
+ sizeof(uintptr_t));
+ }
+ }
+
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return n_pkts_put;
+}
+
+static __rte_always_inline uint32_t
+virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ uint32_t nb_tx = 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->enabled == 0 || !vq->async_registered))
+ goto out_access_unlock;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
+ count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+ if (count == 0)
+ goto out;
+
+ /* TODO: packed queue not implemented */
+ if (vq_is_packed(dev))
+ nb_tx = 0;
+ else
+ nb_tx = virtio_dev_rx_async_submit_split(dev,
+ vq, queue_id, pkts, count);
+
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return nb_tx;
+}
+
+uint16_t
+rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return 0;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return 0;
+ }
+
+ return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
+}
+
+static inline bool
+virtio_net_with_host_offload(struct virtio_net *dev)
+{
+ if (dev->features &
+ ((1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO)))
+ return true;
+
+ return false;
+}
+
+static void
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+{
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
+ void *l3_hdr = NULL;
+ struct rte_ether_hdr *eth_hdr;
+ uint16_t ethertype;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ m->l2_len = sizeof(struct rte_ether_hdr);
+ ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+ if (ethertype == RTE_ETHER_TYPE_VLAN) {
+ struct rte_vlan_hdr *vlan_hdr =
+ (struct rte_vlan_hdr *)(eth_hdr + 1);
+
+ m->l2_len += sizeof(struct rte_vlan_hdr);
+ ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+ }
+
+ l3_hdr = (char *)eth_hdr + m->l2_len;
+
+ switch (ethertype) {
+ case RTE_ETHER_TYPE_IPV4:
+ ipv4_hdr = l3_hdr;
+ *l4_proto = ipv4_hdr->next_proto_id;
+ m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+ *l4_hdr = (char *)l3_hdr + m->l3_len;
+ m->ol_flags |= PKT_TX_IPV4;
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ ipv6_hdr = l3_hdr;
+ *l4_proto = ipv6_hdr->proto;
+ m->l3_len = sizeof(struct rte_ipv6_hdr);
+ *l4_hdr = (char *)l3_hdr + m->l3_len;
+ m->ol_flags |= PKT_TX_IPV6;
+ break;
+ default:
+ m->l3_len = 0;
+ *l4_proto = 0;
+ *l4_hdr = NULL;
+ break;
+ }
+}
+
+static __rte_always_inline void
+vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+{
+ uint16_t l4_proto = 0;
+ void *l4_hdr = NULL;
+ struct rte_tcp_hdr *tcp_hdr = NULL;