+ /* Batch path preconditions: every mbuf must be single segment and all
+  * PACKED_BATCH_SIZE descriptors must be available; otherwise return -1
+  * so the caller falls back to the single-packet path.
+  */
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(pkts[i]->next != NULL))
+ return -1;
+ if (unlikely(!desc_is_avail(&descs[avail_idx + i],
+ wrap_counter)))
+ return -1;
+ }
+
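+ /* Ensure the avail checks above complete before reading desc fields. */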
+ rte_smp_rmb();
+
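+ /* Snapshot the descriptor lengths for the checks below. */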
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ lens[i] = descs[avail_idx + i].len;
+
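+ /* Each packet must fit in its descriptor after the virtio-net header. */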
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
+ return -1;
+ }
+
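+ /* Translate guest IOVA to host VA; lens[i] is updated to the mapped size. */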
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ desc_addrs[i] = vhost_iova_to_vva(dev, vq,
+ descs[avail_idx + i].addr,
+ &lens[i],
+ VHOST_ACCESS_RW);
+
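+ /* Reject the batch if any buffer is not mapped contiguously in full. */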
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(lens[i] != descs[avail_idx + i].len))
+ return -1;
+ }
+
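+ /* Prefetch the buffers, locate the virtio-net headers and record the
+  * total used length (packet data plus header) for each descriptor.
+  */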
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
+ hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
+ (uintptr_t)desc_addrs[i];
+ lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
+ }
+
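+ /* Fill the virtio-net headers from the mbuf offload metadata. */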
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
+
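+ /* The whole batch is consumed; advance last_avail, handling ring wrap. */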
+ vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
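+ /* Copy the packet data right after each virtio-net header. */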
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
+ rte_pktmbuf_mtod(pkts[i], void *),
+ pkts[i]->pkt_len);
+ }
+
+ return 0;
+}
+
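+/*
+ * Single-packet fallback for the packed ring enqueue path. Marked
+ * __rte_unused to keep the build warning-free until a caller is added.
+ */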
+static __rte_unused int16_t
+virtio_dev_rx_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf *pkt)
+{
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t nr_descs = 0;
+
+ rte_smp_rmb();
+ if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
+ &nr_descs) < 0)) {
+ VHOST_LOG_DEBUG(VHOST_DATA,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ return -1;
+ }
+
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + nr_descs);
+
+ vq_inc_last_avail_packed(vq, nr_descs);
+
+ return 0;
+}
+
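+/*
+ * Enqueue a burst of mbufs into the packed virtqueue one packet at a
+ * time, accumulating used-ring updates in the shadow ring.
+ */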
+static __rte_noinline uint32_t
+virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ uint32_t pkt_idx = 0;
+ uint16_t num_buffers;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+ uint16_t nr_vec = 0;
+ uint16_t nr_descs = 0;
+
+ if (unlikely(reserve_avail_buf_packed(dev, vq,
+ pkt_len, buf_vec, &nr_vec,
+ &num_buffers, &nr_descs) < 0)) {
+ VHOST_LOG_DEBUG(VHOST_DATA,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + num_buffers);
+
+ if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+ buf_vec, nr_vec,
+ num_buffers) < 0) {
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
+
+ vq_inc_last_avail_packed(vq, nr_descs);
+ }
+
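+ /* Execute the small copies that were batched rather than done inline. */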
+ do_data_copy_enqueue(dev, vq);
+
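+ /* Write the shadow used entries back and kick the guest if required. */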
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_packed(dev, vq);
+ vhost_vring_call_packed(dev, vq);