+/*
+ * Fast Tx path for packed virtqueues: the mbuf is a single segment and
+ * the virtio-net header fits in its headroom, so header and packet data
+ * are posted through a single descriptor.
+ */
+static inline void
+virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
+				   struct rte_mbuf *cookie,
+				   int in_order)
+{
+	struct virtqueue *vq = txvq->vq;
+	struct vring_packed_desc *dp;
+	struct vq_desc_extra *dxp;
+	uint16_t idx, id, flags;
+	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	struct virtio_net_hdr *hdr;
+
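+	/*
+	 * In-order mode reuses the avail index as the descriptor id;
+	 * otherwise the id comes from the free descriptor chain head.
+	 */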
+	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+	idx = vq->vq_avail_idx;
+	dp = &vq->vq_packed.ring.desc[idx];
+
+	dxp = &vq->vq_descx[id];
+	dxp->ndescs = 1;
+	dxp->cookie = cookie;
+
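+	/* cached_flags carries the avail/used bits for the current wrap */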
+	flags = vq->vq_packed.cached_flags;
+
+	/* prepend cannot fail, checked by caller */
+	hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+		cookie->data_off - head_size);
+
+	/* if offload disabled, hdr is not zeroed yet, do it now */
+	if (!vq->hw->has_tx_offload)
+		virtqueue_clear_net_hdr(hdr);
+	else
+		virtqueue_xmit_offload(hdr, cookie, true);
+
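+	/* header and packet data are contiguous: cover both in one descriptor */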
+	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+	dp->len = cookie->data_len + head_size;
+	dp->id = id;
+
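+	/* on ring wrap, flip the avail/used bits used for the next round */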
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+	}
+
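+	/* this fast path always consumes exactly one descriptor */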
+	vq->vq_free_cnt--;
+
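+	/*
+	 * Not in-order: advance the free chain head; mark the chain empty
+	 * when the last free descriptor has been taken.
+	 */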
+	if (!in_order) {
+		vq->vq_desc_head_idx = dxp->next;
+		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+	}
+
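+	/*
+	 * Store the descriptor flags last, behind a write barrier when
+	 * needed, so the device never sees a partially written descriptor.
+	 */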
+	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
+}
+