static inline int
virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
	struct rte_mbuf **tx_pkts)
{
- struct virtqueue *vq = txvq->vq;
+ struct virtqueue *vq = virtnet_txq_to_vq(txvq);
uint16_t head_size = vq->hw->vtnet_hdr_size;
uint16_t idx = vq->vq_avail_idx;
struct virtio_net_hdr *hdr;
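/* Expected per-mbuf pattern: refcnt == 1 and nb_segs == 1, packed as
 * two adjacent 16-bit fields in each 32-bit lane.
 */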
uint32x4_t def_ref_seg = vdupq_n_u32(0x10001);
/* Check refcnt and nb_segs. */
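/* vqtbl2q_u8() gathers those two fields for the whole batch (preloaded
 * into 'mbuf') through the ref_seg_msk shuffle mask, so a single vector
 * compare validates every packet at once.
 */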
uint32x4_t ref_seg = vreinterpretq_u32_u8(vqtbl2q_u8(mbuf, ref_seg_msk));
- poly128_t cmp1 = vreinterpretq_p128_u32(~vceqq_u32(ref_seg, def_ref_seg));
- if (unlikely(cmp1))
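+ /* poly128_t is only available with the Arm crypto extension; compare
+  * the two 64-bit lanes instead so the check builds on any armv8 target. */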
+ uint64x2_t cmp1 = vreinterpretq_u64_u32(~vceqq_u32(ref_seg, def_ref_seg));
+ if (unlikely(vgetq_lane_u64(cmp1, 0) || vgetq_lane_u64(cmp1, 1)))
return -1;
/* Check the headroom is enough, then write the virtio-net header in
 * front of the packet data. */
virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
struct virtio_net_hdr *, -head_size);
- virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+ virtqueue_xmit_offload(hdr, tx_pkts[i]);
}
}
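/* For reference, a scalar sketch equivalent to the vector
 * refcnt/nb_segs check above; rte_mbuf_refcnt_read() and nb_segs are
 * standard rte_mbuf accessors, the unroll macro comes from this file.
 *
 *	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 *		if (unlikely(rte_mbuf_refcnt_read(tx_pkts[i]) != 1 ||
 *			     tx_pkts[i]->nb_segs != 1))
 *			return -1;
 *	}
 */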
static inline int
virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
	struct rte_mbuf **rx_pkts)
{
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
struct virtio_hw *hw = vq->hw;
uint16_t head_size = hw->vtnet_hdr_size;
uint16_t id = vq->vq_used_cons_idx;
/* A packed-ring descriptor is used when both its AVAIL and USED flags
 * match the driver's used wrap counter; build the expected pattern in
 * each lane. */
uint32x4_t v_used_flag = vdupq_n_u32(0);

if (vq->vq_packed.used_wrap_counter)
	v_used_flag = vdupq_n_u32(PACKED_FLAGS_MASK);
- poly128_t desc_stats = vreinterpretq_p128_u32(~vceqq_u32(v_flag, v_used_flag));
+ uint64x2_t desc_stats = vreinterpretq_u64_u32(~vceqq_u32(v_flag, v_used_flag));
/* The whole batch must be marked used; otherwise bail out so the
 * caller can fall back to single-descriptor dequeue. */
- if (desc_stats)
+ if (unlikely(vgetq_lane_u64(desc_stats, 0) || vgetq_lane_u64(desc_stats, 1)))
return -1;
/* Load two mbuf pointers at a time. */
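/* A minimal sketch of the idea, assuming the cached mbuf cookies sit in
 * a contiguous 64-bit array (hypothetical 'mbuf_tbl'): one vld1q_u64
 * moves two pointers per load/store pair.
 *
 *	uint64x2_t mbp = vld1q_u64((uint64_t *)&mbuf_tbl[id]);
 *	vst1q_u64((uint64_t *)&rx_pkts[0], mbp);
 */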