struct virtnet_rx *rxvq = rxq;
struct virtqueue *vq = rxvq->vq;
- return VIRTQUEUE_NUSED(vq) >= offset;
+ return virtqueue_nused(vq) >= offset;
}
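/* For reference, a minimal sketch of virtqueue_nused(), which replaces the
 * VIRTQUEUE_NUSED() macro throughout this patch. The point of the helper is
 * that the read barrier is folded into the load of the used index, so the
 * standalone virtio_rmb() calls below can be dropped. Field names follow
 * DPDK's split-ring layout; treat this as an illustration, not the exact
 * upstream body:
 *
 *	static inline uint16_t
 *	virtqueue_nused(const struct virtqueue *vq)
 *	{
 *		uint16_t idx;
 *
 *		if (vq->hw->weak_barriers)
 *			idx = __atomic_load_n(&vq->vq_split.ring.used->idx,
 *					      __ATOMIC_ACQUIRE);
 *		else {
 *			idx = vq->vq_split.ring.used->idx;
 *			rte_io_rmb();
 *		}
 *		return idx - vq->vq_used_cons_idx;
 *	}
 */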
void
for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx;
- /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ /* desc_is_used has a load-acquire or rte_io_rmb inside
 * and waits for a used desc in the virtqueue.
*/
if (!desc_is_used(&desc[used_idx], vq))
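/* For reference, desc_is_used() on the packed ring roughly does the
 * following (a sketch, not the exact upstream body): it loads the descriptor
 * flags with acquire semantics when barriers are weak, or with a plain load
 * plus rte_io_rmb() otherwise, then compares the AVAIL/USED bits against the
 * ring's wrap counter:
 *
 *	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
 *	used  = !!(flags & VRING_PACKED_DESC_F_USED);
 *	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
 *	return avail == used && used == vq->vq_packed.used_wrap_counter;
 */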
*/
uint16_t csum = 0, off;
- rte_raw_cksum_mbuf(m, hdr->csum_start,
+ if (rte_raw_cksum_mbuf(m, hdr->csum_start,
rte_pktmbuf_pkt_len(m) - hdr->csum_start,
- &csum);
+ &csum) < 0)
+ return -EINVAL;
if (likely(csum != 0xffff))
csum = ~csum;
off = hdr->csum_offset + hdr->csum_start;
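/* For context: at this point csum holds the raw 16-bit one's-complement sum,
 * which is folded (~csum) before being stored. The 0xffff case is skipped
 * because its complement would be 0x0000, and a zero checksum in a UDP
 * header means "no checksum was computed"; storing the all-ones value keeps
 * the packet valid. The store that follows looks roughly like:
 *
 *	if (rte_pktmbuf_data_len(m) >= off + 1)
 *		*rte_pktmbuf_mtod_offset(m, uint16_t *, off) = csum;
 */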
if (unlikely(hw->started == 0))
return nb_rx;
- nb_used = VIRTQUEUE_NUSED(vq);
-
- virtio_rmb(hw->weak_barriers);
+ nb_used = virtqueue_nused(vq);
num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
if (unlikely(hw->started == 0))
return nb_rx;
- nb_used = VIRTQUEUE_NUSED(vq);
+ nb_used = virtqueue_nused(vq);
nb_used = RTE_MIN(nb_used, nb_pkts);
nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
- virtio_rmb(hw->weak_barriers);
-
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
nb_enqueued = 0;
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
- virtio_rmb(hw->weak_barriers);
+ if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
rcv_cnt);
uint16_t extra_idx = 0;
if (unlikely(hw->started == 0))
return nb_rx;
- nb_used = VIRTQUEUE_NUSED(vq);
-
- virtio_rmb(hw->weak_barriers);
+ nb_used = virtqueue_nused(vq);
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
- virtio_rmb(hw->weak_barriers);
+ if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
rcv_cnt);
uint16_t extra_idx = 0;
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
- int can_push = 0, slots, need;
+ int can_push = 0, use_indirect = 0, slots, need;
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
__alignof__(struct virtio_net_hdr_mrg_rxbuf)))
can_push = 1;
-
+ else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+ txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+ use_indirect = 1;
/* How many main ring entries are needed for this Tx?
+ * indirect => 1
* any_layout => number of segments
* default => number of segments + 1
*/
- slots = txm->nb_segs + !can_push;
+ slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
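/* (illustration) For a 3-segment mbuf the three cases above give:
 *   use_indirect => slots = 1  (the single main-ring entry points to an
 *                               indirect table holding header + 3 segments)
 *   can_push     => slots = 3  (header squeezed into the first segment's
 *                               headroom, so no extra entry)
 *   default      => slots = 4  (3 segments plus one entry for the header)
 */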
need = slots - vq->vq_free_cnt;
/* A positive value indicates that free vring descriptors are needed */
if (can_push)
virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
else
- virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
+ virtqueue_enqueue_xmit_packed(txvq, txm, slots,
+ use_indirect, 0,
in_order);
virtio_update_packet_stats(&txvq->stats, txm);
return nb_pkts;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
+ nb_used = virtqueue_nused(vq);
+
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup(vq, nb_used);
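/* (illustration) With vq_nentries = 256 and vq_free_thresh = 32, the
 * condition above fires once more than 224 descriptors are in use, so used
 * entries are reclaimed only when the ring runs low rather than on every
 * burst.
 */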
/* A positive value indicates that free vring descriptors are needed */
if (unlikely(need > 0)) {
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
+ nb_used = virtqueue_nused(vq);
+
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup(vq, need);
virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
{
uint16_t nb_used, nb_clean, nb_descs;
- struct virtio_hw *hw = vq->hw;
nb_descs = vq->vq_free_cnt + need;
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
+ nb_used = virtqueue_nused(vq);
nb_clean = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup_inorder(vq, nb_clean);
VIRTQUEUE_DUMP(vq);
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
+ nb_used = virtqueue_nused(vq);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
{
return 0;
}
+
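+/* Stub for builds without CC_AVX512_SUPPORT: it keeps the symbol defined so
+ * the driver still links, and reports zero transmitted packets; the scalar
+ * Tx path is selected instead on such builds.
+ */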
+uint16_t
+virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
#endif /* ifndef CC_AVX512_SUPPORT */