if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
+ hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
if (!hw->virtio_user_dev) {
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_RING_PACKED | \
- 1ULL << VIRTIO_F_IOMMU_PLATFORM)
+ 1ULL << VIRTIO_F_IOMMU_PLATFORM | \
+ 1ULL << VIRTIO_F_ORDER_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
(VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
*/
#define VIRTIO_F_IN_ORDER 35
+/*
+ * This feature indicates that memory accesses by the driver and the device
+ * are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM 36
+
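As a minimal sketch (not part of this patch), the new bit is consumed during feature negotiation exactly as the ethdev hunk above does; the hypothetical helper below only restates that step using the existing vtpci_with_feature() accessor and the new hw->weak_barriers field.

/* Illustration only: derive the barrier mode from the negotiated features.
 * Assumes the driver headers providing struct virtio_hw and
 * vtpci_with_feature() (virtio_pci.h) are included.  If
 * VIRTIO_F_ORDER_PLATFORM was negotiated, a real device (e.g. behind vDPA)
 * sits on the other side of the ring, so weak SMP barriers are not enough
 * and real I/O barriers must be used instead.
 */
static void
example_set_barrier_mode(struct virtio_hw *hw)
{
	hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
}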
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
uint8_t use_simple_rx;
uint8_t use_inorder_rx;
uint8_t use_inorder_tx;
+ uint8_t weak_barriers;
bool has_tx_offload;
bool has_rx_offload;
uint16_t port_id;
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
nb_used = RTE_MIN(nb_used, nb_pkts);
nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
/* Positive value indicates we need free vring descriptors */
if (unlikely(need > 0)) {
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_pkts);
virtio_xmit_cleanup_packed(vq, need);
need = slots - vq->vq_free_cnt;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup(vq, nb_used);
/* Positive value indicates we need free vring descriptors */
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup(vq, need);
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup_inorder(vq, need);
struct rte_mbuf;
/*
- * Per virtio_config.h in Linux.
+ * Per virtio_ring.h in Linux.
* For virtio_pci on SMP, we don't need to order with respect to MMIO
* accesses through relaxed memory I/O windows, so smp_mb() et al are
* sufficient.
*
+ * When using virtio to talk to real devices (e.g. vDPA), we do need real
+ * barriers.
*/
-#define virtio_mb() rte_smp_mb()
-#define virtio_rmb() rte_smp_rmb()
-#define virtio_wmb() rte_smp_wmb()
+static inline void
+virtio_mb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_mb();
+ else
+ rte_mb();
+}
+
+static inline void
+virtio_rmb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_rmb();
+ else
+ rte_cio_rmb();
+}
+
+static inline void
+virtio_wmb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_wmb();
+ else
+ rte_cio_wmb();
+}
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
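The hunks below thread the flag through the hot path; as a condensed, illustrative sketch (assuming the split-ring virtqueue layout and the virtqueue_notify() helper already present in the driver), the producer side then follows this pattern:

/* Illustration only, not part of the patch: publish new avail entries and
 * decide whether to notify the device, passing the per-device flag at each
 * ordering point so one binary serves both shared-memory virtio (SMP
 * barriers) and real devices (I/O barriers).
 */
static inline void
example_publish_and_kick(struct virtqueue *vq)
{
	uint8_t weak = vq->hw->weak_barriers;

	/* descriptor writes must be visible before the new avail index */
	virtio_wmb(weak);
	vq->vq_ring.avail->idx = vq->vq_avail_idx;

	/* avail index must be visible before reading used->flags */
	virtio_mb(weak);
	if (!(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY))
		virtqueue_notify(vq);
}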
if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
- virtio_wmb();
+ virtio_wmb(vq->hw->weak_barriers);
vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
*event_flags = vq->event_flags_shadow;
}
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
- virtio_wmb();
+ virtio_wmb(vq->hw->weak_barriers);
vq->vq_ring.avail->idx = vq->vq_avail_idx;
}
* Ensure updated avail->idx is visible to vhost before reading
* the used->flags.
*/
- virtio_mb();
+ virtio_mb(vq->hw->weak_barriers);
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
/*
* Ensure updated data is visible to vhost before reading the flags.
*/
- virtio_mb();
+ virtio_mb(vq->hw->weak_barriers);
flags = vq->ring_packed.device_event->desc_event_flags;
return flags != RING_EVENT_FLAGS_DISABLE;