	rte_cio_wmb();
}
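+/* Read the descriptor flags with the ordering required before the
+ * rest of the descriptor may be read: an acquire load (or its x86
+ * equivalent below) under weak barriers, otherwise a plain load
+ * followed by a cio read barrier. This pairs with the release
+ * semantics of virtqueue_store_flags_packed().
+ */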
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+			     uint8_t weak_barriers)
+{
+	uint16_t flags;
+
+	if (weak_barriers) {
+/* x86 prefers rte_smp_rmb() over __atomic_load_n() as it reports
+ * better performance (~1.5%), which comes from the branch saved by the
+ * compiler. The if and else branches are identical, as the smp and cio
+ * barriers are both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		flags = dp->flags;
+		rte_smp_rmb();
+#else
+		flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#endif
+	} else {
+		flags = dp->flags;
+		rte_cio_rmb();
+	}
+
+	return flags;
+}
+
static inline void
virtqueue_store_flags_packed(struct vring_packed_desc *dp,
			     uint16_t flags, uint8_t weak_barriers)
struct virtio_net_ctrl_mac {
	uint32_t entries;
	uint8_t macs[][RTE_ETHER_ADDR_LEN];
-} __attribute__((__packed__));
+} __rte_packed;
#define VIRTIO_NET_CTRL_MAC 1
#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
struct virtio_net_ctrl_hdr {
	uint8_t class;
	uint8_t cmd;
-} __attribute__((packed));
+} __rte_packed;
typedef uint8_t virtio_net_ctrl_ack;
struct virtio_tx_region {
	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-		__attribute__((__aligned__(16)));
+		__rte_aligned(16);
};
static inline int
desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
	uint16_t used, avail, flags;

-	flags = desc->flags;
+	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
	used = !!(flags & VRING_PACKED_DESC_F_USED);
	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
/* Flush the elements in the used ring. */
void virtqueue_rxvq_flush(struct virtqueue *vq);
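+/* Reset a packed Rx virtqueue to its initial state. */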
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
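+/* Reset a packed Tx virtqueue to its initial state. */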
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
static inline int
virtqueue_full(const struct virtqueue *vq)
{