#include <rte_mempool.h>
#include <rte_net.h>
-#include "virtio_pci.h"
+#include "virtio.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_rxtx.h"
/*
* Per virtio_ring.h in Linux.
* For virtio_pci on SMP, we don't need to order with respect to MMIO
- * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * accesses through relaxed memory I/O windows, so thread_fence is
* sufficient.
*
 * For using virtio to talk to real devices (eg. vDPA) we do need real
 * barriers.
 */
static inline void
virtio_mb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
else
rte_mb();
}

static inline void
virtio_rmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
else
rte_io_rmb();
}

static inline void
virtio_wmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
else
rte_io_wmb();
}
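
/*
 * Illustrative sketch (not part of this patch): how a driver picks
 * between the two barrier flavours above. A device that negotiates
 * VIRTIO_F_ORDER_PLATFORM (e.g. real hardware behind vDPA) needs real
 * barriers; software backends do not, so the cheaper thread fences
 * suffice. virtio_with_feature() follows this series' naming.
 */
static inline void
example_set_barrier_mode(struct virtio_hw *hw)
{
	hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
}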

static inline uint16_t
virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
			      uint8_t weak_barriers)
{
	uint16_t flags;

if (weak_barriers) {
-/* x86 prefers to using rte_smp_rmb over __atomic_load_n as it reports
- * a better perf(~1.5%), which comes from the saved branch by the compiler.
- * The if and else branch are identical with the smp and io barriers both
- * defined as compiler barriers on x86.
+/* x86 prefers rte_io_rmb over __atomic_load_n as it reports better
+ * performance (~1.5%), which comes from the branch saved by the compiler.
+ * The if and else branches are identical on all platforms except Arm.
 */
-#ifdef RTE_ARCH_X86_64
- flags = dp->flags;
- rte_smp_rmb();
-#else
+#ifdef RTE_ARCH_ARM
flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#else
+ flags = dp->flags;
+ rte_io_rmb();
#endif
} else {
		flags = dp->flags;
		rte_io_rmb();
	}

	return flags;
}
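
/*
 * Sketch of a typical consumer (assumed helper name): in a packed ring
 * a descriptor has been used by the device once its AVAIL and USED flag
 * bits both match the driver's wrap counter. The acquire semantics
 * inside virtqueue_fetch_flags_packed() ensure the payload reads that
 * follow cannot be reordered before this check.
 */
static inline int
example_desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
	uint16_t flags = virtqueue_fetch_flags_packed(desc,
				vq->hw->weak_barriers);
	uint16_t avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
	uint16_t used = !!(flags & VRING_PACKED_DESC_F_USED);

	return avail == used && used == vq->vq_packed.used_wrap_counter;
}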

static inline void
virtqueue_store_flags_packed(struct vring_packed_desc *dp,
			      uint16_t flags, uint8_t weak_barriers)
{
if (weak_barriers) {
-/* x86 prefers to using rte_smp_wmb over __atomic_store_n as it reports
- * a better perf(~1.5%), which comes from the saved branch by the compiler.
- * The if and else branch are identical with the smp and io barriers both
- * defined as compiler barriers on x86.
+/* x86 prefers rte_io_wmb over __atomic_store_n as it reports better
+ * performance (~1.5%), which comes from the branch saved by the compiler.
+ * The if and else branches are identical on all platforms except Arm.
 */
-#ifdef RTE_ARCH_X86_64
- rte_smp_wmb();
- dp->flags = flags;
-#else
+#ifdef RTE_ARCH_ARM
__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#else
+ rte_io_wmb();
+ dp->flags = flags;
#endif
} else {
rte_io_wmb();
dp->flags = flags;
}
}
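
/*
 * Sketch of the store side (illustrative field values): the descriptor
 * payload must be fully written before the flags store publishes it to
 * the device, which is exactly what the release semantics above
 * guarantee.
 */
static inline void
example_publish_desc(struct vring_packed_desc *dp, uint64_t addr,
		uint32_t len, uint16_t id, uint16_t flags, uint8_t weak)
{
	dp->addr = addr;	/* payload first... */
	dp->len = len;
	dp->id = id;
	virtqueue_store_flags_packed(dp, flags, weak);	/* ...flags last */
}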
+
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
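
/*
 * Typical RX-path use (illustrative):
 *	rte_packet_prefetch(rte_pktmbuf_mtod(rxm, void *));
 */
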
#define VIRTQUEUE_MAX_NAME_SZ 32
-#ifdef RTE_VIRTIO_USER
-/**
- * Return the physical address (or virtual address in case of
- * virtio-user) of mbuf data buffer.
- *
- * The address is firstly casted to the word size (sizeof(uintptr_t))
- * before casting it to uint64_t. This is to make it work with different
- * combination of word size (64 bit and 32 bit) and virtio device
- * (virtio-pci and virtio-user).
- */
-#define VIRTIO_MBUF_ADDR(mb, vq) \
- ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
-#else
-#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
-#endif
-
-/**
- * Return the physical address (or virtual address in case of
- * virtio-user) of mbuf data buffer, taking care of mbuf data offset
- */
-#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
- (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
-
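
/*
 * The removed macros are superseded by the generic rte_mbuf_data_iova()
 * helper used in the enqueue path below. A rough equivalence, assuming
 * virtio-user runs with IOVA-as-VA so that buf_iova already holds a
 * usable address:
 */
static inline uint64_t
example_mbuf_dma_addr(struct rte_mbuf *mb)
{
	/* rte_mbuf_data_iova(mb) == mb->buf_iova + mb->data_off */
	return rte_mbuf_data_iova(mb);
}
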
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2
*/
uint16_t vq_desc_head_idx;
uint16_t vq_desc_tail_idx;
- uint16_t vq_queue_index; /**< PCI queue index */
+	uint16_t vq_queue_index; /**< vring queue index */
uint16_t offset; /**< relative offset to obtain addr in mbuf */
uint16_t *notify_addr;
struct rte_mbuf **sw_ring; /**< RX software ring. */
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
virtqueue_disable_intr_packed(vq);
else
		virtqueue_disable_intr_split(vq);
}

static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
virtqueue_enable_intr_packed(vq);
else
virtqueue_enable_intr_split(vq);
}
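
/*
 * Sketch of the usual interrupt-mode RX cycle built on the two wrappers
 * above; example_wait_for_rx_event() is hypothetical (e.g. an epoll
 * wait on the queue's eventfd).
 */
static void example_wait_for_rx_event(void);

static void
example_rx_intr_cycle(struct virtqueue *vq)
{
	virtqueue_enable_intr(vq);	/* arm before sleeping */
	example_wait_for_rx_event();	/* hypothetical wait primitive */
	virtqueue_disable_intr(vq);	/* back to busy polling */
}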
static inline int
-virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vq_idx)
{
- if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+ if (vq_idx == hw->max_queue_pairs * 2)
return VTNET_CQ;
- else if (vtpci_queue_idx % 2 == 0)
+ else if (vq_idx % 2 == 0)
return VTNET_RQ;
else
		return VTNET_TQ;
}
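
/*
 * Worked example: with hw->max_queue_pairs == 2 the vq index space is
 *	0 -> VTNET_RQ (RX queue 0)	1 -> VTNET_TQ (TX queue 0)
 *	2 -> VTNET_RQ (RX queue 1)	3 -> VTNET_TQ (TX queue 1)
 *	4 -> VTNET_CQ (control queue, index == max_queue_pairs * 2)
 */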
static inline void
virtqueue_notify(struct virtqueue *vq)
{
- VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+ VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
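
/*
 * Sketch of the split-ring kick path that ends in the op above, using
 * the driver helpers vq_update_avail_idx() (write barrier + avail index
 * store) and virtqueue_kick_prepare() (checks whether the device asked
 * to be notified):
 */
static inline void
example_kick(struct virtqueue *vq)
{
	vq_update_avail_idx(vq);	/* order writes, publish avail->idx */
	if (virtqueue_kick_prepare(vq))	/* device wants notifications? */
		virtqueue_notify(vq);	/* ring the doorbell */
}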
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
__ATOMIC_RELAXED); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
- if (vtpci_packed_queue((vq)->hw)) { \
+ if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
" cached_flags=0x%x; used_wrap_counter=%d", \
static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
struct rte_mbuf *cookie,
- bool offload)
+ uint8_t offload)
{
if (offload) {
if (cookie->ol_flags & PKT_TX_TCP_SEG)
struct virtio_net_hdr *hdr;
uint16_t prev;
bool prepend_header = false;
+ uint16_t seg_num = cookie->nb_segs;
id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
*/
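	/* The indirect table needs one slot for the virtio-net header
	 * plus one per mbuf segment, hence (seg_num + 1) entries below.
	 */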
start_dp[idx].addr = txvq->virtio_net_hdr_mem +
RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
- start_dp[idx].len = (needed + 1) *
+ start_dp[idx].len = (seg_num + 1) *
sizeof(struct vring_packed_desc);
/* reset flags for indirect desc */
head_flags = VRING_DESC_F_INDIRECT;
do {
uint16_t flags;
- start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].addr = rte_mbuf_data_iova(cookie);
start_dp[idx].len = cookie->data_len;
if (prepend_header) {
start_dp[idx].addr -= head_size;