X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx_packed.h;h=1d1db60da8b74e1cf0c94bc24ec8bb8e0e695dab;hb=9b7466f6ce5bb43200f082fea1e8eb1c22ecd5d3;hp=8f5198ad7337420fba8130c3d391584a12313093;hpb=5971ce5e2a591adcbadbe0de203985ab3ac70232;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h
index 8f5198ad73..1d1db60da8 100644
--- a/drivers/net/virtio/virtio_rxtx_packed.h
+++ b/drivers/net/virtio/virtio_rxtx_packed.h
@@ -15,7 +15,7 @@
 
 #include "virtio_logs.h"
 #include "virtio_ethdev.h"
-#include "virtio_pci.h"
+#include "virtio.h"
 #include "virtqueue.h"
 
 #define BYTE_SIZE 8
@@ -28,6 +28,8 @@
 /* flag bits offset in packed ring desc from ID */
 #define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
         offsetof(struct vring_packed_desc, id)) * BYTE_SIZE)
+#define FLAGS_LEN_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
+        offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
 #endif
 
 #define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
@@ -36,13 +38,15 @@
 /* reference count offset in mbuf rearm data */
 #define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \
         offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
+
+#ifdef CC_AVX512_SUPPORT
 /* segment number offset in mbuf rearm data */
 #define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \
         offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
-
 /* default rearm data */
 #define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \
         1ULL << REFCNT_BITS_OFFSET)
+#endif
 
 /* id bits offset in packed ring desc higher 64bits */
 #define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \
@@ -100,19 +104,19 @@ static inline int
 virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
                                     struct rte_mbuf *txm)
 {
-        struct virtqueue *vq = txvq->vq;
+        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
         struct virtio_hw *hw = vq->hw;
         uint16_t hdr_size = hw->vtnet_hdr_size;
         uint16_t slots, can_push = 0, use_indirect = 0;
         int16_t need;
 
         /* optimize ring usage */
-        if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
-              vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+        if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+              virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
             rte_mbuf_refcnt_read(txm) == 1 &&
             RTE_MBUF_DIRECT(txm) &&
             txm->nb_segs == 1 &&
             rte_pktmbuf_headroom(txm) >= hdr_size)
                 can_push = 1;
-        else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+        else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
                  txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
                 use_indirect = 1;
@@ -208,7 +212,7 @@ virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,
 {
         uint16_t used_idx, id;
         uint32_t len;
-        struct virtqueue *vq = rxvq->vq;
+        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
         struct virtio_hw *hw = vq->hw;
         uint32_t hdr_size = hw->vtnet_hdr_size;
         struct virtio_net_hdr *hdr;
@@ -260,7 +264,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
                               struct rte_mbuf **cookie,
                               uint16_t num)
 {
-        struct virtqueue *vq = rxvq->vq;
+        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
         struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
         uint16_t flags = vq->vq_packed.cached_flags;
         struct virtio_hw *hw = vq->hw;
@@ -284,7 +288,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
                         dxp = &vq->vq_descx[idx + i];
                         dxp->cookie = (void *)cookie[total_num + i];
 
-                        addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
+                        addr = cookie[total_num + i]->buf_iova +
                                 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
 
                         start_dp[idx + i].addr = addr;
                         start_dp[idx + i].len = cookie[total_num + i]->buf_len
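
Below is a minimal standalone sketch of the pattern the new virtnet_rxq_to_vq()/virtnet_txq_to_vq() calls in the hunks above are assumed to rely on: instead of keeping a stored rxvq->vq / txvq->vq back-pointer, the Rx/Tx queue struct is taken to be embedded in struct virtqueue and the owning virtqueue is recovered from the member's address (a container_of-style conversion). The fake_* structs, fields, and helper here are illustrative stand-ins, not the driver's actual definitions.

/*
 * Standalone illustration (not DPDK code) of recovering the enclosing
 * virtqueue from an embedded Rx queue member instead of following a
 * stored back-pointer.
 */
#include <stddef.h>
#include <stdio.h>

/* Recover a pointer to the enclosing struct from one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_virtnet_rx {
        unsigned int rx_free_thresh;    /* placeholder field */
};

struct fake_virtqueue {
        unsigned int vq_size;           /* placeholder field */
        struct fake_virtnet_rx rxq;     /* Rx view embedded in the vq */
};

/* Hypothetical equivalent of virtnet_rxq_to_vq(): member -> container. */
static inline struct fake_virtqueue *
fake_rxq_to_vq(struct fake_virtnet_rx *rxvq)
{
        return container_of(rxvq, struct fake_virtqueue, rxq);
}

int main(void)
{
        struct fake_virtqueue vq = { .vq_size = 256 };
        struct fake_virtnet_rx *rxvq = &vq.rxq;

        /* The helper reaches the embedding virtqueue without any stored
         * back-pointer, which is what replaces the old rxvq->vq access. */
        printf("vq_size via helper: %u\n", fake_rxq_to_vq(rxvq)->vq_size);
        return 0;
}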