virtio: deinline some code
author    Stephen Hemminger <stephen@networkplumber.org>
          Sat, 14 Jun 2014 01:06:20 +0000 (18:06 -0700)
committer Thomas Monjalon <thomas.monjalon@6wind.com>
          Tue, 22 Jul 2014 13:00:01 +0000 (15:00 +0200)
This driver has lots of functions marked always inline, which is actually
counterproductive with modern compilers. It is better to move each function
into the one file where it is used (proper scope) and let the compiler
decide whether to inline it.

Trivial functions are left as static inline.
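
As a rough sketch of the pattern being changed (the example_ring type and
helper names are illustrative, not taken from the driver):

    struct example_ring {            /* hypothetical type, for illustration */
            unsigned int size;
            unsigned int used;
    };

    /* Before: inlining is forced at every call site, regardless of the
     * compiler's own cost model. */
    static inline unsigned int __attribute__((always_inline))
    example_ring_space_forced(const struct example_ring *r)
    {
            return r->size - r->used;
    }

    /* After: a plain static function in the one .c file that calls it;
     * the compiler remains free to inline it where that actually pays off. */
    static unsigned int
    example_ring_space(const struct example_ring *r)
    {
            return r->size - r->used;
    }

Helpers the size of vq_ring_free_chain() or the enqueue/dequeue routines
fall on the "let the compiler decide" side; one-liners such as
vq_update_avail_idx() stay static inline.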

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Alan Carew <alan.carew@intel.com>
lib/librte_pmd_virtio/virtio_rxtx.c
lib/librte_pmd_virtio/virtqueue.h

diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c
index 8bb6651..4c45cdb 100644
 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
 #endif
 
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+       struct vring_desc *dp, *dp_tail;
+       struct vq_desc_extra *dxp;
+       uint16_t desc_idx_last = desc_idx;
+
+       dp  = &vq->vq_ring.desc[desc_idx];
+       dxp = &vq->vq_descx[desc_idx];
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+       if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+               while (dp->flags & VRING_DESC_F_NEXT) {
+                       desc_idx_last = dp->next;
+                       dp = &vq->vq_ring.desc[dp->next];
+               }
+       }
+       dxp->ndescs = 0;
+
+       /*
+        * We must append the existing free chain, if any, to the end of
+        * the newly freed chain. If the virtqueue was completely used,
+        * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+        */
+       if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+               vq->vq_desc_head_idx = desc_idx;
+       } else {
+               dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+               dp_tail->next = desc_idx;
+       }
+
+       vq->vq_desc_tail_idx = desc_idx_last;
+       dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+                          uint32_t *len, uint16_t num)
+{
+       struct vring_used_elem *uep;
+       struct rte_mbuf *cookie;
+       uint16_t used_idx, desc_idx;
+       uint16_t i;
+
+       /* Caller does the check */
+       for (i = 0; i < num; i++) {
+               used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+               uep = &vq->vq_ring.used->ring[used_idx];
+               desc_idx = (uint16_t) uep->id;
+               len[i] = uep->len;
+               cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
+               if (unlikely(cookie == NULL)) {
+                       PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+                               vq->vq_used_cons_idx);
+                       break;
+               }
+
+               rte_prefetch0(cookie);
+               rte_packet_prefetch(cookie->pkt.data);
+               rx_pkts[i]  = cookie;
+               vq->vq_used_cons_idx++;
+               vq_ring_free_chain(vq, desc_idx);
+               vq->vq_descx[desc_idx].cookie = NULL;
+       }
+
+       return i;
+}
+
+static void
+virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
+{
+       struct vring_used_elem *uep;
+       uint16_t used_idx, desc_idx;
+
+       used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+       uep = &vq->vq_ring.used->ring[used_idx];
+       desc_idx = (uint16_t) uep->id;
+       vq->vq_used_cons_idx++;
+       vq_ring_free_chain(vq, desc_idx);
+}
+
+static inline int
+virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+       struct vq_desc_extra *dxp;
+       struct vring_desc *start_dp;
+       uint16_t needed = 1;
+       uint16_t head_idx, idx;
+
+       if (unlikely(vq->vq_free_cnt == 0))
+               return -ENOSPC;
+       if (unlikely(vq->vq_free_cnt < needed))
+               return -EMSGSIZE;
+
+       head_idx = vq->vq_desc_head_idx;
+       if (unlikely(head_idx >= vq->vq_nentries))
+               return -EFAULT;
+
+       idx = head_idx;
+       dxp = &vq->vq_descx[idx];
+       dxp->cookie = (void *)cookie;
+       dxp->ndescs = needed;
+
+       start_dp = vq->vq_ring.desc;
+       start_dp[idx].addr  =
+               (uint64_t) (cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+       start_dp[idx].len   = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
+       start_dp[idx].flags = VRING_DESC_F_WRITE;
+       idx = start_dp[idx].next;
+       vq->vq_desc_head_idx = idx;
+       if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+               vq->vq_desc_tail_idx = idx;
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+       vq_update_avail_ring(vq, head_idx);
+
+       return 0;
+}
+
+static int
+virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
+{
+       struct vq_desc_extra *dxp;
+       struct vring_desc *start_dp;
+       uint16_t needed = 2;
+       uint16_t head_idx, idx;
+
+       if (unlikely(txvq->vq_free_cnt == 0))
+               return -ENOSPC;
+       if (unlikely(txvq->vq_free_cnt < needed))
+               return -EMSGSIZE;
+       head_idx = txvq->vq_desc_head_idx;
+       if (unlikely(head_idx >= txvq->vq_nentries))
+               return -EFAULT;
+
+       idx = head_idx;
+       dxp = &txvq->vq_descx[idx];
+       if (dxp->cookie != NULL)
+               rte_pktmbuf_free_seg(dxp->cookie);
+       dxp->cookie = (void *)cookie;
+       dxp->ndescs = needed;
+
+       start_dp = txvq->vq_ring.desc;
+       start_dp[idx].addr  =
+               txvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);
+       start_dp[idx].len   = sizeof(struct virtio_net_hdr);
+       start_dp[idx].flags = VRING_DESC_F_NEXT;
+       idx = start_dp[idx].next;
+       start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
+       start_dp[idx].len   = cookie->pkt.data_len;
+       start_dp[idx].flags = 0;
+       idx = start_dp[idx].next;
+       txvq->vq_desc_head_idx = idx;
+       if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+               txvq->vq_desc_tail_idx = idx;
+       txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+       vq_update_avail_ring(txvq, head_idx);
+
+       return 0;
+}
+
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
 {
diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
index 29dd233..18e9cbb 100644
@@ -224,14 +224,14 @@ virtqueue_full(const struct virtqueue *vq)
 
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
-static inline void __attribute__((always_inline))
+static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
        rte_compiler_barrier();
        vq->vq_ring.avail->idx = vq->vq_avail_idx;
 }
 
-static inline void __attribute__((always_inline))
+static inline void
 vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
 {
        uint16_t avail_idx;
@@ -247,13 +247,13 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
        vq->vq_avail_idx++;
 }
 
-static inline int __attribute__((always_inline))
+static inline int
 virtqueue_kick_prepare(struct virtqueue *vq)
 {
        return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
-static inline void __attribute__((always_inline))
+static inline void
 virtqueue_notify(struct virtqueue *vq)
 {
        /*
@@ -264,167 +264,6 @@ virtqueue_notify(struct virtqueue *vq)
        VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);
 }
 
-static inline void __attribute__((always_inline))
-vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
-{
-       struct vring_desc *dp, *dp_tail;
-       struct vq_desc_extra *dxp;
-       uint16_t desc_idx_last = desc_idx;
-
-       dp  = &vq->vq_ring.desc[desc_idx];
-       dxp = &vq->vq_descx[desc_idx];
-       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
-       if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
-               while (dp->flags & VRING_DESC_F_NEXT) {
-                       desc_idx_last = dp->next;
-                       dp = &vq->vq_ring.desc[dp->next];
-               }
-       }
-       dxp->ndescs = 0;
-
-       /*
-        * We must append the existing free chain, if any, to the end of
-        * newly freed chain. If the virtqueue was completely used, then
-        * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
-        */
-       if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
-               vq->vq_desc_head_idx = desc_idx;
-       } else {
-               dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
-               dp_tail->next = desc_idx;
-       }
-
-       vq->vq_desc_tail_idx = desc_idx_last;
-       dp->next = VQ_RING_DESC_CHAIN_END;
-}
-
-static inline int __attribute__((always_inline))
-virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
-{
-       struct vq_desc_extra *dxp;
-       struct vring_desc *start_dp;
-       uint16_t needed = 1;
-       uint16_t head_idx, idx;
-
-       if (unlikely(vq->vq_free_cnt == 0))
-               return -ENOSPC;
-       if (unlikely(vq->vq_free_cnt < needed))
-               return -EMSGSIZE;
-
-       head_idx = vq->vq_desc_head_idx;
-       if (unlikely(head_idx >= vq->vq_nentries))
-               return -EFAULT;
-
-       idx = head_idx;
-       dxp = &vq->vq_descx[idx];
-       dxp->cookie = (void *)cookie;
-       dxp->ndescs = needed;
-
-       start_dp = vq->vq_ring.desc;
-       start_dp[idx].addr  =
-               (uint64_t) (cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
-       start_dp[idx].len   = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
-       start_dp[idx].flags =  VRING_DESC_F_WRITE;
-       idx = start_dp[idx].next;
-       vq->vq_desc_head_idx = idx;
-       if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-               vq->vq_desc_tail_idx = idx;
-       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
-       vq_update_avail_ring(vq, head_idx);
-
-       return 0;
-}
-
-static inline int __attribute__((always_inline))
-virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
-{
-       struct vq_desc_extra *dxp;
-       struct vring_desc *start_dp;
-       uint16_t needed = 2;
-       uint16_t head_idx, idx;
-
-       if (unlikely(txvq->vq_free_cnt == 0))
-               return -ENOSPC;
-       if (unlikely(txvq->vq_free_cnt < needed))
-               return -EMSGSIZE;
-       head_idx = txvq->vq_desc_head_idx;
-       if (unlikely(head_idx >= txvq->vq_nentries))
-               return -EFAULT;
-
-       idx = head_idx;
-       dxp = &txvq->vq_descx[idx];
-       if (dxp->cookie != NULL)
-               rte_pktmbuf_free_seg(dxp->cookie);
-       dxp->cookie = (void *)cookie;
-       dxp->ndescs = needed;
-
-       start_dp = txvq->vq_ring.desc;
-       start_dp[idx].addr  =
-               txvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);
-       start_dp[idx].len   = sizeof(struct virtio_net_hdr);
-       start_dp[idx].flags = VRING_DESC_F_NEXT;
-       idx = start_dp[idx].next;
-       start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
-       start_dp[idx].len   = cookie->pkt.data_len;
-       start_dp[idx].flags = 0;
-       idx = start_dp[idx].next;
-       txvq->vq_desc_head_idx = idx;
-       if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-               txvq->vq_desc_tail_idx = idx;
-       txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
-       vq_update_avail_ring(txvq, head_idx);
-
-       return 0;
-}
-
-static inline uint16_t __attribute__((always_inline))
-virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
-{
-       struct vring_used_elem *uep;
-       struct rte_mbuf *cookie;
-       uint16_t used_idx, desc_idx;
-       uint16_t i;
-
-       /*  Caller does the check */
-       for (i = 0; i < num; i++) {
-               used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-               uep = &vq->vq_ring.used->ring[used_idx];
-               desc_idx = (uint16_t) uep->id;
-               len[i] = uep->len;
-               cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
-
-               if (unlikely(cookie == NULL)) {
-                       PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
-                               vq->vq_used_cons_idx);
-                       break;
-               }
-
-               rte_prefetch0(cookie);
-               rte_packet_prefetch(cookie->pkt.data);
-               rx_pkts[i]  = cookie;
-               vq->vq_used_cons_idx++;
-               vq_ring_free_chain(vq, desc_idx);
-               vq->vq_descx[desc_idx].cookie = NULL;
-       }
-
-       return i;
-}
-
-static inline uint16_t __attribute__((always_inline))
-virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
-{
-       struct vring_used_elem *uep;
-       uint16_t used_idx, desc_idx;
-
-       used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-       uep = &vq->vq_ring.used->ring[used_idx];
-       desc_idx = (uint16_t) uep->id;
-       vq->vq_used_cons_idx++;
-       vq_ring_free_chain(vq, desc_idx);
-
-       return 0;
-}
-
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
        uint16_t used_idx, nused; \