virtio: deinline some code
lib/librte_pmd_virtio/virtio_rxtx.c
index 8bb6651..4c45cdb 100644
 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
 #endif
 
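+/*
+ * Return the descriptor chain starting at desc_idx to the free list and
+ * credit vq_free_cnt. An indirect descriptor keeps its chain in a
+ * separate table, so only its single ring slot is reclaimed.
+ */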
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+       struct vring_desc *dp, *dp_tail;
+       struct vq_desc_extra *dxp;
+       uint16_t desc_idx_last = desc_idx;
+
+       dp  = &vq->vq_ring.desc[desc_idx];
+       dxp = &vq->vq_descx[desc_idx];
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+       if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+               while (dp->flags & VRING_DESC_F_NEXT) {
+                       desc_idx_last = dp->next;
+                       dp = &vq->vq_ring.desc[dp->next];
+               }
+       }
+       dxp->ndescs = 0;
+
+       /*
+        * Append the newly freed chain to the tail of the existing free
+        * list. If the free list is empty (tail is VQ_RING_DESC_CHAIN_END),
+        * the freed chain becomes the new head instead.
+        */
+       if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+               vq->vq_desc_head_idx = desc_idx;
+       } else {
+               dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+               dp_tail->next = desc_idx;
+       }
+
+       vq->vq_desc_tail_idx = desc_idx_last;
+       dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
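+/*
+ * Dequeue up to num completed buffers from the used ring. Returns the
+ * number of mbufs stored in rx_pkts; the i-th buffer's length is
+ * written to len[i].
+ */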
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+                          uint32_t *len, uint16_t num)
+{
+       struct vring_used_elem *uep;
+       struct rte_mbuf *cookie;
+       uint16_t used_idx, desc_idx;
+       uint16_t i;
+
+       /* Caller already checked num against the number of used entries. */
+       for (i = 0; i < num; i++) {
+               used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+               uep = &vq->vq_ring.used->ring[used_idx];
+               desc_idx = (uint16_t)uep->id;
+               len[i] = uep->len;
+               cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
+               if (unlikely(cookie == NULL)) {
+                       PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+                               vq->vq_used_cons_idx);
+                       break;
+               }
+
+               rte_prefetch0(cookie);
+               rte_packet_prefetch(cookie->pkt.data);
+               rx_pkts[i] = cookie;
+               vq->vq_used_cons_idx++;
+               vq_ring_free_chain(vq, desc_idx);
+               vq->vq_descx[desc_idx].cookie = NULL;
+       }
+
+       return i;
+}
+
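+/*
+ * Reclaim one completed entry from the TX used ring and return its
+ * descriptor chain to the free list. The transmitted mbuf is freed
+ * lazily, when virtqueue_enqueue_xmit() reuses the slot.
+ */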
+static void
+virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
+{
+       struct vring_used_elem *uep;
+       uint16_t used_idx, desc_idx;
+
+       used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+       uep = &vq->vq_ring.used->ring[used_idx];
+       desc_idx = (uint16_t)uep->id;
+       vq->vq_used_cons_idx++;
+       vq_ring_free_chain(vq, desc_idx);
+}
+
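+/*
+ * Post a single receive mbuf to the avail ring, reserving room in the
+ * buffer for the virtio_net_hdr that the host prepends to each packet.
+ */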
+static inline int
+virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+       struct vq_desc_extra *dxp;
+       struct vring_desc *start_dp;
+       uint16_t needed = 1;
+       uint16_t head_idx, idx;
+
+       if (unlikely(vq->vq_free_cnt == 0))
+               return -ENOSPC;
+       if (unlikely(vq->vq_free_cnt < needed))
+               return -EMSGSIZE;
+
+       head_idx = vq->vq_desc_head_idx;
+       if (unlikely(head_idx >= vq->vq_nentries))
+               return -EFAULT;
+
+       idx = head_idx;
+       dxp = &vq->vq_descx[idx];
+       dxp->cookie = (void *)cookie;
+       dxp->ndescs = needed;
+
+       start_dp = vq->vq_ring.desc;
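+       /*
+        * Start the buffer sizeof(struct virtio_net_hdr) bytes below the
+        * mbuf data offset, so the host writes the virtio_net_hdr into
+        * the tail of the headroom and the packet data lands at the
+        * usual offset.
+        */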
+       start_dp[idx].addr  =
+               (uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM -
+               sizeof(struct virtio_net_hdr));
+       start_dp[idx].len   = cookie->buf_len - RTE_PKTMBUF_HEADROOM +
+               sizeof(struct virtio_net_hdr);
+       start_dp[idx].flags = VRING_DESC_F_WRITE;
+       idx = start_dp[idx].next;
+       vq->vq_desc_head_idx = idx;
+       if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+               vq->vq_desc_tail_idx = idx;
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+       vq_update_avail_ring(vq, head_idx);
+
+       return 0;
+}
+
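+/*
+ * Post one mbuf for transmit as a two-descriptor chain: the first
+ * descriptor covers the slot's virtio_net_hdr, the second the packet
+ * data. An mbuf still attached to the slot from an earlier transmit
+ * is freed here.
+ */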
+static int
+virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
+{
+       struct vq_desc_extra *dxp;
+       struct vring_desc *start_dp;
+       uint16_t needed = 2;
+       uint16_t head_idx, idx;
+
+       if (unlikely(txvq->vq_free_cnt == 0))
+               return -ENOSPC;
+       if (unlikely(txvq->vq_free_cnt < needed))
+               return -EMSGSIZE;
+       head_idx = txvq->vq_desc_head_idx;
+       if (unlikely(head_idx >= txvq->vq_nentries))
+               return -EFAULT;
+
+       idx = head_idx;
+       dxp = &txvq->vq_descx[idx];
+       if (dxp->cookie != NULL)
+               rte_pktmbuf_free_seg(dxp->cookie);
+       dxp->cookie = (void *)cookie;
+       dxp->ndescs = needed;
+
+       start_dp = txvq->vq_ring.desc;
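+       /* Descriptor 1: the slot's virtio_net_hdr in the header area. */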
+       start_dp[idx].addr  =
+               txvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);
+       start_dp[idx].len   = sizeof(struct virtio_net_hdr);
+       start_dp[idx].flags = VRING_DESC_F_NEXT;
+       idx = start_dp[idx].next;
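+       /* Descriptor 2: the packet data. */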
+       start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
+       start_dp[idx].len   = cookie->pkt.data_len;
+       start_dp[idx].flags = 0;
+       idx = start_dp[idx].next;
+       txvq->vq_desc_head_idx = idx;
+       if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+               txvq->vq_desc_tail_idx = idx;
+       txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+       vq_update_avail_ring(txvq, head_idx);
+
+       return 0;
+}
+
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
 {