X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_virtio%2Fvirtqueue.h;h=48b5092c94c8c288acd1117f67ca76a4b2c57e73;hb=3031749c2df04a63cdcef186dcce3781e61436e8;hp=9e50a868460930b1f539f09a71d5663b2cd96380;hpb=c1f86306a0261f50c503a73fea40293e2b2b724c;p=dpdk.git

diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
index 9e50a86846..48b5092c94 100644
--- a/lib/librte_pmd_virtio/virtqueue.h
+++ b/lib/librte_pmd_virtio/virtqueue.h
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -50,6 +50,12 @@
 #define wmb() rte_wmb()
 #define rmb() rte_rmb()
 
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
@@ -82,7 +88,7 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 #define VIRTIO_NET_CTRL_RX_ALLUNI 2
 #define VIRTIO_NET_CTRL_RX_NOMULTI 3
 #define VIRTIO_NET_CTRL_RX_NOUNI 4
-#define VIRTIO_NET_CTRL_RX_NOBCAST 5 
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
 
 /**
  * Control VLAN filtering
@@ -97,6 +103,24 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 #define VIRTIO_NET_CTRL_VLAN_ADD 0
 #define VIRTIO_NET_CTRL_VLAN_DEL 1
 
+struct virtio_net_ctrl_hdr {
+	uint8_t class;
+	uint8_t cmd;
+} __attribute__((packed));
+
+typedef uint8_t virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK 0
+#define VIRTIO_NET_ERR 1
+
+#define VIRTIO_MAX_CTRL_DATA 128
+
+struct virtio_pmd_ctrl {
+	struct virtio_net_ctrl_hdr hdr;
+	virtio_net_ctrl_ack status;
+	uint8_t data[VIRTIO_MAX_CTRL_DATA];
+};
+
 struct virtqueue {
 	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
 	struct virtio_hw *hw; /**< virtio_hw structure pointer. */
@@ -121,11 +145,13 @@ struct virtqueue {
 	 * VQ_RING_DESC_CHAIN_END.
 	 */
 	uint16_t vq_desc_head_idx;
+	uint16_t vq_desc_tail_idx;
 	/**
 	 * Last consumed descriptor in the used table,
 	 * trails vq_ring.used->idx.
 	 */
 	uint16_t vq_used_cons_idx;
+	uint16_t vq_avail_idx;
 	void *virtio_net_hdr_mem; /**< hdr for each xmit packet */
 
 	struct vq_desc_extra {
@@ -134,6 +160,16 @@ struct virtqueue {
 	} vq_descx[0];
 };
 
+/* If multiqueue is provided by host, then we support it. */
+#ifndef VIRTIO_NET_F_MQ
+/* Device supports Receive Flow Steering */
+#define VIRTIO_NET_F_MQ 0x400000
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+#endif
+
 /**
  * This is the first element of the scatter-gather list. If you don't
  * specify GSO or CSUM features, you can simply ignore the header.
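/*
 * Illustrative sketch only -- not part of the patch above. It shows how a
 * caller might fill in the virtio_pmd_ctrl structure introduced by this
 * change to request a number of RX/TX queue pairs via the control
 * virtqueue (class VIRTIO_NET_CTRL_MQ, command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET).
 * Actually posting the request on the control virtqueue is done by PMD code
 * outside this header. example_fill_mq_ctrl() is an invented name for
 * illustration; memcpy() comes from <string.h>.
 */
static inline void
example_fill_mq_ctrl(struct virtio_pmd_ctrl *ctrl, uint16_t nb_queue_pairs)
{
	ctrl->hdr.class = VIRTIO_NET_CTRL_MQ;            /* multiqueue control class */
	ctrl->hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; /* set the number of queue pairs */
	ctrl->status = VIRTIO_NET_ERR;                   /* device writes VIRTIO_NET_OK on success */
	memcpy(ctrl->data, &nb_queue_pairs, sizeof(nb_queue_pairs)); /* command payload */
}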
@@ -183,8 +219,15 @@ virtqueue_full(const struct virtqueue *vq)
 
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
-static inline void
-vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
+static inline void __attribute__((always_inline))
+vq_update_avail_idx(struct virtqueue *vq)
+{
+	rte_compiler_barrier();
+	vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void __attribute__((always_inline))
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
 {
 	uint16_t avail_idx;
 	/*
@@ -194,21 +237,21 @@ vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
 	 * currently running on another CPU, we can keep it processing the new
 	 * descriptor.
 	 */
-	avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
+	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
 	vq->vq_ring.avail->ring[avail_idx] = desc_idx;
-	mb();
-	vq->vq_ring.avail->idx++;
+	vq->vq_avail_idx++;
 }
 
-static inline int virtqueue_kick_prepare(struct virtqueue * vq)
+static inline int __attribute__((always_inline))
+virtqueue_kick_prepare(struct virtqueue * vq)
 {
 	return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
-static inline void
+static inline void __attribute__((always_inline))
 virtqueue_notify(struct virtqueue *vq)
 {
-	/* 
+	/*
 	 * Ensure updated avail->idx is visible to host. mb() necessary?
 	 * For virtio on IA, the notificaiton is through io port operation
 	 * which is a serialization instruction itself.
@@ -216,32 +259,41 @@ virtqueue_notify(struct virtqueue *vq)
 	VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);
 }
 
-static inline void
+static inline void __attribute__((always_inline))
 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 {
-	struct vring_desc *dp;
+	struct vring_desc *dp, *dp_tail;
 	struct vq_desc_extra *dxp;
+	uint16_t desc_idx_last = desc_idx;
 
 	dp = &vq->vq_ring.desc[desc_idx];
 	dxp = &vq->vq_descx[desc_idx];
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
 		while (dp->flags & VRING_DESC_F_NEXT) {
+			desc_idx_last = dp->next;
 			dp = &vq->vq_ring.desc[dp->next];
 		}
 	}
 	dxp->ndescs = 0;
-	
+
 	/*
 	 * We must append the existing free chain, if any, to the end of
 	 * newly freed chain. If the virtqueue was completely used, then
 	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
 	 */
-	dp->next = vq->vq_desc_head_idx;
-	vq->vq_desc_head_idx = desc_idx;
+	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+		vq->vq_desc_head_idx = desc_idx;
+	} else {
+		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+		dp_tail->next = desc_idx;
+	}
+
+	vq->vq_desc_tail_idx = desc_idx_last;
+	dp->next = VQ_RING_DESC_CHAIN_END;
 }
 
-static inline int
+static inline int __attribute__((always_inline))
 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 {
 	struct vq_desc_extra *dxp;
@@ -271,13 +323,15 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 	start_dp[idx].flags = VRING_DESC_F_WRITE;
 	idx = start_dp[idx].next;
 	vq->vq_desc_head_idx = idx;
+	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+		vq->vq_desc_tail_idx = idx;
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
-	vq_ring_update_avail(vq, head_idx);
+	vq_update_avail_ring(vq, head_idx);
 
 	return (0);
 }
 
-static inline int
+static inline int __attribute__((always_inline))
 virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 {
 	struct vq_desc_extra *dxp;
@@ -290,11 +344,13 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	if (unlikely(txvq->vq_free_cnt < needed))
 		return (-EMSGSIZE);
 	head_idx = txvq->vq_desc_head_idx;
-	if (unlikely(head_idx >= txvq->vq_nentries)) 
+	if (unlikely(head_idx >= txvq->vq_nentries))
 		return (-EFAULT);
 
 	idx = head_idx;
 	dxp = &txvq->vq_descx[idx];
+	if (dxp->cookie != NULL)
+		rte_pktmbuf_free_seg(dxp->cookie);
 	dxp->cookie = (void *)cookie;
 	dxp->ndescs = needed;
 
@@ -308,39 +364,62 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	start_dp[idx].flags = 0;
 	idx = start_dp[idx].next;
 	txvq->vq_desc_head_idx = idx;
+	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+		txvq->vq_desc_tail_idx = idx;
 	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
-	vq_ring_update_avail(txvq, head_idx);
+	vq_update_avail_ring(txvq, head_idx);
 
 	return (0);
 }
 
-static inline uint16_t
-virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
+static inline uint16_t __attribute__((always_inline))
+virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
 {
 	struct vring_used_elem *uep;
 	struct rte_mbuf *cookie;
 	uint16_t used_idx, desc_idx;
 	uint16_t i;
 
+	/* Caller does the check */
 	for (i = 0; i < num ; i ++) {
 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
 		uep = &vq->vq_ring.used->ring[used_idx];
 		desc_idx = (uint16_t) uep->id;
+		len[i] = uep->len;
 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+
 		if (unlikely(cookie == NULL)) {
-			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n", 
+			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
 				vq->vq_used_cons_idx);
 			break;
 		}
-		len[i] = uep->len;
+
+		rte_prefetch0(cookie);
+		rte_packet_prefetch(cookie->pkt.data);
 		rx_pkts[i] = cookie;
 		vq->vq_used_cons_idx++;
 		vq_ring_free_chain(vq, desc_idx);
 		vq->vq_descx[desc_idx].cookie = NULL;
 	}
+
 	return (i);
 }
 
+static inline uint16_t __attribute__((always_inline))
+virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
+{
+	struct vring_used_elem *uep;
+	uint16_t used_idx, desc_idx;
+
+	used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+	uep = &vq->vq_ring.used->ring[used_idx];
+	desc_idx = (uint16_t) uep->id;
+	vq->vq_used_cons_idx++;
+	vq_ring_free_chain(vq, desc_idx);
+
+	return 0;
+}
+
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
 	uint16_t used_idx, nused; \