diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 5f48566cea..ccfd82f4fb 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -31,45 +31,32 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include <stdint.h>
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "main.h"
-#include "virtio-net.h"
+#include <linux/virtio_net.h>
+
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_virtio_net.h>
+
 #include "vhost-net-cdev.h"
 
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
+#define MAX_PKT_BURST 32
 
-/*
+/**
  * This function adds buffers to the virtio devices RX virtqueue. Buffers can
  * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were succesfully
+ * count is returned to indicate the number of packets that are successfully
  * added to the RX queue. This function works when mergeable is disabled.
  */
 static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count)
 {
 	struct vhost_virtqueue *vq;
 	struct vring_desc *desc;
 	struct rte_mbuf *buff;
 	/* The virtio_hdr is initialised to 0. */
-	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
+	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 	uint64_t buff_addr = 0;
 	uint64_t buff_hdr_addr = 0;
 	uint32_t head[MAX_PKT_BURST], packet_len = 0;
@@ -80,10 +67,18 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 	uint8_t success = 0;
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+	if (unlikely(queue_id != VIRTIO_RXQ)) {
+		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+		return 0;
+	}
+
 	vq = dev->virtqueue[VIRTIO_RXQ];
 	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
-	/* As many data cores may want access to available buffers, they need to be reserved. */
+	/*
+	 * As many data cores may want access to available buffers,
+	 * they need to be reserved.
+	 */
 	do {
 		res_base_idx = vq->last_used_idx_res;
 		avail_idx = *((volatile uint16_t *)&vq->avail->idx);
@@ -98,18 +93,21 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 		res_end_idx = res_base_idx + count;
 		/* vq->last_used_idx_res is atomically updated. */
-		success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
-									res_end_idx);
+		/* TODO: Allow to disable cmpset if no concurrency in application. */
+		success = rte_atomic16_cmpset(&vq->last_used_idx_res,
+				res_base_idx, res_end_idx);
 	} while (unlikely(success == 0));
 	res_cur_idx = res_base_idx;
-	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+		dev->device_fh, res_cur_idx, res_end_idx);
 
 	/* Prefetch available ring to retrieve indexes. */
 	rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
 
 	/* Retrieve all of the head indexes first to avoid caching issues. */
 	for (head_idx = 0; head_idx < count; head_idx++)
-		head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
+		head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
+					(vq->size - 1)];
 
 	/*Prefetch descriptor index. */
 	rte_prefetch0(&vq->desc[head[packet_success]]);
@@ -123,7 +121,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 		/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
 		buff_addr = gpa_to_vva(dev, desc->addr);
 		/* Prefetch buffer address. */
-		rte_prefetch0((void*)(uintptr_t)buff_addr);
+		rte_prefetch0((void *)(uintptr_t)buff_addr);
 
 		/* Copy virtio_hdr to packet and increment buffer address */
 		buff_hdr_addr = buff_addr;
@@ -145,10 +143,12 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 		}
 
 		/* Update used ring with desc information */
-		vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
+		vq->used->ring[res_cur_idx & (vq->size - 1)].id =
+							head[packet_success];
 		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
 
 		/* Copy mbuf data to buffer */
+		/* FIXME for sg mbuf and the case that desc couldn't hold the mbuf data */
 		rte_memcpy((void *)(uintptr_t)buff_addr,
 			rte_pktmbuf_mtod(buff, const void *),
 			rte_pktmbuf_data_len(buff));
@@ -185,9 +185,8 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 }
 
 static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev,
-	uint16_t res_base_idx, uint16_t res_end_idx,
-	struct rte_mbuf *pkt)
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
+	uint16_t res_end_idx, struct rte_mbuf *pkt)
 {
 	uint32_t vec_idx = 0;
 	uint32_t entry_success = 0;
@@ -391,14 +390,11 @@ copy_from_mbuf_to_vring(struct virtio_net *dev,
 }
 
 /*
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were succesfully
- * added to the RX queue. This function works for mergeable RX.
+ * This function works for mergeable RX.
  */
 static inline uint32_t __attribute__((always_inline))
-virtio_dev_merge_rx(struct virtio_net *dev, struct rte_mbuf **pkts,
-	uint32_t count)
+virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count)
 {
 	struct vhost_virtqueue *vq;
 	uint32_t pkt_idx = 0, entry_success = 0;
@@ -408,6 +404,10 @@ virtio_dev_merge_rx(struct virtio_net *dev, struct rte_mbuf **pkts,
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
 		dev->device_fh);
+	if (unlikely(queue_id != VIRTIO_RXQ)) {
+		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+	}
+
 	vq = dev->virtqueue[VIRTIO_RXQ];
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
@@ -513,9 +513,19 @@ virtio_dev_merge_rx(struct virtio_net *dev, struct rte_mbuf **pkts,
 	return count;
 }
 
-/* This function works for TX packets with mergeable feature enabled. */
-static inline void __attribute__((always_inline))
-virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
+uint16_t
+rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint16_t count)
+{
+	if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
+		return virtio_dev_merge_rx(dev, queue_id, pkts, count);
+	else
+		return virtio_dev_rx(dev, queue_id, pkts, count);
+}
+
+uint16_t
+rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
 {
 	struct rte_mbuf *m, *prev;
 	struct vhost_virtqueue *vq;
@@ -526,17 +536,20 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 	uint32_t i;
 	uint16_t free_entries, entry_success = 0;
 	uint16_t avail_idx;
-	uint32_t buf_size = MBUF_SIZE - (sizeof(struct rte_mbuf)
-			+ RTE_PKTMBUF_HEADROOM);
+
+	if (unlikely(queue_id != VIRTIO_TXQ)) {
+		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+		return 0;
+	}
 
 	vq = dev->virtqueue[VIRTIO_TXQ];
 	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
 
 	/* If there are no available buffers then return. */
 	if (vq->last_used_idx == avail_idx)
-		return;
+		return 0;
 
-	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_tx()\n",
+	LOG_DEBUG(VHOST_DATA, "%s (%"PRIu64")\n", __func__,
 		dev->device_fh);
 
 	/* Prefetch available ring to retrieve head indexes. */
@@ -545,11 +558,12 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 
 	/*get the number of free entries in the ring*/
 	free_entries = (avail_idx - vq->last_used_idx);
 
+	free_entries = RTE_MIN(free_entries, count);
 	/* Limit to MAX_PKT_BURST. */
 	free_entries = RTE_MIN(free_entries, MAX_PKT_BURST);
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
-			dev->device_fh, free_entries);
+		dev->device_fh, free_entries);
 	/* Retrieve all of the head indexes first to avoid caching issues. */
 	for (i = 0; i < free_entries; i++)
 		head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
@@ -590,19 +604,18 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 
 		vb_offset = 0;
 		vb_avail = desc->len;
-		seg_offset = 0;
-		seg_avail = buf_size;
-		cpy_len = RTE_MIN(vb_avail, seg_avail);
-
-		PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
-
 		/* Allocate an mbuf and populate the structure. */
 		m = rte_pktmbuf_alloc(mbuf_pool);
 		if (unlikely(m == NULL)) {
 			RTE_LOG(ERR, VHOST_DATA,
 				"Failed to allocate memory for mbuf.\n");
-			return;
+			return entry_success;
 		}
+		seg_offset = 0;
+		seg_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+		cpy_len = RTE_MIN(vb_avail, seg_avail);
+
+		PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
 
 		seg_num++;
 		cur = m;
@@ -639,7 +652,7 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 				prev->next = cur;
 				prev = cur;
 				seg_offset = 0;
-				seg_avail = buf_size;
+				seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
 			} else {
 				if (desc->flags & VRING_DESC_F_NEXT) {
 					/*
@@ -673,7 +686,7 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 					prev->next = cur;
 					prev = cur;
 					seg_offset = 0;
-					seg_avail = buf_size;
+					seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
 				}
 
 				desc = &vq->desc[desc->next];
@@ -703,9 +716,9 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 
 		m->nb_segs = seg_num;
 
+		pkts[entry_success] = m;
 		vq->last_used_idx++;
 		entry_success++;
-		rte_pktmbuf_free(m);
 	}
 
 	rte_compiler_barrier();
@@ -713,5 +726,5 @@ virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 	/* Kick guest if required. */
 	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
 		eventfd_write((int)vq->kickfd, 1);
-
+	return entry_success;
 }
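
The net effect of this patch is to replace the internal example-style entry points virtio_dev_rx()/virtio_dev_merge_tx() with the public burst API rte_vhost_enqueue_burst()/rte_vhost_dequeue_burst(): the caller now passes the queue id explicitly, enqueue copies packet data into the guest's RX ring (the caller keeps ownership of its mbufs), and dequeue allocates fresh mbufs from the caller's mempool and hands them back in pkts[] instead of freeing them internally. Below is a minimal sketch of a polling loop built on this API; it is not part of the patch, the function name vhost_switch_burst is hypothetical, and it assumes a struct virtio_net * obtained from the library's device-creation callback plus an application-created mbuf pool.

#include <rte_mbuf.h>
#include <rte_virtio_net.h>	/* public vhost header of this era (assumed) */

#define BURST 32		/* mirrors MAX_PKT_BURST above */

/* Hypothetical echo path: drain the guest TX ring, feed the packets
 * straight back into the guest RX ring, then release the mbufs. */
static void
vhost_switch_burst(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[BURST];
	uint16_t nb, sent, i;

	/* Pull up to BURST packets the guest queued on VIRTIO_TXQ; returns 0
	 * when the ring is empty or, per the check added above, when a queue
	 * other than VIRTIO_TXQ is requested. */
	nb = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts, BURST);
	if (nb == 0)
		return;

	/* Copy the packets into the guest RX ring; the return value may be
	 * smaller than nb if the ring runs out of descriptors. */
	sent = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, nb);
	(void)sent;	/* any shortfall is simply dropped in this sketch */

	/* Enqueue copies the data, so the caller frees every mbuf, sent or not. */
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);
}

This ownership asymmetry is why the old rte_pktmbuf_free(m) inside the TX loop becomes pkts[entry_success] = m in the diff above: dequeued mbufs now belong to the caller.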