* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <arpa/inet.h>
-#include <getopt.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/virtio_net.h>
-#include <linux/virtio_ring.h>
-#include <signal.h>
#include <stdint.h>
-#include <sys/eventfd.h>
-#include <sys/param.h>
-#include <unistd.h>
+#include <linux/virtio_net.h>
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_ethdev.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_virtio_net.h>
-#include "main.h"
-#include "virtio-net.h"
-#include "vhost-net-cdev.h"
+#include "vhost-net.h"
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
+#define MAX_PKT_BURST 32
-/*
+/**
 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
* be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were succesfully
+ * count is returned to indicate the number of packets that are successfully
* added to the RX queue. This function works when mergeable is disabled.
*/
static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
struct vring_desc *desc;
struct rte_mbuf *buff;
/* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
+ struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
uint64_t buff_addr = 0;
uint64_t buff_hdr_addr = 0;
uint32_t head[MAX_PKT_BURST], packet_len = 0;
uint8_t success = 0;
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ if (unlikely(queue_id != VIRTIO_RXQ)) {
+ LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ return 0;
+ }
+
vq = dev->virtqueue[VIRTIO_RXQ];
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
- /* As many data cores may want access to available buffers, they need to be reserved. */
+ /*
+ * As many data cores may want access to available buffers,
+ * they need to be reserved.
+ */
do {
res_base_idx = vq->last_used_idx_res;
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
res_end_idx = res_base_idx + count;
/* vq->last_used_idx_res is atomically updated. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
- res_end_idx);
+ /* TODO: Allow to disable cmpset if no concurrency in application. */
+ success = rte_atomic16_cmpset(&vq->last_used_idx_res,
+ res_base_idx, res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d | End Index %d\n",
+ dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
+ head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
+ (vq->size - 1)];
/* Prefetch descriptor index. */
rte_prefetch0(&vq->desc[head[packet_success]]);
/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
buff_addr = gpa_to_vva(dev, desc->addr);
/* Prefetch buffer address. */
- rte_prefetch0((void*)(uintptr_t)buff_addr);
+ rte_prefetch0((void *)(uintptr_t)buff_addr);
/* Copy virtio_hdr to packet and increment buffer address */
buff_hdr_addr = buff_addr;
}
/* Update used ring with desc information */
- vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
+ vq->used->ring[res_cur_idx & (vq->size - 1)].id =
+ head[packet_success];
vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
/* Copy mbuf data to buffer */
+ /* FIXME: handle scatter-gather mbufs and the case where the desc can't hold the mbuf data */
rte_memcpy((void *)(uintptr_t)buff_addr,
rte_pktmbuf_mtod(buff, const void *),
rte_pktmbuf_data_len(buff));
*(volatile uint16_t *)&vq->used->idx += count;
vq->last_used_idx = res_end_idx;
+ /* Flush the used->idx update before we read avail->flags. */
+ rte_mb();
+
/* Kick the guest if necessary. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
+ eventfd_write((int)vq->callfd, 1);
return count;
}
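The do/while above is the only synchronization between data cores sharing one RX queue: each core atomically claims a private [res_base_idx, res_end_idx) window before it touches the ring. As a standalone illustration of that reservation pattern (a minimal sketch; reserve_slots is a hypothetical helper, not part of this patch):

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>

/* Hypothetical sketch: atomically claim `count` slots of a shared ring
 * index. The winning core owns [base, base + count) and can fill the
 * used ring for that range without further locking. */
static uint16_t
reserve_slots(volatile uint16_t *idx_res, uint16_t count)
{
	uint16_t base;
	int success;

	do {
		base = *idx_res;
		/* rte_atomic16_cmpset() returns non-zero on success. */
		success = rte_atomic16_cmpset(idx_res, base,
				(uint16_t)(base + count));
	} while (unlikely(success == 0));

	return base;
}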
static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev,
- uint16_t res_base_idx, uint16_t res_end_idx,
- struct rte_mbuf *pkt)
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
+ uint16_t res_end_idx, struct rte_mbuf *pkt)
{
uint32_t vec_idx = 0;
uint32_t entry_success = 0;
}
/*
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were succesfully
- * added to the RX queue. This function works for mergeable RX.
+ * This function works for mergeable RX.
*/
static inline uint32_t __attribute__((always_inline))
-virtio_dev_merge_rx(struct virtio_net *dev, struct rte_mbuf **pkts,
- uint32_t count)
+virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
uint32_t pkt_idx = 0, entry_success = 0;
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
dev->device_fh);
+ if (unlikely(queue_id != VIRTIO_RXQ)) {
+ LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ return 0;
+ }
+
vq = dev->virtqueue[VIRTIO_RXQ];
count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
*(volatile uint16_t *)&vq->used->idx += entry_success;
vq->last_used_idx = res_end_idx;
+ /* Flush the used->idx update before we read avail->flags. */
+ rte_mb();
+
/* Kick the guest if necessary. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
+ eventfd_write((int)vq->callfd, 1);
}
return count;
}
-/* This function works for TX packets with mergeable feature enabled. */
-static inline void __attribute__((always_inline))
-virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
+uint16_t
+rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
+ return virtio_dev_merge_rx(dev, queue_id, pkts, count);
+ else
+ return virtio_dev_rx(dev, queue_id, pkts, count);
+}
+
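On the data path, an application would typically feed NIC traffic straight into this new API. A minimal usage sketch, assuming a running vhost device dev, an initialized port 0/queue 0 and the MAX_PKT_BURST defined above (all caller-side names here are placeholders):

	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(0, 0, pkts, MAX_PKT_BURST);
	rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, nb_rx);
	/* The enqueue copies into guest buffers, so every mbuf is freed
	 * here regardless of how many packets were actually enqueued. */
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);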
+uint16_t
+rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
struct rte_mbuf *m, *prev;
struct vhost_virtqueue *vq;
uint32_t i;
uint16_t free_entries, entry_success = 0;
uint16_t avail_idx;
- uint32_t buf_size = MBUF_SIZE - (sizeof(struct rte_mbuf)
- + RTE_PKTMBUF_HEADROOM);
+
+ if (unlikely(queue_id != VIRTIO_TXQ)) {
+ LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ return 0;
+ }
vq = dev->virtqueue[VIRTIO_TXQ];
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
/* If there are no available buffers then return. */
if (vq->last_used_idx == avail_idx)
- return;
+ return 0;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_tx()\n",
+ LOG_DEBUG(VHOST_DATA, "%s (%"PRIu64")\n", __func__,
dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
/*get the number of free entries in the ring*/
free_entries = (avail_idx - vq->last_used_idx);
+ free_entries = RTE_MIN(free_entries, count);
/* Limit to MAX_PKT_BURST. */
free_entries = RTE_MIN(free_entries, MAX_PKT_BURST);
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
- dev->device_fh, free_entries);
+ dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
vb_offset = 0;
vb_avail = desc->len;
- seg_offset = 0;
- seg_avail = buf_size;
- cpy_len = RTE_MIN(vb_avail, seg_avail);
-
- PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
-
/* Allocate an mbuf and populate the structure. */
m = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(m == NULL)) {
RTE_LOG(ERR, VHOST_DATA,
"Failed to allocate memory for mbuf.\n");
- return;
+ break;
}
+ seg_offset = 0;
+ seg_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+ cpy_len = RTE_MIN(vb_avail, seg_avail);
+
+ PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
seg_num++;
cur = m;
prev->next = cur;
prev = cur;
seg_offset = 0;
- seg_avail = buf_size;
+ seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
} else {
if (desc->flags & VRING_DESC_F_NEXT) {
/*
prev->next = cur;
prev = cur;
seg_offset = 0;
- seg_avail = buf_size;
+ seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
}
desc = &vq->desc[desc->next];
m->nb_segs = seg_num;
+ pkts[entry_success] = m;
vq->last_used_idx++;
entry_success++;
- rte_pktmbuf_free(m);
}
rte_compiler_barrier();
vq->used->idx += entry_success;
/* Kick guest if required. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
-
+ eventfd_write((int)vq->callfd, 1);
+ return entry_success;
}
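The dequeue side mirrors that; a sketch of draining the guest TX virtqueue out through a NIC, with dev, mbuf_pool and port 0/queue 0 again being caller-side placeholders:

	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t nb_deq, nb_tx, i;

	nb_deq = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool,
			pkts, MAX_PKT_BURST);
	nb_tx = rte_eth_tx_burst(0, 0, pkts, nb_deq);
	/* Free whatever the NIC did not accept. */
	for (i = nb_tx; i < nb_deq; i++)
		rte_pktmbuf_free(pkts[i]);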