static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- memset(net_hdr, 0, sizeof(struct virtio_net_hdr));
-
if (m_buf->ol_flags & PKT_TX_L4_MASK) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
+ m_buf->l4_len;
}
+}
- return;
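
The hunk above only drops the redundant memset and the bare return; a complete
virtio_enqueue_offload() also has to tell the guest where to write the
checksum. A sketch of that csum_offset selection, not part of this patch,
assuming the pre-18.x struct tcp_hdr/udp_hdr/sctp_hdr names from rte_tcp.h
and friends:

    /* fragment: would sit inside the PKT_TX_L4_MASK branch; offsetof()
     * needs <stddef.h> */
    switch (m_buf->ol_flags & PKT_TX_L4_MASK) {
    case PKT_TX_TCP_CKSUM:
            net_hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
            break;
    case PKT_TX_UDP_CKSUM:
            net_hdr->csum_offset = offsetof(struct udp_hdr, dgram_cksum);
            break;
    case PKT_TX_SCTP_CKSUM:
            net_hdr->csum_offset = offsetof(struct sctp_hdr, cksum);
            break;
    }
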
+static inline void
+copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
+ struct virtio_net_hdr_mrg_rxbuf hdr)
+{
+ if (dev->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
+ *(struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc_addr = hdr;
+ else
+ *(struct virtio_net_hdr *)(uintptr_t)desc_addr = hdr.hdr;
}
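
copy_virtio_net_hdr() can get away with a single assignment because, per the
virtio spec, the mergeable header merely appends a num_buffers field to the
basic header, so dev->vhost_hlen (10 or 12 bytes) selects between two
prefix-compatible layouts. Simplified mirrors of the spec structures, for
orientation only:

    struct virtio_net_hdr {                 /* dev->vhost_hlen == 10 */
            uint8_t  flags;
            uint8_t  gso_type;
            uint16_t hdr_len;
            uint16_t gso_size;
            uint16_t csum_start;
            uint16_t csum_offset;
    };

    struct virtio_net_hdr_mrg_rxbuf {       /* dev->vhost_hlen == 12 */
            struct virtio_net_hdr hdr;
            uint16_t num_buffers;           /* mergeable Rx only */
    };
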
-/**
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that are succesfully
- * added to the RX queue. This function works when the mbuf is scattered, but
- * it doesn't support the mergeable feature.
- */
-static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+static inline int __attribute__((always_inline))
+copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, uint16_t desc_idx)
{
- struct vhost_virtqueue *vq;
- struct vring_desc *desc, *hdr_desc;
- struct rte_mbuf *buff, *first_buff;
- /* The virtio_hdr is initialised to 0. */
+ uint32_t desc_avail, desc_offset;
+ uint32_t mbuf_avail, mbuf_offset;
+ uint32_t cpy_len;
+ struct vring_desc *desc;
+ uint64_t desc_addr;
struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
- uint64_t buff_addr = 0;
- uint64_t buff_hdr_addr = 0;
- uint32_t head[MAX_PKT_BURST];
- uint32_t head_idx, packet_success = 0;
- uint16_t avail_idx, res_cur_idx;
- uint16_t res_base_idx, res_end_idx;
- uint16_t free_entries;
- uint8_t success = 0;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
- RTE_LOG(ERR, VHOST_DATA,
- "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
- __func__, dev->device_fh, queue_id);
- return 0;
- }
-
- vq = dev->virtqueue[queue_id];
- if (unlikely(vq->enabled == 0))
- return 0;
-
- count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
+ desc = &vq->desc[desc_idx];
+ desc_addr = gpa_to_vva(dev, desc->addr);
/*
- * As many data cores may want access to available buffers,
- * they need to be reserved.
+ * The check on 'desc_addr' is placed outside of the 'unlikely' macro to
+ * avoid a performance issue with some versions of gcc (4.8.4 and 5.3.0),
+ * which otherwise store the offset on the stack instead of in a register.
*/
- do {
- res_base_idx = vq->last_used_idx_res;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- free_entries = (avail_idx - res_base_idx);
- /*check that we have enough buffers*/
- if (unlikely(count > free_entries))
- count = free_entries;
-
- if (count == 0)
- return 0;
-
- res_end_idx = res_base_idx + count;
- /* vq->last_used_idx_res is atomically updated. */
- /* TODO: Allow to disable cmpset if no concurrency in application. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_base_idx, res_end_idx);
- } while (unlikely(success == 0));
- res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
- dev->device_fh, res_cur_idx, res_end_idx);
-
- /* Prefetch available ring to retrieve indexes. */
- rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
- (vq->size - 1)];
+ if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr)
+ return -1;
- /*Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
- while (res_cur_idx != res_end_idx) {
- uint32_t offset = 0, vb_offset = 0;
- uint32_t pkt_len, len_to_cpy, data_len, total_copied = 0;
- uint8_t hdr = 0, uncompleted_pkt = 0;
- uint16_t idx;
+ virtio_enqueue_offload(m, &virtio_hdr.hdr);
+ copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+ vhost_log_write(dev, desc->addr, dev->vhost_hlen);
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
- /* Get descriptor from available ring */
- desc = &vq->desc[head[packet_success]];
+ desc_offset = dev->vhost_hlen;
+ desc_avail = desc->len - dev->vhost_hlen;
- buff = pkts[packet_success];
- first_buff = buff;
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_offset = 0;
+ while (mbuf_avail != 0 || m->next != NULL) {
+ /* done with current mbuf, fetch next */
+ if (mbuf_avail == 0) {
+ m = m->next;
- /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)buff_addr);
+ mbuf_offset = 0;
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ }
- /* Copy virtio_hdr to packet and increment buffer address */
- buff_hdr_addr = buff_addr;
- hdr_desc = desc;
+ /* done with current desc buf, fetch next */
+ if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
+ /* Not enough room left in the vring buffer */
+ return -1;
+ }
+ if (unlikely(desc->next >= vq->size))
+ return -1;
- /*
- * If the descriptors are chained the header and data are
- * placed in separate buffers.
- */
- if ((desc->flags & VRING_DESC_F_NEXT) &&
- (desc->len == vq->vhost_hlen)) {
desc = &vq->desc[desc->next];
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- } else {
- vb_offset += vq->vhost_hlen;
- hdr = 1;
- }
+ desc_addr = gpa_to_vva(dev, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
- pkt_len = rte_pktmbuf_pkt_len(buff);
- data_len = rte_pktmbuf_data_len(buff);
- len_to_cpy = RTE_MIN(data_len,
- hdr ? desc->len - vq->vhost_hlen : desc->len);
- while (total_copied < pkt_len) {
- /* Copy mbuf data to buffer */
- rte_memcpy((void *)(uintptr_t)(buff_addr + vb_offset),
- rte_pktmbuf_mtod_offset(buff, const void *, offset),
- len_to_cpy);
- vhost_log_write(dev, desc->addr + vb_offset, len_to_cpy);
- PRINT_PACKET(dev, (uintptr_t)(buff_addr + vb_offset),
- len_to_cpy, 0);
-
- offset += len_to_cpy;
- vb_offset += len_to_cpy;
- total_copied += len_to_cpy;
-
- /* The whole packet completes */
- if (total_copied == pkt_len)
- break;
+ desc_offset = 0;
+ desc_avail = desc->len;
+ }
- /* The current segment completes */
- if (offset == data_len) {
- buff = buff->next;
- offset = 0;
- data_len = rte_pktmbuf_data_len(buff);
- }
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ cpy_len, 0);
- /* The current vring descriptor done */
- if (vb_offset == desc->len) {
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc = &vq->desc[desc->next];
- buff_addr = gpa_to_vva(dev, desc->addr);
- vb_offset = 0;
- } else {
- /* Room in vring buffer is not enough */
- uncompleted_pkt = 1;
- break;
- }
- }
- len_to_cpy = RTE_MIN(data_len - offset, desc->len - vb_offset);
- }
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+ }
- /* Update used ring with desc information */
- idx = res_cur_idx & (vq->size - 1);
- vq->used->ring[idx].id = head[packet_success];
+ return 0;
+}
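
For orientation, copy_mbuf_to_desc() consumes exactly one descriptor chain per
packet: either a single buffer large enough for header plus data, or several
buffers linked through VRING_DESC_F_NEXT. A hypothetical two-entry chain as a
guest might post it (addresses and lengths invented):

    struct vring_desc chain[2] = {
            /* holds the virtio-net header and the first data bytes */
            { .addr = 0x10000, .len = 2048,
              .flags = VRING_DESC_F_NEXT, .next = 1 },
            /* rest of the data; a cleared NEXT flag ends the chain */
            { .addr = 0x20000, .len = 2048, .flags = 0, .next = 0 },
    };
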
- /* Drop the packet if it is uncompleted */
- if (unlikely(uncompleted_pkt == 1))
- vq->used->ring[idx].len = vq->vhost_hlen;
- else
- vq->used->ring[idx].len = pkt_len + vq->vhost_hlen;
+/**
+ * This function adds buffers to the virtio device's RX virtqueue. Buffers can
+ * be received from the physical port or from another virtio device. A packet
+ * count is returned to indicate the number of packets that were successfully
+ * added to the RX queue. This function works with scattered mbufs, but
+ * it does not support the mergeable feature.
+ */
+static inline uint32_t __attribute__((always_inline))
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ uint16_t avail_idx, free_entries, start_idx;
+ uint16_t desc_indexes[MAX_PKT_BURST];
+ uint16_t used_idx;
+ uint32_t i;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[idx]),
- sizeof(vq->used->ring[idx]));
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
- res_cur_idx++;
- packet_success++;
+ vq = dev->virtqueue[queue_id];
+ if (unlikely(vq->enabled == 0))
+ return 0;
- if (unlikely(uncompleted_pkt == 1))
- continue;
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ start_idx = vq->last_used_idx;
+ free_entries = avail_idx - start_idx;
+ count = RTE_MIN(count, free_entries);
+ count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
+ if (count == 0)
+ return 0;
- virtio_enqueue_offload(first_buff, &virtio_hdr.hdr);
+ LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
+ dev->vid, start_idx, start_idx + count);
- rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
- (const void *)&virtio_hdr, vq->vhost_hlen);
- vhost_log_write(dev, hdr_desc->addr, vq->vhost_hlen);
+ /* Retrieve all of the desc indexes first to avoid caching issues. */
+ rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
+ for (i = 0; i < count; i++) {
+ used_idx = (start_idx + i) & (vq->size - 1);
+ desc_indexes[i] = vq->avail->ring[used_idx];
+ vq->used->ring[used_idx].id = desc_indexes[i];
+ vq->used->ring[used_idx].len = pkts[i]->pkt_len +
+ dev->vhost_hlen;
+ vhost_log_used_vring(dev, vq,
+ offsetof(struct vring_used, ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
+ }
- PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
+ rte_prefetch0(&vq->desc[desc_indexes[0]]);
+ for (i = 0; i < count; i++) {
+ uint16_t desc_idx = desc_indexes[i];
+ int err;
- if (res_cur_idx < res_end_idx) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
+ err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx);
+ if (unlikely(err)) {
+ used_idx = (start_idx + i) & (vq->size - 1);
+ vq->used->ring[used_idx].len = dev->vhost_hlen;
+ vhost_log_used_vring(dev, vq,
+ offsetof(struct vring_used, ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
}
- }
- rte_compiler_barrier();
+ if (i + 1 < count)
+ rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
+ }
- /* Wait until it's our turn to add our buffer to the used ring. */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
+ rte_smp_wmb();
*(volatile uint16_t *)&vq->used->idx += count;
- vq->last_used_idx = res_end_idx;
+ vq->last_used_idx += count;
vhost_log_used_vring(dev, vq,
offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
rte_mb();
/* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vq->callfd >= 0))
eventfd_write(vq->callfd, (eventfd_t)1);
return count;
}
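
Two details worth noting in virtio_dev_rx(): free_entries is computed in
uint16_t, so the subtraction stays correct when avail->idx wraps past 65535,
and rte_smp_wmb() makes the used-ring entries visible before used->idx is
bumped. A standalone demonstration of the wraparound arithmetic (index values
invented):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t avail_idx = 3;          /* wrapped past 65535 */
            uint16_t last_used_idx = 65533;  /* still behind the wrap */

            /* modulo-2^16 subtraction: (3 - 65533) mod 65536 == 6 */
            uint16_t free_entries = avail_idx - last_used_idx;

            assert(free_entries == 6);
            return 0;
    }
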
-static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
- uint16_t res_base_idx, uint16_t res_end_idx,
- struct rte_mbuf *pkt)
+static inline int
+fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
+ uint32_t *allocated, uint32_t *vec_idx,
+ struct buf_vector *buf_vec)
{
- uint32_t vec_idx = 0;
- uint32_t entry_success = 0;
- struct vhost_virtqueue *vq;
- /* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {
- {0, 0, 0, 0, 0, 0}, 0};
- uint16_t cur_idx = res_base_idx;
- uint64_t vb_addr = 0;
- uint64_t vb_hdr_addr = 0;
- uint32_t seg_offset = 0;
- uint32_t vb_offset = 0;
- uint32_t seg_avail;
- uint32_t vb_avail;
- uint32_t cpy_len, entry_len;
- uint16_t idx;
-
- if (pkt == NULL)
- return 0;
+ uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
+ uint32_t vec_id = *vec_idx;
+ uint32_t len = *allocated;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| "
- "End Index %d\n",
- dev->device_fh, cur_idx, res_end_idx);
+ while (1) {
+ if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+ return -1;
- /*
- * Convert from gpa to vva
- * (guest physical addr -> vhost virtual addr)
- */
- vq = dev->virtqueue[queue_id];
+ len += vq->desc[idx].len;
+ buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+ buf_vec[vec_id].buf_len = vq->desc[idx].len;
+ buf_vec[vec_id].desc_idx = idx;
+ vec_id++;
- vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
- vb_hdr_addr = vb_addr;
+ if ((vq->desc[idx].flags & VRING_DESC_F_NEXT) == 0)
+ break;
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)vb_addr);
+ idx = vq->desc[idx].next;
+ }
- virtio_hdr.num_buffers = res_end_idx - res_base_idx;
+ *allocated = len;
+ *vec_idx = vec_id;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") RX: Num merge buffers %d\n",
- dev->device_fh, virtio_hdr.num_buffers);
+ return 0;
+}
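
fill_vec_buf() flattens one descriptor chain into buf_vec so the copy loop can
walk a plain array instead of chasing next pointers. A usage fragment (assumes
a vq in scope; descriptor sizes and indexes invented):

    struct buf_vector vec[BUF_VECTOR_MAX];
    uint32_t allocated = 0, vec_idx = 0;

    if (fill_vec_buf(vq, vq->last_used_idx, &allocated, &vec_idx, vec) == 0) {
            /* e.g. a 2048B + 1024B chain leaves:
             *   vec[0] = { .buf_addr = <gpa0>, .buf_len = 2048, .desc_idx = 5 }
             *   vec[1] = { .buf_addr = <gpa1>, .buf_len = 1024, .desc_idx = 6 }
             * with allocated == 3072 and vec_idx == 2.
             */
    }
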
- virtio_enqueue_offload(pkt, &virtio_hdr.hdr);
+/*
+ * Returns -1 on failure, 0 on success.
+ */
+static inline int
+reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
+ uint16_t *end, struct buf_vector *buf_vec)
+{
+ uint16_t cur_idx;
+ uint16_t avail_idx;
+ uint32_t allocated = 0;
+ uint32_t vec_idx = 0;
+ uint16_t tries = 0;
- rte_memcpy((void *)(uintptr_t)vb_hdr_addr,
- (const void *)&virtio_hdr, vq->vhost_hlen);
- vhost_log_write(dev, vq->buf_vec[vec_idx].buf_addr, vq->vhost_hlen);
+ cur_idx = vq->last_used_idx;
- PRINT_PACKET(dev, (uintptr_t)vb_hdr_addr, vq->vhost_hlen, 1);
+ while (1) {
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ if (unlikely(cur_idx == avail_idx))
+ return -1;
- seg_avail = rte_pktmbuf_data_len(pkt);
- vb_offset = vq->vhost_hlen;
- vb_avail = vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
+ if (unlikely(fill_vec_buf(vq, cur_idx, &allocated,
+ &vec_idx, buf_vec) < 0))
+ return -1;
- entry_len = vq->vhost_hlen;
+ cur_idx++;
+ tries++;
- if (vb_avail == 0) {
- uint32_t desc_idx = vq->buf_vec[vec_idx].desc_idx;
+ if (allocated >= size)
+ break;
- if ((vq->desc[desc_idx].flags & VRING_DESC_F_NEXT) == 0) {
- idx = cur_idx & (vq->size - 1);
+ /*
+ * If we have tried every available ring item and still
+ * can't get enough buffers, something abnormal has
+ * happened.
+ */
+ if (unlikely(tries >= vq->size))
+ return -1;
+ }
- /* Update used ring with desc information */
- vq->used->ring[idx].id = vq->buf_vec[vec_idx].desc_idx;
- vq->used->ring[idx].len = entry_len;
+ *end = cur_idx;
+ return 0;
+}
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[idx]),
- sizeof(vq->used->ring[idx]));
+static inline uint32_t __attribute__((always_inline))
+copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint16_t end_idx, struct rte_mbuf *m,
+ struct buf_vector *buf_vec)
+{
+ struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+ uint32_t vec_idx = 0;
+ uint16_t start_idx = vq->last_used_idx;
+ uint16_t cur_idx = start_idx;
+ uint64_t desc_addr;
+ uint32_t mbuf_offset, mbuf_avail;
+ uint32_t desc_offset, desc_avail;
+ uint32_t cpy_len;
+ uint16_t desc_idx, used_idx;
- entry_len = 0;
- cur_idx++;
- entry_success++;
- }
+ if (unlikely(m == NULL))
+ return 0;
- vec_idx++;
- vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ dev->vid, cur_idx, end_idx);
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)vb_addr);
- vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
- }
+ desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+ if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
+ return 0;
- cpy_len = RTE_MIN(vb_avail, seg_avail);
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
- while (cpy_len > 0) {
- /* Copy mbuf data to vring buffer */
- rte_memcpy((void *)(uintptr_t)(vb_addr + vb_offset),
- rte_pktmbuf_mtod_offset(pkt, const void *, seg_offset),
- cpy_len);
- vhost_log_write(dev, vq->buf_vec[vec_idx].buf_addr + vb_offset,
- cpy_len);
+ virtio_hdr.num_buffers = end_idx - start_idx;
+ LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
+ dev->vid, virtio_hdr.num_buffers);
- PRINT_PACKET(dev,
- (uintptr_t)(vb_addr + vb_offset),
- cpy_len, 0);
+ virtio_enqueue_offload(m, &virtio_hdr.hdr);
+ copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+ vhost_log_write(dev, buf_vec[vec_idx].buf_addr, dev->vhost_hlen);
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+
+ desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
+ desc_offset = dev->vhost_hlen;
+
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_offset = 0;
+ while (mbuf_avail != 0 || m->next != NULL) {
+ /* done with current desc buf, get the next one */
+ if (desc_avail == 0) {
+ desc_idx = buf_vec[vec_idx].desc_idx;
- seg_offset += cpy_len;
- vb_offset += cpy_len;
- seg_avail -= cpy_len;
- vb_avail -= cpy_len;
- entry_len += cpy_len;
-
- if (seg_avail != 0) {
- /*
- * The virtio buffer in this vring
- * entry reach to its end.
- * But the segment doesn't complete.
- */
- if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
- VRING_DESC_F_NEXT) == 0) {
+ if (!(vq->desc[desc_idx].flags & VRING_DESC_F_NEXT)) {
/* Update used ring with desc information */
- idx = cur_idx & (vq->size - 1);
- vq->used->ring[idx].id
- = vq->buf_vec[vec_idx].desc_idx;
- vq->used->ring[idx].len = entry_len;
+ used_idx = cur_idx++ & (vq->size - 1);
+ vq->used->ring[used_idx].id = desc_idx;
+ vq->used->ring[used_idx].len = desc_offset;
vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[idx]),
- sizeof(vq->used->ring[idx]));
- entry_len = 0;
- cur_idx++;
- entry_success++;
+ offsetof(struct vring_used,
+ ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
}
vec_idx++;
- vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
- vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
- cpy_len = RTE_MIN(vb_avail, seg_avail);
- } else {
- /*
- * This current segment complete, need continue to
- * check if the whole packet complete or not.
- */
- pkt = pkt->next;
- if (pkt != NULL) {
- /*
- * There are more segments.
- */
- if (vb_avail == 0) {
- /*
- * This current buffer from vring is
- * used up, need fetch next buffer
- * from buf_vec.
- */
- uint32_t desc_idx =
- vq->buf_vec[vec_idx].desc_idx;
-
- if ((vq->desc[desc_idx].flags &
- VRING_DESC_F_NEXT) == 0) {
- idx = cur_idx & (vq->size - 1);
- /*
- * Update used ring with the
- * descriptor information
- */
- vq->used->ring[idx].id
- = desc_idx;
- vq->used->ring[idx].len
- = entry_len;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[idx]),
- sizeof(vq->used->ring[idx]));
- entry_success++;
- entry_len = 0;
- cur_idx++;
- }
-
- /* Get next buffer from buf_vec. */
- vec_idx++;
- vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
- vb_avail =
- vq->buf_vec[vec_idx].buf_len;
- vb_offset = 0;
- }
-
- seg_offset = 0;
- seg_avail = rte_pktmbuf_data_len(pkt);
- cpy_len = RTE_MIN(vb_avail, seg_avail);
- } else {
- /*
- * This whole packet completes.
- */
- /* Update used ring with desc information */
- idx = cur_idx & (vq->size - 1);
- vq->used->ring[idx].id
- = vq->buf_vec[vec_idx].desc_idx;
- vq->used->ring[idx].len = entry_len;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[idx]),
- sizeof(vq->used->ring[idx]));
- entry_success++;
- break;
- }
+ desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+ if (unlikely(!desc_addr))
+ return 0;
+
+ /* Prefetch buffer address. */
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+ desc_offset = 0;
+ desc_avail = buf_vec[vec_idx].buf_len;
}
- }
- return entry_success;
-}
+ /* done with current mbuf, get the next one */
+ if (mbuf_avail == 0) {
+ m = m->next;
-static inline void __attribute__((always_inline))
-update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
- uint32_t *secure_len, uint32_t *vec_idx)
-{
- uint16_t wrapped_idx = id & (vq->size - 1);
- uint32_t idx = vq->avail->ring[wrapped_idx];
- uint8_t next_desc;
- uint32_t len = *secure_len;
- uint32_t vec_id = *vec_idx;
+ mbuf_offset = 0;
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ }
- do {
- next_desc = 0;
- len += vq->desc[idx].len;
- vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
- vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
- vq->buf_vec[vec_id].desc_idx = idx;
- vec_id++;
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_write(dev, buf_vec[vec_idx].buf_addr + desc_offset,
+ cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ cpy_len, 0);
- if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
- idx = vq->desc[idx].next;
- next_desc = 1;
- }
- } while (next_desc);
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+ }
+
+ used_idx = cur_idx & (vq->size - 1);
+ vq->used->ring[used_idx].id = buf_vec[vec_idx].desc_idx;
+ vq->used->ring[used_idx].len = desc_offset;
+ vhost_log_used_vring(dev, vq,
+ offsetof(struct vring_used, ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
- *secure_len = len;
- *vec_idx = vec_id;
+ return end_idx - start_idx;
}
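
The contract completed here: one used-ring entry was written per buffer chain
the packet spilled into, and virtio_hdr.num_buffers (written before the copy
started) tells the guest how many of those entries to stitch back into one
packet. A rough standalone illustration of the count, assuming each reserved
chain is a single 4096-byte buffer (sizes invented):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t pkt_len = 5000 + 12;   /* frame + virtio-net header */
            uint32_t buf_len = 4096;        /* one guest buffer per chain */

            /* enough buffers to cover the packet, rounded up */
            uint16_t num_buffers = (pkt_len + buf_len - 1) / buf_len;

            assert(num_buffers == 2);       /* guest merges 2 used entries */
            return 0;
    }
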
-/*
- * This function works for mergeable RX.
- */
static inline uint32_t __attribute__((always_inline))
virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
- uint32_t pkt_idx = 0, entry_success = 0;
- uint16_t avail_idx;
- uint16_t res_base_idx, res_cur_idx;
- uint8_t success = 0;
+ uint32_t pkt_idx = 0, nr_used = 0;
+ uint16_t end;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
- dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
- RTE_LOG(ERR, VHOST_DATA,
- "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
- __func__, dev->device_fh, queue_id);
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
return 0;
}
	vq = dev->virtqueue[queue_id];
	if (unlikely(vq->enabled == 0))
		return 0;
count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
-
if (count == 0)
return 0;
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
- uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
-
- do {
- /*
- * As many data cores may want access to available
- * buffers, they need to be reserved.
- */
- uint32_t secure_len = 0;
- uint32_t vec_idx = 0;
-
- res_base_idx = vq->last_used_idx_res;
- res_cur_idx = res_base_idx;
-
- do {
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- if (unlikely(res_cur_idx == avail_idx))
- goto merge_rx_exit;
-
- update_secure_len(vq, res_cur_idx,
- &secure_len, &vec_idx);
- res_cur_idx++;
- } while (pkt_len > secure_len);
-
- /* vq->last_used_idx_res is atomically updated. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_base_idx,
- res_cur_idx);
- } while (success == 0);
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
- entry_success = copy_from_mbuf_to_vring(dev, queue_id,
- res_base_idx, res_cur_idx, pkts[pkt_idx]);
-
- rte_compiler_barrier();
+ if (unlikely(reserve_avail_buf_mergeable(vq, pkt_len,
+ &end, buf_vec) < 0)) {
+ LOG_DEBUG(VHOST_DATA,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ break;
+ }
- /*
- * Wait until it's our turn to add our buffer
- * to the used ring.
- */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
+ nr_used = copy_mbuf_to_desc_mergeable(dev, vq, end,
+ pkts[pkt_idx], buf_vec);
+ rte_smp_wmb();
- *(volatile uint16_t *)&vq->used->idx += entry_success;
- vq->last_used_idx = res_cur_idx;
+ *(volatile uint16_t *)&vq->used->idx += nr_used;
+ vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
+ sizeof(vq->used->idx));
+ vq->last_used_idx += nr_used;
}
-merge_rx_exit:
if (likely(pkt_idx)) {
/* flush used->idx update before we read avail->flags. */
rte_mb();
/* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vq->callfd >= 0))
eventfd_write(vq->callfd, (eventfd_t)1);
	}

	return pkt_idx;
}
uint16_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return 0;
+
+ if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
return virtio_dev_merge_rx(dev, queue_id, pkts, count);
else
		return virtio_dev_rx(dev, queue_id, pkts, count);
}
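
With rte_vhost_enqueue_burst() now keyed by vid, applications hold only the
integer handle. A minimal caller sketch (variable names invented; the API
copies packet data into guest memory, so the caller keeps ownership of the
mbufs either way):

    uint16_t sent, i;

    /* VIRTIO_RXQ (0) is the Rx queue of the first queue pair */
    sent = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_pkts);
    if (sent < nb_pkts) {
            /* vring was full; a drop counter could be bumped here */
    }
    for (i = 0; i < nb_pkts; i++)
            rte_pktmbuf_free(pkts[i]);
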
uint32_t cpy_len;
struct rte_mbuf *cur = m, *prev = m;
struct virtio_net_hdr *hdr;
+ /* A counter to guard against a dead loop in the desc chain */
+ uint32_t nr_desc = 1;
desc = &vq->desc[desc_idx];
+ if (unlikely(desc->len < dev->vhost_hlen))
+ return -1;
+
desc_addr = gpa_to_vva(dev, desc->addr);
- rte_prefetch0((void *)(uintptr_t)desc_addr);
+ if (unlikely(!desc_addr))
+ return -1;
- /* Retrieve virtio net header */
hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
- desc_avail = desc->len - vq->vhost_hlen;
- desc_offset = vq->vhost_hlen;
+ rte_prefetch0(hdr);
+
+ /*
+ * A virtio driver normally uses at least 2 desc buffers
+ * for Tx: the first for storing the header, and the others
+ * for storing the data.
+ */
+ if (likely((desc->len == dev->vhost_hlen) &&
+ (desc->flags & VRING_DESC_F_NEXT) != 0)) {
+ desc = &vq->desc[desc->next];
+
+ desc_addr = gpa_to_vva(dev, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ nr_desc += 1;
+
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
+ } else {
+ desc_avail = desc->len - dev->vhost_hlen;
+ desc_offset = dev->vhost_hlen;
+ }
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
- while (desc_avail != 0 || (desc->flags & VRING_DESC_F_NEXT) != 0) {
+ while (1) {
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
+ (void *)((uintptr_t)(desc_addr + desc_offset)),
+ cpy_len);
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+
/* This desc reaches to its end, get the next one */
if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0)
+ break;
+
+ if (unlikely(desc->next >= vq->size ||
+ ++nr_desc > vq->size))
+ return -1;
desc = &vq->desc[desc->next];
desc_addr = gpa_to_vva(dev, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+
rte_prefetch0((void *)(uintptr_t)desc_addr);
desc_offset = 0;
mbuf_offset = 0;
mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
}
-
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
- rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
- (void *)((uintptr_t)(desc_addr + desc_offset)),
- cpy_len);
-
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- desc_avail -= cpy_len;
- desc_offset += cpy_len;
}
prev->data_len = mbuf_offset;
}
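
The nr_desc counter added above is a termination guard: no well-formed chain
can be longer than the ring, so visiting more than vq->size descriptors proves
the guest supplied a cyclic or corrupt chain. A standalone sketch of the same
idea:

    #include <stdint.h>

    struct demo_desc {
            uint16_t next;
            int      has_next;
    };

    /* returns -1 for an out-of-range or cyclic chain, 0 when it ends */
    static int walk_chain(const struct demo_desc *ring, uint16_t size,
                          uint16_t head)
    {
            uint32_t nr_desc = 1;
            uint16_t idx = head;

            while (ring[idx].has_next) {
                    idx = ring[idx].next;
                    if (idx >= size || ++nr_desc > size)
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            /* 0 -> 1 -> 2 -> 0 is a planted cycle */
            struct demo_desc ring[4] = { {1, 1}, {2, 1}, {0, 1}, {0, 0} };

            return walk_chain(ring, 4, 0) == -1 ? 0 : 1;
    }
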
uint16_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
+ struct virtio_net *dev;
struct rte_mbuf *rarp_mbuf = NULL;
struct vhost_virtqueue *vq;
uint32_t desc_indexes[MAX_PKT_BURST];
uint16_t free_entries;
uint16_t avail_idx;
+ dev = get_device(vid);
+ if (!dev)
+ return 0;
+
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
- RTE_LOG(ERR, VHOST_DATA,
- "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
- __func__, dev->device_fh, queue_id);
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
return 0;
}
if (free_entries == 0)
goto out;
- LOG_DEBUG(VHOST_DATA, "%s (%"PRIu64")\n", __func__, dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
/* Prefetch available ring to retrieve head indexes. */
used_idx = vq->last_used_idx & (vq->size - 1);
rte_prefetch0(&vq->avail->ring[used_idx]);
+ rte_prefetch0(&vq->used->ring[used_idx]);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") about to dequeue %u buffers\n",
- dev->device_fh, count);
+ LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+ dev->vid, count);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < count; i++) {
- desc_indexes[i] = vq->avail->ring[(vq->last_used_idx + i) &
- (vq->size - 1)];
+ used_idx = (vq->last_used_idx + i) & (vq->size - 1);
+ desc_indexes[i] = vq->avail->ring[used_idx];
+
+ vq->used->ring[used_idx].id = desc_indexes[i];
+ vq->used->ring[used_idx].len = 0;
+ vhost_log_used_vring(dev, vq,
+ offsetof(struct vring_used, ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
}
/* Prefetch descriptor index. */
rte_prefetch0(&vq->desc[desc_indexes[0]]);
- rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);
-
for (i = 0; i < count; i++) {
int err;
- if (likely(i + 1 < count)) {
+ if (likely(i + 1 < count))
rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
- rte_prefetch0(&vq->used->ring[(used_idx + 1) &
- (vq->size - 1)]);
- }
		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL))
			break;

		err = copy_desc_to_mbuf(dev, vq, pkts[i], desc_indexes[i],
					mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}
-
- used_idx = vq->last_used_idx++ & (vq->size - 1);
- vq->used->ring[used_idx].id = desc_indexes[i];
- vq->used->ring[used_idx].len = 0;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[used_idx]),
- sizeof(vq->used->ring[used_idx]));
}
- rte_compiler_barrier();
+ rte_smp_wmb();
+ rte_smp_rmb();
vq->used->idx += i;
+ vq->last_used_idx += i;
vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
/* Kick guest if required. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vq->callfd >= 0))
eventfd_write(vq->callfd, (eventfd_t)1);
out: