dev->mem = NULL;
}
- free(dev->guest_pages);
+ rte_free(dev->guest_pages);
dev->guest_pages = NULL;
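+ /*
+  * guest_pages now comes from the rte_malloc heap (allocated with
+  * rte_zmalloc()/rte_realloc() below), so it must be released with
+  * rte_free() as above; handing it to libc free() is undefined
+  * behaviour.
+  */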
if (dev->log_addr) {
if (dev->nr_guest_pages == dev->max_guest_pages) {
dev->max_guest_pages *= 2;
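+ /*
+  * Like realloc(), rte_realloc() returns NULL on failure and leaves
+  * the old block allocated, so a pointer to it is kept and freed
+  * explicitly in the error path to avoid a leak.
+  */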
old_pages = dev->guest_pages;
- dev->guest_pages = realloc(dev->guest_pages,
- dev->max_guest_pages * sizeof(*page));
- if (!dev->guest_pages) {
+ dev->guest_pages = rte_realloc(dev->guest_pages,
+ dev->max_guest_pages * sizeof(*page),
+ RTE_CACHE_LINE_SIZE);
+ if (dev->guest_pages == NULL) {
VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
- free(old_pages);
+ rte_free(old_pages);
return -1;
}
}
reg_size -= size;
}
+ /* sort guest page array if over binary search threshold */
+ if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+ qsort((void *)dev->guest_pages, dev->nr_guest_pages,
+ sizeof(struct guest_page), guest_page_addrcmp);
+ }
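+ /*
+  * guest_page_addrcmp() is expected to order entries by guest physical
+  * address so that lookups can binary-search the sorted array. A
+  * minimal comparator along these lines (a sketch, assuming struct
+  * guest_page has a guest_phys_addr field):
+  *
+  *	static int
+  *	guest_page_addrcmp(const void *p1, const void *p2)
+  *	{
+  *		const struct guest_page *a = p1;
+  *		const struct guest_page *b = p2;
+  *
+  *		if (a->guest_phys_addr > b->guest_phys_addr)
+  *			return 1;
+  *		if (a->guest_phys_addr < b->guest_phys_addr)
+  *			return -1;
+  *		return 0;
+  *	}
+  */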
+
return 0;
}
vhost_user_iotlb_flush_all(dev->virtqueue[i]);
dev->nr_guest_pages = 0;
- if (!dev->guest_pages) {
+ if (dev->guest_pages == NULL) {
dev->max_guest_pages = 8;
- dev->guest_pages = malloc(dev->max_guest_pages *
- sizeof(struct guest_page));
+ dev->guest_pages = rte_zmalloc(NULL,
+ dev->max_guest_pages *
+ sizeof(struct guest_page),
+ RTE_CACHE_LINE_SIZE);
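+ /*
+  * rte_zmalloc() returns zero-filled memory from the DPDK heap; the
+  * NULL first argument means no type name is recorded for statistics,
+  * and RTE_CACHE_LINE_SIZE requests cache-line alignment.
+  */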
if (dev->guest_pages == NULL) {
VHOST_LOG_CONFIG(ERR,
"(%d) failed to allocate memory "
size = msg->payload.log.mmap_size;
off = msg->payload.log.mmap_offset;
- /* Don't allow mmap_offset to point outside the mmap region */
- if (off > size) {
+ /* Check for mmap size and offset overflow. */
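+ /*
+  * Both values are uint64_t, so -size equals UINT64_MAX - size + 1 in
+  * unsigned arithmetic, and off >= -size holds exactly when off + size
+  * would wrap (a zero size is rejected too, since -0 is 0). For
+  * example, with size = 0x1000, -size is 0xfffffffffffff000, and any
+  * offset at or above that value overflows off + size.
+  */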
+ if (off >= -size) {
VHOST_LOG_CONFIG(ERR,
- "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
+ "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
off, size);
return RTE_VHOST_MSG_RESULT_ERR;
}
* Set the flag to inject a RARP broadcast packet at
* rte_vhost_dequeue_burst().
*
- * rte_smp_wmb() is for making sure the mac is copied
- * before the flag is set.
+ * __ATOMIC_RELEASE ordering makes sure the MAC address is
+ * copied before the flag is set.
*/
- rte_smp_wmb();
- rte_atomic16_set(&dev->broadcast_rarp, 1);
+ __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
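+ /*
+  * The dequeue side must pair this with an acquire load, roughly
+  * (a sketch, not the exact code in this tree):
+  *
+  *	if (__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE))
+  *		... inject the RARP packet and clear the flag ...
+  *
+  * so that the MAC bytes copied above are visible by the time the
+  * flag is observed as set.
+  */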
did = dev->vdpa_dev_id;
vdpa_dev = rte_vdpa_get_device(did);
if (vdpa_dev && vdpa_dev->ops->migration_done)
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
struct VhostUserMsg *msg)
{
- uint16_t vring_idx;
+ uint32_t vring_idx;
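+ /*
+  * The index arrives in a payload field wider than 16 bits; storing it
+  * in a uint16_t would silently truncate (e.g. 0x10000 becomes 0),
+  * letting an out-of-range index slip past a later bounds check, so
+  * the full 32-bit value is kept until it is validated.
+  */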
switch (msg->request.master) {
case VHOST_USER_SET_VRING_KICK: