/*
 * Flags written to a packed-ring descriptor when marking an enqueue
 * (device-writable) descriptor as used: with wrap counter 'w' set, both
 * AVAIL and USED are set alongside WRITE; otherwise only WRITE remains.
 */
#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
VRING_DESC_F_WRITE)
/*
 * Same idea for dequeue (device-readable) descriptors, which carry no
 * WRITE flag.  ('+' prefixed lines are additions from the patch this
 * chunk encodes.)
 */
+#define PACKED_DESC_DEQUEUE_USED_FLAG(w) \
+ ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
/*
 * A descriptor with either of these flags (chained or indirect) cannot be
 * completed by the batched single-dequeue fast path.
 */
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
VRING_DESC_F_INDIRECT)
/*
 * Shadow copy of one used element for the packed ring; entries are
 * buffered in this form before being flushed to the ring.
 */
struct vring_used_elem_packed {
/* Descriptor id of the used element. */
uint16_t id;
/* Descriptor flags for this element (added by this patch). */
+ uint16_t flags;
/* Number of bytes written into the buffer. */
uint32_t len;
/* Number of descriptors this element covers. */
uint32_t count;
};
/*
 * NOTE(review): the lines below are member fragments of enclosing struct
 * definitions (apparently struct vhost_virtqueue followed by struct
 * virtio_net) whose opening braces lie outside this chunk; '+'/'-'
 * prefixes mark lines added/removed by the patch this chunk encodes.
 */
/* Count of buffered shadow used-ring entries pending a flush. */
uint16_t shadow_used_idx;
/* Record packed ring enqueue latest desc cache aligned index */
uint16_t shadow_aligned_idx;
+ /* Record packed ring first dequeue desc index */
+ uint16_t shadow_last_used_idx;
struct vhost_vring_addr ring_addrs;
struct batch_copy_elem *batch_copy_elems;
uint32_t flags;
uint16_t vhost_hlen;
/* to tell if we need broadcast rarp packet */
/*
 * Patch replaces rte_atomic16_t with a plain int16_t — presumably the
 * users now access it via compiler __atomic builtins; verify against the
 * datapath callers (not visible in this chunk).
 */
- rte_atomic16_t broadcast_rarp;
+ int16_t broadcast_rarp;
uint32_t nr_vring;
int dequeue_zero_copy;
int extbuf;
int postcopy_ufd;
int postcopy_listening;
/*
 * Patch replaces the numeric vDPA backend id with a direct pointer to
 * the backend device, removing the id-to-device lookup step.
 */
- /*
- * Device id to identify a specific backend device.
- * It's set to -1 for the default software implementation.
- */
- int vdpa_dev_id;
+ struct rte_vdpa_device *vdpa_dev;
/* context data for the external message handlers */
void *extern_data;
/*
 * Dirty-page log a used-ring update through the per-queue log cache.
 * The patch inlines the VHOST_F_LOG_ALL feature test and a
 * log_guest_addr == 0 guard so the common no-logging path returns
 * without calling into the log machinery.
 * NOTE(review): the 'static __rte_always_inline void' return-type line
 * precedes this chunk and is not visible here.
 */
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t offset, uint64_t len)
{
- vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
/* Log base may be unset — presumably before SET_VRING_ADDR; skip then. */
+ if (unlikely(vq->log_guest_addr == 0))
+ return;
+ __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
+ len);
+ }
}
/*
 * Dirty-page log a used-ring update directly (uncached variant).
 * Mirrors vhost_log_cache_used_vring(): the patch moves the
 * VHOST_F_LOG_ALL feature test and the log_guest_addr == 0 guard into
 * this inline wrapper so the common no-logging path stays cheap.
 */
static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t offset, uint64_t len)
{
- vhost_log_write(dev, vq->log_guest_addr + offset, len);
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
/* Log base may be unset — presumably before SET_VRING_ADDR; skip then. */
+ if (unlikely(vq->log_guest_addr == 0))
+ return;
+ __vhost_log_write(dev, vq->log_guest_addr + offset, len);
+ }
}
/*
 * NOTE(review): orphaned fragment — the 'static __rte_always_inline void'
 * line below starts a definition whose signature/body is outside this
 * chunk, and the following two lines are the tail of a different log
 * helper (presumably the IOVA-based variant).  Left untouched.
 */
static __rte_always_inline void
__vhost_log_write(dev, iova, len);
}
/*
 * The patch replaces the fixed RTE_LOGTYPE_USER1 aliases with dynamic
 * per-component log types, letting config- and data-path verbosity be
 * tuned independently at runtime.
 */
-/* Macros for printing using RTE_LOG */
-#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
-#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
/* Dynamic log type ids, registered elsewhere — not visible in this chunk. */
+extern int vhost_config_log_level;
+extern int vhost_data_log_level;
+
/* Control-path logging: always compiled in. */
+#define VHOST_LOG_CONFIG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vhost_config_log_level, \
+ "VHOST_CONFIG: " fmt, ##args)
+
/*
 * Data-path logging: folds to a no-op expression when the requested level
 * exceeds RTE_LOG_DP_LEVEL, keeping logging cost out of the fast path.
 */
+#define VHOST_LOG_DATA(level, fmt, args...) \
+ (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
+ rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
+ "VHOST_DATA : " fmt, ##args) : \
+ 0)
#ifdef RTE_LIBRTE_VHOST_DEBUG
/* Size of the on-stack text buffer PRINT_PACKET formats into. */
#define VHOST_MAX_PRINT_BUFF 6072
/* Old wrapper removed by the patch; callers use VHOST_LOG_DATA instead. */
-#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
- RTE_LOG(DEBUG, log_type, fmt, ##args)
/*
 * PRINT_PACKET: dump a packet buffer as text at DEBUG level.
 * NOTE(review): the middle of this macro (the formatting loop between the
 * declarations and the trailing snprintf) is truncated in this chunk.
 */
#define PRINT_PACKET(device, addr, size, header) do { \
char *pkt_addr = (char *)(addr); \
unsigned int index; \
} \
snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
\
- VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
+ VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
-#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
/* Global feature-bits variable removed — presumably features became
 * per-device state; verify against the rest of the patch. */
-extern uint64_t VHOST_FEATURES;
/* Capacity of the global vhost device table, indexed by vid. */
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/*
 * Guest-page count above which gpa_to_hpa() switches from a linear scan
 * to bsearch() over the guest page array.
 */
+#define VHOST_BINARY_SEARCH_THRESH 256
+
/*
 * bsearch()-style comparator ordering struct guest_page entries by
 * guest_phys_addr.  Assumes dev->guest_pages is kept sorted on that key
 * by whoever populates it — TODO confirm; the populating code is not in
 * this chunk.
 */
+static __rte_always_inline int guest_page_addrcmp(const void *p1,
+ const void *p2)
+{
+ const struct guest_page *page1 = (const struct guest_page *)p1;
+ const struct guest_page *page2 = (const struct guest_page *)p2;
+
+ if (page1->guest_phys_addr > page2->guest_phys_addr)
+ return 1;
+ if (page1->guest_phys_addr < page2->guest_phys_addr)
+ return -1;
+
+ return 0;
+}
+
/* Convert guest physical address to host physical address */
/*
 * The patch adds a bsearch() fast path: once the device maps at least
 * VHOST_BINARY_SEARCH_THRESH pages, lookup is O(log n) via
 * guest_page_addrcmp(); smaller maps keep the original linear scan.
 * In both paths a hit also requires [gpa, gpa+size) to fall inside the
 * page (note the strict '<' on the upper bound, as in the original).
 * NOTE(review): the function tail (presumably 'return 0;' on miss and
 * the closing brace) is truncated from this chunk.
 */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
uint32_t i;
struct guest_page *page;
-
- for (i = 0; i < dev->nr_guest_pages; i++) {
- page = &dev->guest_pages[i];
-
- if (gpa >= page->guest_phys_addr &&
- gpa + size < page->guest_phys_addr + page->size) {
- return gpa - page->guest_phys_addr +
- page->host_phys_addr;
+ struct guest_page key;
+
+ if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+ key.guest_phys_addr = gpa;
+ page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
+ sizeof(struct guest_page), guest_page_addrcmp);
+ if (page) {
+ if (gpa + size < page->guest_phys_addr + page->size)
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ }
+ } else {
+ for (i = 0; i < dev->nr_guest_pages; i++) {
+ page = &dev->guest_pages[i];
+
+ if (gpa >= page->guest_phys_addr &&
+ gpa + size < page->guest_phys_addr +
+ page->size)
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
}
}
/*
 * NOTE(review): fragment of a device-lookup helper (presumably
 * get_device()) whose signature and return are outside this chunk.
 * The patch only converts the error log to the new VHOST_LOG_CONFIG.
 */
struct virtio_net *dev = vhost_devices[vid];
if (unlikely(!dev)) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"(%d) device not found.\n", vid);
}
int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
/* Patch: attach by device handle rather than by numeric vDPA id. */
-void vhost_attach_vdpa_device(int vid, int did);
+void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
/* NOTE(review): the two lines below are trailing parameters of a
 * prototype whose first line is outside this chunk. */
struct vhost_virtqueue *vq,
uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
/* New in this patch: translate a guest log address — presumably through
 * the IOTLB when IOMMU is negotiated; confirm against the definition. */
+uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
/*
 * NOTE(review): fragment — the 'static __rte_always_inline uint64_t'
 * line starts a definition not visible here, and the remaining lines are
 * the interior of an event-idx notification helper.  The patch only
 * converts its debug log to VHOST_LOG_DATA.
 */
static __rte_always_inline uint64_t
vq->signalled_used = new;
vq->signalled_used_valid = true;
- VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
+ VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
__func__,
vhost_used_event(vq),
old, new);