}
static __rte_always_inline int
-async_iter_initialize(struct vhost_async *async)
+async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
{
struct rte_vhost_iov_iter *iter;
if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
- VHOST_LOG_DATA(ERR, "no more async iovec available\n");
+ VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
return -1;
}
}
static __rte_always_inline int
-async_iter_add_iovec(struct vhost_async *async, void *src, void *dst, size_t len)
+async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
+ void *src, void *dst, size_t len)
{
struct rte_vhost_iov_iter *iter;
struct rte_vhost_iovec *iovec;
static bool vhost_max_async_vec_log;
if (!vhost_max_async_vec_log) {
- VHOST_LOG_DATA(ERR, "no more async iovec available\n");
+ VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
vhost_max_async_vec_log = true;
}
hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
buf_iova + buf_offset, cpy_len, &mapped_len);
if (unlikely(!hpa)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n", dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to get hpa.\n", dev->ifname, __func__);
return -1;
}
- if (unlikely(async_iter_add_iovec(async,
+ if (unlikely(async_iter_add_iovec(dev, async,
(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
mbuf_offset),
hpa, (size_t)mapped_len)))
} else
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
- VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
- dev->vid, num_buffers);
+ VHOST_LOG_DATA(DEBUG, "(%s) RX: num merge buffers %d\n",
+ dev->ifname, num_buffers);
if (unlikely(buf_len < dev->vhost_hlen)) {
buf_offset = dev->vhost_hlen - buf_len;
mbuf_offset = 0;
if (is_async) {
- if (async_iter_initialize(async))
+ if (async_iter_initialize(dev, async))
return -1;
}
pkt_len, buf_vec, &num_buffers,
avail_head, &nr_vec) < 0)) {
VHOST_LOG_DATA(DEBUG,
- "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx,
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
- VHOST_LOG_DATA(DEBUG,
- "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx,
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx,
vq->last_avail_idx + nr_descs);
vq_inc_last_avail_packed(vq, nr_descs);
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
&num_buffers, avail_head, &nr_vec) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
vq->shadow_used_idx -= num_buffers;
n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
if (unlikely(n_xfer < 0)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to transfer data for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
n_xfer = 0;
}
if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
nr_descs, nr_buffers) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n", dev->ifname);
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
return 0;
}
n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
if (unlikely(n_xfer < 0)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to transfer data for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
n_xfer = 0;
}
n_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);
if (unlikely(n_cpl < 0)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to check completed copies for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to check completed copies for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
if (unlikely(!dev))
return 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
vq = dev->virtqueue[queue_id];
if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
if (!dev)
return 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
vq = dev->virtqueue[queue_id];
if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
}
static __rte_always_inline void
-vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
+ struct rte_mbuf *m)
{
uint8_t l4_proto = 0;
struct rte_tcp_hdr *tcp_hdr = NULL;
m->l4_len = sizeof(struct rte_udp_hdr);
break;
default:
- VHOST_LOG_DATA(WARNING,
- "unsupported gso type %u.\n", hdr->gso_type);
+ VHOST_LOG_DATA(WARNING, "(%s) unsupported gso type %u.\n",
+ dev->ifname, hdr->gso_type);
goto error;
}
}
}
static __rte_always_inline void
-vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
- bool legacy_ol_flags)
+vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
+ struct rte_mbuf *m, bool legacy_ol_flags)
{
struct rte_net_hdr_lens hdr_lens;
int l4_supported = 0;
return;
if (legacy_ol_flags) {
- vhost_dequeue_offload_legacy(hdr, m);
+ vhost_dequeue_offload_legacy(dev, hdr, m);
return;
}
if (mbuf_avail == 0) {
cur = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(cur == NULL)) {
- VHOST_LOG_DATA(ERR, "Failed to "
- "allocate memory for mbuf.\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
+ dev->ifname);
error = -1;
goto out;
}
m->pkt_len += mbuf_offset;
if (hdr)
- vhost_dequeue_offload(hdr, m, legacy_ol_flags);
+ vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
out:
}
static int
-virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
+virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t size)
{
struct rte_mbuf_ext_shared_info *shinfo = NULL;
uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
virtio_dev_extbuf_free, buf);
if (unlikely(shinfo == NULL)) {
rte_free(buf);
- VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to init shinfo\n", dev->ifname);
return -1;
}
return 0;
/* attach an external buffer if supported */
- if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+ if (dev->extbuf && !virtio_dev_extbuf_alloc(dev, pkt, data_len))
return 0;
/* check if chained buffers are allowed */
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
- VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
- dev->vid, count);
+ VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
+ dev->ifname, count);
if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
return 0;
* is required. Drop this packet.
*/
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed mbuf alloc of size %d from %s on %s.\n",
- buf_len, mbuf_pool->name, dev->ifname);
+ VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
+ dev->ifname, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
dropped += 1;
mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed to copy desc to mbuf on %s.\n",
+ VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
dev->ifname);
allocerr_warned = true;
}
if (virtio_net_with_host_offload(dev)) {
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
+ vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
}
}
if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed mbuf alloc of size %d from %s on %s.\n",
- buf_len, mbuf_pool->name, dev->ifname);
+ VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
+ dev->ifname, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
return -1;
mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed to copy desc to mbuf on %s.\n",
+ VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
dev->ifname);
allocerr_warned = true;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
if (rarp_mbuf == NULL) {
- VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n", dev->ifname);
count = 0;
goto out;
}