 {
         uint16_t i;
         uint16_t free_entries;
+        uint16_t dropped = 0;
+        static bool allocerr_warned;

         if (unlikely(dev->dequeue_zero_copy)) {
                 struct zcopy_mbuf *zmbuf, *next;
...
                 update_shadow_used_ring_split(vq, head_idx, 0);

                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-                if (unlikely(pkts[i] == NULL))
+                if (unlikely(pkts[i] == NULL)) {
+                        /*
+                         * mbuf allocation fails for jumbo packets when
+                         * external buffer allocation is not allowed and a
+                         * linear buffer is required. Drop this packet.
+                         */
+                        if (!allocerr_warned) {
+                                VHOST_LOG_DATA(ERR,
+                                        "Failed mbuf alloc of size %d from %s on %s.\n",
+                                        buf_len, mbuf_pool->name, dev->ifname);
+                                allocerr_warned = true;
+                        }
+                        dropped += 1;
+                        i++;
                         break;
+                }
                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
                                 mbuf_pool);
                 if (unlikely(err)) {
                         rte_pktmbuf_free(pkts[i]);
+                        if (!allocerr_warned) {
+                                VHOST_LOG_DATA(ERR,
+                                        "Failed to copy desc to mbuf on %s.\n",
+                                        dev->ifname);
+                                allocerr_warned = true;
+                        }
+                        dropped += 1;
+                        i++;
                         break;
                 }
...
                         zmbuf = get_zmbuf(vq);
                         if (!zmbuf) {
                                 rte_pktmbuf_free(pkts[i]);
+                                dropped += 1;
+                                i++;
                                 break;
                         }

                         zmbuf->mbuf = pkts[i];
...
                 }
         }
...
-        return i;
+        return (i - dropped);
 }
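
The split path above never reports a dropped packet to the caller directly; it relies on two counters. Below is a minimal sketch of that accounting, separate from the DPDK code and using hypothetical names (`try_alloc_pkt`, `drain_ring`): `i` counts descriptor chains consumed from the ring, `dropped` counts the ones consumed but not delivered, and `i - dropped` is the number of entries the caller may read out of `pkts[]`.

#include <stddef.h>
#include <stdint.h>

/* hypothetical allocator standing in for virtio_dev_pktmbuf_alloc() */
extern void *try_alloc_pkt(size_t idx);

static uint16_t
drain_ring(void **pkts, uint16_t count)
{
        uint16_t i;
        uint16_t dropped = 0;

        for (i = 0; i < count; i++) {
                pkts[i] = try_alloc_pkt(i);
                if (pkts[i] == NULL) {
                        /* The descriptor is already consumed, so count it
                         * as processed (i++) but not delivered (dropped++).
                         */
                        dropped += 1;
                        i++;
                        break;
                }
        }

        /* The ring advanced by i entries; the caller sees only the
         * i - dropped packets that were actually filled in.
         */
        return i - dropped;
}

The `i++` before `break` is what keeps the two counters consistent: without it, the consumed-but-dropped descriptor would not be included in `i`, and `i - dropped` would under-count by one in the opposite direction on the next failure.
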
 static __rte_always_inline int
...
         uint32_t buf_len;
         uint16_t nr_vec = 0;
         int err;
+        static bool allocerr_warned;

         if (unlikely(fill_vec_buf_packed(dev, vq,
                                         vq->last_avail_idx, desc_count,
...
         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
         if (unlikely(*pkts == NULL)) {
-                VHOST_LOG_DATA(ERR,
-                        "Failed to allocate memory for mbuf.\n");
+                if (!allocerr_warned) {
+                        VHOST_LOG_DATA(ERR,
+                                "Failed mbuf alloc of size %d from %s on %s.\n",
+                                buf_len, mbuf_pool->name, dev->ifname);
+                        allocerr_warned = true;
+                }
                 return -1;
         }

         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
                         mbuf_pool);
         if (unlikely(err)) {
+                if (!allocerr_warned) {
+                        VHOST_LOG_DATA(ERR,
+                                "Failed to copy desc to mbuf on %s.\n",
+                                dev->ifname);
+                        allocerr_warned = true;
+                }
                 rte_pktmbuf_free(*pkts);
                 return -1;
         }
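
Both failure paths in this function go through the same `allocerr_warned` latch. A minimal, self-contained sketch of that warn-once pattern follows (plain C, nothing DPDK-specific; `alloc_or_warn_once` is a made-up name):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *
alloc_or_warn_once(size_t size)
{
        /* function-local static: latches after the first failure */
        static bool warned;
        void *p = malloc(size);

        if (p == NULL && !warned) {
                fprintf(stderr, "allocation of %zu bytes failed\n", size);
                warned = true;
        }
        return p;
}

The tradeoff is the same as in the patch: allocation failures tend to arrive in bursts (an exhausted mempool fails for every subsequent packet), so logging each one from the datapath would flood the logger. The latch never resets and, being a function-local static, is shared by every device that reaches this code; the patch accepts that for simplicity.
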
...
                         struct rte_mbuf **pkts)
 {
-        uint16_t buf_id, desc_count;
+        uint16_t buf_id, desc_count = 0;
+        int ret;

-        if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
-                        &desc_count))
-                return -1;
+        ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+                        &desc_count);

-        if (virtio_net_is_inorder(dev))
-                vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
-                                desc_count);
-        else
-                vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+        if (likely(desc_count > 0)) {
+                if (virtio_net_is_inorder(dev))
+                        vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+                                        desc_count);
+                else
+                        vhost_shadow_dequeue_single_packed(vq, buf_id,
+                                        desc_count);

-        vq_inc_last_avail_packed(vq, desc_count);
+                vq_inc_last_avail_packed(vq, desc_count);
+        }

-        return 0;
+        return ret;
 }
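
The packed-path change separates "did the dequeue fail?" (the return value) from "were descriptors consumed?" (`desc_count`). Initializing `desc_count = 0` is what makes that distinction safe: a failure before any descriptor is claimed leaves it at zero, while a later mbuf allocation or copy failure leaves it nonzero, so the ring is still advanced past a dropped packet instead of retrying the same descriptor forever. A small sketch of the caller-side contract, with hypothetical names (`struct ring`, `dequeue_one`, `tx_one`):

#include <stdint.h>

struct ring {
        uint16_t last_avail;
};

/* hypothetical dequeue: returns 0 on success, -1 on failure;
 * *desc_count is left at 0 if nothing was consumed, and set to the
 * number of descriptors consumed otherwise (even on failure)
 */
extern int dequeue_one(struct ring *r, uint16_t *desc_count);

static int
tx_one(struct ring *r)
{
        uint16_t desc_count = 0;
        int ret = dequeue_one(r, &desc_count);

        /* advance past consumed descriptors even when the packet
         * itself was dropped, so the ring cannot get stuck
         */
        if (desc_count > 0)
                r->last_avail += desc_count;

        return ret;
}
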
static __rte_always_inline int