return 0;
}
-static int
+static inline void
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push)
{
uint16_t head_size = txvq->hw->vtnet_hdr_size;
unsigned long offs;
- if (unlikely(txvq->vq_free_cnt == 0))
- return -ENOSPC;
- if (unlikely(txvq->vq_free_cnt < needed))
- return -EMSGSIZE;
head_idx = txvq->vq_desc_head_idx;
- if (unlikely(head_idx >= txvq->vq_nentries))
- return -EFAULT;
-
idx = head_idx;
dxp = &txvq->vq_descx[idx];
dxp->cookie = (void *)cookie;
idx = start_dp[idx].next;
} while ((cookie = cookie->next) != NULL);
- start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
-
if (use_indirect)
idx = txvq->vq_ring.desc[head_idx].next;
txvq->vq_desc_tail_idx = idx;
txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
vq_update_avail_ring(txvq, head_idx);
-
- return 0;
-}
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
-
- return m;
}
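Two cleanups converge in the hunk above: virtqueue_enqueue_xmit() drops its error returns and becomes `static inline void`, because the transmit loop now verifies free descriptors before calling it (see the final hunk of this patch), and the private rte_rxmbuf_alloc() wrapper is deleted in favor of the public rte_mbuf_raw_alloc(), which it merely duplicated. As an illustrative sketch (not patch code; virtqueue_enqueue_recv_refill() is assumed to be the PMD's usual refill helper), the replacement is one-for-one:

	/* Sketch, not patch code: rte_mbuf_raw_alloc() returns an
	 * uninitialized mbuf straight from the pool. The PMD rewrites
	 * every field it needs on receive, so the rte_pktmbuf_reset()
	 * that rte_pktmbuf_alloc() would do is wasted work here. */
	static int
	refill_one(struct virtqueue *vq)
	{
		struct rte_mbuf *m = rte_mbuf_raw_alloc(vq->mpool);

		if (m == NULL)
			return -ENOSPC;	/* pool exhausted */
		return virtqueue_enqueue_recv_refill(vq, m);
	}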
static void
vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf;
while (!virtqueue_full(vq)) {
- m = rte_rxmbuf_alloc(vq->mpool);
+ m = rte_mbuf_raw_alloc(vq->mpool);
if (m == NULL)
break;
vq->vq_ring.desc[i + mid_idx].next = i;
vq->vq_ring.desc[i + mid_idx].addr =
vq->virtio_net_hdr_mem +
- i * vq->hw->vtnet_hdr_size;
+ offsetof(struct virtio_tx_region, tx_hdr);
vq->vq_ring.desc[i + mid_idx].len =
vq->hw->vtnet_hdr_size;
vq->vq_ring.desc[i + mid_idx].flags =
if (likely(num > DESC_PER_CACHELINE))
num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
- if (num == 0)
- return 0;
-
num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
/* Allocate new mbuf for the used descriptor */
error = ENOSPC;
while (likely(!virtqueue_full(rxvq))) {
- new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
struct rte_eth_dev *dev
= &rte_eth_devices[rxvq->port_id];
if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
- PMD_RX_LOG(DEBUG, "Notified\n");
+ PMD_RX_LOG(DEBUG, "Notified");
}
}
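The kick path is untouched apart from the log string. virtqueue_kick_prepare() exists so that the comparatively expensive notify (an I/O port or MMIO write in virtqueue_notify()) is skipped whenever the host has asked to suppress it; presumably along the lines of the standard split-ring check (a sketch, not taken from this patch):

	/* Sketch: notify only when the device has not set
	 * VRING_USED_F_NO_NOTIFY in the used ring's flags word. */
	static inline int
	virtqueue_kick_prepare(struct virtqueue *vq)
	{
		return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
	}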
virtio_rmb();
- if (nb_used == 0)
- return 0;
-
- PMD_RX_LOG(DEBUG, "used:%d\n", nb_used);
+ PMD_RX_LOG(DEBUG, "used:%d", nb_used);
hw = rxvq->hw;
nb_rx = 0;
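The `nb_used == 0` early return can go because the dequeue loop simply runs zero iterations in that case and control falls through to the refill and notify logic, which is harmless even when nothing was received. The read barrier stays: the load of the used index must be ordered before the loads of the used-ring entries it advertises. The unchanged pattern is roughly:

	nb_used = VIRTQUEUE_NUSED(rxvq);	/* load used->idx */
	virtio_rmb();				/* order the index load before
						 * reading used-ring entries */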
i++;
- PMD_RX_LOG(DEBUG, "dequeue:%d\n", num);
- PMD_RX_LOG(DEBUG, "packet len:%d\n", len[0]);
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);
rxm = rcv_pkts[0];
if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
- PMD_RX_LOG(ERR, "Packet drop\n");
+ PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(rxvq, rxm);
rxvq->errors++;
rcv_cnt = rx_num;
} else {
PMD_RX_LOG(ERR,
- "No enough segments for packet.\n");
+ "No enough segments for packet.");
nb_enqueued++;
virtio_discard_rxbuf(rxvq, rxm);
rxvq->errors++;
/* Allocate new mbuf for the used descriptor */
error = ENOSPC;
while (likely(!virtqueue_full(rxvq))) {
- new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
struct rte_eth_dev *dev
= &rte_eth_devices[rxvq->port_id];
}
/* Enqueue Packet buffers */
- error = virtqueue_enqueue_xmit(txvq, txm, slots,
- use_indirect, can_push);
- if (unlikely(error)) {
- if (error == ENOSPC)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0");
- else if (error == EMSGSIZE)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1");
- else
- PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error);
- break;
- }
+ virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);
txvq->bytes += txm->pkt_len;
virtio_update_packet_stats(txvq, txm);
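With the capacity checks gone from virtqueue_enqueue_xmit(), the call can no longer fail, so the error ladder above it disappears. That is only sound because the transmit loop has already guaranteed `slots` free descriptors by this point; the caller-side guard looks roughly like the sketch below (illustrative, not patch code; virtio_xmit_cleanup() is assumed to be the PMD's used-descriptor reclaim helper):

	/* Sketch: make room before enqueueing; give up on the packet
	 * only if reclaiming used descriptors still leaves too few. */
	int need = slots - txvq->vq_free_cnt;

	if (unlikely(need > 0)) {
		virtio_xmit_cleanup(txvq, need);	/* reclaim used descs */
		need = slots - txvq->vq_free_cnt;
		if (unlikely(need > 0)) {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
			break;
		}
	}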