if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ VRING_PACKED_DESC_F_AVAIL_USED;
flags = vq->vq_packed.cached_flags;
}
}
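For reference, the combined constant used above is presumably defined alongside the other packed-ring flags; a minimal sketch, assuming the virtio 1.1 bit positions (AVAIL is bit 7, USED is bit 15 of the descriptor flags):

    #define VRING_PACKED_DESC_F_AVAIL      (1 << 7)
    #define VRING_PACKED_DESC_F_USED       (1 << 15)
    #define VRING_PACKED_DESC_F_AVAIL_USED (VRING_PACKED_DESC_F_AVAIL | \
                                            VRING_PACKED_DESC_F_USED)

XORing cached_flags with it flips both bits in one operation on every ring wrap, which is exactly what the removed VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1) expression did.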
/* common case: header is not fragmented */
if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
m->l4_len)) {
- struct ipv4_hdr *iph;
- struct ipv6_hdr *ip6h;
- struct tcp_hdr *th;
+ struct rte_ipv4_hdr *iph;
+ struct rte_ipv6_hdr *ip6h;
+ struct rte_tcp_hdr *th;
uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
uint32_t tmp;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, m->l2_len);
th = RTE_PTR_ADD(iph, m->l3_len);
if ((iph->version_ihl >> 4) == 4) {
iph->hdr_checksum = 0;
iph->hdr_checksum = rte_ipv4_cksum(iph);
ip_len = iph->total_length;
ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
m->l3_len);
} else {
- ip6h = (struct ipv6_hdr *)iph;
+ ip6h = (struct rte_ipv6_hdr *)iph;
ip_paylen = ip6h->payload_len;
}
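The prev_cksum/new_cksum/tmp locals declared above are consumed just below this hunk: the TCP checksum seeded by the application is a pseudo-header checksum that includes the payload length, and TSO needs it without that length. A sketch of the one's-complement fixup, assuming the elided code follows the usual pattern:

    /* cksum stores ~sum, so ~(sum - ip_paylen) == ~sum + ip_paylen,
     * with the end-around carry folded back into the low 16 bits */
    prev_cksum = th->cksum;
    tmp = prev_cksum;
    tmp += ip_paylen;
    tmp = (tmp & 0xffff) + (tmp >> 16);
    new_cksum = tmp;
    th->cksum = new_cksum;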
switch (cookie->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
+ hdr->csum_offset = offsetof(struct rte_udp_hdr,
dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
case PKT_TX_TCP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
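Per the virtio spec, a device that sees VIRTIO_NET_HDR_F_NEEDS_CSUM checksums the packet from csum_start onward and stores the 16-bit result at csum_start + csum_offset, which is why csum_offset is set to the position of the L4 checksum field. An illustrative device-side sketch (pkt, pkt_len and raw_cksum() are hypothetical names):

    if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        uint16_t csum = raw_cksum(pkt + hdr->csum_start,
                                  pkt_len - hdr->csum_start);
        memcpy(pkt + hdr->csum_start + hdr->csum_offset,
               &csum, sizeof(csum));
    }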
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
}
vq->vq_free_cnt--;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ VRING_PACKED_DESC_F_AVAIL_USED;
}
}
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
vq->vq_packed.cached_flags ^=
- VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ VRING_PACKED_DESC_F_AVAIL_USED;
}
} while ((cookie = cookie->next) != NULL);
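All of these wrap sites implement the packed-ring availability rule: a descriptor belongs to the device when its AVAIL bit matches the driver's wrap counter and its USED bit does not, so both bits must flip each time the ring wraps. A minimal sketch of the check (the real driver also applies the required memory ordering when loading flags):

    static inline int
    desc_is_avail(const struct vring_packed_desc *desc, bool wrap_counter)
    {
        uint16_t flags = desc->flags;

        return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
               wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
    }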
static inline void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
uint32_t s = mbuf->pkt_len;
- struct ether_addr *ea;
+ struct rte_ether_addr *ea;
stats->bytes += s;
stats->size_bins[0]++;
else if (s < 1519)
stats->size_bins[6]++;
- else if (s >= 1519)
+ else
stats->size_bins[7]++;
}
- ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
- if (is_multicast_ether_addr(ea)) {
- if (is_broadcast_ether_addr(ea))
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
stats->broadcast++;
else
stats->multicast++;
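The elided middle of this function sorts sizes into power-of-two bins with a count-leading-zeros trick; an illustrative reconstruction of the assumed bin layout ([0] is under 64, [1] is exactly 64, [2]..[5] cover 65..1023 by power of two, [6] is 1024..1518, [7] is 1519 and up):

    static inline unsigned int
    size_bin(uint32_t s)
    {
        if (s == 64)
            return 1;
        if (s > 64 && s < 1024)
            /* e.g. s = 256: 32 - clz(256) - 5 = 32 - 23 - 5 = 4 */
            return (sizeof(s) * 8) - __builtin_clz(s) - 5;
        return (s < 64) ? 0 : (s < 1519) ? 6 : 7;
    }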
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
- struct rte_mbuf *rxm, *new_mbuf;
+ struct rte_mbuf *rxm;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- while (likely(!virtqueue_full(vq))) {
- new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
- if (unlikely(new_mbuf == NULL)) {
- struct rte_eth_dev *dev
- = &rte_eth_devices[rxvq->port_id];
- dev->data->rx_mbuf_alloc_failed++;
- break;
- }
- error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
- if (unlikely(error)) {
- rte_pktmbuf_free(new_mbuf);
- break;
+ if (likely(!virtqueue_full(vq))) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+ free_cnt) == 0)) {
+ error = virtqueue_enqueue_recv_refill(vq, new_pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
}
- nb_enqueued++;
}
if (likely(nb_enqueued)) {
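This hunk and the packed-ring one below both rely on rte_pktmbuf_alloc_bulk() being all-or-nothing: it returns 0 only when every requested mbuf could be allocated and allocates nothing on failure, which is why the else branch charges the entire batch to rx_mbuf_alloc_failed. Minimal usage sketch (mp is a hypothetical mempool handle):

    struct rte_mbuf *pkts[32];

    /* 0 on success; on failure no mbufs are allocated */
    if (rte_pktmbuf_alloc_bulk(mp, pkts, 32) != 0)
        return -ENOMEM;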
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw = vq->hw;
- struct rte_mbuf *rxm, *new_mbuf;
+ struct rte_mbuf *rxm;
uint16_t num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- while (likely(!virtqueue_full(vq))) {
- new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
- if (unlikely(new_mbuf == NULL)) {
+ if (likely(!virtqueue_full(vq))) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+ free_cnt) == 0)) {
+ error = virtqueue_enqueue_recv_refill_packed(vq,
+ new_pkts, free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
struct rte_eth_dev *dev =
&rte_eth_devices[rxvq->port_id];
- dev->data->rx_mbuf_alloc_failed++;
- break;
- }
- error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
- if (unlikely(error)) {
- rte_pktmbuf_free(new_mbuf);
- break;
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
}
- nb_enqueued++;
}
if (likely(nb_enqueued)) {
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf_inorder(vq, rxm);
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rxm = rcv_pkts[i];
- if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
virtio_discard_rxbuf(vq, rxm);
rte_pktmbuf_free(txm);
continue;
}
+ /* vlan_insert may add a header mbuf */
+ tx_pkts[nb_tx] = txm;
}
/* optimize ring usage */
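The added assignment (repeated for each Tx path below) matters because rte_vlan_insert() takes a struct rte_mbuf ** precisely so that an implementation may hand back a different head mbuf, e.g. one carrying the new header; the stale pointer in tx_pkts[] must therefore be refreshed before the descriptor is filled. Presumed surrounding pattern (the ol_flags test is reconstructed, not part of this diff):

    if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
        error = rte_vlan_insert(&txm);  /* may replace txm */
        if (unlikely(error)) {
            rte_pktmbuf_free(txm);
            continue;
        }
        /* vlan_insert may add a header mbuf */
        tx_pkts[nb_tx] = txm;
    }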
rte_pktmbuf_free(txm);
continue;
}
+ /* vlan_insert may add a header mbuf */
+ tx_pkts[nb_tx] = txm;
}
/* optimize ring usage */
rte_pktmbuf_free(txm);
continue;
}
+ /* vlan_insert may add a header mbuf */
+ tx_pkts[nb_tx] = txm;
}
/* optimize ring usage */