X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx.c;h=1de28540cd0f701b5c67343519589efdb682e9ef;hb=e73e3547ce54d7ae48dff82d87efac0b7a30692a;hp=42d0f533c0001e1aa9298df063c8f9c601fc910b;hpb=4cdc4d98313e717df7d1dba769d71e7e4050da54;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 42d0f533c0..1de28540cd 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -461,7 +461,7 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
 			vq->vq_avail_idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
-				VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+				VRING_PACKED_DESC_F_AVAIL_USED;
 			flags = vq->vq_packed.cached_flags;
 		}
 	}
@@ -479,13 +479,14 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
 	/* common case: header is not fragmented */
 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
 			m->l4_len)) {
-		struct ipv4_hdr *iph;
-		struct ipv6_hdr *ip6h;
-		struct tcp_hdr *th;
+		struct rte_ipv4_hdr *iph;
+		struct rte_ipv6_hdr *ip6h;
+		struct rte_tcp_hdr *th;
 		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
 		uint32_t tmp;
 
-		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+		iph = rte_pktmbuf_mtod_offset(m,
+			struct rte_ipv4_hdr *, m->l2_len);
 		th = RTE_PTR_ADD(iph, m->l3_len);
 		if ((iph->version_ihl >> 4) == 4) {
 			iph->hdr_checksum = 0;
@@ -494,7 +495,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
 			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
 				m->l3_len);
 		} else {
-			ip6h = (struct ipv6_hdr *)iph;
+			ip6h = (struct rte_ipv6_hdr *)iph;
 			ip_paylen = ip6h->payload_len;
 		}
 
@@ -538,14 +539,14 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
 		case PKT_TX_UDP_CKSUM:
 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct udp_hdr,
+			hdr->csum_offset = offsetof(struct rte_udp_hdr,
 				dgram_cksum);
 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 			break;
 
 		case PKT_TX_TCP_CKSUM:
 			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
 			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 			break;
 
@@ -660,8 +661,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
 		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->vq_packed.cached_flags ^=
-			VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
 	}
 
 	vq->vq_free_cnt--;
@@ -728,7 +728,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		if (idx >= vq->vq_nentries) {
 			idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
-				VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+				VRING_PACKED_DESC_F_AVAIL_USED;
 		}
 	}
 
@@ -749,7 +749,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		if (idx >= vq->vq_nentries) {
 			idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
-				VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+				VRING_PACKED_DESC_F_AVAIL_USED;
 		}
 	} while ((cookie = cookie->next) != NULL);
 
@@ -1093,7 +1093,7 @@ static inline void
 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
 {
 	uint32_t s = mbuf->pkt_len;
-	struct ether_addr *ea;
+	struct rte_ether_addr *ea;
 
 	stats->bytes += s;
 
@@ -1110,13 +1110,13 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
 			stats->size_bins[0]++;
 		else if (s < 1519)
 			stats->size_bins[6]++;
-		else if (s >= 1519)
+		else
 			stats->size_bins[7]++;
 	}
 
-	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
-	if (is_multicast_ether_addr(ea)) {
-		if (is_broadcast_ether_addr(ea))
+	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+	if (rte_is_multicast_ether_addr(ea)) {
+		if (rte_is_broadcast_ether_addr(ea))
 			stats->broadcast++;
 		else
 			stats->multicast++;
@@ -1211,7 +1211,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1245,7 +1245,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
 
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop");
 			nb_enqueued++;
 			virtio_discard_rxbuf(vq, rxm);
@@ -1281,20 +1281,24 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	rxvq->stats.packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
-			struct rte_eth_dev *dev
-				= &rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
-		}
-		error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
+					free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
+			struct rte_eth_dev *dev =
+				&rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed += free_cnt;
 		}
-		nb_enqueued++;
 	}
 
 	if (likely(nb_enqueued)) {
@@ -1316,7 +1320,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1344,7 +1348,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
 
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop");
 			nb_enqueued++;
 			virtio_discard_rxbuf(vq, rxm);
@@ -1380,20 +1384,24 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rxvq->stats.packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill_packed(vq,
+					new_pkts, free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
 			struct rte_eth_dev *dev =
&rte_eth_devices[rxvq->port_id]; - dev->data->rx_mbuf_alloc_failed++; - break; - } - error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1); - if (unlikely(error)) { - rte_pktmbuf_free(new_mbuf); - break; + dev->data->rx_mbuf_alloc_failed += free_cnt; } - nb_enqueued++; } if (likely(nb_enqueued)) { @@ -1454,7 +1462,7 @@ virtio_recv_pkts_inorder(void *rx_queue, rxm = rcv_pkts[i]; - if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; virtio_discard_rxbuf_inorder(vq, rxm); @@ -1646,7 +1654,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, rxm = rcv_pkts[i]; - if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; virtio_discard_rxbuf(vq, rxm); @@ -1825,7 +1833,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue, rxm = rcv_pkts[i]; - if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; virtio_discard_rxbuf(vq, rxm); @@ -1996,6 +2004,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, rte_pktmbuf_free(txm); continue; } + /* vlan_insert may add a header mbuf */ + tx_pkts[nb_tx] = txm; } /* optimize ring usage */ @@ -2083,6 +2093,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) rte_pktmbuf_free(txm); continue; } + /* vlan_insert may add a header mbuf */ + tx_pkts[nb_tx] = txm; } /* optimize ring usage */ @@ -2186,6 +2198,8 @@ virtio_xmit_pkts_inorder(void *tx_queue, rte_pktmbuf_free(txm); continue; } + /* vlan_insert may add a header mbuf */ + tx_pkts[nb_tx] = txm; } /* optimize ring usage */
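
Editor's note, not part of the patch above: the one functional change worth calling out is the Rx refill rewrite, which replaces the per-descriptor rte_mbuf_raw_alloc() loop with a single rte_pktmbuf_alloc_bulk() call sized to the free descriptor count, freeing the whole batch if the enqueue fails. The sketch below illustrates that allocate-in-bulk-then-enqueue-or-free pattern in isolation; refill_ring() and rx_refill_bulk() are hypothetical names standing in for virtqueue_enqueue_recv_refill()/virtqueue_enqueue_recv_refill_packed() and the surrounding driver code, not DPDK APIs.

/* Minimal sketch of the bulk refill pattern (assumes a DPDK build environment). */
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical stand-in for the virtqueue refill helper. */
static int
refill_ring(struct rte_mbuf **pkts, uint16_t n)
{
	(void)pkts;
	(void)n;
	return 0; /* pretend every descriptor was refilled */
}

uint16_t
rx_refill_bulk(struct rte_mempool *mp, uint16_t free_cnt)
{
	uint16_t i;

	/* The driver only refills when the ring is not full. */
	if (free_cnt == 0)
		return 0;

	struct rte_mbuf *new_pkts[free_cnt];

	/* One bulk allocation instead of free_cnt single allocations. */
	if (rte_pktmbuf_alloc_bulk(mp, new_pkts, free_cnt) != 0)
		return 0; /* caller accounts this as free_cnt alloc failures */

	/* Hand the whole batch to the ring; free everything on failure. */
	if (refill_ring(new_pkts, free_cnt) != 0) {
		for (i = 0; i < free_cnt; i++)
			rte_pktmbuf_free(new_pkts[i]);
		return 0;
	}

	return free_cnt; /* descriptors made available to the device again */
}

The remaining changes are mechanical: the rte_-prefixed structure and API renames (rte_ipv4_hdr, rte_tcp_hdr, rte_ether_addr, RTE_ETHER_HDR_LEN, ...), collapsing the two packed-ring flag toggles into VRING_PACKED_DESC_F_AVAIL_USED, and writing txm back into tx_pkts[nb_tx] after VLAN insertion, since inserting the tag may prepend a new header mbuf.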