diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 42d0f533c0..3805e217a6 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -461,7 +461,7 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
 			vq->vq_avail_idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
-				VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+				VRING_PACKED_DESC_F_AVAIL_USED;
 			flags = vq->vq_packed.cached_flags;
 		}
 	}
@@ -660,8 +660,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
 		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->vq_packed.cached_flags ^=
-			VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
 	}
 
 	vq->vq_free_cnt--;
@@ -728,7 +727,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		if (idx >= vq->vq_nentries) {
 			idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
-				VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+				VRING_PACKED_DESC_F_AVAIL_USED;
 		}
 	}
 
@@ -749,7 +748,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		if (idx >= vq->vq_nentries) {
 			idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
-				VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+				VRING_PACKED_DESC_F_AVAIL_USED;
 		}
 	} while ((cookie = cookie->next) != NULL);
 
@@ -1093,7 +1092,7 @@ static inline void
 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
 {
 	uint32_t s = mbuf->pkt_len;
-	struct ether_addr *ea;
+	struct rte_ether_addr *ea;
 
 	stats->bytes += s;
 
@@ -1110,13 +1109,13 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
 			stats->size_bins[0]++;
 		else if (s < 1519)
 			stats->size_bins[6]++;
-		else if (s >= 1519)
+		else
 			stats->size_bins[7]++;
 	}
 
-	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
-	if (is_multicast_ether_addr(ea)) {
-		if (is_broadcast_ether_addr(ea))
+	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+	if (rte_is_multicast_ether_addr(ea)) {
+		if (rte_is_broadcast_ether_addr(ea))
 			stats->broadcast++;
 		else
 			stats->multicast++;
@@ -1211,7 +1210,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1245,7 +1244,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
 
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop");
 			nb_enqueued++;
 			virtio_discard_rxbuf(vq, rxm);
@@ -1281,20 +1280,24 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	rxvq->stats.packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
-			struct rte_eth_dev *dev
-				= &rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
-		}
-		error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
+					free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
+			struct rte_eth_dev *dev =
+				&rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed += free_cnt;
 		}
-		nb_enqueued++;
 	}
 
 	if (likely(nb_enqueued)) {
@@ -1316,7 +1319,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1344,7 +1347,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
 
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop");
 			nb_enqueued++;
 			virtio_discard_rxbuf(vq, rxm);
@@ -1380,20 +1383,24 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rxvq->stats.packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill_packed(vq,
+					new_pkts, free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
 			struct rte_eth_dev *dev =
 				&rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
-		}
-		error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
+			dev->data->rx_mbuf_alloc_failed += free_cnt;
 		}
-		nb_enqueued++;
 	}
 
 	if (likely(nb_enqueued)) {
@@ -1454,7 +1461,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
 
 		rxm = rcv_pkts[i];
 
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop");
 			nb_enqueued++;
 			virtio_discard_rxbuf_inorder(vq, rxm);
@@ -1646,7 +1653,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
 		rxm = rcv_pkts[i];
 
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop");
 			nb_enqueued++;
 			virtio_discard_rxbuf(vq, rxm);
@@ -1825,7 +1832,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 
 		rxm = rcv_pkts[i];
 
-		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
 			PMD_RX_LOG(ERR, "Packet drop");
 			nb_enqueued++;
 			virtio_discard_rxbuf(vq, rxm);
@@ -1996,6 +2003,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 				rte_pktmbuf_free(txm);
 				continue;
 			}
+			/* vlan_insert may add a header mbuf */
+			tx_pkts[nb_tx] = txm;
 		}
 
 		/* optimize ring usage */
@@ -2083,6 +2092,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				rte_pktmbuf_free(txm);
 				continue;
 			}
+			/* vlan_insert may add a header mbuf */
+			tx_pkts[nb_tx] = txm;
 		}
 
 		/* optimize ring usage */
@@ -2186,6 +2197,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 				rte_pktmbuf_free(txm);
 				continue;
 			}
+			/* vlan_insert may add a header mbuf */
+			tx_pkts[nb_tx] = txm;
 		}
 
 		/* optimize ring usage */