X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx.c;h=e30377c510cb093410ca2c3a02375fbcb117e719;hb=3d555728c9332551efacf0b93307d475a456834c;hp=6e7ff27e8e0bf72f3a7440826696bdde9e5d319b;hpb=69c80d4ef89b82a9440628d861528cb55f0af513;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 6e7ff27e8e..e30377c510 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -72,6 +72,15 @@
 #define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
 	ETH_TXQ_FLAGS_NOOFFLOADS)
 
+int
+virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
+{
+	struct virtnet_rx *rxvq = rxq;
+	struct virtqueue *vq = rxvq->vq;
+
+	return VIRTQUEUE_NUSED(vq) >= offset;
+}
+
 static void
 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 {
@@ -124,7 +133,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
 
 		if (unlikely(cookie == NULL)) {
-			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
 				vq->vq_used_cons_idx);
 			break;
 		}
@@ -258,6 +267,12 @@ tx_offload_enabled(struct virtio_hw *hw)
 		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
 }
 
+/* avoid write operation when necessary, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
+	if ((var) != (val))			\
+		(var) = (val);			\
+} while (0)
+
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		       uint16_t needed, int use_indirect, int can_push)
@@ -286,8 +301,14 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		hdr = (struct virtio_net_hdr *)
 			rte_pktmbuf_prepend(cookie, head_size);
 		/* if offload disabled, it is not zeroed below, do it now */
-		if (offload == 0)
-			memset(hdr, 0, head_size);
+		if (offload == 0) {
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+		}
 	} else if (use_indirect) {
 		/* setup tx ring slot to point to indirect
 		 * descriptor list stored in reserved region.
@@ -337,9 +358,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 			break;
 
 		default:
-			hdr->csum_start = 0;
-			hdr->csum_offset = 0;
-			hdr->flags = 0;
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
 			break;
 		}
 
@@ -355,9 +376,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 				cookie->l3_len +
 				cookie->l4_len;
 		} else {
-			hdr->gso_type = 0;
-			hdr->gso_size = 0;
-			hdr->hdr_len = 0;
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
 		}
 	}
 
@@ -378,154 +399,16 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	vq_update_avail_ring(vq, head_idx);
 }
 
-static void
-virtio_dev_vring_start(struct virtqueue *vq)
-{
-	int size = vq->vq_nentries;
-	struct vring *vr = &vq->vq_ring;
-	uint8_t *ring_mem = vq->vq_ring_virt_mem;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/*
-	 * Reinitialise since virtio port might have been stopped and restarted
-	 */
-	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
-	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
-	vq->vq_used_cons_idx = 0;
-	vq->vq_desc_head_idx = 0;
-	vq->vq_avail_idx = 0;
-	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
-	vq->vq_free_cnt = vq->vq_nentries;
-	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
-	vring_desc_init(vr->desc, size);
-
-	/*
-	 * Disable device(host) interrupting guest
-	 */
-	virtqueue_disable_intr(vq);
-}
-
 void
 virtio_dev_cq_start(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 
 	if (hw->cvq && hw->cvq->vq) {
-		virtio_dev_vring_start(hw->cvq->vq);
 		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
 	}
 }
 
-void
-virtio_dev_rxtx_start(struct rte_eth_dev *dev)
-{
-	/*
-	 * Start receive and transmit vrings
-	 * -	Setup vring structure for all queues
-	 * -	Initialize descriptor for the rx vring
-	 * -	Allocate blank mbufs for the each rx descriptor
-	 *
-	 */
-	uint16_t i;
-	uint16_t desc_idx;
-	struct virtio_hw *hw = dev->data->dev_private;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Start rx vring. */
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
-		struct virtqueue *vq = rxvq->vq;
-		int error, nbufs;
-		struct rte_mbuf *m;
-
-		virtio_dev_vring_start(vq);
-		if (rxvq->mpool == NULL) {
-			rte_exit(EXIT_FAILURE,
-				"Cannot allocate mbufs for rx virtqueue");
-		}
-
-		/* Allocate blank mbufs for the each rx descriptor */
-		nbufs = 0;
-		error = ENOSPC;
-
-		if (hw->use_simple_rxtx) {
-			for (desc_idx = 0; desc_idx < vq->vq_nentries;
-			     desc_idx++) {
-				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
-				vq->vq_ring.desc[desc_idx].flags =
-					VRING_DESC_F_WRITE;
-			}
-		}
-
-		memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
-		     desc_idx++) {
-			vq->sw_ring[vq->vq_nentries + desc_idx] =
-				&rxvq->fake_mbuf;
-		}
-
-		while (!virtqueue_full(vq)) {
-			m = rte_mbuf_raw_alloc(rxvq->mpool);
-			if (m == NULL)
-				break;
-
-			/******************************************
-			*         Enqueue allocated buffers        *
-			*******************************************/
-			if (hw->use_simple_rxtx)
-				error = virtqueue_enqueue_recv_refill_simple(vq, m);
-			else
-				error = virtqueue_enqueue_recv_refill(vq, m);
-
-			if (error) {
-				rte_pktmbuf_free(m);
-				break;
-			}
-			nbufs++;
-		}
-
-		vq_update_avail_idx(vq);
-
-		PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
-
-		VIRTQUEUE_DUMP(vq);
-	}
-
-	/* Start tx vring. */
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		struct virtnet_tx *txvq = dev->data->tx_queues[i];
-		struct virtqueue *vq = txvq->vq;
-
-		virtio_dev_vring_start(vq);
-		if (hw->use_simple_rxtx) {
-			uint16_t mid_idx = vq->vq_nentries >> 1;
-
-			for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
-				vq->vq_ring.avail->ring[desc_idx] =
-					desc_idx + mid_idx;
-				vq->vq_ring.desc[desc_idx + mid_idx].next =
-					desc_idx;
-				vq->vq_ring.desc[desc_idx + mid_idx].addr =
-					txvq->virtio_net_hdr_mem +
-					offsetof(struct virtio_tx_region, tx_hdr);
-				vq->vq_ring.desc[desc_idx + mid_idx].len =
-					vq->hw->vtnet_hdr_size;
-				vq->vq_ring.desc[desc_idx + mid_idx].flags =
-					VRING_DESC_F_NEXT;
-				vq->vq_ring.desc[desc_idx].flags = 0;
-			}
-			for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
-			     desc_idx++)
-				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
-		}
-
-		VIRTQUEUE_DUMP(vq);
-	}
-}
-
 int
 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -538,6 +421,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_rx *rxvq;
+	int error, nbufs;
+	struct rte_mbuf *m;
+	uint16_t desc_idx;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -546,13 +432,61 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
 
 	rxvq = &vq->rxq;
-	rxvq->mpool = mp;
 	rxvq->queue_id = queue_idx;
-
+	rxvq->mpool = mp;
+	if (rxvq->mpool == NULL) {
+		rte_exit(EXIT_FAILURE,
+			"Cannot allocate mbufs for rx virtqueue");
+	}
 	dev->data->rx_queues[queue_idx] = rxvq;
 
+	/* Allocate blank mbufs for the each rx descriptor */
+	nbufs = 0;
+	error = ENOSPC;
+
+	if (hw->use_simple_rxtx) {
+		for (desc_idx = 0; desc_idx < vq->vq_nentries;
+		     desc_idx++) {
+			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+			vq->vq_ring.desc[desc_idx].flags =
+				VRING_DESC_F_WRITE;
+		}
+	}
+
+	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
+	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
+	     desc_idx++) {
+		vq->sw_ring[vq->vq_nentries + desc_idx] =
+			&rxvq->fake_mbuf;
+	}
+
+	while (!virtqueue_full(vq)) {
+		m = rte_mbuf_raw_alloc(rxvq->mpool);
+		if (m == NULL)
+			break;
+
+		/* Enqueue allocated buffers */
+		if (hw->use_simple_rxtx)
+			error = virtqueue_enqueue_recv_refill_simple(vq, m);
+		else
+			error = virtqueue_enqueue_recv_refill(vq, m);
+
+		if (error) {
+			rte_pktmbuf_free(m);
+			break;
+		}
+		nbufs++;
+	}
+
+	vq_update_avail_idx(vq);
+
+	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
+
 	virtio_rxq_vec_setup(rxvq);
+
+	VIRTQUEUE_DUMP(vq);
+
 	return 0;
 }
 
@@ -600,6 +534,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_tx *txvq;
 	uint16_t tx_free_thresh;
+	uint16_t desc_idx;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -628,6 +563,30 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	vq->vq_free_thresh = tx_free_thresh;
 
+	if (hw->use_simple_rxtx) {
+		uint16_t mid_idx = vq->vq_nentries >> 1;
+
+		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
+			vq->vq_ring.avail->ring[desc_idx] =
+				desc_idx + mid_idx;
+			vq->vq_ring.desc[desc_idx + mid_idx].next =
+				desc_idx;
+			vq->vq_ring.desc[desc_idx + mid_idx].addr =
+				txvq->virtio_net_hdr_mem +
+				offsetof(struct virtio_tx_region, tx_hdr);
+			vq->vq_ring.desc[desc_idx + mid_idx].len =
+				vq->hw->vtnet_hdr_size;
+			vq->vq_ring.desc[desc_idx + mid_idx].flags =
+				VRING_DESC_F_NEXT;
+			vq->vq_ring.desc[desc_idx].flags = 0;
+		}
+		for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
+		     desc_idx++)
+			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+	}
+
+	VIRTQUEUE_DUMP(vq);
+
 	dev->data->tx_queues[queue_idx] = txvq;
 	return 0;
 }
@@ -766,7 +725,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw;
+	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm, *new_mbuf;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -777,20 +736,23 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	int offload;
 	struct virtio_net_hdr *hdr;
 
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
-	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
-	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
+	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+		num = VIRTIO_MBUF_BURST_SZ;
 	if (likely(num > DESC_PER_CACHELINE))
 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
 
 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
 
-	hw = vq->hw;
-	nb_rx = 0;
 	nb_enqueued = 0;
 	hdr_size = hw->vtnet_hdr_size;
 	offload = rx_offload_enabled(hw);
@@ -813,8 +775,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rxm->ol_flags = 0;
 		rxm->vlan_tci = 0;
-		rxm->nb_segs = 1;
-		rxm->next = NULL;
 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
 
@@ -834,7 +794,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rx_pkts[nb_rx++] = rxm;
 
-		rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
+		rxvq->stats.bytes += rxm->pkt_len;
 		virtio_update_packet_stats(&rxvq->stats, rxm);
 	}
 
@@ -877,7 +837,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 {
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw;
+	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm, *new_mbuf;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -891,14 +851,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	uint32_t hdr_size;
 	int offload;
 
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
 
-	hw = vq->hw;
-	nb_rx = 0;
 	i = 0;
 	nb_enqueued = 0;
 	seg_num = 0;
@@ -941,7 +903,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 		rxm->nb_segs = seg_num;
-		rxm->next = NULL;
 		rxm->ol_flags = 0;
 		rxm->vlan_tci = 0;
 		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
@@ -986,7 +947,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 			rxm = rcv_pkts[extra_idx];
 
 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
-			rxm->next = NULL;
 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
 			rxm->data_len = (uint16_t)(len[extra_idx]);
 
@@ -1050,9 +1010,12 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t nb_used, nb_tx;
+	uint16_t nb_used, nb_tx = 0;
 	int error;
 
+	if (unlikely(hw->started == 0))
+		return nb_tx;
+
 	if (unlikely(nb_pkts < 1))
 		return nb_pkts;
 
@@ -1077,7 +1040,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		/* optimize ring usage */
-		if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
+		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
 		    rte_mbuf_refcnt_read(txm) == 1 &&
 		    RTE_MBUF_DIRECT(txm) &&
 		    txm->nb_segs == 1 &&
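
A note on the ASSIGN_UNLESS_EQUAL() hunks above: with can_push the virtio_net header is prepended into the mbuf headroom, and with offloads disabled (or in the default/else branches) its fields are normally already zero, so unconditional stores only dirty a cache line on every transmit. The macro turns each store into a compare plus a conditional store. The following is a minimal, self-contained C sketch of that read-before-write pattern; the struct and names are illustrative stand-ins, not the DPDK definitions, and this code is not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write only when the value actually changes, so a cache line that is
 * already correct is not dirtied on every call (same idea as the patch). */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)

/* Illustrative stand-in for struct virtio_net_hdr (not the real layout). */
struct demo_net_hdr {
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

int
main(void)
{
	struct demo_net_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));

	/* With offloads disabled the header is normally already zero,
	 * so none of these expands into a store. */
	ASSIGN_UNLESS_EQUAL(hdr.csum_start, 0);
	ASSIGN_UNLESS_EQUAL(hdr.csum_offset, 0);
	ASSIGN_UNLESS_EQUAL(hdr.flags, 0);
	ASSIGN_UNLESS_EQUAL(hdr.gso_type, 0);
	ASSIGN_UNLESS_EQUAL(hdr.gso_size, 0);
	ASSIGN_UNLESS_EQUAL(hdr.hdr_len, 0);

	printf("flags=%u csum_start=%u\n",
	       (unsigned)hdr.flags, (unsigned)hdr.csum_start);
	return 0;
}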