diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 061d02f666..aff791fbd0 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -213,7 +213,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
 		  "vq->vq_avail_idx=%d\n"
 		  "vq->vq_used_cons_idx=%d\n"
 		  "vq->vq_packed.cached_flags=0x%x\n"
-		  "vq->vq_packed.used_wrap_counter=%d\n",
+		  "vq->vq_packed.used_wrap_counter=%d",
 		  vq->vq_free_cnt,
 		  vq->vq_avail_idx,
 		  vq->vq_used_cons_idx,
@@ -370,12 +370,6 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
 	return 0;
 }
 
-static void
-virtio_dev_queue_release(void *queue __rte_unused)
-{
-	/* do nothing */
-}
-
 static uint16_t
 virtio_get_nr_vq(struct virtio_hw *hw)
 {
@@ -515,12 +509,14 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 	memset(mz->addr, 0, mz->len);
 
-	vq->vq_ring_mem = mz->iova;
+	if (hw->use_va)
+		vq->vq_ring_mem = (uintptr_t)mz->addr;
+	else
+		vq->vq_ring_mem = mz->iova;
+
 	vq->vq_ring_virt_mem = mz->addr;
-	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
-		     (uint64_t)mz->iova);
-	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
-		     (uint64_t)(uintptr_t)mz->addr);
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
 
 	virtio_init_vring(vq);
 
@@ -570,17 +566,28 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 		txvq->port_id = dev->data->port_id;
 		txvq->mz = mz;
 		txvq->virtio_net_hdr_mz = hdr_mz;
-		txvq->virtio_net_hdr_mem = hdr_mz->iova;
+		if (hw->use_va)
+			txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+		else
+			txvq->virtio_net_hdr_mem = hdr_mz->iova;
 	} else if (queue_type == VTNET_CQ) {
 		cvq = &vq->cq;
 		cvq->mz = mz;
 		cvq->virtio_net_hdr_mz = hdr_mz;
-		cvq->virtio_net_hdr_mem = hdr_mz->iova;
+		if (hw->use_va)
+			cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+		else
+			cvq->virtio_net_hdr_mem = hdr_mz->iova;
 		memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
 
 		hw->cvq = cvq;
 	}
 
+	if (hw->use_va)
+		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
+	else
+		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
+
 	if (queue_type == VTNET_TQ) {
 		struct virtio_tx_region *txr;
 		unsigned int i;
@@ -632,6 +639,7 @@ free_mz:
 	rte_memzone_free(mz);
 free_vq:
 	rte_free(vq);
+	hw->vqs[queue_idx] = NULL;
 
 	return ret;
 }
@@ -967,9 +975,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
 	.rx_queue_setup = virtio_dev_rx_queue_setup,
 	.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
-	.rx_queue_release = virtio_dev_queue_release,
 	.tx_queue_setup = virtio_dev_tx_queue_setup,
-	.tx_queue_release = virtio_dev_queue_release,
 	/* collect stats per queue */
 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
 	.vlan_filter_set = virtio_vlan_filter_set,
@@ -1893,7 +1899,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	eth_dev->dev_ops = &virtio_eth_dev_ops;
-	eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		set_rxtx_funcs(eth_dev);
@@ -2102,10 +2107,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
-	if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+	if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+	    (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len))
 		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
 
-	hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+		hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
+	else
+		hw->max_rx_pkt_len = ether_hdr_len + dev->data->mtu;
 
 	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
 			   DEV_RX_OFFLOAD_TCP_CKSUM))
@@ -2396,6 +2405,35 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
 }
 
+static void
+virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq;
+	int qidx;
+	void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
+
+	if (virtio_with_packed_queue(hw)) {
+		if (hw->use_vec_tx)
+			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
+		else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
+			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
+		else
+			xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
+	} else {
+		if (hw->use_inorder_tx)
+			xmit_cleanup = &virtio_xmit_cleanup_inorder;
+		else
+			xmit_cleanup = &virtio_xmit_cleanup;
+	}
+
+	for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
+		vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
+		if (vq != NULL)
+			xmit_cleanup(vq, virtqueue_nused(vq));
+	}
+}
+
 /*
  * Stop device: disable interrupt and mark link down
  */
@@ -2414,6 +2452,8 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 		goto out_unlock;
 	hw->started = 0;
 
+	virtio_tx_completed_cleanup(dev);
+
 	if (intr_conf->lsc || intr_conf->rxq) {
 		virtio_intr_disable(dev);
 
@@ -2546,6 +2586,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * The Queue Size value does not have to be a power of 2.
 		 */
 		dev_info->rx_desc_lim.nb_max = UINT16_MAX;
+		dev_info->tx_desc_lim.nb_max = UINT16_MAX;
 	} else {
 		/*
 		 * According to 2.6 Split Virtqueues:
@@ -2553,6 +2594,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		 * Size value is 32768.
 		 */
 		dev_info->rx_desc_lim.nb_max = 32768;
+		dev_info->tx_desc_lim.nb_max = 32768;
 	}
 	/*
 	 * Actual minimum is not the same for virtqueues of different kinds,
@@ -2561,7 +2603,9 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	 */
 	dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
 					       RTE_VIRTIO_VPMD_RX_REARM_THRESH);
+	dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
 	dev_info->rx_desc_lim.nb_align = 1;
+	dev_info->tx_desc_lim.nb_align = 1;
 
 	return 0;
 }
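
Reviewer note, not part of the patch: several hunks above apply the same
hw->use_va selection, to the ring base (vq_ring_mem), to the Tx and control
queue header memzones (virtio_net_hdr_mem), and to the per-queue
mbuf_addr_offset that lets the datapath read either buf_addr or buf_iova
from an mbuf with one unconditional load. The sketch below is a minimal,
self-contained illustration of that scheme; the stub structs and the names
vq_setup_addressing() and VQ_MBUF_ADDR() are hypothetical stand-ins for the
real rte_mbuf, rte_memzone and virtqueue definitions, not code from this
patch.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed stand-ins for rte_mbuf, rte_memzone and
 * virtqueue; only the fields relevant to the addressing scheme. */
struct mbuf_stub {
	void *buf_addr;    /* process virtual address of the data buffer */
	uint64_t buf_iova; /* IO (bus) address of the same buffer */
};

struct memzone_stub {
	void *addr;
	uint64_t iova;
};

struct vq_stub {
	uint64_t vq_ring_mem;    /* address handed to the device for the ring */
	size_t mbuf_addr_offset; /* offset of the mbuf field the datapath reads */
};

/* One unconditional load serves both modes: the offset chosen at queue
 * init picks the field. Assumes a 64-bit target where buf_addr and
 * buf_iova are both 8 bytes wide, as in rte_mbuf. */
#define VQ_MBUF_ADDR(vq, mb) \
	(*(uint64_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset))

static void
vq_setup_addressing(struct vq_stub *vq, const struct memzone_stub *mz,
		    int use_va)
{
	/* The same VA-vs-IOVA selection the patch applies to vq_ring_mem
	 * and to the virtio_net_hdr_mem fields. */
	if (use_va)
		vq->vq_ring_mem = (uintptr_t)mz->addr;
	else
		vq->vq_ring_mem = mz->iova;

	/* Chosen once per queue, so the hot path needs no branch. */
	vq->mbuf_addr_offset = use_va ?
		offsetof(struct mbuf_stub, buf_addr) :
		offsetof(struct mbuf_stub, buf_iova);
}

int
main(void)
{
	static char ring[64], payload[64];
	struct memzone_stub mz = { .addr = ring, .iova = 0x1000 };
	struct mbuf_stub mb = { .buf_addr = payload, .buf_iova = 0x2000 };
	struct vq_stub vq;

	vq_setup_addressing(&vq, &mz, 1 /* use_va, e.g. a virtio-user port */);
	printf("ring=0x%" PRIx64 " mbuf=0x%" PRIx64 "\n",
	       vq.vq_ring_mem, VQ_MBUF_ADDR(&vq, &mb));
	return 0;
}

Picking the offset once at queue setup is what keeps VA mode free on the
datapath: the Rx/Tx loops issue a single load per descriptor instead of
testing use_va per packet.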