X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fvirtio%2Fvirtio_ethdev.c;h=d25c08f0a2d534b663ab905e3dc64a7b4ca16eb6;hb=4e968cacb873d8497e95075b749295297745f92d;hp=d0c65e3752f383166a81bf99c779695363dadb0a;hpb=ec194c2f189525b2fb4be5604422a28ea5f08acd;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index d0c65e3752..d25c08f0a2 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -71,7 +71,6 @@ static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
 static int virtio_mac_addr_set(struct rte_eth_dev *dev,
 				struct ether_addr *mac_addr);
 
-static int virtio_intr_enable(struct rte_eth_dev *dev);
 static int virtio_intr_disable(struct rte_eth_dev *dev);
 
 static int virtio_dev_queue_stats_mapping_set(
@@ -142,16 +141,17 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
 
 static struct virtio_pmd_ctrl *
-virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
-		       int *dlen, int pkt_num)
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+			   struct virtio_pmd_ctrl *ctrl,
+			   int *dlen, int pkt_num)
 {
 	struct virtqueue *vq = cvq->vq;
 	int head;
-	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
 	struct virtio_pmd_ctrl *result;
-	int wrap_counter;
 	uint16_t flags;
 	int sum = 0;
+	int nb_descs = 0;
 	int k;
 
 	/*
@@ -161,14 +161,14 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 	 * One RX packet for ACK.
 	 */
 	head = vq->vq_avail_idx;
-	wrap_counter = vq->avail_wrap_counter;
-	desc[head].flags = VRING_DESC_F_NEXT;
+	flags = vq->vq_packed.cached_flags;
 	desc[head].addr = cvq->virtio_net_hdr_mem;
 	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
 	vq->vq_free_cnt--;
+	nb_descs++;
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
 		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->avail_wrap_counter ^= 1;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
 	}
 
 	for (k = 0; k < pkt_num; k++) {
@@ -176,129 +176,107 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 			+ sizeof(struct virtio_net_ctrl_hdr)
 			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
 		desc[vq->vq_avail_idx].len = dlen[k];
-		flags = VRING_DESC_F_NEXT;
+		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+			vq->vq_packed.cached_flags;
 		sum += dlen[k];
 		vq->vq_free_cnt--;
-		flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
-			 VRING_DESC_F_USED(!vq->avail_wrap_counter);
-		desc[vq->vq_avail_idx].flags = flags;
-		rte_smp_wmb();
-		vq->vq_free_cnt--;
+		nb_descs++;
 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
 			vq->vq_avail_idx -= vq->vq_nentries;
-			vq->avail_wrap_counter ^= 1;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
 		}
 	}
 
-
 	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
 			+ sizeof(struct virtio_net_ctrl_hdr);
 	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
-	flags = VRING_DESC_F_WRITE;
-	flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
-		 VRING_DESC_F_USED(!vq->avail_wrap_counter);
-	desc[vq->vq_avail_idx].flags = flags;
-	flags = VRING_DESC_F_NEXT;
-	flags |= VRING_DESC_F_AVAIL(wrap_counter) |
-		 VRING_DESC_F_USED(!wrap_counter);
-	desc[head].flags = flags;
-	rte_smp_wmb();
-
+	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+			vq->vq_packed.cached_flags;
 	vq->vq_free_cnt--;
+	nb_descs++;
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
 		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->avail_wrap_counter ^= 1;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
 	}
 
+	virtio_wmb(vq->hw->weak_barriers);
+	desc[head].flags = VRING_DESC_F_NEXT | flags;
+
+	virtio_wmb(vq->hw->weak_barriers);
 	virtqueue_notify(vq);
 
 	/* wait for used descriptors in virtqueue */
-	do {
-		rte_rmb();
+	while (!desc_is_used(&desc[head], vq))
 		usleep(100);
-	} while (!desc_is_used(&desc[head], vq));
+
+	virtio_rmb(vq->hw->weak_barriers);
 
 	/* now get used descriptors */
-	while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
-		vq->vq_free_cnt++;
-		if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
-			vq->vq_used_cons_idx -= vq->vq_nentries;
-			vq->used_wrap_counter ^= 1;
-		}
-	}
+	vq->vq_free_cnt += nb_descs;
+	vq->vq_used_cons_idx += nb_descs;
+	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+		vq->vq_used_cons_idx -= vq->vq_nentries;
+		vq->vq_packed.used_wrap_counter ^= 1;
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+			"vq->vq_avail_idx=%d\n"
+			"vq->vq_used_cons_idx=%d\n"
+			"vq->vq_packed.cached_flags=0x%x\n"
+			"vq->vq_packed.used_wrap_counter=%d\n",
+			vq->vq_free_cnt,
+			vq->vq_avail_idx,
+			vq->vq_used_cons_idx,
+			vq->vq_packed.cached_flags,
+			vq->vq_packed.used_wrap_counter);
 
 	result = cvq->virtio_net_hdr_mz->addr;
 	return result;
 }
 
-static int
-virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
-		int *dlen, int pkt_num)
+static struct virtio_pmd_ctrl *
+virtio_send_command_split(struct virtnet_ctl *cvq,
+			  struct virtio_pmd_ctrl *ctrl,
+			  int *dlen, int pkt_num)
 {
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq = cvq->vq;
 	uint32_t head, i;
 	int k, sum = 0;
-	virtio_net_ctrl_ack status = ~0;
-	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq;
 
-	ctrl->status = status;
-
-	if (!cvq || !cvq->vq) {
-		PMD_INIT_LOG(ERR, "Control queue is not supported.");
-		return -1;
-	}
-
-	rte_spinlock_lock(&cvq->lock);
-	vq = cvq->vq;
 	head = vq->vq_desc_head_idx;
 
-	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
-		"vq->hw->cvq = %p vq = %p",
-		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
-
-	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
-		rte_spinlock_unlock(&cvq->lock);
-		return -1;
-	}
-
-	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
-		sizeof(struct virtio_pmd_ctrl));
-
-	if (vtpci_packed_queue(vq->hw)) {
-		result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
-		goto out_unlock;
-	}
-
 	/*
 	 * Format is enforced in qemu code:
 	 * One TX packet for header;
 	 * At least one TX packet per argument;
 	 * One RX packet for ACK.
 	 */
-	vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
-	vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
-	vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+	vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
 	vq->vq_free_cnt--;
-	i = vq->vq_ring.desc[head].next;
+	i = vq->vq_split.ring.desc[head].next;
 
 	for (k = 0; k < pkt_num; k++) {
-		vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
-		vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+		vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
 			+ sizeof(struct virtio_net_ctrl_hdr)
 			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
-		vq->vq_ring.desc[i].len = dlen[k];
+		vq->vq_split.ring.desc[i].len = dlen[k];
 		sum += dlen[k];
 		vq->vq_free_cnt--;
-		i = vq->vq_ring.desc[i].next;
+		i = vq->vq_split.ring.desc[i].next;
 	}
 
-	vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
-	vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+	vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
 		+ sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
 	vq->vq_free_cnt--;
 
-	vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
 
 	vq_update_avail_ring(vq, head);
 	vq_update_avail_idx(vq);
@@ -319,16 +297,17 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 
 		used_idx = (uint32_t)(vq->vq_used_cons_idx
 				& (vq->vq_nentries - 1));
-		uep = &vq->vq_ring.used->ring[used_idx];
+		uep = &vq->vq_split.ring.used->ring[used_idx];
 		idx = (uint32_t) uep->id;
 		desc_idx = idx;
 
-		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
-			desc_idx = vq->vq_ring.desc[desc_idx].next;
+		while (vq->vq_split.ring.desc[desc_idx].flags &
+				VRING_DESC_F_NEXT) {
+			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
 			vq->vq_free_cnt++;
 		}
 
-		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
 		vq->vq_desc_head_idx = idx;
 
 		vq->vq_used_cons_idx++;
@@ -339,8 +318,44 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 			vq->vq_free_cnt, vq->vq_desc_head_idx);
 
 	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
+static int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		    int *dlen, int pkt_num)
+{
+	virtio_net_ctrl_ack status = ~0;
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq;
+
+	ctrl->status = status;
+
+	if (!cvq || !cvq->vq) {
+		PMD_INIT_LOG(ERR, "Control queue is not supported.");
+		return -1;
+	}
+
+	rte_spinlock_lock(&cvq->lock);
+	vq = cvq->vq;
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+		"vq->hw->cvq = %p vq = %p",
+		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+		rte_spinlock_unlock(&cvq->lock);
+		return -1;
+	}
+
+	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+		sizeof(struct virtio_pmd_ctrl));
+
+	if (vtpci_packed_queue(vq->hw))
+		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
+	else
+		result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
 
-out_unlock:
 	rte_spinlock_unlock(&cvq->lock);
 	return result->status;
 }
@@ -390,7 +405,6 @@ static void
 virtio_init_vring(struct virtqueue *vq)
 {
 	int size = vq->vq_nentries;
-	struct vring *vr = &vq->vq_ring;
 	uint8_t *ring_mem = vq->vq_ring_virt_mem;
 
 	PMD_INIT_FUNC_TRACE();
@@ -404,10 +418,12 @@ virtio_init_vring(struct virtqueue *vq)
 	vq->vq_free_cnt = vq->vq_nentries;
 	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
 	if (vtpci_packed_queue(vq->hw)) {
-		vring_init_packed(&vq->ring_packed, ring_mem,
+		vring_init_packed(&vq->vq_packed.ring, ring_mem,
 				  VIRTIO_PCI_VRING_ALIGN, size);
 		vring_desc_init_packed(vq, size);
 	} else {
+		struct vring *vr = &vq->vq_split.ring;
+
 		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
 		vring_desc_init_split(vr->desc, size);
 	}
@@ -482,13 +498,12 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
-	vq->event_flags_shadow = 0;
 	if (vtpci_packed_queue(hw)) {
-		vq->avail_wrap_counter = 1;
-		vq->used_wrap_counter = 1;
-		vq->avail_used_flags =
-			VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
-			VRING_DESC_F_USED(!vq->avail_wrap_counter);
+		vq->vq_packed.used_wrap_counter = 1;
+		vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+		vq->vq_packed.event_flags_shadow = 0;
+		if (queue_type == VTNET_RQ)
+			vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
 	}
 
 	/*
@@ -596,17 +611,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		memset(txr, 0, vq_size * sizeof(*txr));
 		for (i = 0; i < vq_size; i++) {
 			struct vring_desc *start_dp = txr[i].tx_indir;
-			struct vring_packed_desc *start_dp_packed =
-				txr[i].tx_indir_pq;
 
 			/* first indirect descriptor is always the tx header */
-			if (vtpci_packed_queue(hw)) {
-				start_dp_packed->addr = txvq->virtio_net_hdr_mem
-					+ i * sizeof(*txr)
-					+ offsetof(struct virtio_tx_region,
-						   tx_hdr);
-				start_dp_packed->len = hw->vtnet_hdr_size;
-			} else {
+			if (!vtpci_packed_queue(hw)) {
 				vring_desc_init_split(start_dp,
 						      RTE_DIM(txr[i].tx_indir));
 				start_dp->addr = txvq->virtio_net_hdr_mem
@@ -843,10 +850,12 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 static int
 virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
 	struct virtqueue *vq = rxvq->vq;
 
 	virtqueue_enable_intr(vq);
+	virtio_mb(hw->weak_barriers);
 	return 0;
 }
 
@@ -896,6 +905,21 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
 	.mac_addr_set            = virtio_mac_addr_set,
 };
 
+/*
+ * dev_ops for virtio-user in secondary processes, as we just have
+ * some limited supports currently.
+ */
+const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
+	.dev_infos_get           = virtio_dev_info_get,
+	.stats_get               = virtio_dev_stats_get,
+	.xstats_get              = virtio_dev_xstats_get,
+	.xstats_get_names        = virtio_dev_xstats_get_names,
+	.stats_reset             = virtio_dev_stats_reset,
+	.xstats_reset            = virtio_dev_stats_reset,
+	/* collect stats per queue */
+	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+};
+
 static void
 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
@@ -1446,7 +1470,8 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 
 	if (vtpci_packed_queue(hw)) {
 		PMD_INIT_LOG(INFO,
-			"virtio: using packed ring standard Tx path on port %u",
+			"virtio: using packed ring %s Tx path on port %u",
+			hw->use_inorder_tx ? "inorder" : "standard",
 			eth_dev->data->port_id);
 		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
 	} else {
@@ -1481,10 +1506,9 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
 	} else if (hw->use_inorder_rx) {
 		PMD_INIT_LOG(INFO,
-			"virtio: using inorder mergeable buffer Rx path on port %u",
+			"virtio: using inorder Rx path on port %u",
 			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst =
-			&virtio_recv_mergeable_pkts_inorder;
+		eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
 	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using mergeable buffer Rx path on port %u",
@@ -1614,6 +1638,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 	if (virtio_negotiate_features(hw, req_features) < 0)
 		return -1;
 
+	hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
 	if (!hw->virtio_user_dev) {
 		pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 		rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1836,6 +1862,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 
 out:
 	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
 	return ret;
 }
 
@@ -1963,6 +1990,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
 	struct virtio_hw *hw = dev->data->dev_private;
+	uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+		hw->vtnet_hdr_size;
 	uint64_t rx_offloads = rxmode->offloads;
 	uint64_t tx_offloads = txmode->offloads;
 	uint64_t req_features;
@@ -1977,6 +2006,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return ret;
 	}
 
+	if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+
 	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
 			   DEV_RX_OFFLOAD_TCP_CKSUM))
 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
@@ -2049,19 +2081,13 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
 		hw->use_inorder_tx = 1;
-		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) &&
-		    !vtpci_packed_queue(hw)) {
-			hw->use_inorder_rx = 1;
-			hw->use_simple_rx = 0;
-		} else {
-			hw->use_inorder_rx = 0;
-		}
+		hw->use_inorder_rx = 1;
+		hw->use_simple_rx = 0;
 	}
 
 	if (vtpci_packed_queue(hw)) {
 		hw->use_simple_rx = 0;
 		hw->use_inorder_rx = 0;
-		hw->use_inorder_tx = 0;
 	}
 
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
@@ -2335,6 +2361,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	host_features = VTPCI_OPS(hw)->get_features(hw);
 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM |