X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_ethdev.c;h=04aecb75983b4b004dfc8eb0fb500ab6cbd253ae;hb=e876497c4f6baef09fbee14b702025b67f4fed1c;hp=2ba66d2916c25027c6355d84cefa2c4d6f6ba363;hpb=705dced4a72a1053368c84c4b68f04f028a78b30;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d2916..04aecb7598 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -35,6 +35,7 @@
 #include "virtio_logs.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
+#include "virtio_user/virtio_user_dev.h"
 
 static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
 static int virtio_dev_configure(struct rte_eth_dev *dev);
@@ -65,13 +66,12 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
 static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
 static int virtio_mac_addr_add(struct rte_eth_dev *dev,
-				struct ether_addr *mac_addr,
+				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t vmdq);
 static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
 static int virtio_mac_addr_set(struct rte_eth_dev *dev,
-				struct ether_addr *mac_addr);
+				struct rte_ether_addr *mac_addr);
 
-static int virtio_intr_enable(struct rte_eth_dev *dev);
 static int virtio_intr_disable(struct rte_eth_dev *dev);
 
 static int virtio_dev_queue_stats_mapping_set(
@@ -141,38 +141,112 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
 
-static int
-virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
-		int *dlen, int pkt_num)
+static struct virtio_pmd_ctrl *
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+			   struct virtio_pmd_ctrl *ctrl,
+			   int *dlen, int pkt_num)
 {
-	uint32_t head, i;
-	int k, sum = 0;
-	virtio_net_ctrl_ack status = ~0;
+	struct virtqueue *vq = cvq->vq;
+	int head;
+	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
 	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq;
+	uint16_t flags;
+	int sum = 0;
+	int nb_descs = 0;
+	int k;
 
-	ctrl->status = status;
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	head = vq->vq_avail_idx;
+	flags = vq->vq_packed.cached_flags;
+	desc[head].addr = cvq->virtio_net_hdr_mem;
+	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	nb_descs++;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+	}
 
-	if (!cvq || !cvq->vq) {
-		PMD_INIT_LOG(ERR, "Control queue is not supported.");
-		return -1;
+	for (k = 0; k < pkt_num; k++) {
+		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		desc[vq->vq_avail_idx].len = dlen[k];
+		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+			vq->vq_packed.cached_flags;
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		nb_descs++;
+		if (++vq->vq_avail_idx >= vq->vq_nentries) {
+			vq->vq_avail_idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
 	}
 
-	rte_spinlock_lock(&cvq->lock);
-	vq = cvq->vq;
-	head = vq->vq_desc_head_idx;
+	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+		+ sizeof(struct virtio_net_ctrl_hdr);
+	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+		vq->vq_packed.cached_flags;
+	vq->vq_free_cnt--;
+	nb_descs++;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+	}
 
-	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
-			"vq->hw->cvq = %p vq = %p",
-			vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+	virtio_wmb(vq->hw->weak_barriers);
+	desc[head].flags = VRING_DESC_F_NEXT | flags;
 
-	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
-		rte_spinlock_unlock(&cvq->lock);
-		return -1;
+	virtio_wmb(vq->hw->weak_barriers);
+	virtqueue_notify(vq);
+
+	/* wait for used descriptors in virtqueue */
+	while (!desc_is_used(&desc[head], vq))
+		usleep(100);
+
+	virtio_rmb(vq->hw->weak_barriers);
+
+	/* now get used descriptors */
+	vq->vq_free_cnt += nb_descs;
+	vq->vq_used_cons_idx += nb_descs;
+	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+		vq->vq_used_cons_idx -= vq->vq_nentries;
+		vq->vq_packed.used_wrap_counter ^= 1;
 	}
 
-	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
-		sizeof(struct virtio_pmd_ctrl));
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+			"vq->vq_avail_idx=%d\n"
+			"vq->vq_used_cons_idx=%d\n"
+			"vq->vq_packed.cached_flags=0x%x\n"
+			"vq->vq_packed.used_wrap_counter=%d\n",
+			vq->vq_free_cnt,
+			vq->vq_avail_idx,
+			vq->vq_used_cons_idx,
+			vq->vq_packed.cached_flags,
+			vq->vq_packed.used_wrap_counter);
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_split(struct virtnet_ctl *cvq,
+			  struct virtio_pmd_ctrl *ctrl,
+			  int *dlen, int pkt_num)
+{
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq = cvq->vq;
+	uint32_t head, i;
+	int k, sum = 0;
+
+	head = vq->vq_desc_head_idx;
 
 	/*
 	 * Format is enforced in qemu code:
@@ -180,30 +254,30 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 	 * At least one TX packet per argument;
 	 * One RX packet for ACK.
 	 */
-	vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
-	vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
-	vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+	vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
 	vq->vq_free_cnt--;
-	i = vq->vq_ring.desc[head].next;
+	i = vq->vq_split.ring.desc[head].next;
 
 	for (k = 0; k < pkt_num; k++) {
-		vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
-		vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+		vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
 			+ sizeof(struct virtio_net_ctrl_hdr)
 			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
-		vq->vq_ring.desc[i].len = dlen[k];
+		vq->vq_split.ring.desc[i].len = dlen[k];
 		sum += dlen[k];
 		vq->vq_free_cnt--;
-		i = vq->vq_ring.desc[i].next;
+		i = vq->vq_split.ring.desc[i].next;
 	}
 
-	vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
-	vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+	vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
 	vq->vq_free_cnt--;
 
-	vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
 
 	vq_update_avail_ring(vq, head);
 	vq_update_avail_idx(vq);
@@ -224,16 +298,17 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
-		uep = &vq->vq_ring.used->ring[used_idx];
+		uep = &vq->vq_split.ring.used->ring[used_idx];
 		idx = (uint32_t) uep->id;
 		desc_idx = idx;
 
-		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
-			desc_idx = vq->vq_ring.desc[desc_idx].next;
+		while (vq->vq_split.ring.desc[desc_idx].flags &
+				VRING_DESC_F_NEXT) {
+			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
 			vq->vq_free_cnt++;
 		}
 
-		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
 		vq->vq_desc_head_idx = idx;
 
 		vq->vq_used_cons_idx++;
@@ -244,6 +319,43 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
			vq->vq_free_cnt, vq->vq_desc_head_idx);
 
 	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
+static int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		int *dlen, int pkt_num)
+{
+	virtio_net_ctrl_ack status = ~0;
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq;
+
+	ctrl->status = status;
+
+	if (!cvq || !cvq->vq) {
+		PMD_INIT_LOG(ERR, "Control queue is not supported.");
+		return -1;
+	}
+
+	rte_spinlock_lock(&cvq->lock);
+	vq = cvq->vq;
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+		"vq->hw->cvq = %p vq = %p",
+		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+		rte_spinlock_unlock(&cvq->lock);
+		return -1;
+	}
+
+	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+		sizeof(struct virtio_pmd_ctrl));
+
+	if (vtpci_packed_queue(vq->hw))
+		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
+	else
+		result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
 
 	rte_spinlock_unlock(&cvq->lock);
 	return result->status;
@@ -294,25 +406,28 @@ static void
 virtio_init_vring(struct virtqueue *vq)
 {
 	int size = vq->vq_nentries;
-	struct vring *vr = &vq->vq_ring;
 	uint8_t *ring_mem = vq->vq_ring_virt_mem;
 
 	PMD_INIT_FUNC_TRACE();
 
-	/*
-	 * Reinitialise since virtio port might have been stopped and restarted
-	 */
 	memset(ring_mem, 0, vq->vq_ring_size);
-	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+
 	vq->vq_used_cons_idx = 0;
 	vq->vq_desc_head_idx = 0;
 	vq->vq_avail_idx = 0;
 	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
 	vq->vq_free_cnt = vq->vq_nentries;
 	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+	if (vtpci_packed_queue(vq->hw)) {
+		vring_init_packed(&vq->vq_packed.ring, ring_mem,
+				  VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_packed(vq, size);
+	} else {
+		struct vring *vr = &vq->vq_split.ring;
 
-	vring_desc_init(vr->desc, size);
-
+		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_split(vr->desc, size);
+	}
 	/*
 	 * Disable device(host) interrupting guest
 	 */
@@ -335,8 +450,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	void *sw_ring = NULL;
 	int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
 	int ret;
+	int numa_node = dev->device->numa_node;
 
-	PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
+	PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
+			vtpci_queue_idx, numa_node);
 
 	/*
 	 * Read the virtqueue size from the Queue Size field
@@ -372,7 +489,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	}
 
 	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
-				SOCKET_ID_ANY);
+				numa_node);
 	if (vq == NULL) {
 		PMD_INIT_LOG(ERR, "can not allocate vq");
 		return -ENOMEM;
@@ -382,17 +499,24 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
+	if (vtpci_packed_queue(hw)) {
+		vq->vq_packed.used_wrap_counter = 1;
+		vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+		vq->vq_packed.event_flags_shadow = 0;
+		if (queue_type == VTNET_RQ)
+			vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+	}
 
 	/*
 	 * Reserve a memzone for vring elements
 	 */
-	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+	size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
 	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
 	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
		     size, vq->vq_ring_size);
 
 	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
-			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+			numa_node, RTE_MEMZONE_IOVA_CONTIG,
			VIRTIO_PCI_VRING_ALIGN);
 	if (mz == NULL) {
 		if (rte_errno == EEXIST)
@@ -418,7 +542,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
		 dev->data->port_id, vtpci_queue_idx);
 	hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
-			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+			numa_node, RTE_MEMZONE_IOVA_CONTIG,
			RTE_CACHE_LINE_SIZE);
 	if (hdr_mz == NULL) {
 		if (rte_errno == EEXIST)
@@ -435,7 +559,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
			       sizeof(vq->sw_ring[0]);
 
 		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
-				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+				RTE_CACHE_LINE_SIZE, numa_node);
 		if (!sw_ring) {
 			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
 			ret = -ENOMEM;
@@ -489,15 +613,17 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		for (i = 0; i < vq_size; i++) {
 			struct vring_desc *start_dp = txr[i].tx_indir;
 
-			vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
-
 			/* first indirect descriptor is always the tx header */
-			start_dp->addr = txvq->virtio_net_hdr_mem
-				+ i * sizeof(*txr)
-				+ offsetof(struct virtio_tx_region, tx_hdr);
-
-			start_dp->len = hw->vtnet_hdr_size;
-			start_dp->flags = VRING_DESC_F_NEXT;
+			if (!vtpci_packed_queue(hw)) {
+				vring_desc_init_split(start_dp,
+						      RTE_DIM(txr[i].tx_indir));
+				start_dp->addr = txvq->virtio_net_hdr_mem
+					+ i * sizeof(*txr)
+					+ offsetof(struct virtio_tx_region,
+						   tx_hdr);
+				start_dp->len = hw->vtnet_hdr_size;
+				start_dp->flags = VRING_DESC_F_NEXT;
+			}
 		}
 	}
 
@@ -608,6 +734,17 @@ virtio_dev_close(struct rte_eth_dev *dev)
 	vtpci_reset(hw);
 	virtio_dev_free_mbufs(dev);
 	virtio_free_queues(hw);
+
+#ifdef RTE_VIRTIO_USER
+	if (hw->virtio_user_dev)
+		virtio_user_dev_uninit(hw->virtio_user_dev);
+	else
+#endif
+	if (dev->device) {
+		rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(dev));
+		if (!hw->modern)
+			rte_pci_ioport_unmap(VTPCI_IO(hw));
+	}
 }
 
 static void
@@ -707,16 +844,16 @@ static int
 virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
				 hw->vtnet_hdr_size;
 	uint32_t frame_size = mtu + ether_hdr_len;
 	uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
 
 	max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
 
-	if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
 		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
-			ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
+			RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
 		return -EINVAL;
 	}
 	return 0;
@@ -725,10 +862,12 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 static int
 virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
 	struct virtqueue *vq = rxvq->vq;
 
 	virtqueue_enable_intr(vq);
+	virtio_mb(hw->weak_barriers);
 	return 0;
 }
 
@@ -778,6 +917,21 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
	.mac_addr_set = virtio_mac_addr_set,
 };
 
+/*
+ * dev_ops for virtio-user in secondary processes, as we just have
+ * some limited supports currently.
+ */
+const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
+	.dev_infos_get = virtio_dev_info_get,
+	.stats_get = virtio_dev_stats_get,
+	.xstats_get = virtio_dev_xstats_get,
+	.xstats_get_names = virtio_dev_xstats_get_names,
+	.stats_reset = virtio_dev_stats_reset,
+	.xstats_reset = virtio_dev_stats_reset,
+	/* collect stats per queue */
+	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+};
+
 static void
 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
@@ -955,7 +1109,7 @@ virtio_set_hwaddr(struct virtio_hw *hw)
 {
 	vtpci_write_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
-			&hw->mac_addr, ETHER_ADDR_LEN);
+			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
 }
 
 static void
@@ -964,9 +1118,9 @@ virtio_get_hwaddr(struct virtio_hw *hw)
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
 		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
-			&hw->mac_addr, ETHER_ADDR_LEN);
+			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
 	} else {
-		eth_random_addr(&hw->mac_addr[0]);
+		rte_eth_random_addr(&hw->mac_addr[0]);
 		virtio_set_hwaddr(hw);
 	}
 }
@@ -987,10 +1141,10 @@ virtio_mac_table_set(struct virtio_hw *hw,
 	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
 
-	len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
+	len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
 	memcpy(ctrl.data, uc, len[0]);
 
-	len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
+	len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
 	memcpy(ctrl.data + len[0], mc, len[1]);
 
 	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
@@ -1000,11 +1154,11 @@
 }
 
 static int
-virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		    uint32_t index, uint32_t vmdq __rte_unused)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	const struct ether_addr *addrs = dev->data->mac_addrs;
+	const struct rte_ether_addr *addrs = dev->data->mac_addrs;
 	unsigned int i;
 	struct virtio_net_ctrl_mac *uc, *mc;
 
@@ -1013,18 +1167,20 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 		return -EINVAL;
 	}
 
-	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(uc->entries));
 	uc->entries = 0;
-	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(mc->entries));
 	mc->entries = 0;
 
 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
-		const struct ether_addr *addr
+		const struct rte_ether_addr *addr
			= (i == index) ? mac_addr : addrs + i;
 		struct virtio_net_ctrl_mac *tbl
-			= is_multicast_ether_addr(addr) ? mc : uc;
+			= rte_is_multicast_ether_addr(addr) ? mc : uc;
 
-		memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
+		memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
 	}
 
 	return virtio_mac_table_set(hw, uc, mc);
@@ -1034,7 +1190,7 @@ static void
 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct ether_addr *addrs = dev->data->mac_addrs;
+	struct rte_ether_addr *addrs = dev->data->mac_addrs;
 	struct virtio_net_ctrl_mac *uc, *mc;
 	unsigned int i;
 
@@ -1043,40 +1199,43 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 		return;
 	}
 
-	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
+	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(uc->entries));
 	uc->entries = 0;
-	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
+	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(mc->entries));
 	mc->entries = 0;
 
 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
 		struct virtio_net_ctrl_mac *tbl;
 
-		if (i == index || is_zero_ether_addr(addrs + i))
+		if (i == index || rte_is_zero_ether_addr(addrs + i))
			continue;
 
-		tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
-		memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
+		tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
+		memcpy(&tbl->macs[tbl->entries++], addrs + i,
			RTE_ETHER_ADDR_LEN);
 	}
 
 	virtio_mac_table_set(hw, uc, mc);
 }
 
 static int
-virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 
-	memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);
+	memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
 
 	/* Use atomic update if available */
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
 		struct virtio_pmd_ctrl ctrl;
-		int len = ETHER_ADDR_LEN;
+		int len = RTE_ETHER_ADDR_LEN;
 
 		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
 		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
 
-		memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
+		memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
		return virtio_send_command(hw->cvq, &ctrl, &len, 1);
 	}
 
@@ -1155,7 +1314,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
			offsetof(struct virtio_net_config, mtu),
			&config.mtu, sizeof(config.mtu));
 
-		if (config.mtu < ETHER_MIN_MTU)
+		if (config.mtu < RTE_ETHER_MIN_MTU)
			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
 	}
 
@@ -1253,7 +1412,7 @@ virtio_notify_peers(struct rte_eth_dev *dev)
		return;
 
 	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
-			(struct ether_addr *)hw->mac_addr);
+			(struct rte_ether_addr *)hw->mac_addr);
 	if (rarp_mbuf == NULL) {
		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
		return;
 	}
@@ -1326,35 +1485,60 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
 
-	if (hw->use_simple_rx) {
-		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
-	} else if (hw->use_inorder_rx) {
-		PMD_INIT_LOG(INFO,
-			"virtio: using inorder mergeable buffer Rx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
-	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
+	if (vtpci_packed_queue(hw)) {
		PMD_INIT_LOG(INFO,
-			"virtio: using mergeable buffer Rx path on port %u",
+			"virtio: using packed ring %s Tx path on port %u",
+			hw->use_inorder_tx ? "inorder" : "standard",
			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
 	} else {
-		PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+		if (hw->use_inorder_tx) {
+			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+		} else {
+			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+		}
 	}
 
-	if (hw->use_inorder_tx) {
-		PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+	if (vtpci_packed_queue(hw)) {
+		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using packed ring mergeable buffer Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst =
+				&virtio_recv_mergeable_pkts_packed;
+		} else {
+			PMD_INIT_LOG(INFO,
+				"virtio: using packed ring standard Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+		}
 	} else {
-		PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+		if (hw->use_simple_rx) {
+			PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+		} else if (hw->use_inorder_rx) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using inorder Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
+		} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using mergeable buffer Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+		} else {
+			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+		}
 	}
+
 }
 
 /* Only support 1:1 queue/interrupt mapping so far.
@@ -1472,10 +1656,10 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;
 
-	if (!hw->virtio_user_dev) {
+	hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
+	if (!hw->virtio_user_dev)
		pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-		rte_eth_copy_pci_info(eth_dev, pci_dev);
-	}
 
 	/* If host does not support both status and MSI-X then disable LSC */
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
@@ -1486,14 +1670,15 @@
 	/* Setting up rx_header size for the device */
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
-	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
 
 	/* Copy the permanent MAC address to: virtio_hw */
 	virtio_get_hwaddr(hw);
-	ether_addr_copy((struct ether_addr *) hw->mac_addr,
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
 
 	PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
@@ -1541,7 +1726,7 @@
		 * time, but check again in case it has changed since
		 * then, which should not happen.
		 */
-		if (config->mtu < ETHER_MIN_MTU) {
+		if (config->mtu < RTE_ETHER_MIN_MTU) {
			PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
				config->mtu);
			return -1;
@@ -1552,7 +1737,7 @@
		eth_dev->data->mtu = config->mtu;
 	} else {
-		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
			VLAN_TAG_LEN - hw->vtnet_hdr_size;
 	}
 
@@ -1567,7 +1752,7 @@
 	} else {
		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
		hw->max_queue_pairs = 1;
-		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
			VLAN_TAG_LEN - hw->vtnet_hdr_size;
 	}
 
@@ -1578,6 +1763,7 @@
 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (virtio_configure_intr(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "failed to configure interrupt");
+			virtio_free_queues(hw);
			return -1;
		}
 	}
@@ -1665,12 +1851,19 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
		return 0;
 	}
 
+	/*
+	 * Pass the information to the rte_eth_dev_close() that it should also
+	 * release the private port resources.
+	 */
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
+	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
+				VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
-			VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
+			VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
 	}
 
@@ -1681,18 +1874,27 @@
 	if (!hw->virtio_user_dev) {
		ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
		if (ret)
-			goto out;
+			goto err_vtpci_init;
 	}
 
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
-		goto out;
+		goto err_virtio_init;
+
+	hw->opened = true;
 
 	return 0;
 
-out:
+err_virtio_init:
+	if (!hw->virtio_user_dev) {
+		rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
+		if (!hw->modern)
+			rte_pci_ioport_unmap(VTPCI_IO(hw));
+	}
+err_vtpci_init:
	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
	return ret;
 }
 
@@ -1711,9 +1913,6 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
 	eth_dev->tx_pkt_burst = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 
-	if (eth_dev->device)
-		rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
-
 	PMD_INIT_LOG(DEBUG, "dev_uninit completed");
 
 	return 0;
@@ -1775,7 +1974,13 @@ static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+	int ret;
+
+	ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+	/* Port has already been released by close. */
+	if (ret == -ENODEV)
+		ret = 0;
+	return ret;
 }
 
 static struct rte_pci_driver rte_virtio_pmd = {
@@ -1820,6 +2025,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
 	struct virtio_hw *hw = dev->data->dev_private;
+	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
+		hw->vtnet_hdr_size;
 	uint64_t rx_offloads = rxmode->offloads;
 	uint64_t tx_offloads = txmode->offloads;
 	uint64_t req_features;
@@ -1834,6 +2041,9 @@
		return ret;
 	}
 
+	if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+
 	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
			   DEV_RX_OFFLOAD_TCP_CKSUM))
		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
@@ -1906,12 +2116,13 @@
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
		hw->use_inorder_tx = 1;
-		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-			hw->use_inorder_rx = 1;
-			hw->use_simple_rx = 0;
-		} else {
-			hw->use_inorder_rx = 0;
-		}
+		hw->use_inorder_rx = 1;
+		hw->use_simple_rx = 0;
+	}
+
+	if (vtpci_packed_queue(hw)) {
+		hw->use_simple_rx = 0;
+		hw->use_inorder_rx = 0;
 	}
 
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
@@ -1929,8 +2140,6 @@
			   DEV_RX_OFFLOAD_VLAN_STRIP))
		hw->use_simple_rx = 0;
 
-	hw->opened = true;
-
	return 0;
 }
 
@@ -2185,6 +2394,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	host_features = VTPCI_OPS(hw)->get_features(hw);
 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM |
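[Editorial note, not part of the patch] The recurring pattern in the new virtio_send_command_packed() is the packed-ring slot advance: each consumed avail slot decrements the free count, and whenever vq_avail_idx wraps past the ring size the cached AVAIL/USED flag bits are toggled so the next lap publishes descriptors with the inverted wrap state; the completion side mirrors this bookkeeping on vq_used_cons_idx and used_wrap_counter. A distilled helper is sketched below, assuming the driver-internal virtqueue types are in scope; the name sketch_packed_avail_advance is hypothetical.

	/* Distilled from the three copies of this pattern in
	 * virtio_send_command_packed(); illustrative only. */
	static inline void
	sketch_packed_avail_advance(struct virtqueue *vq)
	{
		vq->vq_free_cnt--;
		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			/* New lap: flip both the AVAIL and USED bits in the
			 * flags template applied to subsequent descriptors. */
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}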