X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvirtio_net.c;h=b779034dca83b9f50f67763858960acefc605c08;hb=9253c34cfb9dd32faeb8e513f6d8fafd356a2e62;hp=1b233279caec48d6813b848232af2e9effcd24aa;hpb=77888192967c7cfbbdc6836e712660e967a912c6;p=dpdk.git diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 1b233279ca..b779034dca 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -222,8 +222,9 @@ vhost_flush_dequeue_shadow_packed(struct virtio_net *dev, struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0]; vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id; - rte_smp_wmb(); - vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags; + /* desc flags is the synchronization point for virtio packed vring */ + __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags, + used_elem->flags, __ATOMIC_RELEASE); vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx * sizeof(struct vring_packed_desc), @@ -1281,8 +1282,6 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev, return -1; } - rte_smp_rmb(); - vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) lens[i] = descs[avail_idx + i].len; @@ -1343,7 +1342,6 @@ virtio_dev_rx_single_packed(struct virtio_net *dev, struct buf_vector buf_vec[BUF_VECTOR_MAX]; uint16_t nr_descs = 0; - rte_smp_rmb(); if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec, &nr_descs) < 0)) { VHOST_LOG_DATA(DEBUG, @@ -1474,37 +1472,6 @@ virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx, (vq_size - n_inflight + pkts_idx) & (vq_size - 1); } -static __rte_always_inline void -virtio_dev_rx_async_submit_split_err(struct virtio_net *dev, - struct vhost_virtqueue *vq, uint16_t queue_id, - uint16_t last_idx, uint16_t shadow_idx) -{ - uint16_t start_idx, pkts_idx, vq_size; - uint64_t *async_pending_info; - - pkts_idx = vq->async_pkts_idx; - async_pending_info = vq->async_pending_info; - vq_size = vq->size; - start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx, - vq_size, vq->async_pkts_inflight_n); - - while (likely((start_idx & (vq_size - 1)) != pkts_idx)) { - uint64_t n_seg = - async_pending_info[(start_idx) & (vq_size - 1)] >> - ASYNC_PENDING_INFO_N_SFT; - - while (n_seg) - n_seg -= vq->async_ops.check_completed_copies(dev->vid, - queue_id, 0, 1); - } - - vq->async_pkts_inflight_n = 0; - vq->batch_copy_nb_elems = 0; - - vq->shadow_used_idx = shadow_idx; - vq->last_avail_idx = last_idx; -} - static __rte_noinline uint32_t virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t queue_id, @@ -1513,7 +1480,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, uint32_t pkt_idx = 0, pkt_burst_idx = 0; uint16_t num_buffers; struct buf_vector buf_vec[BUF_VECTOR_MAX]; - uint16_t avail_head, last_idx, shadow_idx; + uint16_t avail_head; struct rte_vhost_iov_iter *it_pool = vq->it_pool; struct iovec *vec_pool = vq->vec_pool; @@ -1522,18 +1489,16 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1); struct rte_vhost_iov_iter *src_it = it_pool; struct rte_vhost_iov_iter *dst_it = it_pool + 1; - uint16_t n_free_slot, slot_idx; + uint16_t n_free_slot, slot_idx = 0; + uint16_t pkt_err = 0; + uint16_t segs_await = 0; + struct async_inflight_info *pkts_info = vq->async_pkts_info; int n_pkts = 0; - avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE); - last_idx = vq->last_avail_idx; - shadow_idx = vq->shadow_used_idx; - /* - * The 
ordering between avail index and - * desc reads needs to be enforced. + * The ordering between avail index and desc reads need to be enforced. */ - rte_smp_rmb(); + avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE); rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); @@ -1566,56 +1531,75 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, if (src_it->count) { async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it); pkt_burst_idx++; - vq->async_pending_info[slot_idx] = - num_buffers | (src_it->nr_segs << 16); + pkts_info[slot_idx].descs = num_buffers; + pkts_info[slot_idx].segs = src_it->nr_segs; src_iovec += src_it->nr_segs; dst_iovec += dst_it->nr_segs; src_it += 2; dst_it += 2; + segs_await += src_it->nr_segs; } else { - vq->async_pending_info[slot_idx] = num_buffers; + pkts_info[slot_idx].info = num_buffers; vq->async_pkts_inflight_n++; } vq->last_avail_idx += num_buffers; + /* + * conditions to trigger async device transfer: + * - buffered packet number reaches transfer threshold + * - this is the last packet in the burst enqueue + * - unused async iov number is less than max vhost vector + */ if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD || - (pkt_idx == count - 1 && pkt_burst_idx)) { + (pkt_idx == count - 1 && pkt_burst_idx) || + (VHOST_MAX_ASYNC_VEC / 2 - segs_await < + BUF_VECTOR_MAX)) { n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx); src_iovec = vec_pool; dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1); src_it = it_pool; dst_it = it_pool + 1; + segs_await = 0; + vq->async_pkts_inflight_n += pkt_burst_idx; if (unlikely(n_pkts < (int)pkt_burst_idx)) { - vq->async_pkts_inflight_n += - n_pkts > 0 ? n_pkts : 0; - virtio_dev_rx_async_submit_split_err(dev, - vq, queue_id, last_idx, shadow_idx); - return 0; + /* + * log error packets number here and do actual + * error processing when applications poll + * completion + */ + pkt_err = pkt_burst_idx - n_pkts; + pkt_burst_idx = 0; + break; } pkt_burst_idx = 0; - vq->async_pkts_inflight_n += n_pkts; } } if (pkt_burst_idx) { n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx); - if (unlikely(n_pkts < (int)pkt_burst_idx)) { - vq->async_pkts_inflight_n += n_pkts > 0 ? 
n_pkts : 0; - virtio_dev_rx_async_submit_split_err(dev, vq, queue_id, - last_idx, shadow_idx); - return 0; - } + vq->async_pkts_inflight_n += pkt_burst_idx; - vq->async_pkts_inflight_n += n_pkts; + if (unlikely(n_pkts < (int)pkt_burst_idx)) + pkt_err = pkt_burst_idx - n_pkts; } do_data_copy_enqueue(dev, vq); + while (unlikely(pkt_err && pkt_idx)) { + if (pkts_info[slot_idx].segs) + pkt_err--; + vq->last_avail_idx -= pkts_info[slot_idx].descs; + vq->shadow_used_idx -= pkts_info[slot_idx].descs; + vq->async_pkts_inflight_n--; + slot_idx = (slot_idx - 1) & (vq->size - 1); + pkt_idx--; + } + n_free_slot = vq->size - vq->async_pkts_idx; if (n_free_slot > pkt_idx) { rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx], @@ -1641,10 +1625,10 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id, { struct virtio_net *dev = get_device(vid); struct vhost_virtqueue *vq; - uint16_t n_segs_cpl, n_pkts_put = 0, n_descs = 0; + uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0; uint16_t start_idx, pkts_idx, vq_size; uint16_t n_inflight; - uint64_t *async_pending_info; + struct async_inflight_info *pkts_info; if (!dev) return 0; @@ -1658,51 +1642,40 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id, vq = dev->virtqueue[queue_id]; + if (unlikely(!vq->async_registered)) { + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n", + dev->vid, __func__, queue_id); + return 0; + } + rte_spinlock_lock(&vq->access_lock); n_inflight = vq->async_pkts_inflight_n; pkts_idx = vq->async_pkts_idx; - async_pending_info = vq->async_pending_info; + pkts_info = vq->async_pkts_info; vq_size = vq->size; start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx, vq_size, vq->async_pkts_inflight_n); - n_segs_cpl = vq->async_ops.check_completed_copies(vid, queue_id, - 0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) + - vq->async_last_seg_n; + if (count > vq->async_last_pkts_n) + n_pkts_cpl = vq->async_ops.check_completed_copies(vid, + queue_id, 0, count - vq->async_last_pkts_n); + n_pkts_cpl += vq->async_last_pkts_n; rte_smp_wmb(); while (likely((n_pkts_put < count) && n_inflight)) { - uint64_t info = async_pending_info[ - (start_idx + n_pkts_put) & (vq_size - 1)]; - uint64_t n_segs; + uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1); + if (n_pkts_cpl && pkts_info[info_idx].segs) + n_pkts_cpl--; + else if (!n_pkts_cpl && pkts_info[info_idx].segs) + break; n_pkts_put++; n_inflight--; - n_descs += info & ASYNC_PENDING_INFO_N_MSK; - n_segs = info >> ASYNC_PENDING_INFO_N_SFT; - - if (n_segs) { - if (unlikely(n_segs_cpl < n_segs)) { - n_pkts_put--; - n_inflight++; - n_descs -= info & ASYNC_PENDING_INFO_N_MSK; - if (n_segs_cpl) { - async_pending_info[ - (start_idx + n_pkts_put) & - (vq_size - 1)] = - ((n_segs - n_segs_cpl) << - ASYNC_PENDING_INFO_N_SFT) | - (info & ASYNC_PENDING_INFO_N_MSK); - n_segs_cpl = 0; - } - break; - } - n_segs_cpl -= n_segs; - } + n_descs += pkts_info[info_idx].descs; } - vq->async_last_seg_n = n_segs_cpl; + vq->async_last_pkts_n = n_pkts_cpl; if (n_pkts_put) { vq->async_pkts_inflight_n = n_inflight; @@ -1711,16 +1684,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id, n_descs, __ATOMIC_RELEASE); vhost_vring_call_split(dev, vq); } - } - if (start_idx + n_pkts_put <= vq_size) { - rte_memcpy(pkts, &vq->async_pkts_pending[start_idx], - n_pkts_put * sizeof(uintptr_t)); - } else { - rte_memcpy(pkts, &vq->async_pkts_pending[start_idx], - (vq_size - start_idx) * sizeof(uintptr_t)); - rte_memcpy(&pkts[vq_size - start_idx], 
vq->async_pkts_pending, - (n_pkts_put - vq_size + start_idx) * sizeof(uintptr_t)); + if (start_idx + n_pkts_put <= vq_size) { + rte_memcpy(pkts, &vq->async_pkts_pending[start_idx], + n_pkts_put * sizeof(uintptr_t)); + } else { + rte_memcpy(pkts, &vq->async_pkts_pending[start_idx], + (vq_size - start_idx) * sizeof(uintptr_t)); + rte_memcpy(&pkts[vq_size - start_idx], + vq->async_pkts_pending, + (n_pkts_put + start_idx - vq_size) * + sizeof(uintptr_t)); + } } rte_spinlock_unlock(&vq->access_lock); @@ -1734,7 +1709,6 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id, { struct vhost_virtqueue *vq; uint32_t nb_tx = 0; - bool drawback = false; VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__); if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) { @@ -1747,13 +1721,8 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id, rte_spinlock_lock(&vq->access_lock); - if (unlikely(vq->enabled == 0)) - goto out_access_unlock; - - if (unlikely(!vq->async_registered)) { - drawback = true; + if (unlikely(vq->enabled == 0 || !vq->async_registered)) goto out_access_unlock; - } if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) vhost_user_iotlb_rd_lock(vq); @@ -1780,9 +1749,6 @@ out: out_access_unlock: rte_spinlock_unlock(&vq->access_lock); - if (drawback) - return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, count); - return nb_tx; } @@ -1847,7 +1813,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr) case RTE_ETHER_TYPE_IPV4: ipv4_hdr = l3_hdr; *l4_proto = ipv4_hdr->next_proto_id; - m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4; + m->l3_len = rte_ipv4_hdr_len(ipv4_hdr); *l4_hdr = (char *)l3_hdr + m->l3_len; m->ol_flags |= PKT_TX_IPV4; break;
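
Note on the barrier changes above: the patch drops the explicit rte_smp_wmb()/rte_smp_rmb() pairs in favour of one-sided C11-style barriers, a release store on the packed descriptor flags and an acquire load of the split ring's avail index. The sketch below is not part of the patch; the struct layout and function names are illustrative stand-ins that show the idiom in isolation.

#include <stdint.h>

/* Field order mirrors a virtio packed-ring descriptor. */
struct packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/*
 * Writer side: fill the descriptor body first, then publish it by storing
 * the flags with release semantics, so the body is visible before the
 * flags flip.  This is what stands in for the removed rte_smp_wmb().
 */
static inline void
publish_desc(struct packed_desc *desc, uint64_t addr, uint32_t len,
	     uint16_t id, uint16_t flags)
{
	desc->addr = addr;
	desc->len = len;
	desc->id = id;
	/* the flags store is the synchronization point */
	__atomic_store_n(&desc->flags, flags, __ATOMIC_RELEASE);
}

/*
 * Reader side: an acquire load of the producer index orders every later
 * descriptor read after it, so no separate rte_smp_rmb() is needed.
 */
static inline uint16_t
load_producer_idx(const uint16_t *idx)
{
	return __atomic_load_n(idx, __ATOMIC_ACQUIRE);
}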
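
Note on the async enqueue rework: virtio_dev_rx_async_submit_split_err() used to spin on check_completed_copies() inside the submission path whenever transfer_data() accepted fewer packets than were handed to it. The patch instead records the shortfall in pkt_err and unwinds state after the copy loop, walking slot_idx backwards to give back the avail-ring slots and shadow used entries of every packet from the first failed transfer onwards, and leaves the real completion handling to rte_vhost_poll_enqueue_completed(). The per-packet bookkeeping also moves from a packed 64-bit word (descriptor count plus segment count shifted by ASYNC_PENDING_INFO_N_SFT) to the async_pkts_info array, whose descs/segs fields both the rollback loop and the completion poller index directly. The following is a condensed, illustrative sketch of that rollback; the struct and the ring_mask/pending parameters are hypothetical stand-ins for the corresponding vhost_virtqueue fields.

#include <stdint.h>

/* mirrors the per-packet descs/segs bookkeeping the patch keeps */
struct inflight_info {
	uint16_t descs;	/* vring descriptors consumed by the packet */
	uint16_t segs;	/* DMA segments submitted; 0 means CPU-copied */
};

/*
 * Illustrative only: undo bookkeeping for packets whose DMA transfer was
 * never accepted.  "pending", "ring_mask" and the *_idx pointers stand in
 * for the vq->async_pkts_info, vq->size - 1 and vq counters in the patch.
 */
static void
rollback_failed_enqueue(struct inflight_info *pending, uint16_t ring_mask,
			uint16_t slot_idx, uint16_t pkt_idx, uint16_t pkt_err,
			uint16_t *last_avail_idx, uint16_t *shadow_used_idx,
			uint16_t *inflight_n)
{
	/*
	 * Walk back from the newest slot until the failed DMA packets
	 * (segs != 0) have all been accounted for; CPU-copied packets
	 * in between are unwound as well.
	 */
	while (pkt_err && pkt_idx) {
		if (pending[slot_idx].segs)
			pkt_err--;
		*last_avail_idx -= pending[slot_idx].descs;
		*shadow_used_idx -= pending[slot_idx].descs;
		(*inflight_n)--;
		slot_idx = (slot_idx - 1) & ring_mask;
		pkt_idx--;
	}
}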
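
Note on the parse_ethernet() hunk: the open-coded (ipv4_hdr->version_ihl & 0x0f) * 4 becomes rte_ipv4_hdr_len(). Both compute the same value; the IHL field occupies the low nibble of version_ihl and counts 32-bit words, so the header length in bytes is IHL * 4 (20 bytes for a header with no options). A restatement of the computation, for reference only:

#include <stdint.h>

/*
 * IPv4 header length in bytes from the version/IHL byte: the low nibble
 * is the IHL in 32-bit words (e.g. 0x45 -> IHL 5 -> 20 bytes).
 */
static inline uint8_t
ipv4_hdr_len_bytes(uint8_t version_ihl)
{
	return (uint8_t)((version_ihl & 0x0f) * 4);
}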