#define MAX_PKT_BURST 32
#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
-#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
struct rte_vhost_async_channel_ops ops;
struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
- struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+ struct iovec src_iovec[VHOST_MAX_ASYNC_VEC];
+ struct iovec dst_iovec[VHOST_MAX_ASYNC_VEC];
/* data transfer status */
struct async_inflight_info *pkts_info;
struct vhost_async *async = vq->async;
struct rte_vhost_iov_iter *it_pool = async->it_pool;
- struct iovec *vec_pool = async->vec_pool;
struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
- struct iovec *src_iovec = vec_pool;
- struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+ struct iovec *src_iovec = async->src_iovec;
+ struct iovec *dst_iovec = async->dst_iovec;
struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
int32_t n_xfer;
- uint16_t segs_await = 0;
uint16_t iovec_idx = 0, it_idx = 0, slot_idx = 0;
/*
pkts_info[slot_idx].mbuf = pkts[pkt_idx];
iovec_idx += it_pool[it_idx].nr_segs;
- segs_await += it_pool[it_idx].nr_segs;
it_idx += 2;
vq->last_avail_idx += num_buffers;
* - unused async iov number is less than max vhost vector
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
- ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
- BUF_VECTOR_MAX))) {
+ (VHOST_MAX_ASYNC_VEC - iovec_idx < BUF_VECTOR_MAX))) {
n_xfer = async->ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
if (likely(n_xfer >= 0)) {
iovec_idx = 0;
it_idx = 0;
- segs_await = 0;
if (unlikely(n_pkts < pkt_burst_idx)) {
/*
if (unlikely(++tries > max_tries))
return -1;
- if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
- &buf_id, &len, VHOST_ACCESS_RW) < 0))
+ if (unlikely(fill_vec_buf_packed(dev, vq,
+ avail_idx, &desc_count,
+ buf_vec, &nr_vec,
+ &buf_id, &len,
+ VHOST_ACCESS_RW) < 0))
return -1;
len = RTE_MIN(len, size);
struct vhost_async *async = vq->async;
struct rte_vhost_iov_iter *it_pool = async->it_pool;
- struct iovec *vec_pool = async->vec_pool;
struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
- struct iovec *src_iovec = vec_pool;
- struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+ struct iovec *src_iovec = async->src_iovec;
+ struct iovec *dst_iovec = async->dst_iovec;
struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint16_t slot_idx = 0;
- uint16_t segs_await = 0;
uint16_t iovec_idx = 0, it_idx = 0;
do {
pkts_info[slot_idx].nr_buffers = num_buffers;
pkts_info[slot_idx].mbuf = pkts[pkt_idx];
iovec_idx += it_pool[it_idx].nr_segs;
- segs_await += it_pool[it_idx].nr_segs;
it_idx += 2;
pkt_idx++;
* - unused async iov number is less than max vhost vector
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
- ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
+ (VHOST_MAX_ASYNC_VEC - iovec_idx < BUF_VECTOR_MAX))) {
n_xfer = async->ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
if (likely(n_xfer >= 0)) {
iovec_idx = 0;
it_idx = 0;
- segs_await = 0;
if (unlikely(n_pkts < pkt_burst_idx)) {
/*
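For context, the hunks above replace the single vec_pool (BUF_VECTOR_MAX * 4 entries, addressed as a source half and a destination half) with dedicated src_iovec/dst_iovec arrays of VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2) entries each, so per-direction capacity is unchanged while the half-pool pointer arithmetic and the separate segs_await counter go away; a single iovec_idx now tracks consumption of both arrays. Below is a minimal stand-alone sketch (not patch code) of the resulting flush condition, assuming BUF_VECTOR_MAX is 256 as in lib/vhost/vhost.h; the helper and the 4-segment packet size are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define BUF_VECTOR_MAX		256	/* assumed value, as in lib/vhost/vhost.h */
#define VHOST_MAX_ASYNC_VEC	(BUF_VECTOR_MAX * 2)

/*
 * Stand-alone model of the batch flush decision used above:
 * src_iovec[] and dst_iovec[] are filled in lock-step, so one iovec_idx
 * is enough and the old segs_await counter becomes redundant.
 */
static int batch_must_flush(uint16_t iovec_idx)
{
	/*
	 * A single packet may need up to BUF_VECTOR_MAX iovecs, so flush
	 * once fewer than that many free slots remain.
	 */
	return (VHOST_MAX_ASYNC_VEC - iovec_idx) < BUF_VECTOR_MAX;
}

int main(void)
{
	uint16_t iovec_idx = 0;
	unsigned int pkts = 0;

	/* Pretend every enqueued packet consumes 4 copy segments. */
	while (!batch_must_flush(iovec_idx)) {
		iovec_idx += 4;
		pkts++;
	}

	printf("flush after %u packets, %u iovecs used\n", pkts, iovec_idx);
	return 0;
}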