The async vhost callback ops should return a negative value when something
goes wrong in the callback, so the return type should be changed to int32_t.
The issue in the vhost example is also fixed.
Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 6e9a9d2a02ae ("examples/vhost: fix ioat dependency")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
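
For context, a minimal standalone sketch (not part of this patch) of why the
signed return type matters: with the old uint32_t return type, an error code
of -1 wrapped around to a huge unsigned value and was indistinguishable from
a valid count, so callers could not detect failure.

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		/* Old contract: a -1 error wraps to 4294967295 when the
		 * callback returns uint32_t, so it looks like a count. */
		uint32_t old_ret = (uint32_t)-1;
		printf("old contract: %" PRIu32 " descs 'processed'\n", old_ret);

		/* New contract: int32_t keeps the sign, so the caller can
		 * detect the error and treat it as zero progress. */
		int32_t new_ret = -1;
		if (new_ret < 0)
			printf("new contract: error detected\n");
		return 0;
	}
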
- uint32_t
+ int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
	struct rte_vhost_async_desc *descs,
	struct rte_vhost_async_status *opaque_data, uint16_t count)
- uint32_t
+ int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
	struct rte_vhost_async_status *opaque_data,
	uint16_t max_packets)
#ifdef RTE_RAW_IOAT
int open_ioat(const char *value);
- uint32_t
+ int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
	struct rte_vhost_async_desc *descs,
	struct rte_vhost_async_status *opaque_data, uint16_t count);
- uint32_t
+ int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
	struct rte_vhost_async_status *opaque_data,
	uint16_t max_packets);
#else
- static uint32_t __rte_unused
+ static int32_t __rte_unused
ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
	struct rte_vhost_async_desc *descs __rte_unused,
	struct rte_vhost_async_status *opaque_data __rte_unused,
	uint16_t count __rte_unused)
- static uint32_t __rte_unused
+ static int32_t __rte_unused
ioat_check_completed_copies_cb(int vid __rte_unused,
	uint16_t queue_id __rte_unused,
	struct rte_vhost_async_status *opaque_data __rte_unused,
	uint16_t max_packets __rte_unused)
* @param count
* number of elements in the "descs" array
* @return
- * number of descs processed
+ * number of descs processed, negative value means error
- uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+ int32_t (*transfer_data)(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
* @param max_packets
* max number of packets could be completed
* @return
- * number of async descs completed
+ * number of async descs completed, negative value means error
- uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+ int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
};
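
To illustrate the updated ops contract, here is a hedged skeleton of an
application-side callback pair. my_dma_copy() and my_dma_poll() are
hypothetical stand-ins for a real DMA backend, not DPDK APIs, and the
channel registration step is omitted.

	#include <stdint.h>
	#include <rte_common.h>
	#include <rte_vhost_async.h>

	/* Hypothetical DMA backend hooks, assumed for illustration only. */
	extern int my_dma_copy(struct rte_vhost_async_desc *descs, uint16_t count);
	extern int my_dma_poll(uint16_t max_packets);

	static int32_t
	my_transfer_data(int vid, uint16_t queue_id,
			struct rte_vhost_async_desc *descs,
			struct rte_vhost_async_status *opaque_data, uint16_t count)
	{
		int ret;

		RTE_SET_USED(vid);
		RTE_SET_USED(queue_id);
		RTE_SET_USED(opaque_data);

		ret = my_dma_copy(descs, count);
		if (ret < 0)
			return -1;	/* negative value means error */
		return ret;		/* number of descs enqueued */
	}

	static int32_t
	my_check_completed_copies(int vid, uint16_t queue_id,
			struct rte_vhost_async_status *opaque_data,
			uint16_t max_packets)
	{
		int ret;

		RTE_SET_USED(vid);
		RTE_SET_USED(queue_id);
		RTE_SET_USED(opaque_data);

		ret = my_dma_poll(max_packets);
		if (ret < 0)
			return -1;	/* negative value means error */
		return ret;		/* number of packets completed */
	}

	static struct rte_vhost_async_channel_ops my_ops = {
		.transfer_data = my_transfer_data,
		.check_completed_copies = my_check_completed_copies,
	};
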
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_xfer;
struct {
	uint16_t pkt_idx;
	uint16_t last_avail_idx;
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
-	queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
+	queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_xfer;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
+ int32_t n_cpl;
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
-	n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
-		queue_id, 0, count - vq->async_last_pkts_n);
+ if (count > vq->async_last_pkts_n) {
+	n_cpl = vq->async_ops.check_completed_copies(vid,
+		queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_cpl >= 0) {
+ n_pkts_cpl = n_cpl;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to check completed copies for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);