	.dev_info_get = idxd_info_get,
	.stats_get = idxd_stats_get,
	.stats_reset = idxd_stats_reset,
+	.vchan_status = idxd_vchan_status,
};
static void *
}
}
+int
+idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+		enum rte_dma_vchan_status *status)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+	/* batch_idx_write points to the next free slot in the batch ring, so step
+	 * back one entry (wrapping around the ring) to reach the most recently
+	 * submitted batch and check whether its completion has been written.
+	 */
+	uint16_t last_batch_write = idxd->batch_idx_write == 0 ? idxd->max_batches :
+			idxd->batch_idx_write - 1;
+	uint8_t bstatus = (idxd->batch_comp_ring[last_batch_write].status != 0);
+
+	/* An IDXD device will always be either active or idle.
+	 * RTE_DMA_VCHAN_HALTED_ERROR is therefore not supported by IDXD.
+	 */
+	*status = bstatus ? RTE_DMA_VCHAN_IDLE : RTE_DMA_VCHAN_ACTIVE;
+
+	return 0;
+}
+
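For reference, the new op is reached through the generic dmadev API rte_dma_vchan_status(). Below is a minimal usage sketch, assuming a dmadev that has already been configured and started; dev_id, the single vchan 0 and the wait_until_idle() helper are illustrative only and are not part of this patch:

#include <rte_dmadev.h>

/* Illustrative helper: spin until the device reports no batch in flight.
 * With the idxd driver this dispatches to idxd_vchan_status() above, which
 * only ever reports IDLE or ACTIVE, never HALTED_ERROR.
 */
static int
wait_until_idle(int16_t dev_id)
{
	enum rte_dma_vchan_status st;

	do {
		if (rte_dma_vchan_status(dev_id, 0, &st) < 0)
			return -1; /* op not supported or bad arguments */
	} while (st == RTE_DMA_VCHAN_ACTIVE);

	return 0;
}
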
static __rte_always_inline int
batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
{
int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
		struct rte_dma_stats *stats, uint32_t stats_sz);
int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
+int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
+		enum rte_dma_vchan_status *status);
#endif /* _IDXD_INTERNAL_H_ */
	.stats_reset = idxd_stats_reset,
	.dev_start = idxd_pci_dev_start,
	.dev_stop = idxd_pci_dev_stop,
+	.vchan_status = idxd_vchan_status,
};
/* each portal uses 4 x 4k pages */