hw->zero_req_count = 0;
rte_memcpy(desc->dst, desc->src, desc->len);
- hw->completed_count++;
+ __atomic_add_fetch(&hw->completed_count, 1, __ATOMIC_RELEASE);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
return vchan_setup(hw, conf->nb_desc);
}
+/* Report whether the (single) vchan is idle or still processing jobs.
+ * Backs rte_dma_vchan_status(); primarily intended for test use.
+ */
+static int
+skeldma_vchan_status(const struct rte_dma_dev *dev,
+		uint16_t vchan, enum rte_dma_vchan_status *status)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	/* Skeleton driver supports one vchan, so the index is unused. */
+	RTE_SET_USED(vchan);
+
+	*status = RTE_DMA_VCHAN_IDLE;
+	/* Active if the copy thread has not completed every submitted request
+	 * (the ACQUIRE load pairs with the RELEASE increment in the copy
+	 * thread), or if its last poll still found pending work
+	 * (zero_req_count is reset to 0 whenever a request is dequeued).
+	 */
+	if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
+			|| hw->zero_req_count == 0)
+		*status = RTE_DMA_VCHAN_ACTIVE;
+	return 0;
+}
+
static int
skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
struct rte_dma_stats *stats, uint32_t stats_sz)
.dev_close = skeldma_close,
.vchan_setup = skeldma_vchan_setup,
+ .vchan_status = skeldma_vchan_status,
.stats_get = skeldma_stats_get,
.stats_reset = skeldma_stats_reset,
/* Cache delimiter for cpucopy thread's operation data */
char cache2 __rte_cache_aligned;
- uint32_t zero_req_count;
+ volatile uint32_t zero_req_count;
uint64_t completed_count;
};
return (*dev->dev_ops->stats_reset)(dev, vchan);
}
+/* Public entry point: validate arguments, then delegate to the driver op. */
+int
+rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
+{
+	struct rte_dma_dev *dev;
+
+	/* Validate dev_id (and the out-pointer) BEFORE indexing
+	 * rte_dma_devices[]: a negative or out-of-range id would make the
+	 * array indexing undefined behavior.
+	 */
+	if (!rte_dma_is_valid(dev_id) || status == NULL)
+		return -EINVAL;
+	dev = &rte_dma_devices[dev_id];
+
+	if (vchan >= dev->data->dev_conf.nb_vchans) {
+		/* %d: dev_id is int16_t; RTE_DMA_LOG appends the newline. */
+		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
+	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
+}
+
static const char *
dma_capability_name(uint64_t capability)
{
__rte_experimental
int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+/**
+ * DMA virtual channel status.
+ *
+ * Enumerates the possible states of a vchan: idle, actively processing,
+ * or halted due to an error and unable to accept new operations.
+ *
+ * @see rte_dma_vchan_status
+ */
+enum rte_dma_vchan_status {
+	RTE_DMA_VCHAN_IDLE, /**< not processing, awaiting ops */
+	RTE_DMA_VCHAN_ACTIVE, /**< currently processing jobs */
+	RTE_DMA_VCHAN_HALTED_ERROR, /**< not processing due to error, cannot accept new ops */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Determine if all jobs have completed on a device channel.
+ * This function is primarily designed for testing use, as it allows a process to check if
+ * all jobs are completed, without actually gathering completions from those jobs.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of virtual DMA channel.
+ * @param[out] status
+ *   The vchan status. Only valid when 0 is returned.
+ * @return
+ * 0 - call completed successfully
+ * < 0 - error code indicating there was a problem calling the API
+ */
+__rte_experimental
+int
+rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status);
+
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
/** @internal Used to reset basic statistics. */
typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+/** @internal Used to check if a virtual channel has finished all jobs. */
+typedef int (*rte_dma_vchan_status_t)(const struct rte_dma_dev *dev, uint16_t vchan,
+ enum rte_dma_vchan_status *status);
+
/** @internal Used to dump internal information. */
typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
rte_dma_stats_get_t stats_get;
rte_dma_stats_reset_t stats_reset;
+ rte_dma_vchan_status_t vchan_status;
rte_dma_dump_t dev_dump;
};
rte_dma_stop;
rte_dma_submit;
rte_dma_vchan_setup;
+ rte_dma_vchan_status;
local: *;
};