static int16_t dma_devices_max;
struct rte_dma_fp_object *rte_dma_fp_objs;
-struct rte_dma_dev *rte_dma_devices;
+static struct rte_dma_dev *rte_dma_devices;
static struct {
/* Hold the dev_max information of the primary process. This field is
* set by the primary process and is read by the secondary process.
return 0;
}
+/**
+ * Return the device id of the next DMA device in use, starting the scan
+ * at @start_dev_id (inclusive), or -1 when no further device exists.
+ *
+ * Slots whose state is RTE_DMA_DEV_UNUSED are skipped.
+ */
+int16_t
+rte_dma_next_dev(int16_t start_dev_id)
+{
+	int16_t dev_id = start_dev_id;
+
+	/* A negative start index would read rte_dma_devices[] out of
+	 * bounds below; treat it as "no more devices".
+	 */
+	if (dev_id < 0)
+		return -1;
+
+	/* Skip slots that were never allocated or have been released. */
+	while (dev_id < dma_devices_max &&
+	       rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
+		dev_id++;
+
+	if (dev_id < dma_devices_max)
+		return dev_id;
+
+	return -1;
+}
+
static int
dma_check_name(const char *name)
{
return (*dev->dev_ops->stats_reset)(dev, vchan);
}
+/**
+ * Query the enqueue/dequeue activity status of a virtual DMA channel.
+ *
+ * @param dev_id  Identifier of the DMA device; validated before use.
+ * @param vchan   Virtual channel index; must be below the configured
+ *                nb_vchans of the device.
+ * @param status  Output location for the channel status; must be non-NULL.
+ *
+ * @return 0 on success, -EINVAL on bad arguments, -ENOTSUP when the
+ *         driver provides no vchan_status callback, otherwise the
+ *         driver callback's return value.
+ */
+int
+rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
+{
+	struct rte_dma_dev *dev;
+
+	/* Validate dev_id before using it as an index: the original
+	 * computed &rte_dma_devices[dev_id] first, which is out-of-bounds
+	 * pointer arithmetic for an invalid id.  Also reject a NULL
+	 * status pointer instead of passing it through to the driver.
+	 */
+	if (!rte_dma_is_valid(dev_id) || status == NULL)
+		return -EINVAL;
+
+	dev = &rte_dma_devices[dev_id];
+
+	if (vchan >= dev->data->dev_conf.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
+	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
+}
+
static const char *
dma_capability_name(uint64_t capability)
{
{ RTE_DMA_CAPA_DEV_TO_DEV, "dev2dev" },
{ RTE_DMA_CAPA_SVA, "sva" },
{ RTE_DMA_CAPA_SILENT, "silent" },
+ { RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
{ RTE_DMA_CAPA_OPS_COPY, "copy" },
{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
{ RTE_DMA_CAPA_OPS_FILL, "fill" },
return 0;
}
+/* Placeholder for the fast-path burst_capacity callback: installed when a
+ * device is unconfigured or its driver supplies no implementation, so a
+ * stray call logs an error and reports zero capacity instead of jumping
+ * through a NULL function pointer.
+ */
+static uint16_t
+dummy_burst_capacity(__rte_unused const void *dev_private,
+		__rte_unused uint16_t vchan)
+{
+	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
+	return 0;
+}
+
/* Reset @obj's fast-path callbacks to the dummy_* stubs (which log an
 * error and return a failure/zero value) so calls on a device that is
 * not fully configured fail safely.  NOTE(review): this hunk may elide
 * earlier assignments (e.g. copy/fill callbacks) — confirm against the
 * full file.
 */
static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
obj->submit = dummy_submit;
obj->completed = dummy_completed;
obj->completed_status = dummy_completed_status;
+	obj->burst_capacity = dummy_burst_capacity;
}