[dpdk.git] / lib / dmadev / rte_dmadev.c
index 42a4693..d4b32b2 100644 (file)
 
 static int16_t dma_devices_max;
 
-struct rte_dma_dev *rte_dma_devices;
+struct rte_dma_fp_object *rte_dma_fp_objs;
+static struct rte_dma_dev *rte_dma_devices;
+static struct {
+       /* Hold the dev_max of the primary process: it is written by the
+        * primary process and read by secondary processes.
+        */
+       int16_t dev_max;
+       struct rte_dma_dev_data data[0];
+} *dma_devices_shared_data;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
 #define RTE_DMA_LOG(level, ...) \
@@ -41,6 +49,19 @@ rte_dma_dev_max(size_t dev_max)
        return 0;
 }
 
+int16_t
+rte_dma_next_dev(int16_t start_dev_id)
+{
+       int16_t dev_id = start_dev_id;
+       while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED)
+               dev_id++;
+
+       if (dev_id < dma_devices_max)
+               return dev_id;
+
+       return -1;
+}
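
A minimal usage sketch (not part of the patch; assumes <rte_dmadev.h>, <stdio.h> and a completed rte_eal_init()) of how an application can walk all ready devices with this helper; the RTE_DMA_FOREACH_DEV convenience macro in the public header is expected to wrap the same loop:

    int16_t dev_id;

    for (dev_id = rte_dma_next_dev(0); dev_id != -1;
         dev_id = rte_dma_next_dev(dev_id + 1)) {
            struct rte_dma_info info;

            /* dev_name comes from the shared device data filled in below. */
            if (rte_dma_info_get(dev_id, &info) == 0)
                    printf("dmadev %d: %s\n", dev_id, info.dev_name);
    }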
+
 static int
 dma_check_name(const char *name)
 {
@@ -69,11 +90,11 @@ dma_find_free_id(void)
 {
        int16_t i;
 
-       if (rte_dma_devices == NULL)
+       if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
                return -1;
 
        for (i = 0; i < dma_devices_max; i++) {
-               if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+               if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
                        return i;
        }
 
@@ -90,13 +111,45 @@ dma_find_by_name(const char *name)
 
        for (i = 0; i < dma_devices_max; i++) {
                if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
-                   (!strcmp(name, rte_dma_devices[i].dev_name)))
+                   (!strcmp(name, rte_dma_devices[i].data->dev_name)))
                        return &rte_dma_devices[i];
        }
 
        return NULL;
 }
 
+static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);
+
+static int
+dma_fp_data_prepare(void)
+{
+       size_t size;
+       void *ptr;
+       int i;
+
+       if (rte_dma_fp_objs != NULL)
+               return 0;
+
+       /* Fast-path objects must be cache-line aligned, but malloc() does not
+        * guarantee cache-line alignment, so extra memory is allocated and the
+        * returned pointer is realigned.
+        * Note: posix_memalign()/aligned_alloc() are not used because their
+        * availability depends on the libc version.
+        */
+       size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
+               RTE_CACHE_LINE_SIZE;
+       ptr = malloc(size);
+       if (ptr == NULL)
+               return -ENOMEM;
+       memset(ptr, 0, size);
+
+       rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+       for (i = 0; i < dma_devices_max; i++)
+               dma_fp_object_dummy(&rte_dma_fp_objs[i]);
+
+       return 0;
+}
+
 static int
 dma_dev_data_prepare(void)
 {
@@ -114,16 +167,71 @@ dma_dev_data_prepare(void)
        return 0;
 }
 
+static int
+dma_shared_data_prepare(void)
+{
+       const char *mz_name = "rte_dma_dev_data";
+       const struct rte_memzone *mz;
+       size_t size;
+
+       if (dma_devices_shared_data != NULL)
+               return 0;
+
+       size = sizeof(*dma_devices_shared_data) +
+               sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+       else
+               mz = rte_memzone_lookup(mz_name);
+       if (mz == NULL)
+               return -ENOMEM;
+
+       dma_devices_shared_data = mz->addr;
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               memset(dma_devices_shared_data, 0, size);
+               dma_devices_shared_data->dev_max = dma_devices_max;
+       } else {
+               dma_devices_max = dma_devices_shared_data->dev_max;
+       }
+
+       return 0;
+}
+
 static int
 dma_data_prepare(void)
 {
-       if (dma_devices_max == 0)
-               dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
-       return dma_dev_data_prepare();
+       int ret;
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               if (dma_devices_max == 0)
+                       dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+               ret = dma_fp_data_prepare();
+               if (ret)
+                       return ret;
+               ret = dma_dev_data_prepare();
+               if (ret)
+                       return ret;
+               ret = dma_shared_data_prepare();
+               if (ret)
+                       return ret;
+       } else {
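+               /* In a secondary process the shared memzone must be looked up
+                * first: it publishes the primary's dev_max, which sizes the
+                * per-process arrays allocated by the two calls below.
+                */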
+               ret = dma_shared_data_prepare();
+               if (ret)
+                       return ret;
+               ret = dma_fp_data_prepare();
+               if (ret)
+                       return ret;
+               ret = dma_dev_data_prepare();
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 
 static struct rte_dma_dev *
-dma_allocate(const char *name, int numa_node, size_t private_data_size)
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
 {
        struct rte_dma_dev *dev;
        void *dev_private;
@@ -157,10 +265,59 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
        }
 
        dev = &rte_dma_devices[dev_id];
-       rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
-       dev->dev_id = dev_id;
-       dev->numa_node = numa_node;
-       dev->dev_private = dev_private;
+       dev->data = &dma_devices_shared_data->data[dev_id];
+       rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+       dev->data->dev_id = dev_id;
+       dev->data->numa_node = numa_node;
+       dev->data->dev_private = dev_private;
+
+       return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+       struct rte_dma_dev *dev;
+       int16_t i;
+       int ret;
+
+       ret = dma_data_prepare();
+       if (ret < 0) {
+               RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+               return NULL;
+       }
+
+       for (i = 0; i < dma_devices_max; i++) {
+               if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+                       break;
+       }
+       if (i == dma_devices_max) {
+               RTE_DMA_LOG(ERR,
+                       "Device %s is not driven by the primary process",
+                       name);
+               return NULL;
+       }
+
+       dev = &rte_dma_devices[i];
+       dev->data = &dma_devices_shared_data->data[i];
+
+       return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+       struct rte_dma_dev *dev;
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               dev = dma_allocate_primary(name, numa_node, private_data_size);
+       else
+               dev = dma_attach_secondary(name);
+
+       if (dev) {
+               dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
+               dma_fp_object_dummy(dev->fp_obj);
+       }
 
        return dev;
 }
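
For reference, a sketch of how a driver probe path is expected to use this allocator through rte_dma_pmd_allocate(); the my_* names are hypothetical and error handling is abbreviated:

    struct my_dma_private { int dummy; };             /* hypothetical driver state */
    static const struct rte_dma_dev_ops my_dma_ops;   /* hypothetical control-path callbacks */

    static int
    my_dma_probe(struct rte_device *rte_dev)
    {
            struct rte_dma_dev *dev;

            dev = rte_dma_pmd_allocate(rte_dev->name, rte_dev->numa_node,
                                       sizeof(struct my_dma_private));
            if (dev == NULL)
                    return -ENOMEM;

            dev->device = rte_dev;        /* used by rte_dma_info_get() below */
            dev->dev_ops = &my_dma_ops;
            dev->fp_obj->dev_private = dev->data->dev_private;
            dev->state = RTE_DMA_DEV_READY;
            return 0;
    }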
@@ -168,7 +325,12 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 static void
 dma_release(struct rte_dma_dev *dev)
 {
-       rte_free(dev->dev_private);
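+       /* Only the primary process owns dev_private and the shared data slot;
+        * a secondary process must not free or clear them here.
+        */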
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               rte_free(dev->data->dev_private);
+               memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+       }
+
+       dma_fp_object_dummy(dev->fp_obj);
        memset(dev, 0, sizeof(struct rte_dma_dev));
 }
 
@@ -201,6 +363,9 @@ rte_dma_pmd_release(const char *name)
        if (dev == NULL)
                return -EINVAL;
 
+       if (dev->state == RTE_DMA_DEV_READY)
+               return rte_dma_close(dev->data->dev_id);
+
        dma_release(dev);
        return 0;
 }
@@ -217,7 +382,7 @@ rte_dma_get_dev_id_by_name(const char *name)
        if (dev == NULL)
                return -EINVAL;
 
-       return dev->dev_id;
+       return dev->data->dev_id;
 }
 
 bool
@@ -244,3 +409,458 @@ rte_dma_count_avail(void)
 
        return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+       const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+       int ret;
+
+       if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
+               return -EINVAL;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+       memset(dev_info, 0, sizeof(struct rte_dma_info));
+       ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+                                           sizeof(struct rte_dma_info));
+       if (ret != 0)
+               return ret;
+
+       dev_info->dev_name = dev->data->dev_name;
+       dev_info->numa_node = dev->device->numa_node;
+       dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+       return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+       struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+       struct rte_dma_info dev_info;
+       int ret;
+
+       if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
+               return -EINVAL;
+
+       if (dev->data->dev_started != 0) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d must be stopped to allow configuration",
+                       dev_id);
+               return -EBUSY;
+       }
+
+       ret = rte_dma_info_get(dev_id, &dev_info);
+       if (ret != 0) {
+               RTE_DMA_LOG(ERR, "Device %d: failed to get device info", dev_id);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_vchans == 0) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d configured with zero vchans", dev_id);
+               return -EINVAL;
+       }
+       if (dev_conf->nb_vchans > dev_info.max_vchans) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d configured with too many vchans", dev_id);
+               return -EINVAL;
+       }
+       if (dev_conf->enable_silent &&
+           !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+               RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
+               return -EINVAL;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+       ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+                                            sizeof(struct rte_dma_conf));
+       if (ret == 0)
+               memcpy(&dev->data->dev_conf, dev_conf,
+                      sizeof(struct rte_dma_conf));
+
+       return ret;
+}
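
As a usage sketch (dev_id assumed valid and the device stopped; headers and error handling shortened), the minimal application-side configuration is:

    struct rte_dma_conf dev_conf = {
            .nb_vchans = 1,        /* must not exceed dev_info.max_vchans */
    };

    if (rte_dma_configure(dev_id, &dev_conf) != 0)
            rte_exit(EXIT_FAILURE, "cannot configure dmadev %d\n", dev_id);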
+
+int
+rte_dma_start(int16_t dev_id)
+{
+       struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+       int ret;
+
+       if (!rte_dma_is_valid(dev_id))
+               return -EINVAL;
+
+       if (dev->data->dev_conf.nb_vchans == 0) {
+               RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+               return -EINVAL;
+       }
+
+       if (dev->data->dev_started != 0) {
+               RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+               return 0;
+       }
+
+       if (dev->dev_ops->dev_start == NULL)
+               goto mark_started;
+
+       ret = (*dev->dev_ops->dev_start)(dev);
+       if (ret != 0)
+               return ret;
+
+mark_started:
+       dev->data->dev_started = 1;
+       return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+       struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+       int ret;
+
+       if (!rte_dma_is_valid(dev_id))
+               return -EINVAL;
+
+       if (dev->data->dev_started == 0) {
+               RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+               return 0;
+       }
+
+       if (dev->dev_ops->dev_stop == NULL)
+               goto mark_stopped;
+
+       ret = (*dev->dev_ops->dev_stop)(dev);
+       if (ret != 0)
+               return ret;
+
+mark_stopped:
+       dev->data->dev_started = 0;
+       return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+       struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+       int ret;
+
+       if (!rte_dma_is_valid(dev_id))
+               return -EINVAL;
+
+       /* Device must be stopped before it can be closed */
+       if (dev->data->dev_started == 1) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d must be stopped before closing", dev_id);
+               return -EBUSY;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+       ret = (*dev->dev_ops->dev_close)(dev);
+       if (ret == 0)
+               dma_release(dev);
+
+       return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+                   const struct rte_dma_vchan_conf *conf)
+{
+       struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+       struct rte_dma_info dev_info;
+       bool src_is_dev, dst_is_dev;
+       int ret;
+
+       if (!rte_dma_is_valid(dev_id) || conf == NULL)
+               return -EINVAL;
+
+       if (dev->data->dev_started != 0) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d must be stopped to allow configuration",
+                       dev_id);
+               return -EBUSY;
+       }
+
+       ret = rte_dma_info_get(dev_id, &dev_info);
+       if (ret != 0) {
+               RTE_DMA_LOG(ERR, "Device %d: failed to get device info", dev_id);
+               return -EINVAL;
+       }
+       if (dev->data->dev_conf.nb_vchans == 0) {
+               RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+               return -EINVAL;
+       }
+       if (vchan >= dev_info.nb_vchans) {
+               RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
+               return -EINVAL;
+       }
+       if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+           conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+           conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+           conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+               RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+               return -EINVAL;
+       }
+       if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+           !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d doesn't support mem2mem transfer", dev_id);
+               return -EINVAL;
+       }
+       if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+           !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d doesn't support mem2dev transfer", dev_id);
+               return -EINVAL;
+       }
+       if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+           !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d doesn't support dev2mem transfer", dev_id);
+               return -EINVAL;
+       }
+       if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+           !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d doesn't support dev2dev transfer", dev_id);
+               return -EINVAL;
+       }
+       if (conf->nb_desc < dev_info.min_desc ||
+           conf->nb_desc > dev_info.max_desc) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d number of descriptors invalid", dev_id);
+               return -EINVAL;
+       }
+       src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+                    conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+       if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+           (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+               RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+               return -EINVAL;
+       }
+       dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+                    conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+       if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+           (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d destination port type invalid", dev_id);
+               return -EINVAL;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+       return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+                                       sizeof(struct rte_dma_vchan_conf));
+}
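
Continuing the sketch above, one mem-to-mem vchan is typically set up and the device started as follows (nb_desc must fall within the [min_desc, max_desc] range reported by rte_dma_info_get()):

    struct rte_dma_vchan_conf vchan_conf = {
            .direction = RTE_DMA_DIR_MEM_TO_MEM,
            .nb_desc = 1024,
    };

    if (rte_dma_vchan_setup(dev_id, 0, &vchan_conf) != 0 ||
        rte_dma_start(dev_id) != 0)
            rte_exit(EXIT_FAILURE, "cannot bring up dmadev %d\n", dev_id);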
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+       const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+       if (!rte_dma_is_valid(dev_id) || stats == NULL)
+               return -EINVAL;
+
+       if (vchan >= dev->data->dev_conf.nb_vchans &&
+           vchan != RTE_DMA_ALL_VCHAN) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d vchan %u out of range", dev_id, vchan);
+               return -EINVAL;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+       memset(stats, 0, sizeof(struct rte_dma_stats));
+       return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+                                         sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+       struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+       if (!rte_dma_is_valid(dev_id))
+               return -EINVAL;
+
+       if (vchan >= dev->data->dev_conf.nb_vchans &&
+           vchan != RTE_DMA_ALL_VCHAN) {
+               RTE_DMA_LOG(ERR,
+                       "Device %d vchan %u out of range", dev_id, vchan);
+               return -EINVAL;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+       return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
+{
+       struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+       if (!rte_dma_is_valid(dev_id))
+               return -EINVAL;
+
+       if (vchan >= dev->data->dev_conf.nb_vchans) {
+               RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
+               return -EINVAL;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
+       return (*dev->dev_ops->vchan_status)(dev, vchan, status);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+       static const struct {
+               uint64_t capability;
+               const char *name;
+       } capa_names[] = {
+               { RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+               { RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+               { RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+               { RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+               { RTE_DMA_CAPA_SVA,         "sva"     },
+               { RTE_DMA_CAPA_SILENT,      "silent"  },
+               { RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
+               { RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+               { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+               { RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+       };
+
+       const char *name = "unknown";
+       uint32_t i;
+
+       for (i = 0; i < RTE_DIM(capa_names); i++) {
+               if (capability == capa_names[i].capability) {
+                       name = capa_names[i].name;
+                       break;
+               }
+       }
+
+       return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+       uint64_t capa;
+
+       (void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+       while (dev_capa > 0) {
+               capa = 1ull << __builtin_ctzll(dev_capa);
+               (void)fprintf(f, " %s", dma_capability_name(capa));
+               dev_capa &= ~capa;
+       }
+       (void)fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+       const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+       struct rte_dma_info dev_info;
+       int ret;
+
+       if (!rte_dma_is_valid(dev_id) || f == NULL)
+               return -EINVAL;
+
+       ret = rte_dma_info_get(dev_id, &dev_info);
+       if (ret != 0) {
+               RTE_DMA_LOG(ERR, "Device %d: failed to get device info", dev_id);
+               return -EINVAL;
+       }
+
+       (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+               dev->data->dev_id,
+               dev->data->dev_name,
+               dev->data->dev_started ? "started" : "stopped");
+       dma_dump_capability(f, dev_info.dev_capa);
+       (void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+       (void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+       (void)fprintf(f, "  silent_mode: %s\n",
+               dev->data->dev_conf.enable_silent ? "on" : "off");
+
+       if (dev->dev_ops->dev_dump != NULL)
+               return (*dev->dev_ops->dev_dump)(dev, f);
+
+       return 0;
+}
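
A trivial usage sketch, e.g. from a debug CLI handler (dev_id assumed valid):

    (void)rte_dma_dump(dev_id, stdout);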
+
+static int
+dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+          __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
+          __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+       RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
+       return -EINVAL;
+}
+
+static int
+dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+             __rte_unused const struct rte_dma_sge *src,
+             __rte_unused const struct rte_dma_sge *dst,
+             __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
+             __rte_unused uint64_t flags)
+{
+       RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
+       return -EINVAL;
+}
+
+static int
+dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+          __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
+          __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+       RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
+       return -EINVAL;
+}
+
+static int
+dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
+{
+       RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
+       return -EINVAL;
+}
+
+static uint16_t
+dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+               __rte_unused const uint16_t nb_cpls,
+               __rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
+{
+       RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
+       return 0;
+}
+
+static uint16_t
+dummy_completed_status(__rte_unused void *dev_private,
+                      __rte_unused uint16_t vchan,
+                      __rte_unused const uint16_t nb_cpls,
+                      __rte_unused uint16_t *last_idx,
+                      __rte_unused enum rte_dma_status_code *status)
+{
+       RTE_DMA_LOG(ERR,
+                   "completed_status is not configured or not supported.");
+       return 0;
+}
+
+static uint16_t
+dummy_burst_capacity(__rte_unused const void *dev_private,
+                    __rte_unused uint16_t vchan)
+{
+       RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
+       return 0;
+}
+
+static void
+dma_fp_object_dummy(struct rte_dma_fp_object *obj)
+{
+       obj->dev_private      = NULL;
+       obj->copy             = dummy_copy;
+       obj->copy_sg          = dummy_copy_sg;
+       obj->fill             = dummy_fill;
+       obj->submit           = dummy_submit;
+       obj->completed        = dummy_completed;
+       obj->completed_status = dummy_completed_status;
+       obj->burst_capacity   = dummy_burst_capacity;
+}
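
These dummies exist so that the inline fast-path wrappers in rte_dmadev.h can always dispatch through rte_dma_fp_objs without NULL checks; a call made before the device is configured lands in dummy_copy() and the like and fails cleanly with -EINVAL. Roughly (simplified sketch, parameter validation and debug checks omitted):

    static inline int
    rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
                 uint32_t length, uint64_t flags)
    {
            struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];

            return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
    }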