X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fdmadev%2Frte_dmadev.c;h=d4b32b2971b0019ff937ba0ad4dcc67fc959d34e;hb=ec487c189686ee9b3b7551d3aca138cec3f91e74;hp=9223ae1c95e98d3957ddac6abf569a8b9c02b4b5;hpb=e0180db144f843795aad11765c779c4beb544339;p=dpdk.git diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c index 9223ae1c95..d4b32b2971 100644 --- a/lib/dmadev/rte_dmadev.c +++ b/lib/dmadev/rte_dmadev.c @@ -17,7 +17,15 @@ static int16_t dma_devices_max; -struct rte_dma_dev *rte_dma_devices; +struct rte_dma_fp_object *rte_dma_fp_objs; +static struct rte_dma_dev *rte_dma_devices; +static struct { + /* Hold the dev_max information of the primary process. This field is + * set by the primary process and is read by the secondary process. + */ + int16_t dev_max; + struct rte_dma_dev_data data[0]; +} *dma_devices_shared_data; RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO); #define RTE_DMA_LOG(level, ...) \ @@ -41,6 +49,19 @@ rte_dma_dev_max(size_t dev_max) return 0; } +int16_t +rte_dma_next_dev(int16_t start_dev_id) +{ + int16_t dev_id = start_dev_id; + while (dev_id < dma_devices_max && rte_dma_devices[dev_id].state == RTE_DMA_DEV_UNUSED) + dev_id++; + + if (dev_id < dma_devices_max) + return dev_id; + + return -1; +} + static int dma_check_name(const char *name) { @@ -69,11 +90,11 @@ dma_find_free_id(void) { int16_t i; - if (rte_dma_devices == NULL) + if (rte_dma_devices == NULL || dma_devices_shared_data == NULL) return -1; for (i = 0; i < dma_devices_max; i++) { - if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED) + if (dma_devices_shared_data->data[i].dev_name[0] == '\0') return i; } @@ -90,13 +111,45 @@ dma_find_by_name(const char *name) for (i = 0; i < dma_devices_max; i++) { if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) && - (!strcmp(name, rte_dma_devices[i].dev_name))) + (!strcmp(name, rte_dma_devices[i].data->dev_name))) return &rte_dma_devices[i]; } return NULL; } +static void dma_fp_object_dummy(struct rte_dma_fp_object *obj); + +static int +dma_fp_data_prepare(void) +{ + size_t size; + void *ptr; + int i; + + if (rte_dma_fp_objs != NULL) + return 0; + + /* Fast-path object must align cacheline, but the return value of malloc + * may not be aligned to the cache line. Therefore, extra memory is + * applied for realignment. + * note: We do not call posix_memalign/aligned_alloc because it is + * version dependent on libc. 
+	 */
+	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
+		RTE_CACHE_LINE_SIZE;
+	ptr = malloc(size);
+	if (ptr == NULL)
+		return -ENOMEM;
+	memset(ptr, 0, size);
+
+	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+	for (i = 0; i < dma_devices_max; i++)
+		dma_fp_object_dummy(&rte_dma_fp_objs[i]);
+
+	return 0;
+}
+
 static int
 dma_dev_data_prepare(void)
 {
@@ -114,16 +167,71 @@ dma_dev_data_prepare(void)
 	return 0;
 }
 
+static int
+dma_shared_data_prepare(void)
+{
+	const char *mz_name = "rte_dma_dev_data";
+	const struct rte_memzone *mz;
+	size_t size;
+
+	if (dma_devices_shared_data != NULL)
+		return 0;
+
+	size = sizeof(*dma_devices_shared_data) +
+		sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+	else
+		mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return -ENOMEM;
+
+	dma_devices_shared_data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dma_devices_shared_data, 0, size);
+		dma_devices_shared_data->dev_max = dma_devices_max;
+	} else {
+		dma_devices_max = dma_devices_shared_data->dev_max;
+	}
+
+	return 0;
+}
+
 static int
 dma_data_prepare(void)
 {
-	if (dma_devices_max == 0)
-		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
-	return dma_dev_data_prepare();
+	int ret;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (dma_devices_max == 0)
+			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+	} else {
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static struct rte_dma_dev *
-dma_allocate(const char *name, int numa_node, size_t private_data_size)
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
 {
 	struct rte_dma_dev *dev;
 	void *dev_private;
@@ -157,10 +265,59 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 	}
 
 	dev = &rte_dma_devices[dev_id];
-	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
-	dev->dev_id = dev_id;
-	dev->numa_node = numa_node;
-	dev->dev_private = dev_private;
+	dev->data = &dma_devices_shared_data->data[dev_id];
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+	dev->data->dev_id = dev_id;
+	dev->data->numa_node = numa_node;
+	dev->data->dev_private = dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+	struct rte_dma_dev *dev;
+	int16_t i;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == dma_devices_max) {
+		RTE_DMA_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[i];
+	dev->data = &dma_devices_shared_data->data[i];
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dma_allocate_primary(name, numa_node, private_data_size);
+	else
+		dev = dma_attach_secondary(name);
+
+	if (dev) {
+		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
+		dma_fp_object_dummy(dev->fp_obj);
+	}
 
 	return dev;
 }
@@ -168,7 +325,12 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 static void
 dma_release(struct rte_dma_dev *dev)
 {
-	rte_free(dev->dev_private);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(dev->data->dev_private);
+		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+	}
+
+	dma_fp_object_dummy(dev->fp_obj);
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
 
@@ -202,7 +364,7 @@ rte_dma_pmd_release(const char *name)
 		return -EINVAL;
 
 	if (dev->state == RTE_DMA_DEV_READY)
-		return rte_dma_close(dev->dev_id);
+		return rte_dma_close(dev->data->dev_id);
 
 	dma_release(dev);
 	return 0;
@@ -220,7 +382,7 @@ rte_dma_get_dev_id_by_name(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
-	return dev->dev_id;
+	return dev->data->dev_id;
 }
 
 bool
@@ -264,9 +426,9 @@ rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
 	if (ret != 0)
 		return ret;
 
-	dev_info->dev_name = dev->dev_name;
+	dev_info->dev_name = dev->data->dev_name;
 	dev_info->numa_node = dev->device->numa_node;
-	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
 
 	return 0;
 }
@@ -281,7 +443,7 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -313,7 +475,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
 		sizeof(struct rte_dma_conf));
 	if (ret == 0)
-		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+		memcpy(&dev->data->dev_conf, dev_conf,
+			sizeof(struct rte_dma_conf));
 
 	return ret;
 }
@@ -327,12 +490,12 @@ rte_dma_start(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
 		return 0;
 	}
@@ -345,7 +508,7 @@ rte_dma_start(int16_t dev_id)
 		return ret;
 
 mark_started:
-	dev->dev_started = 1;
+	dev->data->dev_started = 1;
 	return 0;
 }
 
@@ -358,7 +521,7 @@ rte_dma_stop(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_started == 0) {
+	if (dev->data->dev_started == 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
 		return 0;
 	}
@@ -371,7 +534,7 @@ rte_dma_stop(int16_t dev_id)
 		return ret;
 
 mark_stopped:
-	dev->dev_started = 0;
+	dev->data->dev_started = 0;
 	return 0;
 }
 
@@ -385,7 +548,7 @@ rte_dma_close(int16_t dev_id)
 		return -EINVAL;
 
 	/* Device must be stopped before it can be closed */
-	if (dev->dev_started == 1) {
+	if (dev->data->dev_started == 1) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped before closing", dev_id);
 		return -EBUSY;
@@ -411,7 +574,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 	if (!rte_dma_is_valid(dev_id) || conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -423,7 +586,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
 		return -EINVAL;
 	}
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
@@ -497,7 +660,7 @@ rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
 	if (!rte_dma_is_valid(dev_id) || stats == NULL)
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -518,7 +681,7 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -529,6 +692,23 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
 	return (*dev->dev_ops->stats_reset)(dev, vchan);
 }
 
+int
+rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (vchan >= dev->data->dev_conf.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
+	return (*dev->dev_ops->vchan_status)(dev, vchan, status);
+}
+
 static const char *
 dma_capability_name(uint64_t capability)
 {
@@ -542,6 +722,7 @@ dma_capability_name(uint64_t capability)
 		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
 		{ RTE_DMA_CAPA_SVA,         "sva"     },
 		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_HANDLES_ERRORS, "handles_errors" },
 		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
 		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
 		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
@@ -591,17 +772,95 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 	}
 
 	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
-		dev->dev_id,
-		dev->dev_name,
-		dev->dev_started ? "started" : "stopped");
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
 	dma_dump_capability(f, dev_info.dev_capa);
 	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
 	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
 	(void)fprintf(f, "  silent_mode: %s\n",
-		dev->dev_conf.enable_silent ? "on" : "off");
+		dev->data->dev_conf.enable_silent ? "on" : "off");
 
 	if (dev->dev_ops->dev_dump != NULL)
 		return (*dev->dev_ops->dev_dump)(dev, f);
 
 	return 0;
 }
+
+static int
+dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	      __rte_unused const struct rte_dma_sge *src,
+	      __rte_unused const struct rte_dma_sge *dst,
+	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
+	      __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
+{
+	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
+	return -EINVAL;
+}
+
+static uint16_t
+dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+		__rte_unused const uint16_t nb_cpls,
+		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
+{
+	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
+	return 0;
+}
+
+static uint16_t
+dummy_completed_status(__rte_unused void *dev_private,
+		       __rte_unused uint16_t vchan,
+		       __rte_unused const uint16_t nb_cpls,
+		       __rte_unused uint16_t *last_idx,
+		       __rte_unused enum rte_dma_status_code *status)
+{
+	RTE_DMA_LOG(ERR,
+		"completed_status is not configured or not supported.");
+	return 0;
+}
+
+static uint16_t
+dummy_burst_capacity(__rte_unused const void *dev_private,
+		     __rte_unused uint16_t vchan)
+{
+	RTE_DMA_LOG(ERR, "burst_capacity is not configured or not supported.");
+	return 0;
+}
+
+static void
+dma_fp_object_dummy(struct rte_dma_fp_object *obj)
+{
+	obj->dev_private = NULL;
+	obj->copy = dummy_copy;
+	obj->copy_sg = dummy_copy_sg;
+	obj->fill = dummy_fill;
+	obj->submit = dummy_submit;
+	obj->completed = dummy_completed;
+	obj->completed_status = dummy_completed_status;
+	obj->burst_capacity = dummy_burst_capacity;
+}