+int
+idxd_dev_dump(struct rte_rawdev *dev, FILE *f)
+{
+	struct idxd_rawdev *idxd = dev->dev_private;
+	struct rte_idxd_rawdev *rte_idxd = &idxd->public;
+	int i;
+
+	/* Dump the device state for debugging: identity, config, then the
+	 * batch-tracking and handle-tracking ring indexes.
+	 */
+	fprintf(f, "Raw Device #%d\n", dev->dev_id);
+	fprintf(f, "Driver: %s\n\n", dev->driver_name);
+
+	fprintf(f, "Portal: %p\n", rte_idxd->portal);
+	fprintf(f, "Config: {ring_size: %u, hdls_disable: %u}\n\n",
+			rte_idxd->cfg.ring_size, rte_idxd->cfg.hdls_disable);
+
+	fprintf(f, "max batches: %u\n", rte_idxd->max_batches);
+	fprintf(f, "batch idx read: %u\n", rte_idxd->batch_idx_read);
+	fprintf(f, "batch idx write: %u\n", rte_idxd->batch_idx_write);
+	fprintf(f, "batch idxes:");
+	/* ring holds max_batches + 1 slots so read != write when full */
+	for (i = 0; i < rte_idxd->max_batches + 1; i++)
+		fprintf(f, "%u ", rte_idxd->batch_idx_ring[i]);
+	fprintf(f, "\n\n");
+
+	/* Fix: previously printed max_batches here instead of hdls_read */
+	fprintf(f, "hdls read: %u\n", rte_idxd->hdls_read);
+	fprintf(f, "hdls avail: %u\n", rte_idxd->hdls_avail);
+	fprintf(f, "batch start: %u\n", rte_idxd->batch_start);
+	fprintf(f, "batch size: %u\n", rte_idxd->batch_size);
+
+	return 0;
+}
+
+int
+idxd_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
+		size_t info_size)
+{
+	struct rte_ioat_rawdev_config *cfg = dev_info;
+	struct idxd_rawdev *idxd = dev->dev_private;
+	struct rte_idxd_rawdev *rte_idxd = &idxd->public;
+
+	/* Reject callers compiled against a different config-struct layout. */
+	if (sizeof(*cfg) != info_size)
+		return -EINVAL;
+
+	/* A NULL info pointer is a valid no-op query. */
+	if (cfg == NULL)
+		return 0;
+
+	*cfg = rte_idxd->cfg;
+	return 0;
+}
+
+int
+idxd_dev_configure(const struct rte_rawdev *dev,
+		rte_rawdev_obj_t config, size_t config_size)
+{
+	struct idxd_rawdev *idxd = dev->dev_private;
+	struct rte_idxd_rawdev *rte_idxd = &idxd->public;
+	struct rte_ioat_rawdev_config *cfg = config;
+	uint16_t max_desc;
+
+	/* Fix: validate the config pointer and size BEFORE dereferencing it.
+	 * Previously cfg->ring_size was read before the size check, so a NULL
+	 * or wrongly-sized config caused undefined behavior.
+	 */
+	if (cfg == NULL || config_size != sizeof(*cfg))
+		return -EINVAL;
+
+	if (dev->started) {
+		IOAT_PMD_ERR("%s: Error, device is started.", __func__);
+		return -EAGAIN;
+	}
+
+	rte_idxd->cfg = *cfg;
+
+	/* ring size must be a power of two so it can be used as a mask */
+	max_desc = cfg->ring_size;
+	if (!rte_is_power_of_2(max_desc))
+		max_desc = rte_align32pow2(max_desc);
+	IOAT_PMD_DEBUG("Rawdev %u using %u descriptors",
+			dev->dev_id, max_desc);
+	rte_idxd->desc_ring_mask = max_desc - 1;
+
+	/* in case we are reconfiguring a device, free any existing memory */
+	rte_free(rte_idxd->desc_ring);
+	rte_free(rte_idxd->hdl_ring);
+	rte_free(rte_idxd->hdl_ring_flags);
+
+	/* allocate the descriptor ring at 2x size as batches can't wrap */
+	rte_idxd->desc_ring = rte_zmalloc(NULL,
+			sizeof(*rte_idxd->desc_ring) * max_desc * 2, 0);
+	if (rte_idxd->desc_ring == NULL)
+		return -ENOMEM;
+	rte_idxd->desc_iova = rte_mem_virt2iova(rte_idxd->desc_ring);
+
+	rte_idxd->hdl_ring = rte_zmalloc(NULL,
+			sizeof(*rte_idxd->hdl_ring) * max_desc, 0);
+	if (rte_idxd->hdl_ring == NULL) {
+		/* roll back the descriptor-ring allocation */
+		rte_free(rte_idxd->desc_ring);
+		rte_idxd->desc_ring = NULL;
+		return -ENOMEM;
+	}
+	rte_idxd->hdl_ring_flags = rte_zmalloc(NULL,
+			sizeof(*rte_idxd->hdl_ring_flags) * max_desc, 0);
+	if (rte_idxd->hdl_ring_flags == NULL) {
+		/* roll back both earlier allocations */
+		rte_free(rte_idxd->desc_ring);
+		rte_free(rte_idxd->hdl_ring);
+		rte_idxd->desc_ring = NULL;
+		rte_idxd->hdl_ring = NULL;
+		return -ENOMEM;
+	}
+	/* reset all ring bookkeeping for the freshly allocated rings */
+	rte_idxd->hdls_read = rte_idxd->batch_start = 0;
+	rte_idxd->batch_size = 0;
+	rte_idxd->hdls_avail = 0;
+
+	return 0;
+}
+