+}
+
+/*
+ * Report device configuration back to the caller.
+ *
+ * dev_info is expected to point at a struct rte_ioat_rawdev_config;
+ * only the configured ring size is filled in. A NULL dev_info pointer
+ * is tolerated and turns the call into a no-op.
+ */
+static void
+ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
+{
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+ struct rte_ioat_rawdev_config *cfg = dev_info;
+
+ if (cfg == NULL)
+ return;
+
+ cfg->ring_size = ioat->ring_size;
+}
+
+/*
+ * Names of the extended stats exposed by this driver. The array index of
+ * each name must match the id handled in ioat_xstats_get() (cases 0-3).
+ */
+static const char * const xstat_names[] = {
+ "failed_enqueues", "successful_enqueues",
+ "copies_started", "copies_completed"
+};
+
+/*
+ * Copy out the requested extended-stat counters.
+ *
+ * For each requested id, write the matching device counter into the
+ * values array; ids outside the known range (0-3) read as zero.
+ * Returns the number of entries written, i.e. n.
+ */
+static int
+ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ const struct rte_ioat_rawdev *ioat = dev->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ uint64_t val;
+
+ if (ids[i] == 0)
+ val = ioat->enqueue_failed;
+ else if (ids[i] == 1)
+ val = ioat->enqueued;
+ else if (ids[i] == 2)
+ val = ioat->started;
+ else if (ids[i] == 3)
+ val = ioat->completed;
+ else
+ val = 0;
+ values[i] = val;
+ }
+ return n;
+}
+
+/*
+ * Fill in the names of the extended stats exposed by this driver.
+ *
+ * Always returns the number of stats available. If the supplied array is
+ * too small to hold them all, nothing is written and only the required
+ * count is returned, so callers can size their array and retry.
+ */
+static int
+ioat_xstats_get_names(const struct rte_rawdev *dev,
+ struct rte_rawdev_xstats_name *names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ RTE_SET_USED(dev);
+ if (size < RTE_DIM(xstat_names))
+ return RTE_DIM(xstat_names);
+
+ for (i = 0; i < RTE_DIM(xstat_names); i++)
+ /* bound the copy by the destination field, not the struct */
+ strlcpy(names[i].name, xstat_names[i], sizeof(names[i].name));
+
+ return RTE_DIM(xstat_names);
+}
+
+extern int ioat_rawdev_test(uint16_t dev_id);
+
+/*
+ * Create and initialize an ioat rawdev instance for the given PCI device.
+ *
+ * Allocates the rawdev, reserves an IOVA-contiguous memzone for the
+ * per-device private data, wires up the ops table and performs the
+ * initial channel reset and error-handling configuration.
+ *
+ * Returns 0 on success, a negative errno value on failure.
+ */
+static int
+ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
+{
+ static const struct rte_rawdev_ops ioat_rawdev_ops = {
+ .dev_configure = ioat_dev_configure,
+ .dev_start = ioat_dev_start,
+ .dev_stop = ioat_dev_stop,
+ .dev_info_get = ioat_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .dev_selftest = ioat_rawdev_test,
+ };
+
+ struct rte_rawdev *rawdev = NULL;
+ struct rte_ioat_rawdev *ioat = NULL;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int ret = 0;
+ int retry = 0;
+
+ if (!name) {
+ IOAT_PMD_ERR("Invalid name of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
+ dev->device.numa_node);
+ if (rawdev == NULL) {
+ IOAT_PMD_ERR("Unable to allocate raw device");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Private data must be IOVA-contiguous: status_addr below hands the
+ * memzone IOVA to the hardware. */
+ snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
+ mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
+ dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
+ if (mz == NULL) {
+ IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ rawdev->dev_private = mz->addr;
+ rawdev->dev_ops = &ioat_rawdev_ops;
+ rawdev->device = &dev->device;
+ rawdev->driver_name = dev->device.driver->name;
+
+ /* Ring is left unallocated here; dev_configure sets it up later. */
+ ioat = rawdev->dev_private;
+ ioat->rawdev = rawdev;
+ ioat->mz = mz;
+ ioat->regs = dev->mem_resource[0].addr;
+ ioat->ring_size = 0;
+ ioat->desc_ring = NULL;
+ ioat->status_addr = ioat->mz->iova +
+ offsetof(struct rte_ioat_rawdev, status);
+
+ /* do device initialization - reset and set error behaviour */
+ if (ioat->regs->chancnt != 1)
+ IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
+ ioat->regs->chancnt);
+
+ if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
+ IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
+ ioat->regs->chanctrl = 0;
+ }
+
+ /* Suspend, then reset the channel, polling until the reset bit in
+ * CHANCMD self-clears. */
+ ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
+ rte_delay_ms(1);
+ ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
+ rte_delay_ms(1);
+ while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
+ ioat->regs->chainaddr = 0;
+ rte_delay_ms(1);
+ /* NOTE(review): on retry exhaustion ret is set to -EIO but the
+ * loop is not exited — confirm whether a break or goto cleanup
+ * is intended here, otherwise this can spin indefinitely. */
+ if (++retry >= 200) {
+ IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
+ __func__,
+ ioat->regs->chancmd,
+ ioat->regs->chansts,
+ ioat->regs->chanerr);
+ ret = -EIO;
+ }
+ }
+ /* Abort on any error and raise a completion write on errors. */
+ ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
+ RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;
+