/* PCI device ID of the ICX-generation IOAT DMA engine
 * (presumably Ice Lake Xeon — confirm against the hardware spec). */
#define IOAT_DEVICE_ID_ICX 0x0b00

/* Register this PMD's dynamic log type as "rawdev.ioat", default level INFO. */
RTE_LOG_REGISTER(ioat_pmd_logtype, rawdev.ioat, INFO);

/* Size of one hardware descriptor and of one completion-handle slot;
 * both are used when sizing the shared descriptor/handle memzone. */
#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)
+
+static int
+ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config,
+ size_t config_size)
+{
+ struct rte_ioat_rawdev_config *params = config;
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ unsigned short i;
+
+ if (dev->started)
+ return -EBUSY;
+
+ if (params == NULL || config_size != sizeof(*params))
+ return -EINVAL;
+
+ if (params->ring_size > 4096 || params->ring_size < 64 ||
+ !rte_is_power_of_2(params->ring_size))
+ return -EINVAL;
+
+ ioat->ring_size = params->ring_size;
+ ioat->hdls_disable = params->hdls_disable;
+ if (ioat->desc_ring != NULL) {
+ rte_memzone_free(ioat->desc_mz);
+ ioat->desc_ring = NULL;
+ ioat->desc_mz = NULL;
+ }
+
+ /* allocate one block of memory for both descriptors
+ * and completion handles.
+ */
+ snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
+ ioat->desc_mz = rte_memzone_reserve(mz_name,
+ (DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
+ dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
+ if (ioat->desc_mz == NULL)
+ return -ENOMEM;
+ ioat->desc_ring = ioat->desc_mz->addr;
+ ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];
+
+ ioat->ring_addr = ioat->desc_mz->iova;
+
+ /* configure descriptor ring - each one points to next */
+ for (i = 0; i < ioat->ring_size; i++) {
+ ioat->desc_ring[i].next = ioat->ring_addr +
+ (((i + 1) % ioat->ring_size) * DESC_SZ);
+ }
+
+ return 0;
+}
+
/*
 * Start the ioat rawdev: program the hardware with the ring and
 * completion addresses prepared by ioat_dev_configure().
 *
 * @param dev  rawdev to start; must have been configured first
 * @return 0 on success, -EBUSY if no descriptor ring has been allocated
 */
static int
ioat_dev_start(struct rte_rawdev *dev)
{
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	/* refuse to start a device that was never (successfully) configured */
	if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
		return -EBUSY;

	/* inform hardware of where the descriptor ring is */
	ioat->regs->chainaddr = ioat->ring_addr;
	/* inform hardware of where to write the status/completions */
	ioat->regs->chancmp = ioat->status_addr;

	/* prime the status register to be set to the last element */
	ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
	return 0;
}
+
/* Stop callback: nothing to do for this device, provided only to
 * satisfy the rawdev ops table. */
static void
ioat_dev_stop(struct rte_rawdev *dev)
{
	(void)dev; /* deliberately unused */
}
+
+static int
+ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
+ size_t dev_info_size)
+{
+ struct rte_ioat_rawdev_config *cfg = dev_info;
+ struct rte_ioat_rawdev *ioat = dev->dev_private;
+
+ if (dev_info == NULL || dev_info_size != sizeof(*cfg))
+ return -EINVAL;
+
+ cfg->ring_size = ioat->ring_size;
+ cfg->hdls_disable = ioat->hdls_disable;
+ return 0;
+}
+
/* Display names for the device xstats.
 * NOTE(review): the order here is assumed to match the counter layout
 * that ioat_xstats_get() reads out of ioat->xstats — confirm against
 * the struct definition before reordering. */
static const char * const xstat_names[] = {
	"failed_enqueues", "successful_enqueues",
	"copies_started", "copies_completed"
};
+
+static int
+ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ const struct rte_ioat_rawdev *ioat = dev->dev_private;
+ const uint64_t *stats = (const void *)&ioat->xstats;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] < sizeof(ioat->xstats)/sizeof(*stats))
+ values[i] = stats[ids[i]];
+ else
+ values[i] = 0;
+ }
+ return n;
+}
+
+static int
+ioat_xstats_get_names(const struct rte_rawdev *dev,
+ struct rte_rawdev_xstats_name *names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ RTE_SET_USED(dev);
+ if (size < RTE_DIM(xstat_names))
+ return RTE_DIM(xstat_names);
+
+ for (i = 0; i < RTE_DIM(xstat_names); i++)
+ strlcpy(names[i].name, xstat_names[i], sizeof(names[i]));
+
+ return RTE_DIM(xstat_names);
+}