.dev_start = idxd_pci_dev_start,
.dev_stop = idxd_pci_dev_stop,
.dev_info_get = idxd_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
};
/* each portal uses 4 x 4k pages */
.dump = idxd_dev_dump,
.dev_configure = idxd_dev_configure,
.dev_info_get = idxd_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
};
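
Both idxd ops tables (the PCI and vdev variants) are wired up to the same three xstats callbacks already provided for the existing ioat rawdev, so no idxd-specific stats code is needed. As a point of reference only, a callback of this kind just copies the requested 64-bit counters out of the per-device xstats block; the sketch below is illustrative, assumes the generic rawdev xstats_get prototype and that ids[] index the counters in declaration order, and is not the driver's actual implementation.

#include <rte_rawdev_pmd.h>     /* struct rte_rawdev */
#include <rte_ioat_rawdev.h>    /* struct rte_idxd_rawdev (fast-path state) */

/* Illustrative only: general shape of an xstats_get callback for this
 * driver. Treats the embedded xstats block as a flat array of uint64_t
 * counters and copies out the requested ids. */
static int
example_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	const struct rte_idxd_rawdev *idxd = dev->dev_private;
	const uint64_t *stats = (const uint64_t *)&idxd->xstats;
	const unsigned int nb_stats = sizeof(idxd->xstats) / sizeof(uint64_t);
	unsigned int i;

	for (i = 0; i < n; i++)
		values[i] = (ids[i] < nb_stats) ? stats[ids[i]] : 0;
	return n;
}
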
int
idxd_rawdev_test(uint16_t dev_id)
{
rte_rawdev_dump(dev_id, stdout);
- return 0;
+ return ioat_rawdev_test(dev_id);
}
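
With this change the idxd selftest runs the same copy test used for the existing ioat devices instead of only dumping device state. The test is driven through the generic rawdev API; a minimal caller (device id 0 is just an example) looks like:

#include <stdio.h>
#include <rte_rawdev.h>

/* Run the driver self-test for the given rawdev; rte_rawdev_selftest()
 * dispatches to the dev_selftest op, i.e. idxd_rawdev_test() above.
 * Returns 0 on success, negative on failure. */
static int
run_idxd_selftest(uint16_t dev_id)
{
	int ret = rte_rawdev_selftest(dev_id);
	if (ret != 0)
		printf("rawdev %u selftest failed: %d\n", dev_id, ret);
	return ret;
}
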
*/
struct rte_idxd_rawdev {
enum rte_ioat_dev_type type;
+ struct rte_ioat_xstats xstats;
+
void *portal; /* address to write the batch descriptor */
/* counters to track the batches and the individual op handles */
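
The xstats block is embedded directly in the per-device fast-path state so the enqueue/completion paths can bump counters without extra indirection. The layout of struct rte_ioat_xstats is not shown in this excerpt; judging by the counters updated further down in the patch, it presumably holds four 64-bit fields, roughly:

/* Presumed layout, inferred from the counters used in this patch. */
struct rte_ioat_xstats {
	uint64_t enqueue_failed; /* ops rejected for lack of ring space */
	uint64_t enqueued;       /* ops written to the descriptor ring */
	uint64_t started;        /* ops submitted to hardware */
	uint64_t completed;      /* ops returned to the application */
};
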
if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
idxd->next_free_hdl = 0;
+ idxd->xstats.enqueued++;
return 1;
failed:
+ idxd->xstats.enqueue_failed++;
rte_errno = ENOSPC;
return 0;
}
if (++idxd->next_batch == idxd->batch_ring_sz)
idxd->next_batch = 0;
+ idxd->xstats.started = idxd->xstats.enqueued;
}
static __rte_always_inline int
idxd->next_ret_hdl = h_idx;
+ idxd->xstats.completed += n;
return n;
}
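
Once the driver exposes these counters, an application can read them through the generic rawdev xstats API. A minimal sketch, assuming the standard rte_rawdev_xstats_names_get()/rte_rawdev_xstats_get() calls and a small fixed buffer (16 entries is arbitrary and comfortably covers the four counters above):

#include <stdio.h>
#include <inttypes.h>
#include <rte_common.h>
#include <rte_rawdev.h>

/* Print every xstat exposed by the given rawdev, e.g. the enqueued,
 * started and completed counters maintained in the fast path above. */
static void
print_idxd_xstats(uint16_t dev_id)
{
	struct rte_rawdev_xstats_name names[16];
	unsigned int ids[16];
	uint64_t values[16];
	int n, i;

	n = rte_rawdev_xstats_names_get(dev_id, names, RTE_DIM(names));
	if (n <= 0)
		return;
	if (n > (int)RTE_DIM(names))
		n = RTE_DIM(names);
	for (i = 0; i < n; i++)
		ids[i] = i;
	if (rte_rawdev_xstats_get(dev_id, ids, values, n) != n)
		return;
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
}
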