Update the relevant stats in the data path functions and point the
overall device struct's xstats function pointers to the existing ioat
functions.
At this point, all necessary hooks for supporting the existing unit tests
are in place, so call them for each device.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Acked-by: Radu Nicolau <radu.nicolau@intel.com>
.dev_start = idxd_pci_dev_start,
.dev_stop = idxd_pci_dev_stop,
.dev_info_get = idxd_dev_info_get,
.dev_start = idxd_pci_dev_start,
.dev_stop = idxd_pci_dev_stop,
.dev_info_get = idxd_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
};
/* each portal uses 4 x 4k pages */
};
/* each portal uses 4 x 4k pages */
.dump = idxd_dev_dump,
.dev_configure = idxd_dev_configure,
.dev_info_get = idxd_dev_info_get,
.dump = idxd_dev_dump,
.dev_configure = idxd_dev_configure,
.dev_info_get = idxd_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
idxd_rawdev_test(uint16_t dev_id)
{
rte_rawdev_dump(dev_id, stdout);
idxd_rawdev_test(uint16_t dev_id)
{
rte_rawdev_dump(dev_id, stdout);
+ return ioat_rawdev_test(dev_id);
*/
struct rte_idxd_rawdev {
enum rte_ioat_dev_type type;
*/
struct rte_idxd_rawdev {
enum rte_ioat_dev_type type;
+ struct rte_ioat_xstats xstats;
+
void *portal; /* address to write the batch descriptor */
/* counters to track the batches and the individual op handles */
void *portal; /* address to write the batch descriptor */
/* counters to track the batches and the individual op handles */
if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
idxd->next_free_hdl = 0;
if (++idxd->next_free_hdl == idxd->hdl_ring_sz)
idxd->next_free_hdl = 0;
+ idxd->xstats.enqueued++;
+ idxd->xstats.enqueue_failed++;
rte_errno = ENOSPC;
return 0;
}
rte_errno = ENOSPC;
return 0;
}
if (++idxd->next_batch == idxd->batch_ring_sz)
idxd->next_batch = 0;
if (++idxd->next_batch == idxd->batch_ring_sz)
idxd->next_batch = 0;
+ idxd->xstats.started = idxd->xstats.enqueued;
}
static __rte_always_inline int
}
static __rte_always_inline int
idxd->next_ret_hdl = h_idx;
idxd->next_ret_hdl = h_idx;
+ idxd->xstats.completed += n;