#ifndef _RTE_IOAT_RAWDEV_FNS_H_
#define _RTE_IOAT_RAWDEV_FNS_H_
+/**
+ * @file
+ * This header file contains the implementation of the various ioat
+ * rawdev functions for IOAT/CBDMA hardware. The API specification and key
+ * public structures are defined in "rte_ioat_rawdev.h".
+ *
+ * This file should not be included directly, but instead applications should
+ * include "rte_ioat_rawdev.h", which then includes this file - and the IDXD/DSA
+ * equivalent header - in turn.
+ */
+
#include <x86intrin.h>
#include <rte_rawdev.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>
+/**
+ * @internal
+ * Identify the data path to use.
+ * Must be the first field of the rte_ioat_rawdev and rte_idxd_rawdev structs.
+ */
+enum rte_ioat_dev_type {
+ RTE_IOAT_DEV,
+ RTE_IDXD_DEV,
+};
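+
+/*
+ * Since this enum is the leading field of both device structs, the public
+ * wrapper functions below read it through the rawdev private data to pick
+ * a data path, e.g.:
+ *
+ *   enum rte_ioat_dev_type *type =
+ *       (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+ *   if (*type == RTE_IDXD_DEV)
+ *       ... use the IDXD/DSA data path ...
+ */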
+
+/**
+ * @internal
+ * Statistics for tracking; if fields are added or changed, update
+ * the xstats functions to match.
+ */
+struct rte_ioat_xstats {
+ uint64_t enqueue_failed;
+ uint64_t enqueued;
+ uint64_t started;
+ uint64_t completed;
+};
+
+#include "rte_idxd_rawdev_fns.h"
+
/**
* @internal
* Structure representing a device descriptor
uint64_t op_specific[4];
};
-/**
- * @internal
- * Identify the data path to use.
- * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
- */
-enum rte_ioat_dev_type {
- RTE_IOAT_DEV,
- RTE_IDXD_DEV,
-};
-
/**
* @internal
* Structure representing an IOAT device instance
*/
struct rte_ioat_rawdev {
+ /* common fields at the top - match those in rte_idxd_rawdev */
enum rte_ioat_dev_type type;
+ struct rte_ioat_xstats xstats;
+
struct rte_rawdev *rawdev;
const struct rte_memzone *mz;
const struct rte_memzone *desc_mz;
- volatile uint16_t *doorbell;
+ volatile uint16_t *doorbell __rte_cache_aligned;
phys_addr_t status_addr;
phys_addr_t ring_addr;
unsigned short next_read;
unsigned short next_write;
- /* some statistics for tracking, if added/changed update xstats fns*/
- uint64_t enqueue_failed __rte_cache_aligned;
- uint64_t enqueued;
- uint64_t started;
- uint64_t completed;
-
/* to report completions, the device will write status back here */
volatile uint64_t status __rte_cache_aligned;
#define RTE_IOAT_CHANSTS_HALTED 0x3
#define RTE_IOAT_CHANSTS_ARMED 0x4
-/*
- * Defines used in the data path for interacting with hardware.
- */
-#define IDXD_CMD_OP_SHIFT 24
-enum rte_idxd_ops {
- idxd_op_nop = 0,
- idxd_op_batch,
- idxd_op_drain,
- idxd_op_memmove,
- idxd_op_fill
-};
-
-#define IDXD_FLAG_FENCE (1 << 0)
-#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
-#define IDXD_FLAG_REQUEST_COMPLETION (1 << 3)
-#define IDXD_FLAG_CACHE_CONTROL (1 << 8)
-
-/**
- * Hardware descriptor used by DSA hardware, for both bursts and
- * for individual operations.
- */
-struct rte_idxd_hw_desc {
- uint32_t pasid;
- uint32_t op_flags;
- rte_iova_t completion;
-
- RTE_STD_C11
- union {
- rte_iova_t src; /* source address for copy ops etc. */
- rte_iova_t desc_addr; /* descriptor pointer for batch */
- };
- rte_iova_t dst;
-
- uint32_t size; /* length of data for op, or batch size */
-
- /* 28 bytes of padding here */
-} __rte_aligned(64);
-
-/**
- * Completion record structure written back by DSA
- */
-struct rte_idxd_completion {
- uint8_t status;
- uint8_t result;
- /* 16-bits pad here */
- uint32_t completed_size; /* data length, or descriptors for batch */
-
- rte_iova_t fault_address;
- uint32_t invalid_flags;
-} __rte_aligned(32);
-
-#define BATCH_SIZE 64
-
-/**
- * Structure used inside the driver for building up and submitting
- * a batch of operations to the DSA hardware.
- */
-struct rte_idxd_desc_batch {
- struct rte_idxd_completion comp; /* the completion record for batch */
-
- uint16_t submitted;
- uint16_t op_count;
- uint16_t hdl_end;
-
- struct rte_idxd_hw_desc batch_desc;
-
- /* batches must always have 2 descriptors, so put a null at the start */
- struct rte_idxd_hw_desc null_desc;
- struct rte_idxd_hw_desc ops[BATCH_SIZE];
-};
-
-/**
- * structure used to save the "handles" provided by the user to be
- * returned to the user on job completion.
- */
-struct rte_idxd_user_hdl {
- uint64_t src;
- uint64_t dst;
-};
-
-/**
- * @internal
- * Structure representing an IDXD device instance
- */
-struct rte_idxd_rawdev {
- enum rte_ioat_dev_type type;
- void *portal; /* address to write the batch descriptor */
-
- /* counters to track the batches and the individual op handles */
- uint16_t batch_ring_sz; /* size of batch ring */
- uint16_t hdl_ring_sz; /* size of the user hdl ring */
-
- uint16_t next_batch; /* where we write descriptor ops */
- uint16_t next_completed; /* batch where we read completions */
- uint16_t next_ret_hdl; /* the next user hdl to return */
- uint16_t last_completed_hdl; /* the last user hdl that has completed */
- uint16_t next_free_hdl; /* where the handle for next op will go */
- uint16_t hdls_disable; /* disable tracking completion handles */
+static __rte_always_inline uint16_t
+__ioat_burst_capacity(int dev_id)
+{
+ struct rte_ioat_rawdev *ioat =
+ (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+ unsigned short size = ioat->ring_size - 1;
+ unsigned short read = ioat->next_read;
+ unsigned short write = ioat->next_write;
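+ /*
+  * one ring slot is kept unused so a full ring can be told apart from
+  * an empty one; unsigned arithmetic handles index wrap-around
+  */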
+ unsigned short space = size - (write - read);
- struct rte_idxd_user_hdl *hdl_ring;
- struct rte_idxd_desc_batch *batch_ring;
-};
+ return space;
+}
-/*
- * Enqueue a copy operation onto the ioat device
- */
-static inline int
-rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
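+/*
+ * Write a single descriptor for either a copy or a fill operation; for
+ * fills, "src" carries the 64-bit pattern rather than a source address.
+ */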
+static __rte_always_inline int
+__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
{
struct rte_ioat_rawdev *ioat =
struct rte_ioat_generic_hw_desc *desc;
if (space == 0) {
- ioat->enqueue_failed++;
+ ioat->xstats.enqueue_failed++;
return 0;
}
desc = &ioat->desc_ring[write];
desc->size = length;
/* set descriptor write-back every 16th descriptor */
- desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
+ desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
+ (!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
desc->src_addr = src;
desc->dest_addr = dst;
(int64_t)src_hdl);
rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
- ioat->enqueued++;
+ ioat->xstats.enqueued++;
return 1;
}
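+
+/*
+ * Enqueue a fill operation onto the ioat device
+ * (the 8-byte pattern is written repeatedly across the destination)
+ */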
+static __rte_always_inline int
+__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
+ unsigned int length, uintptr_t dst_hdl)
+{
+ static const uintptr_t null_hdl;
+
+ return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
+ null_hdl, dst_hdl);
+}
+
+/*
+ * Enqueue a copy operation onto the ioat device
+ */
+static __rte_always_inline int
+__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+ unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
+{
+ return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
+ src_hdl, dst_hdl);
+}
+
/* add fence to last written descriptor */
-static inline int
-rte_ioat_fence(int dev_id)
+static __rte_always_inline int
+__ioat_fence(int dev_id)
{
struct rte_ioat_rawdev *ioat =
(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
/*
* Trigger hardware to begin performing enqueued operations
*/
-static inline void
-rte_ioat_perform_ops(int dev_id)
+static __rte_always_inline int
+__ioat_perform_ops(int dev_id)
{
struct rte_ioat_rawdev *ioat =
(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
.control.completion_update = 1;
rte_compiler_barrier();
*ioat->doorbell = ioat->next_write;
- ioat->started = ioat->enqueued;
+ ioat->xstats.started = ioat->xstats.enqueued;
+
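+ /* this data path cannot fail here; the int return matches the IDXD variant */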
+ return 0;
}
/**
* @internal
* Returns the index of the last completed operation.
*/
-static inline int
-rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
+static __rte_always_inline int
+__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
{
uint64_t status = ioat->status;
/*
* Returns details of operations that have been completed
*/
-static inline int
-rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
+static __rte_always_inline int
+__ioat_completed_ops(int dev_id, uint8_t max_copies,
uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
struct rte_ioat_rawdev *ioat =
int error;
int i = 0;
- end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
+ end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
count = (end_read - (read & mask)) & mask;
if (error) {
end:
ioat->next_read = read;
- ioat->completed += count;
+ ioat->xstats.completed += count;
return count;
}
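+
+/*
+ * Public API wrappers: each reads the leading type field of the device's
+ * private data and dispatches to the IDXD/DSA or IOAT/CBDMA data path.
+ */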
+static inline uint16_t
+rte_ioat_burst_capacity(int dev_id)
+{
+ enum rte_ioat_dev_type *type =
+ (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+ if (*type == RTE_IDXD_DEV)
+ return __idxd_burst_capacity(dev_id);
+ else
+ return __ioat_burst_capacity(dev_id);
+}
+
+static inline int
+rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
+ unsigned int len, uintptr_t dst_hdl)
+{
+ enum rte_ioat_dev_type *type =
+ (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+ if (*type == RTE_IDXD_DEV)
+ return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
+ else
+ return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
+}
+
+static inline int
+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+ unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
+{
+ enum rte_ioat_dev_type *type =
+ (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+ if (*type == RTE_IDXD_DEV)
+ return __idxd_enqueue_copy(dev_id, src, dst, length,
+ src_hdl, dst_hdl);
+ else
+ return __ioat_enqueue_copy(dev_id, src, dst, length,
+ src_hdl, dst_hdl);
+}
+
+static inline int
+rte_ioat_fence(int dev_id)
+{
+ enum rte_ioat_dev_type *type =
+ (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+ if (*type == RTE_IDXD_DEV)
+ return __idxd_fence(dev_id);
+ else
+ return __ioat_fence(dev_id);
+}
+
+static inline int
+rte_ioat_perform_ops(int dev_id)
+{
+ enum rte_ioat_dev_type *type =
+ (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+ if (*type == RTE_IDXD_DEV)
+ return __idxd_perform_ops(dev_id);
+ else
+ return __ioat_perform_ops(dev_id);
+}
+
+static inline int
+rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
+ uint32_t *status, uint8_t *num_unsuccessful,
+ uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+ enum rte_ioat_dev_type *type =
+ (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+ uint8_t tmp; /* used so called functions don't need to check for a NULL parameter */
+
+ if (num_unsuccessful == NULL)
+ num_unsuccessful = &tmp;
+
+ *num_unsuccessful = 0;
+ if (*type == RTE_IDXD_DEV)
+ return __idxd_completed_ops(dev_id, max_copies, status, num_unsuccessful,
+ src_hdls, dst_hdls);
+ else
+ return __ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
+}
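+
+/*
+ * Illustrative flow (a sketch only; assumes dev_id names a configured and
+ * started rawdev, and src/dst are IOVA addresses for the copy):
+ *
+ *   if (rte_ioat_enqueue_copy(dev_id, src, dst, length,
+ *           src_hdl, dst_hdl) != 1)
+ *       (ring full - retry later)
+ *   rte_ioat_perform_ops(dev_id);
+ *   ...later...
+ *   n = rte_ioat_completed_ops(dev_id, max_copies, NULL, NULL,
+ *           src_hdls, dst_hdls);
+ */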
+
static inline void
__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }
rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
- return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
+ return rte_ioat_completed_ops(dev_id, max_copies, NULL, NULL,
+ src_hdls, dst_hdls);
}
#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */