raw/ioat: move idxd functions to separate file
[dpdk.git] drivers/raw/ioat/rte_ioat_rawdev_fns.h
index fa2eb53..6049e3b 100644
@@ -4,11 +4,45 @@
 #ifndef _RTE_IOAT_RAWDEV_FNS_H_
 #define _RTE_IOAT_RAWDEV_FNS_H_
 
+/**
+ * @file
+ * This header file contains the implementation of the various ioat
+ * rawdev functions for IOAT/CBDMA hardware. The API specification and key
+ * public structures are defined in "rte_ioat_rawdev.h".
+ *
+ * This file should not be included directly, but instead applications should
+ * include "rte_ioat_rawdev.h", which then includes this file - and the IDXD/DSA
+ * equivalent header - in turn.
+ */
+
 #include <x86intrin.h>
 #include <rte_rawdev.h>
 #include <rte_memzone.h>
 #include <rte_prefetch.h>
 
+/**
+ * @internal
+ * Identify the data path to use.
+ * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
+ */
+enum rte_ioat_dev_type {
+       RTE_IOAT_DEV,
+       RTE_IDXD_DEV,
+};
+
+/**
+ * @internal
+ * Some statistics for tracking; if fields are added or changed, update the xstats fns
+ */
+struct rte_ioat_xstats {
+       uint64_t enqueue_failed;
+       uint64_t enqueued;
+       uint64_t started;
+       uint64_t completed;
+};
+
+#include "rte_idxd_rawdev_fns.h"
+
 /**
  * @internal
  * Structure representing a device descriptor
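For context, a minimal usage sketch of the public copy path that the @file comment above describes: applications include only "rte_ioat_rawdev.h" and call the rte_ioat_* wrappers, which this diff turns into dispatchers. It assumes a rawdev already configured as dev_id; copy_one() is an illustrative helper, not part of the patch:

#include <rte_ioat_rawdev.h>

/* Sketch: enqueue one copy and kick the device; returns 0 on success. */
static int
copy_one(int dev_id, phys_addr_t src, phys_addr_t dst,
		unsigned int len, uintptr_t src_hdl, uintptr_t dst_hdl)
{
	/* rte_ioat_enqueue_copy() returns 1 if queued, 0 if the ring is full */
	if (rte_ioat_enqueue_copy(dev_id, src, dst, len,
			src_hdl, dst_hdl) != 1)
		return -1;
	rte_ioat_perform_ops(dev_id);	/* write the doorbell */
	return 0;
}
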
@@ -39,27 +73,20 @@ struct rte_ioat_generic_hw_desc {
        uint64_t op_specific[4];
 };
 
-/**
- * @internal
- * Identify the data path to use.
- * Must be first field of rte_ioat_rawdev and rte_idxd_rawdev structs
- */
-enum rte_ioat_dev_type {
-       RTE_IOAT_DEV,
-       RTE_IDXD_DEV,
-};
-
 /**
  * @internal
  * Structure representing an IOAT device instance
  */
 struct rte_ioat_rawdev {
+       /* common fields at the top - match those in rte_idxd_rawdev */
        enum rte_ioat_dev_type type;
+       struct rte_ioat_xstats xstats;
+
        struct rte_rawdev *rawdev;
        const struct rte_memzone *mz;
        const struct rte_memzone *desc_mz;
 
-       volatile uint16_t *doorbell;
+       volatile uint16_t *doorbell __rte_cache_aligned;
        phys_addr_t status_addr;
        phys_addr_t ring_addr;
 
@@ -72,12 +99,6 @@ struct rte_ioat_rawdev {
        unsigned short next_read;
        unsigned short next_write;
 
-       /* some statistics for tracking, if added/changed update xstats fns*/
-       uint64_t enqueue_failed __rte_cache_aligned;
-       uint64_t enqueued;
-       uint64_t started;
-       uint64_t completed;
-
        /* to report completions, the device will write status back here */
        volatile uint64_t status __rte_cache_aligned;
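For context: next_read and next_write above are free-running unsigned short counters, and descriptor slots are addressed through a power-of-two mask, so the space computation in __ioat_burst_capacity() further down stays correct across wrap-around. A small worked sketch with illustrative values:

/* Sketch of the ring accounting used by __ioat_burst_capacity().
 * ring_size is a power of two; one slot is kept unused so a full
 * ring is distinguishable from an empty one. */
unsigned short ring_size = 1024;
unsigned short size = ring_size - 1;		/* 1023 usable slots */
unsigned short read = 65530;			/* counters wrap freely... */
unsigned short write = 10;			/* ...so 16 ops are in flight */
unsigned short space = size - (write - read);	/* 1023 - 16 = 1007 */
unsigned short slot = write & (ring_size - 1);	/* desc_ring index: 10 */
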
 
@@ -90,33 +111,21 @@ struct rte_ioat_rawdev {
 #define RTE_IOAT_CHANSTS_HALTED                        0x3
 #define RTE_IOAT_CHANSTS_ARMED                 0x4
 
-/**
- * @internal
- * Structure representing an IDXD device instance
- */
-struct rte_idxd_rawdev {
-       enum rte_ioat_dev_type type;
-       void *portal; /* address to write the batch descriptor */
-
-       /* counters to track the batches and the individual op handles */
-       uint16_t batch_ring_sz;  /* size of batch ring */
-       uint16_t hdl_ring_sz;    /* size of the user hdl ring */
-
-       uint16_t next_batch;     /* where we write descriptor ops */
-       uint16_t next_completed; /* batch where we read completions */
-       uint16_t next_ret_hdl;   /* the next user hdl to return */
-       uint16_t last_completed_hdl; /* the last user hdl that has completed */
-       uint16_t next_free_hdl;  /* where the handle for next op will go */
+static __rte_always_inline uint16_t
+__ioat_burst_capacity(int dev_id)
+{
+       struct rte_ioat_rawdev *ioat =
+                       (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+       unsigned short size = ioat->ring_size - 1;
+       unsigned short read = ioat->next_read;
+       unsigned short write = ioat->next_write;
+       unsigned short space = size - (write - read);
 
-       struct rte_idxd_user_hdl *hdl_ring;
-       struct rte_idxd_desc_batch *batch_ring;
-};
+       return space;
+}
 
-/*
- * Enqueue a copy operation onto the ioat device
- */
-static inline int
-rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+static __rte_always_inline int
+__ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
                unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
 {
        struct rte_ioat_rawdev *ioat =
@@ -128,7 +137,7 @@ rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
        struct rte_ioat_generic_hw_desc *desc;
 
        if (space == 0) {
-               ioat->enqueue_failed++;
+               ioat->xstats.enqueue_failed++;
                return 0;
        }
 
@@ -138,7 +147,8 @@ rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
        desc = &ioat->desc_ring[write];
        desc->size = length;
        /* set descriptor write-back every 16th descriptor */
-       desc->u.control_raw = (uint32_t)((!(write & 0xF)) << 3);
+       desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
+                       (!(write & 0xF) << IOAT_COMP_UPDATE_SHIFT));
        desc->src_addr = src;
        desc->dest_addr = dst;
 
@@ -147,13 +157,34 @@ rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
                                        (int64_t)src_hdl);
        rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
 
-       ioat->enqueued++;
+       ioat->xstats.enqueued++;
        return 1;
 }
 
+static __rte_always_inline int
+__ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
+               unsigned int length, uintptr_t dst_hdl)
+{
+       static const uintptr_t null_hdl;
+
+       return __ioat_write_desc(dev_id, ioat_op_fill, pattern, dst, length,
+                       null_hdl, dst_hdl);
+}
+
+/*
+ * Enqueue a copy operation onto the ioat device
+ */
+static __rte_always_inline int
+__ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+               unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
+{
+       return __ioat_write_desc(dev_id, ioat_op_copy, src, dst, length,
+                       src_hdl, dst_hdl);
+}
+
 /* add fence to last written descriptor */
-static inline int
-rte_ioat_fence(int dev_id)
+static __rte_always_inline int
+__ioat_fence(int dev_id)
 {
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
@@ -171,8 +202,8 @@ rte_ioat_fence(int dev_id)
 /*
  * Trigger hardware to begin performing enqueued operations
  */
-static inline void
-rte_ioat_perform_ops(int dev_id)
+static __rte_always_inline int
+__ioat_perform_ops(int dev_id)
 {
        struct rte_ioat_rawdev *ioat =
                        (struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
@@ -180,15 +211,17 @@ rte_ioat_perform_ops(int dev_id)
                        .control.completion_update = 1;
        rte_compiler_barrier();
        *ioat->doorbell = ioat->next_write;
-       ioat->started = ioat->enqueued;
+       ioat->xstats.started = ioat->xstats.enqueued;
+
+       return 0;
 }
 
 /**
  * @internal
  * Returns the index of the last completed operation.
  */
-static inline int
-rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
+static __rte_always_inline int
+__ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
 {
        uint64_t status = ioat->status;
 
@@ -202,8 +235,8 @@ rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
 /*
  * Returns details of operations that have been completed
  */
-static inline int
-rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
+static __rte_always_inline int
+__ioat_completed_ops(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
 {
        struct rte_ioat_rawdev *ioat =
@@ -214,7 +247,7 @@ rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
        int error;
        int i = 0;
 
-       end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
+       end_read = (__ioat_get_last_completed(ioat, &error) + 1) & mask;
        count = (end_read - (read & mask)) & mask;
 
        if (error) {
@@ -247,10 +280,89 @@ rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
 
 end:
        ioat->next_read = read;
-       ioat->completed += count;
+       ioat->xstats.completed += count;
        return count;
 }
 
+static inline uint16_t
+rte_ioat_burst_capacity(int dev_id)
+{
+       enum rte_ioat_dev_type *type =
+               (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+       if (*type == RTE_IDXD_DEV)
+               return __idxd_burst_capacity(dev_id);
+       else
+               return __ioat_burst_capacity(dev_id);
+}
+
+static inline int
+rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
+               unsigned int len, uintptr_t dst_hdl)
+{
+       enum rte_ioat_dev_type *type =
+                       (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+       if (*type == RTE_IDXD_DEV)
+               return __idxd_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
+       else
+               return __ioat_enqueue_fill(dev_id, pattern, dst, len, dst_hdl);
+}
+
+static inline int
+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+               unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
+{
+       enum rte_ioat_dev_type *type =
+                       (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+       if (*type == RTE_IDXD_DEV)
+               return __idxd_enqueue_copy(dev_id, src, dst, length,
+                               src_hdl, dst_hdl);
+       else
+               return __ioat_enqueue_copy(dev_id, src, dst, length,
+                               src_hdl, dst_hdl);
+}
+
+static inline int
+rte_ioat_fence(int dev_id)
+{
+       enum rte_ioat_dev_type *type =
+                       (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+       if (*type == RTE_IDXD_DEV)
+               return __idxd_fence(dev_id);
+       else
+               return __ioat_fence(dev_id);
+}
+
+static inline int
+rte_ioat_perform_ops(int dev_id)
+{
+       enum rte_ioat_dev_type *type =
+                       (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+       if (*type == RTE_IDXD_DEV)
+               return __idxd_perform_ops(dev_id);
+       else
+               return __ioat_perform_ops(dev_id);
+}
+
+static inline int
+rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
+               uint32_t *status, uint8_t *num_unsuccessful,
+               uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+       enum rte_ioat_dev_type *type =
+                       (enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+       uint8_t tmp; /* used so functions don't need to check for null parameter */
+
+       if (num_unsuccessful == NULL)
+               num_unsuccessful = &tmp;
+
+       *num_unsuccessful = 0;
+       if (*type == RTE_IDXD_DEV)
+               return __idxd_completed_ops(dev_id, max_copies, status, num_unsuccessful,
+                               src_hdls, dst_hdls);
+       else
+               return __ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
+}
+
 static inline void
 __rte_deprecated_msg("use rte_ioat_perform_ops() instead")
 rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }
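
The wrappers above all dispatch on the device type stored as the first field of dev_private, which is why both rte_ioat_rawdev and rte_idxd_rawdev must lead with enum rte_ioat_dev_type. A hedged polling sketch for the widened rte_ioat_completed_ops() signature; MAX_BURST and handle_done_op() are illustrative, not from the patch:

/* Sketch: drain up to MAX_BURST completions from either device type. */
#define MAX_BURST 32	/* illustrative burst size */

static void
drain_completions(int dev_id)
{
	uintptr_t src_hdls[MAX_BURST], dst_hdls[MAX_BURST];
	uint32_t status[MAX_BURST];	/* per-op codes (filled on idxd/DSA) */
	uint8_t nb_err = 0;
	int i;

	int done = rte_ioat_completed_ops(dev_id, MAX_BURST,
			status, &nb_err, src_hdls, dst_hdls);
	if (done < 0)
		return;	/* device reported an error */
	/* 'done' handle pairs returned; nb_err of them were unsuccessful */
	for (i = 0; i < done; i++)
		handle_done_op(src_hdls[i], dst_hdls[i]);	/* app callback */
}
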
@@ -260,7 +372,8 @@ __rte_deprecated_msg("use rte_ioat_completed_ops() instead")
 rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
                uintptr_t *src_hdls, uintptr_t *dst_hdls)
 {
-       return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
+       return rte_ioat_completed_ops(dev_id, max_copies, NULL, NULL,
+                       src_hdls, dst_hdls);
 }
 
 #endif /* _RTE_IOAT_RAWDEV_FNS_H_ */