eal: introduce macro for always inline
[dpdk.git] drivers/crypto/scheduler/scheduler_pmd_private.h
index 9a1409c..05a5916 100644
 #ifndef _SCHEDULER_PMD_PRIVATE_H
 #define _SCHEDULER_PMD_PRIVATE_H
 
-#include <rte_hash.h>
-#include <rte_reorder.h>
 #include "rte_cryptodev_scheduler.h"
 
-/**< Maximum number of bonded devices per devices */
-#ifndef MAX_SLAVES_NUM
-#define MAX_SLAVES_NUM                         (8)
-#endif
-
 #define PER_SLAVE_BUFF_SIZE                    (256)
 
 #define CS_LOG_ERR(fmt, args...)                                       \
@@ -82,7 +75,7 @@ struct scheduler_ctx {
 
        uint32_t max_nb_queue_pairs;
 
-       struct scheduler_slave slaves[MAX_SLAVES_NUM];
+       struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
        uint32_t nb_slaves;
 
        enum rte_cryptodev_scheduler_mode mode;
@@ -93,22 +86,70 @@ struct scheduler_ctx {
 
        char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
        char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+
+       char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+       int nb_init_slaves;
 } __rte_cache_aligned;
 
 struct scheduler_qp_ctx {
        void *private_qp_ctx;
 
-       rte_cryptodev_scheduler_burst_enqueue_t schedule_enqueue;
-       rte_cryptodev_scheduler_burst_dequeue_t schedule_dequeue;
+       uint32_t max_nb_objs;
 
-       struct rte_reorder_buffer *reorder_buf;
+       struct rte_ring *order_ring;
        uint32_t seqn;
 } __rte_cache_aligned;
 
 struct scheduler_session {
-       struct rte_cryptodev_sym_session *sessions[MAX_SLAVES_NUM];
+       struct rte_cryptodev_sym_session *sessions[
+                       RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 };
 
+static __rte_always_inline uint16_t
+get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
+{
+       uint32_t count = rte_ring_free_count(order_ring);
+
+       return count > nb_ops ? nb_ops : count;
+}
+
+static __rte_always_inline void
+scheduler_order_insert(struct rte_ring *order_ring,
+               struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+       rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
+}
+
+#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do {            \
+       struct rte_crypto_op **ring = (void *)&order_ring[1];     \
+       op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
+} while (0)
+
+static __rte_always_inline uint16_t
+scheduler_order_drain(struct rte_ring *order_ring,
+               struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+       struct rte_crypto_op *op;
+       uint32_t nb_objs = rte_ring_count(order_ring);
+       uint32_t nb_ops_to_deq = 0;
+       uint32_t nb_ops_deqd = 0;
+
+       if (nb_objs > nb_ops)
+               nb_objs = nb_ops;
+
+       while (nb_ops_to_deq < nb_objs) {
+               SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+               if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+                       break;
+               nb_ops_to_deq++;
+       }
+
+       if (nb_ops_to_deq)
+               nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+                               (void **)ops, nb_ops_to_deq, NULL);
+
+       return nb_ops_deqd;
+}
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
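
The two enqueue-side helpers added above are meant to be called back to back from a scheduling mode's ordering-aware enqueue path: get_max_enqueue_order_count() caps the burst so the order ring cannot overflow, and scheduler_order_insert() records the ops that were actually accepted, preserving their submission order for later draining. The sketch below only illustrates that call pattern; schedule_enqueue() and the name schedule_enqueue_ordering() are hypothetical stand-ins for a mode's own routines and are not part of this patch.

	#include "scheduler_pmd_private.h"

	/* hypothetical per-mode burst function that distributes ops to slaves */
	static uint16_t
	schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops);

	static uint16_t
	schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
	{
		struct rte_ring *order_ring =
				((struct scheduler_qp_ctx *)qp)->order_ring;
		/* never accept more ops than the order ring has room to track */
		uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
				nb_ops);
		uint16_t nb_ops_enqd = schedule_enqueue(qp, ops, nb_ops_to_enq);

		/* remember submission order so dequeue can restore it */
		scheduler_order_insert(order_ring, ops, nb_ops_enqd);

		return nb_ops_enqd;
	}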
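
On the dequeue side, scheduler_order_drain() walks the order ring from its consumer head and releases only the leading run of ops whose status is no longer RTE_CRYPTO_OP_STATUS_NOT_PROCESSED, so completions come back strictly in submission order even when slaves finish out of order. Continuing the sketch above, again with a hypothetical schedule_dequeue() standing in for the mode's own burst routine:

	static uint16_t
	schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
	{
		struct rte_ring *order_ring =
				((struct scheduler_qp_ctx *)qp)->order_ring;

		/* collect whatever the slaves have completed so far */
		schedule_dequeue(qp, ops, nb_ops);

		/* hand back only the in-order prefix of completed ops */
		return scheduler_order_drain(order_ring, ops, nb_ops);
	}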