net/tap: set BPF syscall ID for RISC-V
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 0d36365..7ece021 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -30,16 +30,6 @@ struct cpt_qp_meta_info {
        int mlen;
 };
 
-enum sym_xform_type {
-       CNXK_CPT_CIPHER = 1,
-       CNXK_CPT_AUTH,
-       CNXK_CPT_AEAD,
-       CNXK_CPT_CIPHER_ENC_AUTH_GEN,
-       CNXK_CPT_AUTH_VRFY_CIPHER_DEC,
-       CNXK_CPT_AUTH_GEN_CIPHER_ENC,
-       CNXK_CPT_CIPHER_DEC_AUTH_VRFY
-};
-
 #define CPT_OP_FLAGS_METABUF          (1 << 1)
 #define CPT_OP_FLAGS_AUTH_VERIFY       (1 << 0)
 #define CPT_OP_FLAGS_IPSEC_DIR_INBOUND (1 << 2)
@@ -50,7 +40,9 @@ struct cpt_inflight_req {
        void *mdata;
        uint8_t op_flags;
        void *qp;
-} __rte_aligned(16);
+} __rte_aligned(ROC_ALIGN);
+
+PLT_STATIC_ASSERT(sizeof(struct cpt_inflight_req) == ROC_CACHE_LINE_SZ);
 
 struct pending_queue {
        /** Array of pending requests */
@@ -130,26 +122,8 @@ void cnxk_ae_session_clear(struct rte_cryptodev *dev,
                           struct rte_cryptodev_asym_session *sess);
 int cnxk_ae_session_cfg(struct rte_cryptodev *dev,
                        struct rte_crypto_asym_xform *xform,
-                       struct rte_cryptodev_asym_session *sess,
-                       struct rte_mempool *pool);
-
-static inline union rte_event_crypto_metadata *
-cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)
-{
-       union rte_event_crypto_metadata *ec_mdata;
-
-       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-               ec_mdata = rte_cryptodev_sym_session_get_user_data(
-                       op->sym->session);
-       else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
-                op->private_data_offset)
-               ec_mdata = (union rte_event_crypto_metadata
-                                   *)((uint8_t *)op + op->private_data_offset);
-       else
-               return NULL;
-
-       return ec_mdata;
-}
+                       struct rte_cryptodev_asym_session *sess);
+void cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp);
 
 static __rte_always_inline void
 pending_queue_advance(uint64_t *index, const uint64_t mask)
@@ -166,7 +140,11 @@ pending_queue_retreat(uint64_t *index, const uint64_t mask, uint64_t nb_entry)
 static __rte_always_inline uint64_t
 pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
 {
-       return (head - tail) & mask;
+       /*
+        * Mask is nb_desc - 1. Add nb_desc (mask + 1) to head before masking
+        * to account for the case where tail > head after a wrap around.
+        */
+       return ((head + mask + 1) - tail) & mask;
 }
 
 static __rte_always_inline uint64_t
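
For reference, the reworked pending_queue_infl_cnt() folds nb_desc (mask + 1) into the head index before subtracting the tail, so the intermediate value stays positive even after the head index has wrapped back past the tail. Below is a minimal standalone sketch of that arithmetic; the descriptor count and the head/tail values are made-up illustrations, not taken from the driver.

/*
 * Standalone sketch (not part of the patch): worked example of the
 * wrap-around-safe inflight count used by pending_queue_infl_cnt().
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t
infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)
{
	/* Same arithmetic as the patched helper: mask is nb_desc - 1. */
	return ((head + mask + 1) - tail) & mask;
}

int
main(void)
{
	const uint64_t mask = 8 - 1;	/* illustrative ring of 8 descriptors */

	/* Head has wrapped past the end of the ring while tail has not. */
	uint64_t head = 1, tail = 6;

	/* ((1 + 8) - 6) & 7 = 3 requests still inflight. */
	printf("inflight = %" PRIu64 "\n", infl_cnt(head, tail, mask));
	return 0;
}

Running this prints inflight = 3: with eight descriptors, a head of 1 and a tail of 6 mean the requests in slots 6, 7 and 0 are still outstanding.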