+static inline uint32_t
+inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+ uint32_t *pofs, uint32_t plen, void *iv)
+{
+ struct aead_gcm_iv *gcm;
+ struct aesctr_cnt_blk *ctr;
+ uint64_t *ivp;
+ uint32_t clen;
+
+ ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+ *pofs + sizeof(struct rte_esp_hdr));
+ clen = 0;
+
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ gcm = (struct aead_gcm_iv *)iv;
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_AES_CBC:
+ case ALGO_TYPE_3DES_CBC:
+ copy_iv(iv, ivp, sa->iv_len);
+ break;
+ case ALGO_TYPE_AES_CTR:
+ ctr = (struct aesctr_cnt_blk *)iv;
+ aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+ break;
+ }
+
+ *pofs += sa->ctp.auth.offset;
+ clen = plen - sa->ctp.auth.length;
+ return clen;
+}
+
/*
 * Helper function for prepare() to deal with the situation when the
 * ICV is spread across two segments. Tries to move the ICV completely
 * into the last segment.
 */
+static struct rte_mbuf *
+move_icv(struct rte_mbuf *ml, uint32_t ofs)
+{
+ uint32_t n;
+ struct rte_mbuf *ms;
+ const void *prev;
+ void *new;
+
+ ms = ml->next;
+ n = ml->data_len - ofs;
+
+ prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs);
+ new = rte_pktmbuf_prepend(ms, n);
+ if (new == NULL)
+ return NULL;
+
+ /* move n ICV bytes from ml into ms */
+ rte_memcpy(new, prev, n);
+ ml->data_len -= n;
+
+ return ms;
+}
+