+static MB_MGR *
+alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
+{
+	MB_MGR *mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL)
+		return NULL;
+
+	switch (vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u", vector_mode);
+		free_mb_mgr(mb_mgr);
+		return NULL;
+	}
+
+	return mb_mgr;
+}
+
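+/* Set the same status/error code for every operation in the vector */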
+static inline void
+aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
+{
+	uint32_t i;
+
+	for (i = 0; i != vec->num; ++i)
+		vec->status[i] = err;
+}
+
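+/* Check that an SGL is processable; returns 0 or a positive errno value */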
+static inline int
+check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
+{
+	/* no multi-seg support with current AESNI-MB PMD */
+	if (sgl->num != 1)
+		return ENOTSUP;
+	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
+		return EINVAL;
+	return 0;
+}
+
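+/* Submit a prepared job; skip the job parameter checks in non-debug builds */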
+static inline JOB_AES_HMAC *
+submit_sync_job(MB_MGR *mb_mgr)
+{
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+	return IMB_SUBMIT_JOB(mb_mgr);
+#else
+	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
+#endif
+}
+
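+/*
+ * Copy the computed digest of each successful op to the user-supplied buffer
+ * and return the number of ops copied.
+ */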
+static inline uint32_t
+generate_sync_dgst(struct rte_crypto_sym_vec *vec,
+	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+{
+	uint32_t i, k;
+
+	for (i = 0, k = 0; i != vec->num; i++) {
+		if (vec->status[i] == 0) {
+			memcpy(vec->digest[i].va, dgst[i], len);
+			k++;
+		}
+	}
+
+	return k;
+}
+
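+/*
+ * Compare the computed digest of each successful op with the expected one;
+ * mark mismatches with EBADMSG and return the number of matching ops.
+ */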
+static inline uint32_t
+verify_sync_dgst(struct rte_crypto_sym_vec *vec,
+	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+{
+	uint32_t i, k;
+
+	for (i = 0, k = 0; i != vec->num; i++) {
+		if (vec->status[i] == 0) {
+			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
+				vec->status[i] = EBADMSG;
+			else
+				k++;
+		}
+	}
+
+	return k;
+}
+
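+/*
+ * Synchronous (CPU) crypto processing of a burst of ops on the calling lcore.
+ * Per-op status is written to vec->status[]; returns the number of
+ * successfully processed ops.
+ */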
+uint32_t
+aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
+	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+	struct rte_crypto_sym_vec *vec)
+{
+	int32_t ret;
+	uint32_t i, j, k, len;
+	void *buf;
+	JOB_AES_HMAC *job;
+	MB_MGR *mb_mgr;
+	struct aesni_mb_private *priv;
+	struct aesni_mb_session *s;
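+	/* temporary per-op digest buffers (VLA sized by the burst) */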
+	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
+
+	s = get_sym_session_private_data(sess, dev->driver_id);
+	if (s == NULL) {
+		aesni_mb_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+
+	/* get per-thread MB MGR, create one if needed */
+	mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
+	if (mb_mgr == NULL) {
+		priv = dev->data->dev_private;
+		mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
+		if (mb_mgr == NULL) {
+			aesni_mb_fill_error_code(vec, ENOMEM);
+			return 0;
+		}
+		RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
+	}
+
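+	/* j counts submitted jobs, k counts completed jobs */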
+	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
+		ret = check_crypto_sgl(sofs, vec->sgl + i);
+		if (ret != 0) {
+			vec->status[i] = ret;
+			continue;
+		}
+
+		buf = vec->sgl[i].vec[0].base;
+		len = vec->sgl[i].vec[0].len;
+
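+		/* get a free job slot, flushing in-flight jobs if the manager is full */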
+		job = IMB_GET_NEXT_JOB(mb_mgr);
+		if (job == NULL) {
+			k += flush_mb_sync_mgr(mb_mgr);
+			job = IMB_GET_NEXT_JOB(mb_mgr);
+			RTE_ASSERT(job != NULL);
+		}
+
+		/* Submit job for processing */
+		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
+			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
+		job = submit_sync_job(mb_mgr);
+		j++;
+
+		/* handle completed jobs */
+		k += handle_completed_sync_jobs(job, mb_mgr);
+	}
+
+	/* flush remaining jobs */
+	while (k != j)
+		k += flush_mb_sync_mgr(mb_mgr);
+
+	/* finish processing for successful jobs: check/update digest */
+	if (k != 0) {
+		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+			k = verify_sync_dgst(vec,
+				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+				s->auth.req_digest_len);
+		else
+			k = generate_sync_dgst(vec,
+				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+				s->auth.req_digest_len);
+	}
+
+	return k;
+}
+