-	k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
-	copy_to_trf(trf, satp, pg->m, k);
+	/* fall back to cryptodev for RX packets which the inline
+	 * processor was unable to process
+	 */
+	if (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) {
+		/* offload packets to cryptodev */
+		struct rte_ipsec_session *fallback;
+
+		fallback = ipsec_get_fallback_session(sa);
+		if (fallback->crypto.ses == NULL &&
+				fill_ipsec_session(fallback, ctx, sa) != 0)
+			k = 0;
+		else
+			k = ipsec_prepare_crypto_group(ctx, sa,
+				fallback, pg->m, pg->cnt);
+	} else {
+		/* finish processing of packets successfully
+		 * decrypted by an inline processor
+		 */
+		k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
+		copy_to_trf(trf, satp, pg->m, k);