+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+
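+	/* o/p FLE: the digest buffer */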
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ fle->length = sess->digest_length;
+ fle++;
+
+ /* Setting input FLE */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ fle->length = data_len;
+
+ if (sess->iv.length) {
+ uint8_t *iv_ptr;
+
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
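+		/* SNOW f9 and ZUC EIA IVs are repacked into the layout
+		 * the SEC hw expects.
+		 */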
+ if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+ iv_ptr = conv_to_snow_f9_iv(iv_ptr);
+ sge->length = 12;
+ } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+ iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
+ sge->length = 8;
+ } else {
+ sge->length = sess->iv.length;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ fle->length = fle->length + sge->length;
+ sge++;
+ }
+
+ /* Setting data to authenticate */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
+ sge->length = data_len;
+
+ if (sess->dir == DIR_DEC) {
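+		/* Digest verification: append the received digest as the
+		 * last i/p SG entry so SEC can compare against it.
+		 */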
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sess->digest_length;
+ fle->length = fle->length + sess->digest_length;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(fle);
+ DPAA2_SET_FD_LEN(fd, fle->length);
+
+ return 0;
+}
+
+static int
+build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+ int data_len, data_offset;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ data_len = sym_op->cipher.data.length;
+ data_offset = sym_op->cipher.data.offset;
+
+ if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+ sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+ if ((data_len & 7) || (data_offset & 7)) {
+ DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
+ return -ENOTSUP;
+ }
+
+ data_len = data_len >> 3;
+ data_offset = data_offset >> 3;
+ }
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL,
+ FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
+ RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
+ return -ENOMEM;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
+	/* save op and session ctxt in fle[0]; the dequeue path recovers them from here */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
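+	/* Layout: fle[0] op/ctxt, fle[1] o/p FLE, fle[2] i/p FLE,
+	 * fle[3..] scatter-gather entries.
+	 */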
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
+ " data_off: 0x%x\n",
+ data_offset,
+ data_len,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+ op_fle->length = data_len;
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+
+ /* o/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
+ sge->length = mbuf->data_len - data_offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ /* i/p fle */
+ mbuf = sym_op->m_src;
+ sge++;
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ ip_fle->length = sess->iv.length + data_len;
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+ /* i/p IV */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sess->iv.length;
+
+ sge++;
+
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
+ sge->length = mbuf->data_len - data_offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+ /* sg fd */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+ return 0;
+}
+
+static int
+build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ int retval, data_len, data_offset;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+ struct rte_mbuf *dst;
+
+ data_len = sym_op->cipher.data.length;
+ data_offset = sym_op->cipher.data.offset;
+
+ if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+ sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+ if ((data_len & 7) || (data_offset & 7)) {
+ DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
+ return -ENOTSUP;
+ }
+
+ data_len = data_len >> 3;
+ data_offset = data_offset >> 3;
+ }
+
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
+ return -ENOMEM;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored,
+	 * so on retrieval we go back one FLE from the FD ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ fle = fle + 1;
+ sge = fle + 2;
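+	/* fle now points to the o/p FLE; fle + 1 is the i/p FLE and
+	 * sge onwards holds the i/p SG entries (IV followed by data).
+	 */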
+
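+	/* A valid bpid lets hw release the buffers back to the pool;
+	 * otherwise mark FD/FLEs with IVP (no hw buffer pool).
+	 */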
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+
+ flc = &priv->flc_desc[0].flc;
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
+ " data_off: 0x%x\n",
+ data_offset,
+ data_len,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
+ DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
+
+ fle->length = data_len + sess->iv.length;
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ fle++;
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ fle->length = data_len + sess->iv.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
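+	/* i/p SG: IV first, then the cipher data */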
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
+
+ sge->length = data_len;
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(fle);
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ return 0;
+}
+
+static inline int
+build_sec_fd(struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ int ret = -1;
+ dpaa2_sec_session *sess;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ sess = (dpaa2_sec_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+ sess = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+#endif
+ else
+ return -ENOTSUP;
+
+ if (!sess)
+ return -EINVAL;
+
+	/* Use the SG path if any of the buffers is segmented */
+ if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
+ ((op->sym->m_dst != NULL) &&
+ !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_sg_fd(sess, op, fd, bpid);
+ break;
+#ifdef RTE_LIBRTE_SECURITY
+ case DPAA2_SEC_IPSEC:
+ case DPAA2_SEC_PDCP:
+ ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
+ break;
+#endif
+		case DPAA2_SEC_HASH_CIPHER:
+		default:
+			DPAA2_SEC_ERR("error: Unsupported session");
+			ret = -ENOTSUP;
+		}
+ } else {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_fd(sess, op, fd, bpid);
+ break;
+#ifdef RTE_LIBRTE_SECURITY
+ case DPAA2_SEC_IPSEC:
+ ret = build_proto_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_PDCP:
+ ret = build_proto_compound_fd(sess, op, fd, bpid);
+ break;
+#endif
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ DPAA2_SEC_ERR("error: Unsupported session");
+ ret = -ENOTSUP;
+ }
+ }
+ return ret;
+}
+
+static uint16_t
+dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+	/* Transmit the frames to the given device and VQ */
+ uint32_t loop;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send, retry_count;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+ uint32_t flags[MAX_TX_RING_SLOTS] = {0};
+	/* TODO: need to support multiple buffer pools */
+ uint16_t bpid;
+ struct rte_mempool *mb_pool;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ DPAA2_SEC_ERR("sessionless crypto op not supported");
+ return 0;
+ }
+	/* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_SEC_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (nb_ops) {
+ frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_ops;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
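+			/* mbuf from an atomic (order-preserving) Rx queue:
+			 * piggy-back the DQRR consumption ack (DCA) on this
+			 * enqueue.
+			 */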
+ if ((*ops)->sym->m_src->seqn) {
+ uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
+
+ flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+ }
+
+			/* Clear the unused FD fields before sending */
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+ mb_pool = (*ops)->sym->m_src->pool;
+ bpid = mempool_to_bpid(mb_pool);
+ ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+ if (ret) {
+ DPAA2_SEC_ERR("error: Improper packet contents"
+ " for crypto operation");
+ goto skip_tx;
+ }
+ ops++;
+ }
+
+ loop = 0;
+ retry_count = 0;
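+		/* hw may accept fewer frames than requested; retry the
+		 * remainder up to DPAA2_MAX_TX_RETRY_COUNT times.
+		 */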
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[loop],
+ &flags[loop],
+ frames_to_send - loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+ num_tx += loop;
+ nb_ops -= loop;
+ goto skip_tx;
+ }
+ } else {
+ loop += ret;
+ retry_count = 0;
+ }
+ }
+
+ num_tx += loop;
+ nb_ops -= loop;
+ }
+skip_tx:
+ dpaa2_qp->tx_vq.tx_pkts += num_tx;
+ dpaa2_qp->tx_vq.err_pkts += nb_ops;
+ return num_tx;
+}
+
+#ifdef RTE_LIBRTE_SECURITY
+static inline struct rte_crypto_op *
+sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct rte_crypto_op *op;
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ int16_t diff = 0;
+ dpaa2_sec_session *sess_priv __rte_unused;
+
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
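+	/* Adjust the mbuf to the output length reported in the FD */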
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
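+	/* The op pointer was stashed in buf_iova on the enqueue side;
+	 * restore the original iova that was parked in digest.phys_addr.
+	 */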
+ op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
+ mbuf->buf_iova = op->sym->aead.digest.phys_addr;
+ op->sym->aead.digest.phys_addr = 0L;
+
+ sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ if (sess_priv->dir == DIR_ENC)
+ mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
+ else
+ mbuf->data_off += SEC_FLC_DHR_INBOUND;
+
+ return op;
+}
+#endif
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct qbman_fle *fle;
+ struct rte_crypto_op *op;
+ struct ctxt_priv *priv;
+ struct rte_mbuf *dst, *src;