/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
#include <rte_crypto.h>
#include <cryptodev_pmd.h>
#include <rte_cycles.h>
#include <rte_errno.h>

#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"
#define MAX_SGBUF_CNT 16
#define MAX_SGCOMP_CNT 5
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
/* Request timeout in seconds */
#define CMD_TIMEOUT 2
/* General-purpose header carried in the instruction front data */
struct gphdr {
	uint16_t param0;
	uint16_t param1;
	uint16_t param2;
	uint16_t param3;
};

union pkt_instr_hdr {
	uint64_t value;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_48_63 : 16;
		uint64_t g : 1;
		uint64_t gsz : 7;
		uint64_t ihi : 1;
		uint64_t ssz : 7;
		uint64_t raz_30_31 : 2;
		uint64_t fsz : 6;
		uint64_t raz_16_23 : 8;
		uint64_t tlen : 16;
#else
		uint64_t tlen : 16;
		uint64_t raz_16_23 : 8;
		uint64_t fsz : 6;
		uint64_t raz_30_31 : 2;
		uint64_t ssz : 7;
		uint64_t ihi : 1;
		uint64_t gsz : 7;
		uint64_t g : 1;
		uint64_t raz_48_63 : 16;
#endif
	} s;
};
union pkt_hdr {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t opcode : 8;
		uint64_t arg : 8;
		uint64_t ctxc : 2;
		uint64_t unca : 1;
		uint64_t raz_44 : 1;
		uint64_t info : 3;
		uint64_t destport : 9;
		uint64_t unc : 8;
		uint64_t raz_19_23 : 5;
		uint64_t grp : 3;
		uint64_t raz_15 : 1;
		uint64_t ctxl : 7;
		uint64_t uddl : 8;
#else
		uint64_t uddl : 8;
		uint64_t ctxl : 7;
		uint64_t raz_15 : 1;
		uint64_t grp : 3;
		uint64_t raz_19_23 : 5;
		uint64_t unc : 8;
		uint64_t destport : 9;
		uint64_t info : 3;
		uint64_t raz_44 : 1;
		uint64_t unca : 1;
		uint64_t ctxc : 2;
		uint64_t arg : 8;
		uint64_t opcode : 8;
#endif
		uint64_t ctxp;
	} s;
};
union slc_store_info {
	uint64_t value[2];
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint64_t raz_39_63 : 25;
		uint64_t ssz : 7;
		uint64_t raz_0_31 : 32;
#else
		uint64_t raz_0_31 : 32;
		uint64_t ssz : 7;
		uint64_t raz_39_63 : 25;
#endif
		uint64_t rptr;
	} s;
};
struct nps_pkt_instr {
	uint64_t dptr0;
	union pkt_instr_hdr ih;
	union pkt_hdr irh;
	union slc_store_info slc;
	uint64_t fdata[2];
};

struct resp_hdr {
	uint64_t orh;
	uint64_t completion;
};
struct nitrox_sglist {
	uint16_t len;
	uint16_t raz0;
	uint32_t raz1;
	rte_iova_t iova;
	void *virt;
};

struct nitrox_sgcomp {
	uint16_t len[4];
	uint64_t iova[4];
};

struct nitrox_sgtable {
	uint8_t map_bufs_cnt;
	uint8_t nr_sgcomp;
	uint16_t total_bytes;

	struct nitrox_sglist sglist[MAX_SGBUF_CNT];
	struct nitrox_sgcomp sgcomp[MAX_SGCOMP_CNT];
};
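/*
 * Per-request state. Allocated from the request mempool so the whole
 * object is IOVA-addressable; the device writes resp.orh and
 * resp.completion in place.
 */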
struct iv {
	uint8_t *virt;
	rte_iova_t iova;
	uint16_t len;
};

struct nitrox_softreq {
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_op *op;
	struct gphdr gph;
	struct nps_pkt_instr instr;
	struct resp_hdr resp;
	struct nitrox_sgtable in;
	struct nitrox_sgtable out;
	struct iv iv;
	uint64_t timeout;
	rte_iova_t dptr;
	rte_iova_t rptr;
	rte_iova_t iova;
};
static void
softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
{
	memset(sr, 0, sizeof(*sr));
	sr->iova = iova;
}
/*
 * 64-Byte Instruction Format
 *
 *  ----------------------
 *  |       DPTR0        | 8 bytes
 *  ----------------------
 *  |  PKT_IN_INSTR_HDR  | 8 bytes
 *  ----------------------
 *  |     PKT_IN_HDR     | 16 bytes
 *  ----------------------
 *  |      SLC_INFO      | 16 bytes
 *  ----------------------
 *  |     Front data     | 16 bytes
 *  ----------------------
 */
static void
create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	rte_iova_t ctx_handle;

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = rte_cpu_to_be_64(sr->dptr);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = rte_cpu_to_be_64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = RTE_ALIGN_MUL_CEIL(sizeof(ctx->fctx), 8) / 8;
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	/* Invalid context cache */
	sr->instr.irh.s.ctxc = 0x3;
	sr->instr.irh.s.arg = ctx->req_op;
	sr->instr.irh.s.opcode = ctx->opcode;
	sr->instr.irh.value[0] = rte_cpu_to_be_64(sr->instr.irh.value[0]);

	/* word 3 */
	ctx_handle = ctx->iova + offsetof(struct nitrox_crypto_ctx, fctx);
	sr->instr.irh.s.ctxp = rte_cpu_to_be_64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = rte_cpu_to_be_64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = rte_cpu_to_be_64(sr->rptr);
	/*
	 * No conversion for front data; it goes into the payload as-is.
	 * Put the GP header in the front data.
	 */
	memcpy(&sr->instr.fdata[0], &sr->gph, sizeof(sr->instr.fdata[0]));
	sr->instr.fdata[1] = 0;
}
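/*
 * Point sr->iv at the IV in the crypto op, skipping any leading salt
 * bytes that are kept in the session context instead.
 */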
static void
softreq_copy_iv(struct nitrox_softreq *sr, uint8_t salt_size)
{
	uint16_t offset = sr->ctx->iv.offset + salt_size;

	sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *, offset);
	sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, offset);
	sr->iv.len = sr->ctx->iv.length - salt_size;
}
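/* Append a single flat buffer to the scatter-gather table. */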
static void
fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
	    void *virt)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;

	if (unlikely(!len))
		return;

	sglist[cnt].len = len;
	sglist[cnt].iova = iova;
	sglist[cnt].virt = virt;
	sgtbl->total_bytes += len;
	cnt++;
	sgtbl->map_bufs_cnt = cnt;
}
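/*
 * Append the region [off, off + datalen) of a possibly chained mbuf to
 * the scatter-gather table, one sglist entry per mbuf segment touched.
 */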
static int
create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
			uint32_t off, int datalen)
{
	struct nitrox_sglist *sglist = sgtbl->sglist;
	uint8_t cnt = sgtbl->map_bufs_cnt;
	struct rte_mbuf *m;
	int mlen;

	if (unlikely(datalen <= 0))
		return 0;

	/* walk to the segment that contains the data offset */
	for (m = mbuf; m && off > rte_pktmbuf_data_len(m); m = m->next)
		off -= rte_pktmbuf_data_len(m);

	if (unlikely(!m))
		return -EIO;

	mlen = rte_pktmbuf_data_len(m) - off;
	if (datalen <= mlen)
		mlen = datalen;
	sglist[cnt].len = mlen;
	sglist[cnt].iova = rte_pktmbuf_iova_offset(m, off);
	sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
	sgtbl->total_bytes += mlen;
	cnt++;
	datalen -= mlen;
	for (m = m->next; m && datalen; m = m->next) {
		mlen = rte_pktmbuf_data_len(m) < datalen ?
			rte_pktmbuf_data_len(m) : datalen;
		sglist[cnt].len = mlen;
		sglist[cnt].iova = rte_pktmbuf_iova(m);
		sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
		sgtbl->total_bytes += mlen;
		cnt++;
		datalen -= mlen;
	}

	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sgtbl->map_bufs_cnt = cnt;
	return 0;
}
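/*
 * Pack the sglist into the hardware scatter-gather component format:
 * groups of four big-endian (length, address) pairs.
 */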
static void
create_sgcomp(struct nitrox_sgtable *sgtbl)
{
	int i, j, nr_sgcomp;
	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
	struct nitrox_sglist *sglist = sgtbl->sglist;

	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
	sgtbl->nr_sgcomp = nr_sgcomp;
	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
		for (j = 0; j < 4; j++, sglist++) {
			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
		}
	}
}
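/* Gather list for cipher-only requests: IV followed by the source data. */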
static int
create_cipher_inbuf(struct nitrox_softreq *sr)
{
	int err;
	struct rte_crypto_op *op = sr->op;

	fill_sglist(&sr->in, sr->iv.len, sr->iv.iova, sr->iv.virt);
	err = create_sglist_from_mbuf(&sr->in, op->sym->m_src,
				      op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	create_sgcomp(&sr->in);
	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
	return 0;
}
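/*
 * Scatter list for cipher-only requests: ORH, IV, destination data and
 * the completion word. ORH and completion start as PENDING_SIG and are
 * overwritten by the device on completion.
 */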
static int
create_cipher_outbuf(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err, cnt = 0;
	struct rte_mbuf *m_dst = op->sym->m_dst ? op->sym->m_dst :
		op->sym->m_src;

	sr->resp.orh = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.orh);
	sr->out.sglist[cnt].virt = &sr->resp.orh;
	cnt++;

	sr->out.map_bufs_cnt = cnt;
	fill_sglist(&sr->out, sr->iv.len, sr->iv.iova, sr->iv.virt);
	err = create_sglist_from_mbuf(&sr->out, m_dst,
				      op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	cnt = sr->out.map_bufs_cnt;
	sr->resp.completion = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.completion);
	sr->out.sglist[cnt].virt = &sr->resp.completion;
	cnt++;

	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sr->out.map_bufs_cnt = cnt;

	create_sgcomp(&sr->out);
	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
	return 0;
}
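/* General-purpose header for cipher-only requests. */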
static void
create_cipher_gph(uint32_t cryptlen, uint16_t ivlen, struct gphdr *gph)
{
	gph->param0 = rte_cpu_to_be_16(cryptlen);
	gph->param1 = 0;
	gph->param2 = rte_cpu_to_be_16(ivlen);
	gph->param3 = 0;
}
static int
process_cipher_data(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err;

	softreq_copy_iv(sr, 0);
	err = create_cipher_inbuf(sr);
	if (unlikely(err))
		return err;

	err = create_cipher_outbuf(sr);
	if (unlikely(err))
		return err;

	create_cipher_gph(op->sym->cipher.data.length, sr->iv.len, &sr->gph);
	return 0;
}
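/*
 * Locate the digest for cipher+auth chains: use the digest pointer from
 * the op when set, otherwise assume the digest follows the
 * authenticated data in the destination mbuf.
 */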
static int
extract_cipher_auth_digest(struct nitrox_softreq *sr,
			   struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
		op->sym->m_src;

	if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
	    unlikely(!op->sym->auth.digest.data))
		return -EINVAL;

	digest->len = sr->ctx->digest_length;
	if (op->sym->auth.digest.data) {
		digest->iova = op->sym->auth.digest.phys_addr;
		digest->virt = op->sym->auth.digest.data;
		return 0;
	}

	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
		     op->sym->auth.data.length + digest->len))
		return -EINVAL;

	digest->iova = rte_pktmbuf_iova_offset(mdst,
					       op->sym->auth.data.offset +
					       op->sym->auth.data.length);
	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
					       op->sym->auth.data.offset +
					       op->sym->auth.data.length);
	return 0;
}
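/*
 * Gather list for cipher+auth chains: IV, the auth-only bytes that
 * precede the cipher region, then the cipher region itself.
 */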
static int
create_cipher_auth_sglist(struct nitrox_softreq *sr,
			  struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
{
	struct rte_crypto_op *op = sr->op;
	int auth_only_len;
	int err;

	fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
	auth_only_len = op->sym->auth.data.length - op->sym->cipher.data.length;
	if (unlikely(auth_only_len < 0))
		return -EINVAL;

	if (unlikely(
		op->sym->cipher.data.offset + op->sym->cipher.data.length !=
		op->sym->auth.data.offset + op->sym->auth.data.length)) {
		NITROX_LOG(ERR, "Auth only data after cipher data not supported\n");
		return -ENOTSUP;
	}

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
				      auth_only_len);
	if (unlikely(err))
		return err;

	err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
				      op->sym->cipher.data.length);
	if (unlikely(err))
		return err;

	return 0;
}
static int
create_combined_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
		       struct rte_mbuf *mbuf)
{
	struct rte_crypto_op *op = sr->op;

	fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
	fill_sglist(sgtbl, sr->ctx->aad_length, op->sym->aead.aad.phys_addr,
		    op->sym->aead.aad.data);
	return create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
				       op->sym->cipher.data.length);
}
static int
create_aead_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
		   struct rte_mbuf *mbuf)
{
	int err;

	switch (sr->ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
	case NITROX_CHAIN_AUTH_CIPHER:
		err = create_cipher_auth_sglist(sr, sgtbl, mbuf);
		break;
	case NITROX_CHAIN_COMBINED:
		err = create_combined_sglist(sr, sgtbl, mbuf);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
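/*
 * Input gather list for authenticated requests; on decrypt the received
 * digest is appended so the device can verify it.
 */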
static int
create_aead_inbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_aead_sglist(sr, &sr->in, sr->op->sym->m_src);
	if (unlikely(err))
		return err;

	if (ctx->req_op == NITROX_OP_DECRYPT)
		fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);

	create_sgcomp(&sr->in);
	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
	return 0;
}
static int
create_aead_oop_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	int err;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	err = create_aead_sglist(sr, &sr->out, sr->op->sym->m_dst);
	if (unlikely(err))
		return err;

	if (ctx->req_op == NITROX_OP_ENCRYPT)
		fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);

	return 0;
}
static void
create_aead_inplace_outbuf(struct nitrox_softreq *sr,
			   struct nitrox_sglist *digest)
{
	int i, cnt;
	struct nitrox_crypto_ctx *ctx = sr->ctx;

	cnt = sr->out.map_bufs_cnt;
	for (i = 0; i < sr->in.map_bufs_cnt; i++, cnt++) {
		sr->out.sglist[cnt].len = sr->in.sglist[i].len;
		sr->out.sglist[cnt].iova = sr->in.sglist[i].iova;
		sr->out.sglist[cnt].virt = sr->in.sglist[i].virt;
	}

	sr->out.map_bufs_cnt = cnt;
	if (ctx->req_op == NITROX_OP_ENCRYPT) {
		fill_sglist(&sr->out, digest->len, digest->iova,
			    digest->virt);
	} else if (ctx->req_op == NITROX_OP_DECRYPT) {
		/* drop the trailing digest entry copied from the input list */
		sr->out.map_bufs_cnt--;
	}
}
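/*
 * Output scatter list for authenticated requests: ORH, then either the
 * out-of-place buffers (m_dst set) or a copy of the input list for
 * in-place operation, then the completion word.
 */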
static int
create_aead_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	int cnt = 0;

	sr->resp.orh = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.orh);
	sr->out.sglist[cnt].virt = &sr->resp.orh;
	cnt++;

	sr->out.map_bufs_cnt = cnt;
	if (op->sym->m_dst) {
		int err;

		err = create_aead_oop_outbuf(sr, digest);
		if (unlikely(err))
			return err;
	} else {
		create_aead_inplace_outbuf(sr, digest);
	}

	cnt = sr->out.map_bufs_cnt;
	sr->resp.completion = PENDING_SIG;
	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
						       resp.completion);
	sr->out.sglist[cnt].virt = &sr->resp.completion;
	cnt++;

	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
	sr->out.map_bufs_cnt = cnt;

	create_sgcomp(&sr->out);
	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
	return 0;
}
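/*
 * General-purpose header for authenticated requests: cipher and auth
 * lengths, plus the IV and auth offsets packed into param3.
 */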
static void
create_aead_gph(uint32_t cryptlen, uint16_t ivlen, uint32_t authlen,
		struct gphdr *gph)
{
	int auth_only_len;
	union {
		uint16_t value;
		struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			uint16_t iv_offset : 8;
			uint16_t auth_offset : 8;
#else
			uint16_t auth_offset : 8;
			uint16_t iv_offset : 8;
#endif
		};
	} param3;

	gph->param0 = rte_cpu_to_be_16(cryptlen);
	gph->param1 = rte_cpu_to_be_16(authlen);

	auth_only_len = authlen - cryptlen;
	gph->param2 = rte_cpu_to_be_16(ivlen + auth_only_len);

	param3.iv_offset = 0;
	param3.auth_offset = ivlen;
	gph->param3 = rte_cpu_to_be_16(param3.value);
}
static int
process_cipher_auth_data(struct nitrox_softreq *sr)
{
	struct rte_crypto_op *op = sr->op;
	int err;
	struct nitrox_sglist digest;

	softreq_copy_iv(sr, 0);
	err = extract_cipher_auth_digest(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_inbuf(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_outbuf(sr, &digest);
	if (unlikely(err))
		return err;

	create_aead_gph(op->sym->cipher.data.length, sr->iv.len,
			op->sym->auth.data.length, &sr->gph);
	return 0;
}
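/*
 * Cache the leading AES-GCM salt bytes of the IV in the session and
 * only rewrite the flat context when the salt changes.
 */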
static int
softreq_copy_salt(struct nitrox_softreq *sr)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	uint8_t *addr;

	if (unlikely(ctx->iv.length < AES_GCM_SALT_SIZE)) {
		NITROX_LOG(ERR, "Invalid IV length %d\n", ctx->iv.length);
		return -EINVAL;
	}

	addr = rte_crypto_op_ctod_offset(sr->op, uint8_t *, ctx->iv.offset);
	if (!memcmp(ctx->salt, addr, AES_GCM_SALT_SIZE))
		return 0;

	memcpy(ctx->salt, addr, AES_GCM_SALT_SIZE);
	memcpy(ctx->fctx.crypto.iv, addr, AES_GCM_SALT_SIZE);
	return 0;
}
static int
extract_combined_digest(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
	struct rte_crypto_op *op = sr->op;
	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
		op->sym->m_src;

	digest->len = sr->ctx->digest_length;
	if (op->sym->aead.digest.data) {
		digest->iova = op->sym->aead.digest.phys_addr;
		digest->virt = op->sym->aead.digest.data;
		return 0;
	}

	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->aead.data.offset +
		     op->sym->aead.data.length + digest->len))
		return -EINVAL;

	digest->iova = rte_pktmbuf_iova_offset(mdst,
					       op->sym->aead.data.offset +
					       op->sym->aead.data.length);
	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
					       op->sym->aead.data.offset +
					       op->sym->aead.data.length);
	return 0;
}
static int
process_combined_data(struct nitrox_softreq *sr)
{
	int err;
	struct nitrox_sglist digest;
	struct rte_crypto_op *op = sr->op;

	err = softreq_copy_salt(sr);
	if (unlikely(err))
		return err;

	softreq_copy_iv(sr, AES_GCM_SALT_SIZE);
	err = extract_combined_digest(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_inbuf(sr, &digest);
	if (unlikely(err))
		return err;

	err = create_aead_outbuf(sr, &digest);
	if (unlikely(err))
		return err;

	create_aead_gph(op->sym->aead.data.length, sr->iv.len,
			op->sym->aead.data.length + sr->ctx->aad_length,
			&sr->gph);
	return 0;
}
static int
process_softreq(struct nitrox_softreq *sr)
{
	struct nitrox_crypto_ctx *ctx = sr->ctx;
	int err = 0;

	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_ONLY:
		err = process_cipher_data(sr);
		break;
	case NITROX_CHAIN_CIPHER_AUTH:
	case NITROX_CHAIN_AUTH_CIPHER:
		err = process_cipher_auth_data(sr);
		break;
	case NITROX_CHAIN_COMBINED:
		err = process_combined_data(sr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
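/*
 * Build the 64-byte SE instruction for a crypto op and arm the request
 * timeout.
 */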
int
nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
		      struct nitrox_crypto_ctx *ctx,
		      struct nitrox_softreq *sr)
{
	int err;

	softreq_init(sr, sr->iova);
	sr->ctx = ctx;
	sr->op = op;
	err = process_softreq(sr);
	if (unlikely(err))
		return err;

	create_se_instr(sr, qno);
	sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
	return 0;
}
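/*
 * Poll a request for completion. The device overwrites the PENDING_SIG
 * markers in the ORH and completion words; until then -EAGAIN is
 * returned.
 */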
int
nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
{
	uint64_t cc;
	uint64_t orh;
	int err;

	cc = *(volatile uint64_t *)(&sr->resp.completion);
	orh = *(volatile uint64_t *)(&sr->resp.orh);
	if (cc != PENDING_SIG)
		err = 0;
	else if ((orh != PENDING_SIG) && (orh & 0xff))
		err = orh & 0xff;
	else if (rte_get_timer_cycles() >= sr->timeout)
		err = 0xff;
	else
		return -EAGAIN;

	if (unlikely(err))
		NITROX_LOG(ERR, "Request err 0x%x, orh 0x%"PRIx64"\n", err,
			   sr->resp.orh);

	*op = sr->op;
	return err;
}
void *
nitrox_sym_instr_addr(struct nitrox_softreq *sr)
{
	return &sr->instr;
}
static void
req_pool_obj_init(__rte_unused struct rte_mempool *mp,
		  __rte_unused void *opaque, void *obj,
		  __rte_unused unsigned int obj_idx)
{
	softreq_init(obj, rte_mempool_virt2iova(obj));
}
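/*
 * Create the per-queue-pair softreq mempool; every object is
 * pre-initialized with its own IOVA.
 */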
struct rte_mempool *
nitrox_sym_req_pool_create(struct rte_cryptodev *cdev, uint32_t nobjs,
			   uint16_t qp_id, int socket_id)
{
	char softreq_pool_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(softreq_pool_name, RTE_RING_NAMESIZE, "%s_sr_%d",
		 cdev->data->name, qp_id);
	mp = rte_mempool_create(softreq_pool_name,
				RTE_ALIGN_MUL_CEIL(nobjs, 64),
				sizeof(struct nitrox_softreq),
				64, 0, NULL, NULL, req_pool_obj_init, NULL,
				socket_id, 0);
	if (unlikely(!mp))
		NITROX_LOG(ERR, "Failed to create req pool, qid %d, err %d\n",
			   qp_id, rte_errno);

	return mp;
}
void
nitrox_sym_req_pool_free(struct rte_mempool *mp)
{
	rte_mempool_free(mp);
}