/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <cryptodev_pmd.h>

#include "virtqueue.h"
#include "virtio_cryptodev.h"
#include "virtio_crypto_algs.h"
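
/*
 * Recycle a descriptor chain once the device has finished with it: credit
 * the slots back to vq_free_cnt and append the chain to the tail of the
 * free list. An indirect chain occupies a single slot, so only direct
 * chains are walked to find their last link.
 */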
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then head would be VQ_RING_DESC_CHAIN_END.
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}
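
/*
 * Harvest up to num completed ops from the used ring, translating the
 * status byte written back by the device into the matching rte_crypto_op
 * status. Returns the number of ops actually dequeued.
 */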
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq,
		struct rte_crypto_op **rx_pkts, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_crypto_op *cop;
	uint16_t used_idx, desc_idx;
	uint16_t i;
	struct virtio_crypto_inhdr *inhdr;
	struct virtio_crypto_op_cookie *op_cookie;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		cop = (struct rte_crypto_op *)
				vq->vq_descx[desc_idx].crypto_op;
		if (unlikely(cop == NULL)) {
			VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
					"crypto op cookie at %u",
					vq->vq_used_cons_idx);
			break;
		}

		op_cookie = (struct virtio_crypto_op_cookie *)
						vq->vq_descx[desc_idx].cookie;
		inhdr = &(op_cookie->inhdr);
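
		/*
		 * Map the virtio status to the DPDK op status: OK ->
		 * SUCCESS, ERR -> ERROR, BADMSG/NOTSUPP -> INVALID_ARGS,
		 * INVSESS -> INVALID_SESSION; every case but OK also bumps
		 * the failure counter.
		 */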
		switch (inhdr->status) {
		case VIRTIO_CRYPTO_OK:
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			break;
		case VIRTIO_CRYPTO_ERR:
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_NOTSUPP:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			vq->packets_received_failed++;
			break;
		case VIRTIO_CRYPTO_INVSESS:
			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			vq->packets_received_failed++;
			break;
		default:
			break;
		}

		vq->packets_received_total++;

		rx_pkts[i] = cop;
		rte_mempool_put(vq->mpool, op_cookie);

		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].crypto_op = NULL;
	}

	return i;
}
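
/*
 * Fill the virtio_crypto_op_data_req header for one symmetric op, using
 * the parameters recorded in the session's control request plus the
 * per-op offsets and lengths carried in the rte_crypto_op.
 */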
static int
virtqueue_crypto_sym_pkt_header_arrange(
		struct rte_crypto_op *cop,
		struct virtio_crypto_op_data_req *data,
		struct virtio_crypto_session *session)
{
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_op_data_req *req_data = data;
	struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
	struct virtio_crypto_sym_create_session_req *sym_sess_req =
		&ctrl->u.sym_create_session;
	struct virtio_crypto_alg_chain_session_para *chain_para =
		&sym_sess_req->u.chain.para;
	struct virtio_crypto_cipher_session_para *cipher_para;

	req_data->header.session_id = session->session_id;

	switch (sym_sess_req->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;

		cipher_para = &sym_sess_req->u.cipher.para;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.cipher.para.iv_len = session->iv.length;

		/* src/dst cover the buffer from offset 0 through the end
		 * of the cipher region
		 */
		req_data->u.sym_req.u.cipher.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.cipher.para.dst_data_len =
			req_data->u.sym_req.u.cipher.para.src_data_len;
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		req_data->u.sym_req.op_type =
			VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		cipher_para = &chain_para->cipher_param;
		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
		else
			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;

		req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;

		req_data->u.sym_req.u.chain.para.src_data_len =
			(sym_op->cipher.data.length +
				sym_op->cipher.data.offset);
		req_data->u.sym_req.u.chain.para.dst_data_len =
			req_data->u.sym_req.u.chain.para.src_data_len;
		req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
			sym_op->cipher.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_cipher =
			sym_op->cipher.data.length;
		req_data->u.sym_req.u.chain.para.hash_start_src_offset =
			sym_op->auth.data.offset;
		req_data->u.sym_req.u.chain.para.len_to_hash =
			sym_op->auth.data.length;
		req_data->u.sym_req.u.chain.para.aad_len =
			chain_para->aad_len;

		if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.hash_param.hash_result_len;
		if (chain_para->hash_mode ==
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
			req_data->u.sym_req.u.chain.para.hash_result_len =
				chain_para->u.mac_param.hash_result_len;
		break;
	default:
		return -1;
	}

	return 0;
}
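
/*
 * Build one symmetric request and post it on the data virtqueue. The
 * mempool cookie behind each request doubles as the host-visible request
 * area; the offsets used below imply this layout:
 *
 *	0                           struct virtio_crypto_op_data_req
 *	req_data_len                struct virtio_crypto_inhdr (status)
 *	indirect_vring_addr_offset  indirect descriptor table
 *	offsetof(cookie, iv)        IV copy when the op carries no IOVA
 *
 * A single ring slot flagged VRING_DESC_F_INDIRECT points at the table,
 * so each request costs exactly one virtqueue descriptor (needed == 1).
 */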
static int
virtqueue_crypto_sym_enqueue_xmit(
		struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	uint16_t idx = 0;
	uint16_t num_entry;
	uint16_t needed = 1;	/* one slot: the request is fully indirect */
	uint16_t head_idx;
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	struct vring_desc *desc;
	uint64_t indirect_op_data_req_phys_addr;
	uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
	uint32_t indirect_vring_addr_offset = req_data_len +
		sizeof(struct virtio_crypto_inhdr);
	uint32_t indirect_iv_addr_offset =
		offsetof(struct virtio_crypto_op_cookie, iv);
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct virtio_crypto_session *session =
		(struct virtio_crypto_session *)get_sym_session_private_data(
		cop->sym->session, cryptodev_virtio_driver_id);
	struct virtio_crypto_op_data_req *op_data_req;
	uint32_t hash_result_len = 0;
	struct virtio_crypto_op_cookie *crypto_op_cookie;
	struct virtio_crypto_alg_chain_session_para *para;

	if (unlikely(sym_op->m_src->nb_segs != 1))
		return -EMSGSIZE;
	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	head_idx = txvq->vq_desc_head_idx;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;
	if (unlikely(session == NULL))
		return -EFAULT;

	dxp = &txvq->vq_descx[head_idx];

	if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
		return -EFAULT;
	}
	crypto_op_cookie = dxp->cookie;
	indirect_op_data_req_phys_addr =
		rte_mempool_virt2iova(crypto_op_cookie);
	op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;

	if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
		return -EFAULT;

	/* status is initialized to VIRTIO_CRYPTO_ERR */
	((struct virtio_crypto_inhdr *)
		((uint8_t *)op_data_req + req_data_len))->status =
		VIRTIO_CRYPTO_ERR;

	/* point to indirect vring entry */
	desc = (struct vring_desc *)
		((uint8_t *)op_data_req + indirect_vring_addr_offset);
	for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
		desc[idx].next = idx + 1;
	desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;

	idx = 0;

	/* indirect vring: first part, virtio_crypto_op_data_req */
	desc[idx].addr = indirect_op_data_req_phys_addr;
	desc[idx].len = req_data_len;
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: iv of cipher */
	if (session->iv.length) {
		if (cop->phys_addr)
			desc[idx].addr = cop->phys_addr + session->iv.offset;
		else {
			if (session->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE)
				return -ENOMEM;

			/* no IOVA for the op: stage the IV in the cookie */
			rte_memcpy(crypto_op_cookie->iv,
					rte_crypto_op_ctod_offset(cop,
					uint8_t *, session->iv.offset),
					session->iv.length);
			desc[idx].addr = indirect_op_data_req_phys_addr +
				indirect_iv_addr_offset;
		}

		desc[idx].len = session->iv.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: additional auth data */
	if (session->aad.length) {
		desc[idx].addr = session->aad.phys_addr;
		desc[idx].len = session->aad.length;
		desc[idx++].flags = VRING_DESC_F_NEXT;
	}

	/* indirect vring: src data */
	desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
	desc[idx].len = (sym_op->cipher.data.offset
			+ sym_op->cipher.data.length);
	desc[idx++].flags = VRING_DESC_F_NEXT;

	/* indirect vring: dst data */
	if (sym_op->m_dst) {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_dst, 0);
		desc[idx].len = (sym_op->cipher.data.offset
				+ sym_op->cipher.data.length);
	} else {
		desc[idx].addr = rte_pktmbuf_iova_offset(sym_op->m_src, 0);
		desc[idx].len = (sym_op->cipher.data.offset
				+ sym_op->cipher.data.length);
	}
	desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
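
	/*
	 * Without a separate m_dst the writable dst descriptor above points
	 * back at the source buffer, i.e. the operation runs in place.
	 */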

	/* indirect vring: digest result */
	para = &(session->ctrl.u.sym_create_session.u.chain.para);
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
		hash_result_len = para->u.hash_param.hash_result_len;
	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
		hash_result_len = para->u.mac_param.hash_result_len;
	if (hash_result_len > 0) {
		desc[idx].addr = sym_op->auth.digest.phys_addr;
		desc[idx].len = hash_result_len;
		desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
	}

	/* indirect vring: last part, status returned */
	desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
	desc[idx].len = sizeof(struct virtio_crypto_inhdr);
	desc[idx++].flags = VRING_DESC_F_WRITE;

	num_entry = idx;
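
	/*
	 * The indirect table now holds, in order: data req, optional IV,
	 * optional AAD, src, dst, optional digest, and the writable inhdr
	 * that carries the status back.
	 */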

	/* save the infos to use when receiving packets */
	dxp->crypto_op = (void *)cop;
	dxp->ndescs = needed;

	/* use a single buffer */
	start_dp = txvq->vq_ring.desc;
	start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
		indirect_vring_addr_offset;
	start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
	start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;

	idx = start_dp[head_idx].next;
	txvq->vq_desc_head_idx = idx;
	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		txvq->vq_desc_tail_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_update_avail_ring(txvq, head_idx);

	return 0;
}
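
/* Dispatch on op type; this PMD only handles symmetric ops. */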
static int
virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
		struct rte_crypto_op *cop)
{
	int ret;

	switch (cop->type) {
	case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
		ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
		break;
	default:
		VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
				cop->type);
		ret = -EFAULT;
		break;
	}

	return ret;
}

static int
virtio_crypto_vring_start(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw = vq->hw;
	int i, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	/* Chain all the descriptors in the ring with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	/*
	 * Disable the device (host) from interrupting the guest
	 */
	virtqueue_disable_intr(vq);

	/*
	 * Set the guest physical address of the virtqueue in the
	 * VIRTIO_PCI_QUEUE_PFN config register of the device
	 * to share it with the backend
	 */
	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
		return -EINVAL;
	}

	return 0;
}
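
/*
 * The start helpers below (re)initialize the control and data rings and
 * hand their addresses to the backend through setup_queue().
 */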
void
virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (hw->cvq) {
		virtio_crypto_vring_start(hw->cvq);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
	}
}

void
virtio_crypto_dataq_start(struct rte_cryptodev *dev)
{
	/*
	 * Start data vrings
	 * -	Setup vring structure for data queues
	 */
	uint32_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Start data vring. */
	for (i = 0; i < hw->max_dataqueues; i++) {
		virtio_crypto_vring_start(dev->data->queue_pairs[i]);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

/* vring size of data queue is 1024 */
#define VIRTIO_MBUF_BURST_SZ 1024

uint16_t
virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	uint16_t nb_used, num, nb_rx;

	nb_used = VIRTQUEUE_NUSED(txvq);

	virtio_rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
		? num : VIRTIO_MBUF_BURST_SZ);

	if (num == 0)
		return 0;

	nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
	VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);

	return nb_rx;
}
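
/*
 * Note that both burst handlers operate on the same data virtqueue:
 * requests are posted by the tx path below and their completions are
 * harvested from the same queue's used ring above, which is why the rx
 * handler's queue parameter is also named tx_queue.
 */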
uint16_t
virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
		uint16_t nb_pkts)
{
	struct virtqueue *txvq;
	uint16_t nb_tx;
	int error;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;
	if (unlikely(tx_queue == NULL)) {
		VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
		return 0;
	}
	txvq = tx_queue;

	VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
		/* nb_segs is always 1 in the virtio crypto case */
		int need = txm->nb_segs - txvq->vq_free_cnt;

		/*
		 * A positive value means there is not enough space in the
		 * vring descriptors
		 */
		if (unlikely(need > 0)) {
			/*
			 * Check again: the receive path may have freed
			 * some descriptors in the meantime
			 */
			need = txm->nb_segs - txvq->vq_free_cnt;
			if (unlikely(need > 0)) {
				VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
						"descriptors to transmit");
				break;
			}
		}

		txvq->packets_sent_total++;

		/* Enqueue Packet buffers */
		error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
		if (unlikely(error)) {
			if (error == -ENOSPC)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count = 0");
			else if (error == -EMSGSIZE)
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue Free count < 1");
			else
				VIRTIO_CRYPTO_TX_LOG_ERR(
					"virtqueue_enqueue error: %d", error);
			txvq->packets_sent_failed++;
			break;
		}
	}

	if (likely(nb_tx)) {
		vq_update_avail_idx(txvq);

		if (unlikely(virtqueue_kick_prepare(txvq))) {
			virtqueue_notify(txvq);
			VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
		}
	}

	return nb_tx;
}
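
/*
 * Usage sketch (assumed caller, not part of this file): the two burst
 * handlers above are reached through the standard cryptodev API, e.g.:
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, n);
 *	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, n);
 *
 * where dev_id, qp_id, ops and n are the application's device id, queue
 * pair id, rte_crypto_op array and burst size.
 */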