X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fccp%2Frte_ccp_pmd.c;h=ba379a19f38cdf557a1d8dab2ca35ebb589e5f10;hb=e863fe3a13da89787fdf3b5c590101a3c0f10af6;hp=2061f465e82110d33c74697d9157ac91c11b2690;hpb=e0d88a394e91f446234aa04d0f9e01c150b0d347;p=dpdk.git

diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 2061f465e8..ba379a19f3 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -2,10 +2,10 @@
  * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
  */
 
+#include <rte_string_fns.h>
 #include <rte_bus_pci.h>
 #include <rte_bus_vdev.h>
 #include <rte_common.h>
-#include <rte_config.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
 #include <rte_pci.h>
@@ -21,6 +21,8 @@
  */
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
+uint8_t cryptodev_cnt;
+extern void *sha_ctx;
 
 struct ccp_pmd_init_params {
 	struct rte_cryptodev_pmd_init_params def_p;
@@ -30,14 +32,12 @@ struct ccp_pmd_init_params {
 #define CCP_CRYPTODEV_PARAM_NAME		("name")
 #define CCP_CRYPTODEV_PARAM_SOCKET_ID		("socket_id")
 #define CCP_CRYPTODEV_PARAM_MAX_NB_QP		("max_nb_queue_pairs")
-#define CCP_CRYPTODEV_PARAM_MAX_NB_SESS		("max_nb_sessions")
 #define CCP_CRYPTODEV_PARAM_AUTH_OPT		("ccp_auth_opt")
 
 const char *ccp_pmd_valid_params[] = {
 	CCP_CRYPTODEV_PARAM_NAME,
 	CCP_CRYPTODEV_PARAM_SOCKET_ID,
 	CCP_CRYPTODEV_PARAM_MAX_NB_QP,
-	CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
 	CCP_CRYPTODEV_PARAM_AUTH_OPT,
 };
 
@@ -124,13 +124,6 @@ ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
 	if (ret < 0)
 		goto free_kvlist;
 
-	ret = rte_kvargs_process(kvlist,
-				 CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
-				 &parse_integer_arg,
-				 &params->def_p.max_nb_sessions);
-	if (ret < 0)
-		goto free_kvlist;
-
 	ret = rte_kvargs_process(kvlist,
 				 CCP_CRYPTODEV_PARAM_SOCKET_ID,
 				 &parse_integer_arg,
@@ -169,7 +162,7 @@ get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 			return NULL;
 
 		sess = (struct ccp_session *)
-			get_session_private_data(
+			get_sym_session_private_data(
 				op->sym->session,
 				ccp_cryptodev_driver_id);
 	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
@@ -188,11 +181,11 @@ get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 		if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
 							internals) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
 			sess = NULL;
 		}
 		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_session_private_data(op->sym->session,
+		set_sym_session_private_data(op->sym->session,
 					 ccp_cryptodev_driver_id,
 					 _sess_private_data);
 	}
@@ -209,30 +202,46 @@ ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	struct ccp_queue *cmd_q;
 	struct rte_cryptodev *dev = qp->dev;
 	uint16_t i, enq_cnt = 0, slots_req = 0;
+	uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;
 
 	if (nb_ops == 0)
 		return 0;
 
 	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
 		return 0;
+	if (tmp_ops >= cryptodev_cnt)
+		cur_ops = nb_ops / cryptodev_cnt + (nb_ops)%cryptodev_cnt;
+	else
+		cur_ops = tmp_ops;
+	while (tmp_ops) {
+		b_idx = nb_ops - tmp_ops;
+		slots_req = 0;
+		if (cur_ops <= tmp_ops) {
+			tmp_ops -= cur_ops;
+		} else {
+			cur_ops = tmp_ops;
+			tmp_ops = 0;
+		}
+		for (i = 0; i < cur_ops; i++) {
+			sess = get_ccp_session(qp, ops[i + b_idx]);
+			if (unlikely(sess == NULL) && (i == 0)) {
+				qp->qp_stats.enqueue_err_count++;
+				return 0;
+			} else if (sess == NULL) {
+				cur_ops = i;
+				break;
+			}
+			slots_req += ccp_compute_slot_count(sess);
+		}
 
-	for (i = 0; i < nb_ops; i++) {
-		sess = get_ccp_session(qp, ops[i]);
-		if (unlikely(sess == NULL) && (i == 0)) {
-			qp->qp_stats.enqueue_err_count++;
+		cmd_q = ccp_allot_queue(dev, slots_req);
+		if (unlikely(cmd_q == NULL))
 			return 0;
-		} else if (sess == NULL) {
-			nb_ops = i;
-			break;
-		}
-		slots_req += ccp_compute_slot_count(sess);
+		enq_cnt += process_ops_to_enqueue(qp, ops, cmd_q, cur_ops,
+				nb_ops, slots_req, b_idx);
+		i++;
 	}
 
-	cmd_q = ccp_allot_queue(dev, slots_req);
-	if (unlikely(cmd_q == NULL))
-		return 0;
-
-	enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
 	qp->qp_stats.enqueued_count += enq_cnt;
 	return enq_cnt;
 }
@@ -242,14 +251,28 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	struct ccp_qp *qp = queue_pair;
-	uint16_t nb_dequeued = 0, i;
+	uint16_t nb_dequeued = 0, i, total_nb_ops;
 
-	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);
+
+	if (total_nb_ops) {
+		while (nb_dequeued != total_nb_ops) {
+			nb_dequeued = process_ops_to_dequeue(qp,
+					ops, nb_ops, &total_nb_ops);
+		}
+	}
 
 	/* Free session if a session-less crypto op */
 	for (i = 0; i < nb_dequeued; i++)
 		if (unlikely(ops[i]->sess_type ==
 			     RTE_CRYPTO_OP_SESSIONLESS)) {
+			struct ccp_session *sess = (struct ccp_session *)
+					get_sym_session_private_data(
+						ops[i]->sym->session,
+						ccp_cryptodev_driver_id);
+
+			rte_mempool_put(qp->sess_mp_priv,
+					sess);
 			rte_mempool_put(qp->sess_mp,
 					ops[i]->sym->session);
 			ops[i]->sym->session = NULL;
@@ -269,6 +292,9 @@ static struct rte_pci_id ccp_pci_id[] = {
 	{
 		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
 	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x15df), /* AMD CCP RV */
+	},
 	{.device_id = 0},
 };
 
@@ -280,6 +306,7 @@ cryptodev_ccp_remove(struct rte_vdev_device *dev)
 
 	ccp_pmd_init_done = 0;
 	name = rte_vdev_device_name(dev);
+	rte_free(sha_ctx);
 	if (name == NULL)
 		return -EINVAL;
 
@@ -297,12 +324,10 @@ cryptodev_ccp_create(const char *name,
 {
 	struct rte_cryptodev *dev;
 	struct ccp_private *internals;
-	uint8_t cryptodev_cnt = 0;
 
 	if (init_params->def_p.name[0] == '\0')
-		snprintf(init_params->def_p.name,
-			 sizeof(init_params->def_p.name),
-			 "%s", name);
+		strlcpy(init_params->def_p.name, name,
+			sizeof(init_params->def_p.name));
 
 	dev = rte_cryptodev_pmd_create(init_params->def_p.name,
 				       &vdev->device,
@@ -329,12 +354,12 @@ cryptodev_ccp_create(const char *name,
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
 
 	internals = dev->data->dev_private;
 
 	internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
-	internals->max_nb_sessions = init_params->def_p.max_nb_sessions;
 	internals->auth_opt = init_params->auth_opt;
 	internals->crypto_num_dev = cryptodev_cnt;
 
@@ -359,13 +384,13 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 			"",
 			sizeof(struct ccp_private),
 			rte_socket_id(),
-			CCP_PMD_MAX_QUEUE_PAIRS,
-			RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+			CCP_PMD_MAX_QUEUE_PAIRS
 		},
 		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
 	};
 	const char *input_args;
 
+	sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
 	if (ccp_pmd_init_done) {
 		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
 		return -EFAULT;
@@ -382,8 +407,6 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 		init_params.def_p.socket_id);
 	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
 		init_params.def_p.max_nb_queue_pairs);
-	RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
-		init_params.def_p.max_nb_sessions);
 	RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
 		((init_params.auth_opt == 0) ? "CCP" : "CPU"));
 
@@ -404,7 +427,6 @@ static struct cryptodev_driver ccp_crypto_drv;
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
 	"max_nb_queue_pairs=<int> "
-	"max_nb_sessions=<int> "
 	"socket_id=<int> "
 	"ccp_auth_opt=<int>");
 RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
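
Note on the reworked enqueue path (not part of the patch): the new ccp_pmd_enqueue_burst() above splits each burst across the detected CCP devices, taking nb_ops / cryptodev_cnt plus the remainder as the first chunk and advancing the base index b_idx by the consumed chunk size on every pass of the while loop. The standalone C sketch below reproduces only that splitting arithmetic, with made-up example values for nb_ops and cryptodev_cnt, so the chunk boundaries can be checked in isolation; it is an illustration, not code taken from the driver.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Example values only; in the driver nb_ops comes from the caller
	 * and cryptodev_cnt from device probing. */
	uint16_t nb_ops = 23;
	uint16_t cryptodev_cnt = 4;
	uint16_t tmp_ops = nb_ops, cur_ops, b_idx;

	/* First chunk: an even share per device plus the remainder. */
	if (tmp_ops >= cryptodev_cnt)
		cur_ops = nb_ops / cryptodev_cnt + nb_ops % cryptodev_cnt;
	else
		cur_ops = tmp_ops;

	while (tmp_ops) {
		b_idx = nb_ops - tmp_ops;	/* offset of this chunk in ops[] */
		if (cur_ops <= tmp_ops) {
			tmp_ops -= cur_ops;
		} else {
			cur_ops = tmp_ops;	/* final, shorter chunk */
			tmp_ops = 0;
		}
		printf("enqueue chunk: ops[%u..%u] (%u ops)\n",
		       b_idx, b_idx + cur_ops - 1, cur_ops);
	}
	return 0;
}

With nb_ops = 23 and cryptodev_cnt = 4 this prints chunks ops[0..7], ops[8..15] and ops[16..22], matching how the driver hands one chunk at a time to process_ops_to_enqueue() with the corresponding b_idx.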