1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
5 #include <rte_string_fns.h>
6 #include <rte_bus_pci.h>
7 #include <rte_bus_vdev.h>
8 #include <rte_common.h>
9 #include <rte_cryptodev.h>
10 #include <cryptodev_pmd.h>
13 #include <rte_malloc.h>
15 #include "ccp_crypto.h"
17 #include "ccp_pmd_private.h"
20 * Global static parameter used to find if CCP device is already initialized.
/* Set to 1 once the first probe succeeds (see probe path below); cleared
 * again on remove.  Guards against double initialization of the PMD. */
22 static unsigned int ccp_pmd_init_done;
/* Driver id handed out by RTE_PMD_REGISTER_CRYPTO_DRIVER at the bottom of
 * this file; used to index per-driver session private data. */
23 uint8_t ccp_cryptodev_driver_id;
/* Number of CCP hardware engines found by ccp_probe_devices() at create
 * time; the enqueue path splits bursts across this count. */
24 uint8_t cryptodev_cnt;
/* PMD init parameters: wraps the generic cryptodev init params.
 * NOTE(review): an auth_opt member is also read/written elsewhere in this
 * file (init_params->auth_opt); its declaration is outside this view. */
27 struct ccp_pmd_init_params {
28 struct rte_cryptodev_pmd_init_params def_p;
/* devargs key names accepted by this PMD (see RTE_PMD_REGISTER_PARAM_STRING
 * at the end of this file). */
32 #define CCP_CRYPTODEV_PARAM_NAME ("name")
33 #define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
34 #define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
35 #define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
/* Table of the valid devargs keys above; presumably consumed by a kvargs
 * parser that is not visible in this excerpt. */
37 const char *ccp_pmd_valid_params[] = {
38 CCP_CRYPTODEV_PARAM_NAME,
39 CCP_CRYPTODEV_PARAM_SOCKET_ID,
40 CCP_CRYPTODEV_PARAM_MAX_NB_QP,
41 CCP_CRYPTODEV_PARAM_AUTH_OPT,
44 /** ccp pmd auth option */
45 enum ccp_pmd_auth_opt {
/* 0 = offload authentication to CCP hardware; the probe log below maps
 * non-zero to "CPU", so a CPU variant follows (declaration elided here). */
46 CCP_PMD_AUTH_OPT_CCP = 0,
/*
 * Resolve the ccp_session that backs @op on queue pair @qp.
 *
 * WITH_SESSION ops: return the driver-private data already attached to
 * op->sym->session under this driver's id.
 * SESSIONLESS ops: allocate a generic session object and a private-data
 * object from the qp mempools, initialise the private data from
 * op->sym->xform, and attach both to the op so the dequeue path can
 * release them later.
 *
 * Error paths (returns, closing braces) are elided from this excerpt;
 * on failure the function presumably returns NULL.
 */
50 static struct ccp_session *
51 get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
53 struct ccp_session *sess = NULL;
55 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
/* A WITH_SESSION op must already carry a session object. */
56 if (unlikely(op->sym->session == NULL))
59 sess = (struct ccp_session *)
60 get_sym_session_private_data(
62 ccp_cryptodev_driver_id);
63 } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
65 void *_sess_private_data = NULL;
66 struct ccp_private *internals;
/* Generic session object from the qp session mempool. */
68 if (rte_mempool_get(qp->sess_mp, &_sess))
/* NOTE(review): private data is taken from qp->sess_mp here, but the
 * failure path below and the dequeue path both return it to
 * qp->sess_mp_priv — pool mismatch; verify this should be
 * qp->sess_mp_priv. */
70 if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
73 sess = (struct ccp_session *)_sess_private_data;
75 internals = (struct ccp_private *)qp->dev->data->dev_private;
/* On a bad xform both objects go back to their pools. */
76 if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
78 rte_mempool_put(qp->sess_mp, _sess);
79 rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
/* Attach session + private data to the op for the dequeue path. */
82 op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
83 set_sym_session_private_data(op->sym->session,
84 ccp_cryptodev_driver_id,
/*
 * Enqueue burst of crypto ops.  Splits the burst across the detected CCP
 * devices: resolves a session per op, accumulates the hardware slot count
 * needed (ccp_compute_slot_count), picks a command queue with enough free
 * slots (ccp_allot_queue) and submits the batch via process_ops_to_enqueue.
 * Updates qp enqueue statistics.  Loop/return lines are elided in this
 * excerpt, so exact batching control flow cannot be fully confirmed here.
 */
92 ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
95 struct ccp_session *sess = NULL;
96 struct ccp_qp *qp = queue_pair;
97 struct ccp_queue *cmd_q;
98 struct rte_cryptodev *dev = qp->dev;
99 uint16_t i, enq_cnt = 0, slots_req = 0;
100 uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;
/* Nothing can complete if the processed-pkts ring is already full. */
105 if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
/* Per-device share of the burst: quotient plus the whole remainder
 * (the remainder is folded into this batch rather than distributed). */
107 if (tmp_ops >= cryptodev_cnt)
108 cur_ops = nb_ops / cryptodev_cnt + (nb_ops)%cryptodev_cnt;
/* b_idx = offset of the current batch within ops[]. */
112 b_idx = nb_ops - tmp_ops;
114 if (cur_ops <= tmp_ops) {
120 for (i = 0; i < cur_ops; i++) {
121 sess = get_ccp_session(qp, ops[i + b_idx]);
/* Session failure on the very first op: count an error and bail. */
122 if (unlikely(sess == NULL) && (i == 0)) {
123 qp->qp_stats.enqueue_err_count++;
/* Later failure: truncate the batch to the ops resolved so far. */
125 } else if (sess == NULL) {
129 slots_req += ccp_compute_slot_count(sess);
132 cmd_q = ccp_allot_queue(dev, slots_req);
133 if (unlikely(cmd_q == NULL))
135 enq_cnt += process_ops_to_enqueue(qp, ops, cmd_q, cur_ops,
136 nb_ops, slots_req, b_idx);
140 qp->qp_stats.enqueued_count += enq_cnt;
/*
 * Dequeue burst of completed crypto ops.  Polls process_ops_to_dequeue
 * until the number dequeued matches total_nb_ops, then releases session
 * and private-data objects for sessionless ops and updates statistics.
 * NOTE(review): the while loop retries with no iteration cap or timeout
 * visible in this excerpt — if the hardware never completes, this spins;
 * confirm an exit condition exists in the elided lines.
 */
145 ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
148 struct ccp_qp *qp = queue_pair;
149 uint16_t nb_dequeued = 0, i, total_nb_ops;
151 nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);
/* Keep polling until every op of the batch has been collected. */
154 while (nb_dequeued != total_nb_ops) {
155 nb_dequeued = process_ops_to_dequeue(qp,
156 ops, nb_ops, &total_nb_ops);
160 /* Free session if a session-less crypto op */
161 for (i = 0; i < nb_dequeued; i++)
162 if (unlikely(ops[i]->sess_type ==
163 RTE_CRYPTO_OP_SESSIONLESS)) {
164 struct ccp_session *sess = (struct ccp_session *)
165 get_sym_session_private_data(
166 ops[i]->sym->session,
167 ccp_cryptodev_driver_id);
/* Private data back to the private pool, session object back to
 * the session pool — mirrors the sessionless alloc in
 * get_ccp_session(). */
169 rte_mempool_put(qp->sess_mp_priv,
171 rte_mempool_put(qp->sess_mp,
172 ops[i]->sym->session);
173 ops[i]->sym->session = NULL;
175 qp->qp_stats.dequeued_count += nb_dequeued;
181 * The set of PCI devices this driver supports
/* Vendor 0x1022 = AMD; matched by the PCI bus during probe and passed to
 * ccp_probe_devices() in cryptodev_ccp_create(). */
183 static struct rte_pci_id ccp_pci_id[] = {
185 RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
188 RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
191 RTE_PCI_DEVICE(0x1022, 0x15df), /* AMD CCP RV */
196 /** Remove ccp pmd */
/* PCI remove callback: look up the cryptodev by its PCI-derived name,
 * clear the init flag so a later probe can re-create it, and destroy the
 * device.
 * NOTE(review): no NULL check on `dev` is visible before
 * rte_cryptodev_pmd_destroy(dev) — confirm the elided lines (207-214)
 * handle the lookup-failure case. */
198 cryptodev_ccp_remove(struct rte_pci_device *pci_dev)
200 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
201 struct rte_cryptodev *dev;
206 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
211 dev = rte_cryptodev_pmd_get_named_dev(name);
/* Allow the PMD to be initialized again after removal. */
215 ccp_pmd_init_done = 0;
218 RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
219 name, rte_socket_id());
221 return rte_cryptodev_pmd_destroy(dev);
224 /** Create crypto device */
/* Allocate and initialise the cryptodev: create the device via the
 * generic PMD helper, probe for CCP hardware engines, wire up ops and
 * burst functions, set feature flags, and copy init params into the
 * per-device private data.  On the (elided) error path it logs and tears
 * down via cryptodev_ccp_remove(). */
226 cryptodev_ccp_create(const char *name,
227 struct rte_pci_device *pci_dev,
228 struct ccp_pmd_init_params *init_params,
229 struct rte_pci_driver *pci_drv)
231 struct rte_cryptodev *dev;
232 struct ccp_private *internals;
/* Fall back to the PCI-derived name if the caller gave none. */
234 if (init_params->def_p.name[0] == '\0')
235 strlcpy(init_params->def_p.name, name,
236 sizeof(init_params->def_p.name));
238 dev = rte_cryptodev_pmd_create(init_params->def_p.name,
240 &init_params->def_p);
242 CCP_LOG_ERR("failed to create cryptodev vdev");
/* Count the CCP engines behind this PCI device; 0 means no usable HW. */
246 cryptodev_cnt = ccp_probe_devices(pci_dev, ccp_pci_id);
248 if (cryptodev_cnt == 0) {
249 CCP_LOG_ERR("failed to detect CCP crypto device");
/* NOTE(review): raw printf instead of the driver's CCP_LOG/RTE_LOG
 * macros used everywhere else — inconsistent logging. */
253 printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
254 dev->device = &pci_dev->device;
255 dev->device->driver = &pci_drv->driver;
256 dev->driver_id = ccp_cryptodev_driver_id;
258 /* register rx/tx burst functions for data path */
259 dev->dev_ops = ccp_pmd_ops;
260 dev->enqueue_burst = ccp_pmd_enqueue_burst;
261 dev->dequeue_burst = ccp_pmd_dequeue_burst;
263 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
264 RTE_CRYPTODEV_FF_HW_ACCELERATED |
265 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
266 RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
267 
268 internals = dev->data->dev_private;
/* Copy the init parameters into the device's private state. */
270 internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
271 internals->auth_opt = init_params->auth_opt;
272 internals->crypto_num_dev = cryptodev_cnt;
/* Tell the framework probing is complete (enables the device). */
274 rte_cryptodev_pmd_probing_finish(dev);
/* Error path (label elided): log and undo the partial create. */
279 CCP_LOG_ERR("driver %s: %s() failed",
280 init_params->def_p.name, __func__);
281 cryptodev_ccp_remove(pci_dev);
/* PCI probe callback: build default init params, refuse a second init,
 * log the configuration and delegate to cryptodev_ccp_create().  Marks
 * the PMD initialized on success (return lines elided here). */
288 cryptodev_ccp_probe(struct rte_pci_driver *pci_drv __rte_unused,
289 struct rte_pci_device *pci_dev)
292 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
293 struct ccp_pmd_init_params init_params = {
296 sizeof(struct ccp_private),
298 CCP_PMD_MAX_QUEUE_PAIRS
300 .auth_opt = CCP_PMD_AUTH_OPT_CCP,
/* NOTE(review): rte_malloc result is not checked for NULL before the
 * global sha_ctx is used elsewhere; also allocated before the
 * already-initialized early exit below — confirm it is freed on that
 * path and on remove. */
303 sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
304 if (ccp_pmd_init_done) {
305 RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
308 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
312 init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
314 RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
315 init_params.def_p.socket_id);
316 RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
317 init_params.def_p.max_nb_queue_pairs);
318 RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
319 ((init_params.auth_opt == 0) ? "CCP" : "CPU"));
/* NOTE(review): duplicate of the rte_pci_device_name() call above
 * (line 308) — redundant. */
321 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
323 rc = cryptodev_ccp_create(name, pci_dev, &init_params, pci_drv);
/* Block any further probe from re-initializing the PMD. */
326 ccp_pmd_init_done = 1;
/* PCI driver descriptor binding the id table and probe/remove callbacks;
 * NEED_MAPPING requests BAR mapping from the PCI bus before probe. */
330 static struct rte_pci_driver cryptodev_ccp_pmd_drv = {
331 .id_table = ccp_pci_id,
332 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
333 .probe = cryptodev_ccp_probe,
334 .remove = cryptodev_ccp_remove
/* Cryptodev-framework driver record filled by the registration macro
 * below, which also assigns ccp_cryptodev_driver_id. */
337 static struct cryptodev_driver ccp_crypto_drv;
339 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
340 RTE_PMD_REGISTER_KMOD_DEP(CRYPTODEV_NAME_CCP_PMD, "* igb_uio | uio_pci_generic | vfio-pci");
341 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
342 "max_nb_queue_pairs=<int> "
344 "ccp_auth_opt=<int>");
345 RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
346 ccp_cryptodev_driver_id);