1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2019 Marvell International Ltd.
7 #include <rte_cryptodev_pmd.h>
10 #include "otx2_cryptodev.h"
11 #include "otx2_cryptodev_hw_access.h"
12 #include "otx2_cryptodev_mbox.h"
13 #include "otx2_cryptodev_ops.h"
14 #include "otx2_mbox.h"
16 #include "cpt_hw_types.h"
17 #include "cpt_pmd_logs.h"
18 #include "cpt_pmd_ops_helper.h"
20 #define METABUF_POOL_CACHE_SIZE 512
22 /* Forward declarations */
25 otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
/* Compose the per-queue-pair LF memzone name "otx2_cpt_lf_mem_<dev>:<qp>"
 * into the caller-supplied buffer (truncated to 'size' by snprintf).
 */
28 qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
30 snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
/* Create the metabuf mempool for one queue pair.
 *
 * Element size is the larger of the scatter-gather and linear (direct)
 * mode meta lengths, each padded with 32B of slack.  On success the pool
 * pointer and both meta lengths are stored in qp->meta_info.
 *
 * NOTE(review): the error-handling lines between the calls below are not
 * visible here; presumably each failing step logs and branches to a
 * cleanup label that frees the pool — confirm against the full source.
 */
34 otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
35 struct otx2_cpt_qp *qp, uint8_t qp_id,
38 char mempool_name[RTE_MEMPOOL_NAMESIZE];
39 int sg_mlen, lb_mlen, max_mlen, ret;
40 struct cpt_qp_meta_info *meta_info;
41 struct rte_mempool *pool;
43 /* Get meta len for scatter gather mode */
44 sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
46 /* Extra 32B saved for future considerations */
47 sg_mlen += 4 * sizeof(uint64_t);
49 /* Get meta len for linear buffer (direct) mode */
50 lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
52 /* Extra 32B saved for future considerations */
53 lb_mlen += 4 * sizeof(uint64_t);
55 /* Check max requirement for meta buffer */
56 max_mlen = RTE_MAX(lb_mlen, sg_mlen);
58 /* Allocate mempool */
/* Pool name is unique per (device, queue pair): "otx2_cpt_mb_<dev>:<qp>" */
60 snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
61 dev->data->dev_id, qp_id);
/* Create empty first so the mempool ops can be selected before populate */
63 pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
64 METABUF_POOL_CACHE_SIZE, 0,
68 CPT_LOG_ERR("Could not create mempool for metabuf");
/* Use the platform's default mbuf mempool ops for the backing store */
72 ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
75 CPT_LOG_ERR("Could not set mempool ops");
79 ret = rte_mempool_populate_default(pool);
81 CPT_LOG_ERR("Could not populate metabuf pool");
/* Record the pool and both meta lengths for the datapath to consult */
85 meta_info = &qp->meta_info;
87 meta_info->pool = pool;
88 meta_info->lb_mlen = lb_mlen;
89 meta_info->sg_mlen = sg_mlen;
/* Error path: release the partially initialized pool */
94 rte_mempool_free(pool);
/* Free the queue pair's metabuf mempool and reset qp->meta_info so a
 * later create starts from a clean state.  rte_mempool_free(NULL) is a
 * no-op, so this is safe to call on an already-destroyed pool.
 */
99 otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
101 struct cpt_qp_meta_info *meta_info = &qp->meta_info;
103 rte_mempool_free(meta_info->pool);
105 meta_info->pool = NULL;
106 meta_info->lb_mlen = 0;
107 meta_info->sg_mlen = 0;
/* Allocate and initialize one CPT queue pair.
 *
 * Layout of the single reserved memzone (page-size aligned sections):
 *   [pending queue rids][instruction group memory][instruction queue]
 * Also creates the metabuf mempool, maps the LF BAR2/LMT line registers,
 * and enables the instruction queue.  Returns the qp on success.
 *
 * NOTE(review): 'va', 'iova', 'lmtline', 'ret' and the 'group' parameter
 * are declared/derived on lines not visible here (va/iova presumably come
 * from the memzone) — confirm against the full source.
 */
110 static struct otx2_cpt_qp *
111 otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
114 struct otx2_cpt_vf *vf = dev->data->dev_private;
115 uint64_t pg_sz = sysconf(_SC_PAGESIZE);
116 const struct rte_memzone *lf_mem;
117 uint32_t len, iq_len, size_div40;
118 char name[RTE_MEMZONE_NAMESIZE];
119 uint64_t used_len, iova;
120 struct otx2_cpt_qp *qp;
125 /* Allocate queue pair */
126 qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
129 CPT_LOG_ERR("Could not allocate queue pair");
133 iq_len = OTX2_CPT_IQ_LEN;
136 * Queue size must be a multiple of 40 and effective queue size to
137 * software is (size_div40 - 1) * 40
/* Round iq_len up to a multiple of 40, plus one extra 40-entry chunk */
139 size_div40 = (iq_len + 40 - 1) / 40 + 1;
141 /* For pending queue */
142 len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
144 /* Space for instruction group memory */
145 len += size_div40 * 16;
147 /* So that instruction queues start as pg size aligned */
148 len = RTE_ALIGN(len, pg_sz);
150 /* For instruction queues */
151 len += OTX2_CPT_IQ_LEN * sizeof(union cpt_inst_s);
153 /* Wastage after instruction queues */
154 len = RTE_ALIGN(len, pg_sz);
/* One memzone per (device, qp); name must match qp_destroy's lookup */
156 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
159 lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
160 RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
161 RTE_CACHE_LINE_SIZE);
162 if (lf_mem == NULL) {
163 CPT_LOG_ERR("Could not allocate reserved memzone");
172 ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
174 CPT_LOG_ERR("Could not create mempool for metabuf");
178 /* Initialize pending queue */
179 qp->pend_q.rid_queue = (struct rid *)va;
180 qp->pend_q.enq_tail = 0;
181 qp->pend_q.deq_head = 0;
182 qp->pend_q.pending_count = 0;
/* Skip past the pending queue and instruction group memory to find the
 * page-aligned start of the instruction queue (mirrors the len math).
 */
184 used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
185 used_len += size_div40 * 16;
186 used_len = RTE_ALIGN(used_len, pg_sz);
189 qp->iq_dma_addr = iova;
/* Per-LF register base within BAR2 */
191 qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
/* LMT line address for this qp: LMT block | qp slot | lmtline 0 */
193 lmtline = vf->otx2_dev.bar2 +
194 (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
195 OTX2_LMT_LF_LMTLINE(0);
197 qp->lmtline = (void *)lmtline;
199 qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);
/* Quiesce first, then enable with the requested group and priority */
201 otx2_cpt_iq_disable(qp);
203 ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
206 CPT_LOG_ERR("Could not enable instruction queue");
207 goto mempool_destroy;
/* Error unwind: undo metabuf pool, then memzone */
213 otx2_cpt_metabuf_mempool_destroy(qp);
215 rte_memzone_free(lf_mem);
/* Tear down a queue pair created by otx2_cpt_qp_create: disable the
 * instruction queue, destroy the metabuf pool, and free the LF memzone
 * found by rebuilding the same name used at creation time.
 * NOTE(review): the qp allocation itself is presumably freed on lines not
 * visible here — confirm against the full source.
 */
222 otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
224 const struct rte_memzone *lf_mem;
225 char name[RTE_MEMZONE_NAMESIZE];
228 otx2_cpt_iq_disable(qp);
230 otx2_cpt_metabuf_mempool_destroy(qp);
/* Same naming scheme as qp_create, so lookup finds the right zone */
232 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
235 lf_mem = rte_memzone_lookup(name);
237 ret = rte_memzone_free(lf_mem);
/* cryptodev dev_configure op.
 *
 * Validates the requested queue-pair count against the VF limit, applies
 * the caller's feature-flag disable mask, then (re)attaches the CPT
 * queues via mailbox: unregister error interrupts, detach old queues,
 * attach nb_queue_pairs new ones, fetch MSI-X offsets, and re-register
 * error interrupts.  On a late failure the queues are detached again.
 */
249 otx2_cpt_dev_config(struct rte_cryptodev *dev,
250 struct rte_cryptodev_config *conf)
252 struct otx2_cpt_vf *vf = dev->data->dev_private;
255 if (conf->nb_queue_pairs > vf->max_queues) {
256 CPT_LOG_ERR("Invalid number of queue pairs requested");
/* Clear only the features the application asked to disable */
260 dev->feature_flags &= ~conf->ff_disable;
262 /* Unregister error interrupts */
263 if (vf->err_intr_registered)
264 otx2_cpt_err_intr_unregister(dev);
268 ret = otx2_cpt_queues_detach(dev);
270 CPT_LOG_ERR("Could not detach CPT queues");
276 ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
278 CPT_LOG_ERR("Could not attach CPT queues");
282 ret = otx2_cpt_msix_offsets_get(dev);
284 CPT_LOG_ERR("Could not get MSI-X offsets");
288 /* Register error interrupts */
289 ret = otx2_cpt_err_intr_register(dev);
291 CPT_LOG_ERR("Could not register error interrupts");
/* Error path: undo the attach so the device is left detached */
299 otx2_cpt_queues_detach(dev);
/* cryptodev dev_start op — no hardware action needed beyond the trace;
 * queues are already enabled at queue_pair_setup time.
 */
304 otx2_cpt_dev_start(struct rte_cryptodev *dev)
308 CPT_PMD_INIT_FUNC_TRACE();
/* cryptodev dev_stop op — trace-only stub; teardown happens in
 * dev_close / queue_pair_release.
 */
314 otx2_cpt_dev_stop(struct rte_cryptodev *dev)
318 CPT_PMD_INIT_FUNC_TRACE();
/* cryptodev dev_close op: release every queue pair, unregister error
 * interrupts if they were registered, and detach the CPT queues via
 * mailbox.
 */
322 otx2_cpt_dev_close(struct rte_cryptodev *dev)
324 struct otx2_cpt_vf *vf = dev->data->dev_private;
327 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
328 ret = otx2_cpt_queue_pair_release(dev, i);
333 /* Unregister error interrupts */
334 if (vf->err_intr_registered)
335 otx2_cpt_err_intr_unregister(dev);
339 ret = otx2_cpt_queues_detach(dev);
341 CPT_LOG_ERR("Could not detach CPT queues");
/* cryptodev dev_infos_get op: report device limits and features.
 * capabilities is NULL and max_nb_sessions is 0 here — presumably
 * capability reporting is added elsewhere/later; confirm against the
 * full driver.
 */
348 otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
349 struct rte_cryptodev_info *info)
351 struct otx2_cpt_vf *vf = dev->data->dev_private;
354 info->max_nb_queue_pairs = vf->max_queues;
355 info->feature_flags = dev->feature_flags;
356 info->capabilities = NULL;
357 info->sym.max_nb_sessions = 0;
358 info->driver_id = otx2_cryptodev_driver_id;
359 info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
360 info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
/* cryptodev queue_pair_setup op.
 *
 * Releases any existing qp at this id, validates the descriptor count
 * and the PCI BAR2 mapping, creates the qp with all engine groups
 * enabled (OTX2_CPT_ENG_GRPS_MASK), and records the session mempools.
 * socket_id is unused: the qp is placed on the VF's own NUMA node.
 */
365 otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
366 const struct rte_cryptodev_qp_conf *conf,
367 int socket_id __rte_unused)
369 uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
370 struct rte_pci_device *pci_dev;
371 struct otx2_cpt_qp *qp;
373 CPT_PMD_INIT_FUNC_TRACE();
/* Re-setup of an existing qp: tear the old one down first */
375 if (dev->data->queue_pairs[qp_id] != NULL)
376 otx2_cpt_queue_pair_release(dev, qp_id);
378 if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
379 CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
380 conf->nb_descriptors);
384 pci_dev = RTE_DEV_TO_PCI(dev->device);
/* BAR2 must be mapped: qp_create derives register addresses from it */
386 if (pci_dev->mem_resource[2].addr == NULL) {
387 CPT_LOG_ERR("Invalid PCI mem address");
391 qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
393 CPT_LOG_ERR("Could not create queue pair %d", qp_id);
397 qp->sess_mp = conf->mp_session;
398 qp->sess_mp_priv = conf->mp_session_private;
399 dev->data->queue_pairs[qp_id] = qp;
/* cryptodev queue_pair_release op: destroy the qp (if present) and clear
 * its slot in dev->data->queue_pairs.  NOTE(review): a NULL-qp early
 * return presumably sits on lines not visible here — confirm.
 */
405 otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
407 struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
410 CPT_PMD_INIT_FUNC_TRACE();
415 CPT_LOG_INFO("Releasing queue pair %d", qp_id);
417 ret = otx2_cpt_qp_destroy(dev, qp);
419 CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
/* Clear the slot only after a successful destroy */
423 dev->data->queue_pairs[qp_id] = NULL;
428 struct rte_cryptodev_ops otx2_cpt_ops = {
429 /* Device control ops */
430 .dev_configure = otx2_cpt_dev_config,
431 .dev_start = otx2_cpt_dev_start,
432 .dev_stop = otx2_cpt_dev_stop,
433 .dev_close = otx2_cpt_dev_close,
434 .dev_infos_get = otx2_cpt_dev_info_get,
438 .queue_pair_setup = otx2_cpt_queue_pair_setup,
439 .queue_pair_release = otx2_cpt_queue_pair_release,
440 .queue_pair_count = NULL,
442 /* Symmetric crypto ops */
443 .sym_session_get_size = NULL,
444 .sym_session_configure = NULL,
445 .sym_session_clear = NULL,