/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */
#include <errno.h>

#include <cryptodev_pmd.h>
#include <rte_ethdev.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_ethdev.h"
#include "otx2_sec_idev.h"
#include "otx2_mbox.h"

#include "cpt_pmd_logs.h"
18 otx2_cpt_hardware_caps_get(const struct rte_cryptodev *dev,
19 union cpt_eng_caps *hw_caps)
21 struct otx2_cpt_vf *vf = dev->data->dev_private;
22 struct otx2_dev *otx2_dev = &vf->otx2_dev;
23 struct cpt_caps_rsp_msg *rsp;
26 otx2_mbox_alloc_msg_cpt_caps_get(otx2_dev->mbox);
28 ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
32 if (rsp->cpt_pf_drv_version != OTX2_CPT_PMD_VERSION) {
33 otx2_err("Incompatible CPT PMD version"
34 "(Kernel: 0x%04x DPDK: 0x%04x)",
35 rsp->cpt_pf_drv_version, OTX2_CPT_PMD_VERSION);
39 vf->cpt_revision = rsp->cpt_revision;
40 otx2_mbox_memcpy(hw_caps, rsp->eng_caps,
41 sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
47 otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
50 struct otx2_cpt_vf *vf = dev->data->dev_private;
51 struct otx2_dev *otx2_dev = &vf->otx2_dev;
52 struct free_rsrcs_rsp *rsp;
55 otx2_mbox_alloc_msg_free_rsrc_cnt(otx2_dev->mbox);
57 ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
61 *nb_queues = rsp->cpt + rsp->cpt1;
66 otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues)
68 struct otx2_cpt_vf *vf = dev->data->dev_private;
69 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
70 int blkaddr[OTX2_CPT_MAX_BLKS];
71 struct rsrc_attach_req *req;
75 blkaddr[0] = RVU_BLOCK_ADDR_CPT0;
76 blkaddr[1] = RVU_BLOCK_ADDR_CPT1;
78 /* Ask AF to attach required LFs */
80 req = otx2_mbox_alloc_msg_attach_resources(mbox);
82 if ((vf->cpt_revision == OTX2_CPT_REVISION_ID_3) &&
83 (vf->otx2_dev.pf_func & 0x1))
84 blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
87 req->cptlfs = nb_queues;
88 req->cpt_blkaddr = blkaddr[blknum];
90 ret = otx2_mbox_process(mbox);
92 if (vf->cpt_revision == OTX2_CPT_REVISION_ID_3) {
93 blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
94 req->cpt_blkaddr = blkaddr[blknum];
95 if (otx2_mbox_process(mbox) < 0)
100 } else if (ret < 0) {
104 /* Update number of attached queues */
105 vf->nb_queues = nb_queues;
106 for (i = 0; i < nb_queues; i++)
107 vf->lf_blkaddr[i] = req->cpt_blkaddr;
113 otx2_cpt_queues_detach(const struct rte_cryptodev *dev)
115 struct otx2_cpt_vf *vf = dev->data->dev_private;
116 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
117 struct rsrc_detach_req *req;
119 req = otx2_mbox_alloc_msg_detach_resources(mbox);
122 if (otx2_mbox_process(mbox) < 0)
125 /* Queues have been detached */
132 otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
134 struct otx2_cpt_vf *vf = dev->data->dev_private;
135 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
136 struct msix_offset_rsp *rsp;
139 /* Get CPT MSI-X vector offsets */
141 otx2_mbox_alloc_msg_msix_offset(mbox);
143 ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
147 for (i = 0; i < vf->nb_queues; i++)
148 vf->lf_msixoff[i] = (vf->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
149 rsp->cpt1_lf_msixoff[i] : rsp->cptlf_msixoff[i];
155 otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
157 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
160 otx2_mbox_msg_send(mbox, 0);
162 ret = otx2_mbox_wait_for_rsp(mbox, 0);
164 CPT_LOG_ERR("Could not get mailbox response");
172 otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
173 uint8_t blkaddr, uint64_t *val)
175 struct otx2_cpt_vf *vf = dev->data->dev_private;
176 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
177 struct otx2_mbox_dev *mdev = &mbox->dev[0];
178 struct cpt_rd_wr_reg_msg *msg;
181 msg = (struct cpt_rd_wr_reg_msg *)
182 otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
185 CPT_LOG_ERR("Could not allocate mailbox message");
189 msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
190 msg->hdr.sig = OTX2_MBOX_REQ_SIG;
191 msg->hdr.pcifunc = vf->otx2_dev.pf_func;
193 msg->reg_offset = reg;
195 msg->blkaddr = blkaddr;
197 ret = otx2_cpt_send_mbox_msg(vf);
201 off = mbox->rx_start +
202 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
203 msg = (struct cpt_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);
211 otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
212 uint8_t blkaddr, uint64_t val)
214 struct otx2_cpt_vf *vf = dev->data->dev_private;
215 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
216 struct cpt_rd_wr_reg_msg *msg;
218 msg = (struct cpt_rd_wr_reg_msg *)
219 otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
222 CPT_LOG_ERR("Could not allocate mailbox message");
226 msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
227 msg->hdr.sig = OTX2_MBOX_REQ_SIG;
228 msg->hdr.pcifunc = vf->otx2_dev.pf_func;
230 msg->reg_offset = reg;
232 msg->blkaddr = blkaddr;
234 return otx2_cpt_send_mbox_msg(vf);
238 otx2_cpt_inline_init(const struct rte_cryptodev *dev)
240 struct otx2_cpt_vf *vf = dev->data->dev_private;
241 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
242 struct cpt_rx_inline_lf_cfg_msg *msg;
245 msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
246 msg->sso_pf_func = otx2_sso_pf_func_get();
248 otx2_mbox_msg_send(mbox, 0);
249 ret = otx2_mbox_process(mbox);
257 otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
260 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
261 struct otx2_cpt_vf *vf = dev->data->dev_private;
262 struct otx2_mbox *mbox = vf->otx2_dev.mbox;
263 struct cpt_inline_ipsec_cfg_msg *msg;
264 struct otx2_eth_dev *otx2_eth_dev;
267 if (!otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
270 otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);
272 msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
273 msg->dir = CPT_INLINE_OUTBOUND;
277 msg->nix_pf_func = otx2_eth_dev->pf_func;
279 otx2_mbox_msg_send(mbox, 0);
280 ret = otx2_mbox_process(mbox);