/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn9k_ethdev.h>
#include <cnxk_security.h>

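/* Crypto capabilities advertised for inline IPsec on cn9k: only the
 * AES-GCM AEAD transform is listed.
 */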
static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
			}, },
		}, },
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

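/* Security capabilities: inline protocol ESP in tunnel mode, for both
 * ingress and egress, backed by the crypto capabilities above.
 */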
static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* End of list */
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

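/* Create an inline protocol IPsec session: reserve an ONF SA (inbound is
 * addressed by SPI, outbound by an allocated SA index) and publish the
 * fast path info through the session private data.
 */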
static int
cn9k_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess,
			    struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn9k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	bool inbound;
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

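	/* Inbound SAs live in the NIX inbound SA table and are addressed
	 * directly by SPI, so no SA index allocation is needed here.
	 */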
	if (inbound) {
		struct cn9k_inb_priv_data *inb_priv;
		struct roc_onf_ipsec_inb_sa *inb_sa;

		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
		 * device always for CN9K.
		 */
		inb_sa = (struct roc_onf_ipsec_inb_sa *)
			roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
		if (!inb_sa) {
			plt_err("Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		/* Check if SA is already in use */
		if (inb_sa->ctl.valid) {
			plt_err("Inbound SA with SPI %u already in use",
				ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;

		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi;
		eth_sec->spi = ipsec->spi;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
	} else {
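		/* Outbound path: allocate a free SA index from the device
		 * and fill the ONF outbound SA at that slot.
		 */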
		struct cn9k_outb_priv_data *outb_priv;
		struct roc_onf_ipsec_outb_sa *outb_sa;
		uintptr_t sa_base = dev->outb.sa_base;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;
		/* Start sequence number with 1 */
		outb_priv->seq = 1;

		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
			outb_priv->copy_salt = 1;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
	}

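	/* Both paths have filled sess_priv with the fast path view of the
	 * SA: SA index, inbound flag and, for outbound, the length roundup
	 * and partial length values.
	 */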
	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);

	/* Update fast path info in priv area. */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	rte_mempool_put(mempool, eth_sec);
	return rc;
}

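/* Destroy an inline IPsec session: invalidate the SA, unlink the session
 * from the per-direction list and return the object to its mempool.
 */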
static int
cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_onf_ipsec_outb_sa *outb_sa;
	struct roc_onf_ipsec_inb_sa *inb_sa;
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	if (eth_sec->inb) {
		inb_sa = eth_sec->sa;
		/* Disable SA */
		inb_sa->ctl.valid = 0;

		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
	} else {
		outb_sa = eth_sec->sa;
		/* Disable SA */
		outb_sa->ctl.valid = 0;

		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);

	/* Put eth_sec object back to pool */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);

	return 0;
}

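/* Report the inline IPsec capability table defined above. */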
static const struct rte_security_capability *
cn9k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn9k_eth_sec_capabilities;
}

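/* Replace the common cnxk security ops with the cn9k handlers above.
 * The init_once guard makes repeated calls a no-op.
 */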
void
cn9k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
}
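
/*
 * Usage sketch (not part of the driver): cn9k_eth_sec_session_create() is
 * reached through the generic rte_security API. The identifiers below
 * (port_id, aead_xform, app_sa_ctx, sess_pool, priv_pool) are illustrative
 * assumptions, not values taken from this file.
 *
 *	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
 *			.spi = 0x100,
 *		},
 *		.crypto_xform = &aead_xform,
 *		.userdata = app_sa_ctx,
 *	};
 *	struct rte_security_session *sess =
 *		rte_security_session_create(ctx, &conf, sess_pool, priv_pool);
 */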