1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_security.h>
7 #include <rte_security_driver.h>
9 #include <cn9k_ethdev.h>
10 #include <cnxk_security.h>
/* Crypto capabilities advertised for CN9K inline IPsec sessions.
 * NOTE(review): the per-entry size/range fields (key, digest, IV/AAD) are
 * not visible in this extract; only op/xform/algo lines are shown.
 */
12 static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
/* AES-GCM AEAD (combined cipher + authentication) */
14 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
16 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
18 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* AES-CBC cipher (paired with a separate auth xform) */
44 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
46 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
48 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
/* HMAC-SHA1 authentication */
64 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
66 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
68 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
/* Mandatory array terminator for rte_cryptodev capability lists */
83 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/* Security capabilities table returned by cn9k_eth_sec_capabilities_get().
 * Advertises inline-protocol ESP tunnel support for both directions, plus
 * the RTE_SECURITY_ACTION_TYPE_NONE terminator entry.
 * NOTE(review): the .ipsec sub-struct braces and .options fields are not
 * visible in this extract.
 */
86 static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
87 { /* IPsec Inline Protocol ESP Tunnel Ingress */
88 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
89 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
91 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
92 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
93 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
/* Shared symmetric-crypto capability list defined above */
96 .crypto_capabilities = cn9k_eth_sec_crypto_caps,
97 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
99 { /* IPsec Inline Protocol ESP Tunnel Egress */
100 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
101 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
103 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
104 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
105 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
108 .crypto_capabilities = cn9k_eth_sec_crypto_caps,
109 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* List terminator entry */
112 .action = RTE_SECURITY_ACTION_TYPE_NONE
/* Initialize the software anti-replay window state for an inbound SA.
 * Rejects window sizes above the hardware/ON limit, then seeds the window
 * so that bottom = 1 and both top and base = replay_win_sz.
 * NOTE(review): the return statements and closing braces are not visible in
 * this extract; presumably returns negative on unsupported size — confirm.
 */
117 ar_window_init(struct cn9k_inb_priv_data *inb_priv)
/* Reject windows larger than the ON-model anti-replay maximum */
119 if (inb_priv->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
120 plt_err("Replay window size:%u is not supported",
121 inb_priv->replay_win_sz);
/* Lock serializes datapath updates of the replay window state */
125 rte_spinlock_init(&inb_priv->ar.lock);
127 * Set window bottom to 1, base and top to size of
130 inb_priv->ar.winb = 1;
131 inb_priv->ar.wint = inb_priv->replay_win_sz;
132 inb_priv->ar.base = inb_priv->replay_win_sz;
/* Create an inline-protocol IPsec security session on a CN9K ethdev.
 *
 * Validates the conf (inline-protocol action, IPsec protocol), registers the
 * security dynfield, rejects duplicate SPIs, then allocates a session object
 * from @mempool and — under the per-direction lock — initializes either the
 * inbound or outbound ROC ONF SA, links it into the device list, and stores
 * the fast-path session private word.
 *
 * Fix(review): "(void **)ð_sec" on the rte_mempool_get() line was a
 * mis-encoded "&eth;" HTML entity; restored to "(void **)&eth_sec" (address
 * of the local object pointer being allocated).
 *
 * NOTE(review): several interior lines (declarations of 'inbound', 'rc',
 * 'sa_idx', error-path gotos/returns, closing braces) are not visible in
 * this extract and are left as in the original file.
 */
138 cn9k_eth_sec_session_create(void *device,
139 struct rte_security_session_conf *conf,
140 struct rte_security_session *sess,
141 struct rte_mempool *mempool)
143 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
144 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
145 struct rte_security_ipsec_xform *ipsec;
146 struct cn9k_sec_sess_priv sess_priv;
147 struct rte_crypto_sym_xform *crypto;
148 struct cnxk_eth_sec_sess *eth_sec;
149 rte_spinlock_t *lock;
/* Buffer for deferred error messages emitted on the common error path */
150 char tbuf[128] = {0};
/* Only inline-protocol IPsec sessions are supported here */
154 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
157 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
/* Ensure the rte_security dynamic mbuf field is registered */
160 if (rte_security_dynfield_register() < 0)
163 ipsec = &conf->ipsec;
164 crypto = conf->crypto_xform;
165 inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
167 /* Search if a session already exists */
168 if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
169 plt_err("%s SA with SPI %u already in use",
170 inbound ? "Inbound" : "Outbound", ipsec->spi);
/* Allocate the session bookkeeping object from the caller's mempool */
174 if (rte_mempool_get(mempool, (void **)&eth_sec)) {
175 plt_err("Could not allocate security session private data");
/* Per-direction lock protects the SA tables and session lists */
179 lock = inbound ? &dev->inb.lock : &dev->outb.lock;
180 rte_spinlock_lock(lock);
182 memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
186 struct cn9k_inb_priv_data *inb_priv;
187 struct roc_onf_ipsec_inb_sa *inb_sa;
/* Private data must fit in the SW-reserved area of the inbound SA */
189 PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
190 ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);
192 /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
193 * device always for CN9K.
195 inb_sa = (struct roc_onf_ipsec_inb_sa *)
196 roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
198 snprintf(tbuf, sizeof(tbuf),
199 "Failed to create ingress sa");
204 /* Check if SA is already in use */
205 if (inb_sa->ctl.valid) {
206 snprintf(tbuf, sizeof(tbuf),
207 "Inbound SA with SPI %u already in use",
213 memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));
215 /* Fill inbound sa params */
216 rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
218 snprintf(tbuf, sizeof(tbuf),
219 "Failed to init inbound sa, rc=%d", rc);
/* SW-reserved area of the SA holds the per-SA private data */
223 inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
224 /* Back pointer to get eth_sec */
225 inb_priv->eth_sec = eth_sec;
227 /* Save userdata in inb private area */
228 inb_priv->userdata = conf->userdata;
/* Optional software anti-replay window (see ar_window_init) */
230 inb_priv->replay_win_sz = ipsec->replay_win_sz;
231 if (inb_priv->replay_win_sz) {
232 rc = ar_window_init(inb_priv);
237 /* Prepare session priv */
238 sess_priv.inb_sa = 1;
/* Inbound SAs are indexed directly by SPI */
239 sess_priv.sa_idx = ipsec->spi;
241 /* Pointer from eth_sec -> inb_sa */
242 eth_sec->sa = inb_sa;
243 eth_sec->sess = sess;
244 eth_sec->sa_idx = ipsec->spi;
245 eth_sec->spi = ipsec->spi;
248 TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
251 struct cn9k_outb_priv_data *outb_priv;
252 struct roc_onf_ipsec_outb_sa *outb_sa;
253 uintptr_t sa_base = dev->outb.sa_base;
254 struct cnxk_ipsec_outb_rlens *rlens;
/* Private data must fit in the SW-reserved area of the outbound SA */
257 PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
258 ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);
260 /* Alloc an sa index */
261 rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
265 outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
266 outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
267 rlens = &outb_priv->rlens;
269 memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));
271 /* Fill outbound sa params */
272 rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
274 snprintf(tbuf, sizeof(tbuf),
275 "Failed to init outbound sa, rc=%d", rc);
/* Return the SA index on failure; OR keeps the original error */
276 rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
281 outb_priv->userdata = conf->userdata;
282 outb_priv->sa_idx = sa_idx;
283 outb_priv->eth_sec = eth_sec;
284 /* Start sequence number with 1 */
/* Cache the salt/nonce so AES-GCM datapath can copy it per packet */
287 memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
288 if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
289 outb_priv->copy_salt = 1;
/* Precompute ESP roundup/partial lengths for Tx length adjustment */
292 cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
294 sess_priv.sa_idx = outb_priv->sa_idx;
295 sess_priv.roundup_byte = rlens->roundup_byte;
296 sess_priv.roundup_len = rlens->roundup_len;
297 sess_priv.partial_len = rlens->partial_len;
299 /* Pointer from eth_sec -> outb_sa */
300 eth_sec->sa = outb_sa;
301 eth_sec->sess = sess;
302 eth_sec->sa_idx = sa_idx;
303 eth_sec->spi = ipsec->spi;
305 TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
309 /* Sync SA content */
310 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
312 rte_spinlock_unlock(lock);
314 plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
315 inbound ? "inbound" : "outbound", eth_sec->spi,
318 * Update fast path info in priv area.
320 set_sec_session_private_data(sess, (void *)sess_priv.u64);
/* Error path: release lock and return the unused session object */
324 rte_spinlock_unlock(lock);
325 rte_mempool_put(mempool, eth_sec);
/* Destroy an inline IPsec session created by cn9k_eth_sec_session_create().
 * Looks up the session object, invalidates the HW SA under the per-direction
 * lock, unlinks it from the device list (releasing the outbound SA index for
 * egress sessions), then returns the object to its originating mempool.
 * NOTE(review): branch/return lines and closing braces between the visible
 * statements are not shown in this extract.
 */
332 cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
334 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
335 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
336 struct roc_onf_ipsec_outb_sa *outb_sa;
337 struct roc_onf_ipsec_inb_sa *inb_sa;
338 struct cnxk_eth_sec_sess *eth_sec;
339 struct rte_mempool *mp;
340 rte_spinlock_t *lock;
/* Resolve session -> driver bookkeeping object */
342 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
/* Same per-direction lock used at session-create time */
346 lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
347 rte_spinlock_lock(lock);
/* Inbound: clear the SA valid bit so HW stops matching this SPI */
350 inb_sa = eth_sec->sa;
352 inb_sa->ctl.valid = 0;
354 TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
/* Outbound: invalidate SA and recycle its index */
357 outb_sa = eth_sec->sa;
359 outb_sa->ctl.valid = 0;
361 /* Release Outbound SA index */
362 cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
363 TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
367 /* Sync SA content */
368 plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
370 rte_spinlock_unlock(lock);
372 plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
373 eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
376 /* Put eth_sec object back to pool */
377 mp = rte_mempool_from_obj(eth_sec);
378 set_sec_session_private_data(sess, NULL);
379 rte_mempool_put(mp, eth_sec);
/* rte_security .capabilities_get callback: return the static CN9K inline
 * IPsec capability table; the device argument is unused.
 */
383 static const struct rte_security_capability *
384 cn9k_eth_sec_capabilities_get(void *device __rte_unused)
386 return cn9k_eth_sec_capabilities;
390 cn9k_eth_sec_ops_override(void)
392 static int init_once;
398 /* Update platform specific ops */
399 cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
400 cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
401 cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;