/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn10k_ethdev.h>
#include <cnxk_security.h>

static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				/* Key/digest/AAD/IV size ranges elided */
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				/* Key/IV size ranges elided */
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				/* Key/digest size ranges elided */
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

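/*
 * Security capabilities advertised via .capabilities_get: one entry per
 * ESP inline-protocol (mode, direction) combination, each sharing the
 * crypto capability table above through .crypto_capabilities.
 */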
static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
	{ /* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Transport Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Transport Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

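/*
 * SSO work callback for inline IPsec error notifications: the inline
 * device posts RTE_EVENT_TYPE_ETHDEV work for inbound IPsec packets with
 * a bad L4, while the outbound path posts RTE_EVENT_TYPE_CPU work with
 * subtype CNXK_ETHDEV_SEC_OUTB_EV_SUB for outbound SA errors, which get
 * translated into RTE_ETH_EVENT_IPSEC ethdev callbacks below.
 */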
static void
cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
{
	struct rte_eth_event_ipsec_desc desc;
	struct cn10k_sec_sess_priv sess_priv;
	struct cn10k_outb_priv_data *priv;
	struct roc_ot_ipsec_outb_sa *sa;
	struct cpt_cn10k_res_s *res;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	uint16_t dlen_adj, rlen;
	struct rte_mbuf *mbuf;
	uintptr_t sa_base;
	uintptr_t nixtx;
	uint8_t port;

	RTE_SET_USED(args);

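	/*
	 * gw[0] layout as decoded below: bits [31:28] carry the event type,
	 * bits [27:20] the event subtype, and the low byte of the tag the
	 * originating ethdev port; gw[1] carries the work pointer.
	 */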
	switch ((gw[0] >> 28) & 0xF) {
	case RTE_EVENT_TYPE_ETHDEV:
		/* Event from inbound inline dev due to IPSEC packet bad L4 */
		mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
		plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
		rte_pktmbuf_free(mbuf);
		return;
	case RTE_EVENT_TYPE_CPU:
		/* Check for subtype */
		if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
			/* Event from outbound inline error */
			mbuf = (struct rte_mbuf *)gw[1];
			break;
		}
		/* Fall through */
	default:
		plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
			gw[0], gw[1]);
		return;
	}

	/* Get ethdev port from tag */
	port = gw[0] & 0xFF;
	eth_dev = &rte_eth_devices[port];
	dev = cnxk_eth_pmd_priv(eth_dev);

	sess_priv.u64 = *rte_security_dynfield(mbuf);
	/* Calculate dlen adj */
	dlen_adj = mbuf->pkt_len - mbuf->l2_len;
	rlen = (dlen_adj + sess_priv.roundup_len) +
	       (sess_priv.roundup_byte - 1);
	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
	rlen += sess_priv.partial_len;
	dlen_adj = rlen - dlen_adj;

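	/*
	 * I.e. rlen = roundup(pkt_len - l2_len + roundup_len, roundup_byte)
	 * + partial_len, and dlen_adj is how many bytes IPsec processing
	 * grew the packet. Example with illustrative numbers: 100B past L2,
	 * roundup_len = 0, roundup_byte = 16 and partial_len = 30 give
	 * rlen = 112 + 30 = 142, i.e. dlen_adj = 42.
	 */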
	/* Find the res area residing on next cacheline after end of data */
	nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
	nixtx += BIT_ULL(7);
	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
	res = (struct cpt_cn10k_res_s *)nixtx;

	plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
		    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);

	sa_base = dev->outb.sa_base;
	sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
	priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);

	memset(&desc, 0, sizeof(desc));

	switch (res->uc_compcode) {
	case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
		break;
	default:
		plt_warn("Outbound error, mbuf %p, sa_index %u, "
			 "compcode %x uc %x", mbuf, sess_priv.sa_idx,
			 res->compcode, res->uc_compcode);
		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
		break;
	}

	desc.metadata = (uint64_t)priv->userdata;
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
	rte_pktmbuf_free(mbuf);
}

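/*
 * Create an inline-protocol IPsec session: validate conf, locate (inbound)
 * or allocate (outbound) a hardware SA, fill it from the IPsec and crypto
 * transforms, link the cnxk_eth_sec_sess bookkeeping object, sync the SA
 * into the context cache and stash the fast-path word as session private
 * data. An application typically reaches this handler with a conf of
 * roughly this shape (sketch only, fields abridged; xform and app_ctx are
 * placeholders for the application's transform chain and context):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = { .spi = ..., .direction = ..., ... },
 *		.crypto_xform = &xform,
 *		.userdata = app_ctx,
 *	};
 *
 * passed to rte_security_session_create() on this port's security context.
 */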
static int
cn10k_eth_sec_session_create(void *device,
			     struct rte_security_session_conf *conf,
			     struct rte_security_session *sess,
			     struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn10k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	bool inbound, inl_dev;
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	inl_dev = !!dev->inb.inl_dev;

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	/* Acquire lock on inline dev for inbound */
	if (inbound && inl_dev)
		roc_nix_inl_dev_lock();

	if (inbound) {
		struct cn10k_inb_priv_data *inb_priv;
		struct roc_ot_ipsec_inb_sa *inb_sa;
		uintptr_t sa;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
		sa = roc_nix_inl_inb_sa_get(&dev->nix, inl_dev, ipsec->spi);
		if (!sa && dev->inb.inl_dev) {
			plt_err("Failed to create ingress sa, inline dev "
				"not found or spi not in range");
			rc = -ENOTSUP;
			goto mempool_put;
		} else if (!sa) {
			plt_err("Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

		/* Check if SA is already in use */
		if (inb_sa->w2.s.valid) {
			plt_err("Inbound SA with SPI %u already in use",
				ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		memset(inb_sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;
		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Save SA index/SPI in cookie for now */
		inb_sa->w1.s.cookie = rte_cpu_to_be_32(ipsec->spi);

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi;
		eth_sec->spi = ipsec->spi;
		eth_sec->inl_dev = !!dev->inb.inl_dev;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
	} else {
		struct cn10k_outb_priv_data *outb_priv;
		struct roc_ot_ipsec_outb_sa *outb_sa;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint64_t sa_base = dev->outb.sa_base;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		/* Prepare session priv */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;
		sess_priv.mode = outb_sa->w2.s.ipsec_mode;
		sess_priv.outer_ip_ver = outb_sa->w2.s.outer_ip_ver;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
	}

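	/*
	 * At this point sess_priv packs everything the fast path needs
	 * (SA index, roundup/partial lengths, mode) into one 64-bit word;
	 * it is stored below as the session private data value so datapath
	 * code can read it without dereferencing the SA.
	 */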
	/* Sync session in context cache */
	roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
			    ROC_NIX_INL_SA_OP_RELOAD);

	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);
	/*
	 * Update fast path info in priv area.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_mempool_put(mempool, eth_sec);
	return rc;
}

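/*
 * Destroy an inline IPsec session: invalidate the hardware SA, unlink the
 * bookkeeping object, resync the context cache so hardware stops using
 * the SA, and return the object to its mempool.
 */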
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_ot_ipsec_inb_sa *inb_sa;
	struct roc_ot_ipsec_outb_sa *outb_sa;
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	if (eth_sec->inl_dev)
		roc_nix_inl_dev_lock();

	if (eth_sec->inb) {
		inb_sa = eth_sec->sa;
		/* Disable SA */
		inb_sa->w2.s.valid = 0;

		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		outb_sa = eth_sec->sa;
		/* Disable SA */
		outb_sa->w2.s.valid = 0;

		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}

	/* Sync session in context cache */
	roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
			    ROC_NIX_INL_SA_OP_RELOAD);

	if (eth_sec->inl_dev)
		roc_nix_inl_dev_unlock();

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);

	/* Put eth_sec object back to pool */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);

	return 0;
}

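/* Return the static capability table defined at the top of this file. */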
static const struct rte_security_capability *
cn10k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn10k_eth_sec_capabilities;
}

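/*
 * Presumably called once per process during cn10k device probe, before the
 * shared cnxk_eth_sec_ops table is exposed through the port's security
 * context, so that the cn10k-specific handlers above take effect;
 * init_once makes repeat calls harmless.
 */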
void
cn10k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
}