1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_eventdev.h>
7 #include <rte_security.h>
8 #include <rte_security_driver.h>
10 #include <cn10k_ethdev.h>
11 #include <cnxk_security.h>
/* Crypto capabilities advertised for cn10k inline IPsec sessions.
 * Only the AES-GCM AEAD algorithm is listed before the end-of-list
 * terminator.
 * NOTE(review): the paste has interior lines elided (numbering jumps
 * 19 -> 44), so the nested {.sym = {.aead = {...}}} initializers and
 * the key/digest/IV size ranges are not visible here — verify against
 * the full file.
 */
13 static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
15 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
17 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
19 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* Mandatory terminator entry for rte_cryptodev capability arrays */
44 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/* Security capabilities reported by cn10k_eth_sec_capabilities_get():
 * inline-protocol IPsec ESP in all four tunnel/transport x
 * ingress/egress combinations, each pointing at the shared
 * cn10k_eth_sec_crypto_caps crypto capability table.
 * NOTE(review): interior lines are elided in this paste (e.g. the
 * .ipsec = { and options sub-initializers, and closing braces), so
 * per-entry option flags are not visible here.
 */
47 static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
48 { /* IPsec Inline Protocol ESP Tunnel Ingress */
49 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
50 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
52 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
53 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
54 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
57 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
58 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
60 { /* IPsec Inline Protocol ESP Tunnel Egress */
61 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
62 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
64 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
65 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
66 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
69 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
70 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
72 { /* IPsec Inline Protocol ESP Transport Egress */
73 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
74 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
76 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
77 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
78 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
81 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
82 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
84 { /* IPsec Inline Protocol ESP Transport Ingress */
85 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
86 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
88 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
89 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
90 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
93 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
94 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* Terminator entry: ACTION_TYPE_NONE marks end of capability list */
97 .action = RTE_SECURITY_ACTION_TYPE_NONE
/* SSO work callback registered via roc_nix_inl_cb_register() from
 * cn10k_eth_sec_session_create(). Invoked for inline IPsec error events
 * delivered as SSO work words gw[0]/gw[1].
 *
 * Visible behavior:
 *  - RTE_EVENT_TYPE_ETHDEV (gw[0] bits 31:28): inbound inline-dev event
 *    for a packet with bad L4 — recover the mbuf from gw[1] and free it.
 *  - RTE_EVENT_TYPE_CPU with subtype CNXK_ETHDEV_SEC_OUTB_EV_SUB
 *    (gw[0] bits 27:20): outbound inline error — gw[1] is the mbuf.
 *    The code then locates the CPT result area past the packet data,
 *    maps the error to an RTE_ETH_EVENT_IPSEC subtype, notifies the
 *    application via rte_eth_dev_callback_process() and frees the mbuf.
 *
 * NOTE(review): this paste elides several interior lines (numbering
 * jumps, e.g. 124->126, 130->135, 135->140): the break/return after the
 * ETHDEV case, the default-case handling around the plt_err(), and the
 * declarations of 'port', 'nixtx' and 'sa_base' are not visible here.
 */
102 cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
104 struct rte_eth_event_ipsec_desc desc;
105 struct cn10k_sec_sess_priv sess_priv;
106 struct cn10k_outb_priv_data *priv;
107 struct roc_ot_ipsec_outb_sa *sa;
108 struct cpt_cn10k_res_s *res;
109 struct rte_eth_dev *eth_dev;
110 struct cnxk_eth_dev *dev;
111 uint16_t dlen_adj, rlen;
112 struct rte_mbuf *mbuf;
/* Event type lives in gw[0] bits 31:28 */
119 switch ((gw[0] >> 28) & 0xF) {
120 case RTE_EVENT_TYPE_ETHDEV:
121 /* Event from inbound inline dev due to IPSEC packet bad L4 */
122 mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
123 plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
124 rte_pktmbuf_free(mbuf);
/* NOTE(review): the break/return terminating this case is elided here */
126 case RTE_EVENT_TYPE_CPU:
127 /* Check for subtype */
128 if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
129 /* Event from outbound inline error */
130 mbuf = (struct rte_mbuf *)gw[1];
/* Any other event type/subtype is unexpected — log and bail out
 * (NOTE(review): fallthrough/default plumbing elided in this paste).
 */
135 plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
140 /* Get ethdev port from tag */
142 eth_dev = &rte_eth_devices[port];
143 dev = cnxk_eth_pmd_priv(eth_dev);
/* Fast-path session private data stashed in the mbuf dynfield */
145 sess_priv.u64 = *rte_security_dynfield(mbuf);
146 /* Calculate dlen adj */
147 dlen_adj = mbuf->pkt_len - mbuf->l2_len;
/* Round post-encap length up to the SA's cipher block roundup and add
 * the fixed per-packet overhead (partial_len) to get the rounded length.
 */
148 rlen = (dlen_adj + sess_priv.roundup_len) +
149 (sess_priv.roundup_byte - 1);
150 rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
151 rlen += sess_priv.partial_len;
152 dlen_adj = rlen - dlen_adj;
154 /* Find the res area residing on next cacheline after end of data */
155 nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
/* Align down to a 128B (BIT_ULL(7)) cacheline boundary */
157 nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
158 res = (struct cpt_cn10k_res_s *)nixtx;
160 plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
161 mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);
163 sess_priv.u64 = *rte_security_dynfield(mbuf);
/* Locate the outbound SA and its SW-reserved private area to recover
 * the application userdata for the event descriptor.
 */
165 sa_base = dev->outb.sa_base;
166 sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
167 priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
169 memset(&desc, 0, sizeof(desc));
/* Map microcode completion code to an rte_eth IPsec event subtype */
171 switch (res->uc_compcode) {
172 case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
173 desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
/* NOTE(review): break and default label elided here (175 area) */
176 plt_warn("Outbound error, mbuf %p, sa_index %u, "
177 "compcode %x uc %x", mbuf, sess_priv.sa_idx,
178 res->compcode, res->uc_compcode);
179 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
183 desc.metadata = (uint64_t)priv->userdata;
/* Notify application callbacks, then release the errored packet */
184 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
185 rte_pktmbuf_free(mbuf);
/* rte_security session_create op for cn10k inline IPsec.
 *
 * Visible flow:
 *  - Reject anything that is not INLINE_PROTOCOL + IPsec.
 *  - Register the mbuf security dynfield; on the primary process,
 *    register cn10k_eth_sec_sso_work_cb for inline error events.
 *  - Refuse duplicate SPIs (cnxk_eth_sec_sess_get_by_spi).
 *  - Allocate a cnxk_eth_sec_sess tracking object from 'mempool'.
 *  - Inbound: fetch the HW inbound SA slot by SPI, fill it via
 *    cnxk_ot_ipsec_inb_sa_fill(), link private data, queue on
 *    dev->inb.list.
 *  - Outbound: allocate an SA index, fill the SA via
 *    cnxk_ot_ipsec_outb_sa_fill(), compute roundup lengths, queue on
 *    dev->outb.list.
 *  - Sync the SA to the context cache and stash fast-path info
 *    (sess_priv.u64) in the session private data.
 *
 * NOTE(review): many interior lines are elided in this paste — the
 * function's return statements, 'rc'/'sa'/'sa_idx' declarations, the
 * goto error labels (e.g. cleanup/mempool_put paths) and several
 * closing braces are not visible; verify against the full file.
 */
189 cn10k_eth_sec_session_create(void *device,
190 struct rte_security_session_conf *conf,
191 struct rte_security_session *sess,
192 struct rte_mempool *mempool)
194 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
195 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
196 struct rte_security_ipsec_xform *ipsec;
197 struct cn10k_sec_sess_priv sess_priv;
198 struct rte_crypto_sym_xform *crypto;
199 struct cnxk_eth_sec_sess *eth_sec;
200 bool inbound, inl_dev;
/* Only inline-protocol IPsec is supported by this driver */
203 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
206 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
/* Fast path needs the security dynfield in the mbuf */
209 if (rte_security_dynfield_register() < 0)
/* Error-event callback is registered once, by the primary process */
212 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
213 roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
215 ipsec = &conf->ipsec;
216 crypto = conf->crypto_xform;
217 inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
218 inl_dev = !!dev->inb.inl_dev;
220 /* Search if a session already exits */
221 if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
222 plt_err("%s SA with SPI %u already in use",
223 inbound ? "Inbound" : "Outbound", ipsec->spi);
227 if (rte_mempool_get(mempool, (void **)&eth_sec)) {
228 plt_err("Could not allocate security session private data");
232 memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
235 /* Acquire lock on inline dev for inbound */
236 if (inbound && inl_dev)
237 roc_nix_inl_dev_lock();
/* ---- Inbound path (NOTE(review): the 'if (inbound) {' line itself
 * is elided around original line 239) ---- */
240 struct cn10k_inb_priv_data *inb_priv;
241 struct roc_ot_ipsec_inb_sa *inb_sa;
/* Private data must fit in the SA's SW-reserved area */
244 PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
245 ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
247 /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
248 sa = roc_nix_inl_inb_sa_get(&dev->nix, inl_dev, ipsec->spi);
249 if (!sa && dev->inb.inl_dev) {
250 plt_err("Failed to create ingress sa, inline dev "
251 "not found or spi not in range");
255 plt_err("Failed to create ingress sa");
260 inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;
262 /* Check if SA is already in use */
263 if (inb_sa->w2.s.valid) {
264 plt_err("Inbound SA with SPI %u already in use",
270 memset(inb_sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));
272 /* Fill inbound sa params */
273 rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
275 plt_err("Failed to init inbound sa, rc=%d", rc);
279 inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
280 /* Back pointer to get eth_sec */
281 inb_priv->eth_sec = eth_sec;
282 /* Save userdata in inb private area */
283 inb_priv->userdata = conf->userdata;
285 /* Save SA index/SPI in cookie for now */
286 inb_sa->w1.s.cookie = rte_cpu_to_be_32(ipsec->spi);
288 /* Prepare session priv */
289 sess_priv.inb_sa = 1;
290 sess_priv.sa_idx = ipsec->spi;
292 /* Pointer from eth_sec -> inb_sa */
293 eth_sec->sa = inb_sa;
294 eth_sec->sess = sess;
295 eth_sec->sa_idx = ipsec->spi;
296 eth_sec->spi = ipsec->spi;
297 eth_sec->inl_dev = !!dev->inb.inl_dev;
300 TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
/* ---- Outbound path (NOTE(review): '} else {' elided around original
 * line 302) ---- */
303 struct cn10k_outb_priv_data *outb_priv;
304 struct roc_ot_ipsec_outb_sa *outb_sa;
305 struct cnxk_ipsec_outb_rlens *rlens;
306 uint64_t sa_base = dev->outb.sa_base;
309 PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
310 ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
312 /* Alloc an sa index */
313 rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
317 outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
318 outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
319 rlens = &outb_priv->rlens;
321 memset(outb_sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));
323 /* Fill outbound sa params */
324 rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
326 plt_err("Failed to init outbound sa, rc=%d", rc);
/* On fill failure, also release the SA index just allocated */
327 rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
332 outb_priv->userdata = conf->userdata;
333 outb_priv->sa_idx = sa_idx;
334 outb_priv->eth_sec = eth_sec;
/* Cache rlen info for fast-path datapath length computation */
337 cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
339 /* Prepare session priv */
340 sess_priv.sa_idx = outb_priv->sa_idx;
341 sess_priv.roundup_byte = rlens->roundup_byte;
342 sess_priv.roundup_len = rlens->roundup_len;
343 sess_priv.partial_len = rlens->partial_len;
344 sess_priv.mode = outb_sa->w2.s.ipsec_mode;
345 sess_priv.outer_ip_ver = outb_sa->w2.s.outer_ip_ver;
347 /* Pointer from eth_sec -> outb_sa */
348 eth_sec->sa = outb_sa;
349 eth_sec->sess = sess;
350 eth_sec->sa_idx = sa_idx;
351 eth_sec->spi = ipsec->spi;
353 TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
357 /* Sync session in context cache */
358 roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
359 ROC_NIX_INL_SA_OP_RELOAD);
361 if (inbound && inl_dev)
362 roc_nix_inl_dev_unlock();
364 plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
365 inbound ? "inbound" : "outbound", eth_sec->spi,
366 eth_sec->sa_idx, eth_sec->inl_dev);
368 * Update fast path info in priv area.
370 set_sec_session_private_data(sess, (void *)sess_priv.u64);
/* ---- Error path (NOTE(review): the label, e.g. 'mempool_put:', and
 * surrounding returns are elided around original line 373) ---- */
374 if (inbound && inl_dev)
375 roc_nix_inl_dev_unlock();
376 rte_mempool_put(mempool, eth_sec);
/* rte_security session_destroy op for cn10k inline IPsec.
 *
 * Visible flow: look up the tracking object for 'sess'; under the
 * inline-dev lock (when applicable) invalidate the HW SA (clear
 * w2.s.valid) and unlink from the inbound/outbound list — for outbound
 * also return the SA index; sync the SA to the context cache; clear
 * the session private data and return the tracking object to its
 * originating mempool.
 *
 * NOTE(review): interior lines are elided in this paste — the NULL
 * check after the lookup, the 'if (eth_sec->inb) { ... } else { ... }'
 * branch lines, any memset of the SA contents, and the return
 * statement are not visible here.
 */
381 cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
383 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
384 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
385 struct roc_ot_ipsec_inb_sa *inb_sa;
386 struct roc_ot_ipsec_outb_sa *outb_sa;
387 struct cnxk_eth_sec_sess *eth_sec;
388 struct rte_mempool *mp;
390 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
/* Serialize against inline-dev SA access for inbound sessions */
394 if (eth_sec->inl_dev)
395 roc_nix_inl_dev_lock();
/* Inbound teardown: invalidate SA and unlink from inbound list */
398 inb_sa = eth_sec->sa;
400 inb_sa->w2.s.valid = 0;
402 TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
/* Outbound teardown: invalidate SA, free SA index, unlink */
405 outb_sa = eth_sec->sa;
407 outb_sa->w2.s.valid = 0;
409 /* Release Outbound SA index */
410 cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
411 TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
415 /* Sync session in context cache */
416 roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
417 ROC_NIX_INL_SA_OP_RELOAD);
419 if (eth_sec->inl_dev)
420 roc_nix_inl_dev_unlock();
422 plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
423 eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
424 eth_sec->sa_idx, eth_sec->inl_dev);
426 /* Put eth_sec object back to pool */
427 mp = rte_mempool_from_obj(eth_sec);
428 set_sec_session_private_data(sess, NULL);
429 rte_mempool_put(mp, eth_sec);
433 static const struct rte_security_capability *
434 cn10k_eth_sec_capabilities_get(void *device __rte_unused)
436 return cn10k_eth_sec_capabilities;
/* Install cn10k-specific implementations into the common
 * cnxk_eth_sec_ops table (session create/destroy and
 * capabilities_get). Guarded by a function-local 'init_once' so the
 * override happens only on the first call.
 * NOTE(review): the paste elides the guard body (original lines
 * 443-447, presumably 'if (init_once) return; init_once = 1;') and the
 * function braces — verify against the full file.
 */
440 cn10k_eth_sec_ops_override(void)
442 static int init_once;
448 /* Update platform specific ops */
449 cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
450 cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
451 cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;