1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_eventdev.h>
7 #include <rte_security.h>
8 #include <rte_security_driver.h>
10 #include <cn10k_ethdev.h>
11 #include <cnxk_security.h>
/* Crypto capabilities advertised for cn10k inline IPsec sessions.
 * Visible entries describe a single symmetric AEAD (AES-GCM) capability;
 * size/increment sub-fields are on lines elided from this chunk.
 */
13 static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
/* Symmetric crypto operation */
15 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
/* AEAD transform type */
17 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
/* AES in GCM mode */
19 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* Mandatory end-of-list sentinel for capability arrays */
44 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/* Security capabilities table returned by cn10k_eth_sec_capabilities_get():
 * inline-protocol ESP tunnel-mode IPsec for both directions, each pointing at
 * the shared cn10k_eth_sec_crypto_caps table, terminated by an ACTION_TYPE_NONE
 * entry.
 */
47 static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
48 { /* IPsec Inline Protocol ESP Tunnel Ingress */
49 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
50 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
52 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
53 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
54 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
/* Same AES-GCM capability set for both directions */
57 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
58 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
60 { /* IPsec Inline Protocol ESP Tunnel Egress */
61 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
62 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
64 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
65 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
66 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
69 .crypto_capabilities = cn10k_eth_sec_crypto_caps,
70 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
/* Terminator entry: action NONE marks end of the capability list */
73 .action = RTE_SECURITY_ACTION_TYPE_NONE
/* SSO work callback registered via roc_nix_inl_cb_register().
 * @gw holds the two event words: gw[0] carries event type/subtype/tag bits,
 * gw[1] carries a packet or mbuf pointer depending on the event type.
 * Inbound inline-dev error packets are simply freed; outbound inline errors
 * are decoded from the CPT result area and reported to the application as an
 * RTE_ETH_EVENT_IPSEC callback.
 */
78 cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
80 struct rte_eth_event_ipsec_desc desc;
81 struct cn10k_sec_sess_priv sess_priv;
82 struct cn10k_outb_priv_data *priv;
83 struct roc_ot_ipsec_outb_sa *sa;
84 struct cpt_cn10k_res_s *res;
85 struct rte_eth_dev *eth_dev;
86 struct cnxk_eth_dev *dev;
87 uint16_t dlen_adj, rlen;
88 struct rte_mbuf *mbuf;
/* Event type is encoded in bits [31:28] of gw[0] */
95 switch ((gw[0] >> 28) & 0xF) {
96 case RTE_EVENT_TYPE_ETHDEV:
97 /* Event from inbound inline dev due to IPSEC packet bad L4 */
/* gw[1] points just past the mbuf header; step back to the mbuf itself */
98 mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
99 plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
100 rte_pktmbuf_free(mbuf);
102 case RTE_EVENT_TYPE_CPU:
103 /* Check for subtype */
/* Subtype is encoded in bits [27:20] of gw[0] */
104 if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
105 /* Event from outbound inline error */
106 mbuf = (struct rte_mbuf *)gw[1];
111 plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
116 /* Get ethdev port from tag */
/* NOTE(review): 'port' is presumably extracted from the gw[0] tag on a line
 * not visible in this chunk — confirm against the full source.
 */
118 eth_dev = &rte_eth_devices[port];
119 dev = cnxk_eth_pmd_priv(eth_dev);
/* Fast-path session info was stashed in the mbuf security dynfield */
121 sess_priv.u64 = *rte_security_dynfield(mbuf);
122 /* Calculate dlen adj */
123 dlen_adj = mbuf->pkt_len - mbuf->l2_len;
/* Round payload length up to the SA's roundup_byte granularity, then add
 * the fixed ESP overhead (partial_len) to get the on-wire length.
 */
124 rlen = (dlen_adj + sess_priv.roundup_len) +
125 (sess_priv.roundup_byte - 1);
126 rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
127 rlen += sess_priv.partial_len;
128 dlen_adj = rlen - dlen_adj;
130 /* Find the res area residing on next cacheline after end of data */
131 nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
/* Align down to a 128-byte (BIT_ULL(7)) cacheline boundary */
133 nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
134 res = (struct cpt_cn10k_res_s *)nixtx;
136 plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
137 mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);
139 sess_priv.u64 = *rte_security_dynfield(mbuf);
/* Locate the outbound SA and its SW-reserved private area for userdata */
141 sa_base = dev->outb.sa_base;
142 sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
143 priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
145 memset(&desc, 0, sizeof(desc));
/* Map the CPT microcode completion code to an IPsec event subtype */
147 switch (res->uc_compcode) {
148 case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
149 desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
152 plt_warn("Outbound error, mbuf %p, sa_index %u, "
153 "compcode %x uc %x", mbuf, sess_priv.sa_idx,
154 res->compcode, res->uc_compcode);
155 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
/* Hand the application back its per-session userdata with the event */
159 desc.metadata = (uint64_t)priv->userdata;
160 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
161 rte_pktmbuf_free(mbuf);
/* rte_security session_create op for cn10k inline-protocol IPsec.
 * Validates the conf, programs the HW SA (inbound keyed by SPI, outbound by
 * an allocated SA index), links a cnxk_eth_sec_sess object (taken from
 * @mempool) into the per-direction list, and stores fast-path info in the
 * session private data. Returns 0 on success; error paths (mostly on elided
 * lines) release resources via the label at the bottom.
 */
165 cn10k_eth_sec_session_create(void *device,
166 struct rte_security_session_conf *conf,
167 struct rte_security_session *sess,
168 struct rte_mempool *mempool)
170 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
171 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
172 struct rte_security_ipsec_xform *ipsec;
173 struct cn10k_sec_sess_priv sess_priv;
174 struct rte_crypto_sym_xform *crypto;
175 struct cnxk_eth_sec_sess *eth_sec;
176 bool inbound, inl_dev;
/* Only inline-protocol IPsec sessions are supported */
179 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
182 if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
/* mbuf dynfield used by the fast path to carry session priv data */
185 if (rte_security_dynfield_register() < 0)
/* Register the SSO error-event callback once, from the primary process */
188 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
189 roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
191 ipsec = &conf->ipsec;
192 crypto = conf->crypto_xform;
193 inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
194 inl_dev = !!dev->inb.inl_dev;
196 /* Search if a session already exits */
197 if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
198 plt_err("%s SA with SPI %u already in use",
199 inbound ? "Inbound" : "Outbound", ipsec->spi);
/* Session bookkeeping object comes from the caller-provided mempool */
203 if (rte_mempool_get(mempool, (void **)&eth_sec)) {
204 plt_err("Could not allocate security session private data");
208 memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
211 /* Acquire lock on inline dev for inbound */
212 if (inbound && inl_dev)
213 roc_nix_inl_dev_lock();
/* --- Inbound branch (the 'if (inbound)' line is elided in this chunk) --- */
216 struct cn10k_inb_priv_data *inb_priv;
217 struct roc_ot_ipsec_inb_sa *inb_sa;
/* Private data must fit in the SA's SW-reserved area */
220 PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
221 ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
223 /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
224 sa = roc_nix_inl_inb_sa_get(&dev->nix, inl_dev, ipsec->spi);
225 if (!sa && dev->inb.inl_dev) {
226 plt_err("Failed to create ingress sa, inline dev "
227 "not found or spi not in range");
231 plt_err("Failed to create ingress sa");
236 inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;
238 /* Check if SA is already in use */
239 if (inb_sa->w2.s.valid) {
240 plt_err("Inbound SA with SPI %u already in use",
246 memset(inb_sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));
248 /* Fill inbound sa params */
249 rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
251 plt_err("Failed to init inbound sa, rc=%d", rc);
255 inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
256 /* Back pointer to get eth_sec */
257 inb_priv->eth_sec = eth_sec;
258 /* Save userdata in inb private area */
259 inb_priv->userdata = conf->userdata;
261 /* Save SA index/SPI in cookie for now */
262 inb_sa->w1.s.cookie = rte_cpu_to_be_32(ipsec->spi);
264 /* Prepare session priv */
265 sess_priv.inb_sa = 1;
/* Inbound SAs are indexed directly by SPI */
266 sess_priv.sa_idx = ipsec->spi;
268 /* Pointer from eth_sec -> inb_sa */
269 eth_sec->sa = inb_sa;
270 eth_sec->sess = sess;
271 eth_sec->sa_idx = ipsec->spi;
272 eth_sec->spi = ipsec->spi;
273 eth_sec->inl_dev = !!dev->inb.inl_dev;
276 TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
/* --- Outbound branch (the 'else' line is elided in this chunk) --- */
279 struct cn10k_outb_priv_data *outb_priv;
280 struct roc_ot_ipsec_outb_sa *outb_sa;
281 struct cnxk_ipsec_outb_rlens *rlens;
282 uint64_t sa_base = dev->outb.sa_base;
285 PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
286 ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
288 /* Alloc an sa index */
289 rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
293 outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
294 outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
295 rlens = &outb_priv->rlens;
297 memset(outb_sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));
299 /* Fill outbound sa params */
300 rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
302 plt_err("Failed to init outbound sa, rc=%d", rc);
/* Return the SA index on failure; rc keeps the original error */
303 rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
308 outb_priv->userdata = conf->userdata;
309 outb_priv->sa_idx = sa_idx;
310 outb_priv->eth_sec = eth_sec;
/* Precompute roundup/partial lengths used by the Tx fast path */
313 cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
315 /* Prepare session priv */
316 sess_priv.sa_idx = outb_priv->sa_idx;
317 sess_priv.roundup_byte = rlens->roundup_byte;
318 sess_priv.roundup_len = rlens->roundup_len;
319 sess_priv.partial_len = rlens->partial_len;
321 /* Pointer from eth_sec -> outb_sa */
322 eth_sec->sa = outb_sa;
323 eth_sec->sess = sess;
324 eth_sec->sa_idx = sa_idx;
325 eth_sec->spi = ipsec->spi;
327 TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
331 /* Sync session in context cache */
332 roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
333 ROC_NIX_INL_SA_OP_RELOAD);
335 if (inbound && inl_dev)
336 roc_nix_inl_dev_unlock();
338 plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
339 inbound ? "inbound" : "outbound", eth_sec->spi,
340 eth_sec->sa_idx, eth_sec->inl_dev);
342 * Update fast path info in priv area.
/* sess_priv.u64 is stored as the session's opaque private pointer */
344 set_sec_session_private_data(sess, (void *)sess_priv.u64);
/* Error path (label elided): release lock and return eth_sec to its pool */
348 if (inbound && inl_dev)
349 roc_nix_inl_dev_unlock();
350 rte_mempool_put(mempool, eth_sec);
/* rte_security session_destroy op: invalidate the HW SA, unlink the session
 * from the per-direction list, sync the context cache, and return the
 * cnxk_eth_sec_sess object to its originating mempool.
 */
355 cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
357 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
358 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
359 struct roc_ot_ipsec_inb_sa *inb_sa;
360 struct roc_ot_ipsec_outb_sa *outb_sa;
361 struct cnxk_eth_sec_sess *eth_sec;
362 struct rte_mempool *mp;
/* Look up our bookkeeping object for this security session */
364 eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
/* Serialize against other users of the shared inline device */
368 if (eth_sec->inl_dev)
369 roc_nix_inl_dev_lock();
/* Inbound teardown (branch condition elided): clear the SA valid bit */
372 inb_sa = eth_sec->sa;
374 inb_sa->w2.s.valid = 0;
376 TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
/* Outbound teardown (branch condition elided) */
379 outb_sa = eth_sec->sa;
381 outb_sa->w2.s.valid = 0;
383 /* Release Outbound SA index */
384 cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
385 TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
389 /* Sync session in context cache */
390 roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
391 ROC_NIX_INL_SA_OP_RELOAD);
393 if (eth_sec->inl_dev)
394 roc_nix_inl_dev_unlock();
396 plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
397 eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
398 eth_sec->sa_idx, eth_sec->inl_dev);
400 /* Put eth_sec object back to pool */
/* Recover the owning mempool from the object pointer itself */
401 mp = rte_mempool_from_obj(eth_sec);
402 set_sec_session_private_data(sess, NULL);
403 rte_mempool_put(mp, eth_sec);
/* rte_security capabilities_get op: return the static cn10k capability table
 * (terminated by an ACTION_TYPE_NONE entry).
 */
407 static const struct rte_security_capability *
408 cn10k_eth_sec_capabilities_get(void *device __rte_unused)
410 return cn10k_eth_sec_capabilities;
/* Install cn10k-specific handlers into the common cnxk security ops table.
 * NOTE(review): guarded by 'init_once' — the guard check itself is on lines
 * not visible in this chunk; confirm it runs only on the first call.
 */
414 cn10k_eth_sec_ops_override(void)
416 static int init_once;
422 /* Update platform specific ops */
423 cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
424 cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
425 cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;