net/cnxk: support IPsec transport mode in cn10k
[dpdk.git] / drivers / net / cnxk / cn10k_ethdev_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
#include <inttypes.h>

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn10k_ethdev.h>
#include <cnxk_security.h>
12
/* Crypto algorithms usable with the inline IPsec offload on cn10k.
 * Referenced by every entry in cn10k_eth_sec_capabilities below.
 */
static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				/* 128/192/256-bit keys (16..32 step 8) */
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				/* Full 16-byte GCM tag only */
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				/* 8 or 12 bytes of AAD; presumably ESP
				 * header without/with ESN — TODO confirm
				 */
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
46
/* Inline-protocol IPsec capabilities advertised by cn10k ethdev:
 * ESP in tunnel and transport mode, both directions.
 * Terminated by an RTE_SECURITY_ACTION_TYPE_NONE entry.
 *
 * NOTE(review): the ingress entries also set
 * RTE_SECURITY_TX_OLOAD_NEED_MDATA, which is a Tx offload flag —
 * confirm whether this is intended for Rx capabilities.
 */
static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};
100
101 static void
102 cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
103 {
104         struct rte_eth_event_ipsec_desc desc;
105         struct cn10k_sec_sess_priv sess_priv;
106         struct cn10k_outb_priv_data *priv;
107         struct roc_ot_ipsec_outb_sa *sa;
108         struct cpt_cn10k_res_s *res;
109         struct rte_eth_dev *eth_dev;
110         struct cnxk_eth_dev *dev;
111         uint16_t dlen_adj, rlen;
112         struct rte_mbuf *mbuf;
113         uintptr_t sa_base;
114         uintptr_t nixtx;
115         uint8_t port;
116
117         RTE_SET_USED(args);
118
119         switch ((gw[0] >> 28) & 0xF) {
120         case RTE_EVENT_TYPE_ETHDEV:
121                 /* Event from inbound inline dev due to IPSEC packet bad L4 */
122                 mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
123                 plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
124                 rte_pktmbuf_free(mbuf);
125                 return;
126         case RTE_EVENT_TYPE_CPU:
127                 /* Check for subtype */
128                 if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
129                         /* Event from outbound inline error */
130                         mbuf = (struct rte_mbuf *)gw[1];
131                         break;
132                 }
133                 /* Fall through */
134         default:
135                 plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
136                         gw[0], gw[1]);
137                 return;
138         }
139
140         /* Get ethdev port from tag */
141         port = gw[0] & 0xFF;
142         eth_dev = &rte_eth_devices[port];
143         dev = cnxk_eth_pmd_priv(eth_dev);
144
145         sess_priv.u64 = *rte_security_dynfield(mbuf);
146         /* Calculate dlen adj */
147         dlen_adj = mbuf->pkt_len - mbuf->l2_len;
148         rlen = (dlen_adj + sess_priv.roundup_len) +
149                (sess_priv.roundup_byte - 1);
150         rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
151         rlen += sess_priv.partial_len;
152         dlen_adj = rlen - dlen_adj;
153
154         /* Find the res area residing on next cacheline after end of data */
155         nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
156         nixtx += BIT_ULL(7);
157         nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
158         res = (struct cpt_cn10k_res_s *)nixtx;
159
160         plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
161                     mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);
162
163         sess_priv.u64 = *rte_security_dynfield(mbuf);
164
165         sa_base = dev->outb.sa_base;
166         sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
167         priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
168
169         memset(&desc, 0, sizeof(desc));
170
171         switch (res->uc_compcode) {
172         case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
173                 desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
174                 break;
175         default:
176                 plt_warn("Outbound error, mbuf %p, sa_index %u, "
177                          "compcode %x uc %x", mbuf, sess_priv.sa_idx,
178                          res->compcode, res->uc_compcode);
179                 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
180                 break;
181         }
182
183         desc.metadata = (uint64_t)priv->userdata;
184         rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
185         rte_pktmbuf_free(mbuf);
186 }
187
/* Create an inline-protocol IPsec session (rte_security session_create op).
 *
 * @device:  rte_eth_dev back-pointer (opaque).
 * @conf:    session config; only INLINE_PROTOCOL + IPSEC is accepted.
 * @sess:    security session to populate with fast-path private data.
 * @mempool: pool supplying the cnxk_eth_sec_sess bookkeeping object.
 *
 * Return: 0 on success, negative errno on failure (-ENOTSUP, -EEXIST,
 * -ENOMEM, -EBUSY, -EFAULT or the error from SA fill/index alloc).
 */
static int
cn10k_eth_sec_session_create(void *device,
			     struct rte_security_session_conf *conf,
			     struct rte_security_session *sess,
			     struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn10k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	bool inbound, inl_dev;
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	/* Dynfield carries sess_priv on the mbuf in the Tx fast path */
	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	/* Register error-event callback once, from primary process only */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	inl_dev = !!dev->inb.inl_dev;

	/* Search if a session already exits */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	/* Acquire lock on inline dev for inbound */
	if (inbound && inl_dev)
		roc_nix_inl_dev_lock();

	if (inbound) {
		struct cn10k_inb_priv_data *inb_priv;
		struct roc_ot_ipsec_inb_sa *inb_sa;
		uintptr_t sa;

		/* Private data must fit in the SA's SW-reserved area */
		PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
		sa = roc_nix_inl_inb_sa_get(&dev->nix, inl_dev, ipsec->spi);
		if (!sa && dev->inb.inl_dev) {
			plt_err("Failed to create ingress sa, inline dev "
				"not found or spi not in range");
			rc = -ENOTSUP;
			goto mempool_put;
		} else if (!sa) {
			plt_err("Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

		/* Check if SA is already in use */
		if (inb_sa->w2.s.valid) {
			plt_err("Inbound SA with SPI %u already in use",
				ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		memset(inb_sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;
		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Save SA index/SPI in cookie for now */
		inb_sa->w1.s.cookie = rte_cpu_to_be_32(ipsec->spi);

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi;
		eth_sec->spi = ipsec->spi;
		eth_sec->inl_dev = !!dev->inb.inl_dev;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
	} else {
		struct cn10k_outb_priv_data *outb_priv;
		struct roc_ot_ipsec_outb_sa *outb_sa;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint64_t sa_base = dev->outb.sa_base;
		uint32_t sa_idx;

		/* Private data must fit in the SA's SW-reserved area */
		PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init outbound sa, rc=%d", rc);
			/* Roll back the SA index; OR keeps rc non-zero */
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;

		/* Save rlen info (used to size Tx-side dlen adjust) */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		/* Prepare session priv */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
	}

	/* Sync session in context cache */
	roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
			    ROC_NIX_INL_SA_OP_RELOAD);

	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);
	/*
	 * Update fast path info in priv area.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_mempool_put(mempool, eth_sec);
	return rc;
}
377
378 static int
379 cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
380 {
381         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
382         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
383         struct roc_ot_ipsec_inb_sa *inb_sa;
384         struct roc_ot_ipsec_outb_sa *outb_sa;
385         struct cnxk_eth_sec_sess *eth_sec;
386         struct rte_mempool *mp;
387
388         eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
389         if (!eth_sec)
390                 return -ENOENT;
391
392         if (eth_sec->inl_dev)
393                 roc_nix_inl_dev_lock();
394
395         if (eth_sec->inb) {
396                 inb_sa = eth_sec->sa;
397                 /* Disable SA */
398                 inb_sa->w2.s.valid = 0;
399
400                 TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
401                 dev->inb.nb_sess--;
402         } else {
403                 outb_sa = eth_sec->sa;
404                 /* Disable SA */
405                 outb_sa->w2.s.valid = 0;
406
407                 /* Release Outbound SA index */
408                 cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
409                 TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
410                 dev->outb.nb_sess--;
411         }
412
413         /* Sync session in context cache */
414         roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
415                             ROC_NIX_INL_SA_OP_RELOAD);
416
417         if (eth_sec->inl_dev)
418                 roc_nix_inl_dev_unlock();
419
420         plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
421                     eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
422                     eth_sec->sa_idx, eth_sec->inl_dev);
423
424         /* Put eth_sec object back to pool */
425         mp = rte_mempool_from_obj(eth_sec);
426         set_sec_session_private_data(sess, NULL);
427         rte_mempool_put(mp, eth_sec);
428         return 0;
429 }
430
431 static const struct rte_security_capability *
432 cn10k_eth_sec_capabilities_get(void *device __rte_unused)
433 {
434         return cn10k_eth_sec_capabilities;
435 }
436
437 void
438 cn10k_eth_sec_ops_override(void)
439 {
440         static int init_once;
441
442         if (init_once)
443                 return;
444         init_once = 1;
445
446         /* Update platform specific ops */
447         cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
448         cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
449         cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
450 }