net/cnxk: support CPT CTX write through microcode op
[dpdk.git] / drivers / net / cnxk / cn10k_ethdev_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
#include <inttypes.h>

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn10k_ethdev.h>
#include <cnxk_security.h>
12
/* Crypto algorithm capabilities advertised for inline IPsec sessions on
 * cn10k.  Referenced by every entry of cn10k_eth_sec_capabilities below.
 */
static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
        {       /* AES GCM */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
                        {.aead = {
                                .algo = RTE_CRYPTO_AEAD_AES_GCM,
                                .block_size = 16,
                                /* AES-128/192/256 keys */
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                /* Full 16B GCM ICV only */
                                .digest_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                },
                                /* 8B or 12B AAD — presumably ESP
                                 * SPI+SN vs SPI+ESN; confirm against
                                 * the IPsec microcode spec.
                                 */
                                .aad_size = {
                                        .min = 8,
                                        .max = 12,
                                        .increment = 4
                                },
                                .iv_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* AES CBC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
                        {.cipher = {
                                .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                                .block_size = 16,
                                /* AES-128/192/256 keys */
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                .iv_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* SHA1 HMAC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
                        {.auth = {
                                .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                                .block_size = 64,
                                .key_size = {
                                        .min = 20,
                                        .max = 64,
                                        .increment = 1
                                },
                                /* Truncated 96-bit HMAC tag (RFC 2404
                                 * style) — only 12B digests accepted.
                                 */
                                .digest_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                },
                        }, }
                }, }
        },
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
86
/* Security capabilities reported via rte_security_capabilities_get():
 * inline-protocol ESP in tunnel and transport mode, both directions.
 * All entries share the same crypto capability table above.
 */
static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
        {       /* IPsec Inline Protocol ESP Tunnel Ingress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                /* NOTE(review): a TX offload flag on an ingress
                 * capability looks odd — confirm it is intentional.
                 */
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Tunnel Egress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Transport Egress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Transport Ingress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
                /* NOTE(review): TX flag on ingress — see first entry */
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {
                /* End-of-list sentinel */
                .action = RTE_SECURITY_ACTION_TYPE_NONE
        }
};
140
141 static void
142 cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args)
143 {
144         struct rte_eth_event_ipsec_desc desc;
145         struct cn10k_sec_sess_priv sess_priv;
146         struct cn10k_outb_priv_data *priv;
147         struct roc_ot_ipsec_outb_sa *sa;
148         struct cpt_cn10k_res_s *res;
149         struct rte_eth_dev *eth_dev;
150         struct cnxk_eth_dev *dev;
151         uint16_t dlen_adj, rlen;
152         struct rte_mbuf *mbuf;
153         uintptr_t sa_base;
154         uintptr_t nixtx;
155         uint8_t port;
156
157         RTE_SET_USED(args);
158
159         switch ((gw[0] >> 28) & 0xF) {
160         case RTE_EVENT_TYPE_ETHDEV:
161                 /* Event from inbound inline dev due to IPSEC packet bad L4 */
162                 mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
163                 plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
164                 rte_pktmbuf_free(mbuf);
165                 return;
166         case RTE_EVENT_TYPE_CPU:
167                 /* Check for subtype */
168                 if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
169                         /* Event from outbound inline error */
170                         mbuf = (struct rte_mbuf *)gw[1];
171                         break;
172                 }
173                 /* Fall through */
174         default:
175                 plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
176                         gw[0], gw[1]);
177                 return;
178         }
179
180         /* Get ethdev port from tag */
181         port = gw[0] & 0xFF;
182         eth_dev = &rte_eth_devices[port];
183         dev = cnxk_eth_pmd_priv(eth_dev);
184
185         sess_priv.u64 = *rte_security_dynfield(mbuf);
186         /* Calculate dlen adj */
187         dlen_adj = mbuf->pkt_len - mbuf->l2_len;
188         rlen = (dlen_adj + sess_priv.roundup_len) +
189                (sess_priv.roundup_byte - 1);
190         rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
191         rlen += sess_priv.partial_len;
192         dlen_adj = rlen - dlen_adj;
193
194         /* Find the res area residing on next cacheline after end of data */
195         nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
196         nixtx += BIT_ULL(7);
197         nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
198         res = (struct cpt_cn10k_res_s *)nixtx;
199
200         plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
201                     mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);
202
203         sess_priv.u64 = *rte_security_dynfield(mbuf);
204
205         sa_base = dev->outb.sa_base;
206         sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
207         priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
208
209         memset(&desc, 0, sizeof(desc));
210
211         switch (res->uc_compcode) {
212         case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
213                 desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
214                 break;
215         default:
216                 plt_warn("Outbound error, mbuf %p, sa_index %u, "
217                          "compcode %x uc %x", mbuf, sess_priv.sa_idx,
218                          res->compcode, res->uc_compcode);
219                 desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
220                 break;
221         }
222
223         desc.metadata = (uint64_t)priv->userdata;
224         rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
225         rte_pktmbuf_free(mbuf);
226 }
227
228 static int
229 cn10k_eth_sec_session_create(void *device,
230                              struct rte_security_session_conf *conf,
231                              struct rte_security_session *sess,
232                              struct rte_mempool *mempool)
233 {
234         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
235         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
236         struct rte_security_ipsec_xform *ipsec;
237         struct cn10k_sec_sess_priv sess_priv;
238         struct rte_crypto_sym_xform *crypto;
239         struct cnxk_eth_sec_sess *eth_sec;
240         bool inbound, inl_dev;
241         int rc = 0;
242
243         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
244                 return -ENOTSUP;
245
246         if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
247                 return -ENOTSUP;
248
249         if (rte_security_dynfield_register() < 0)
250                 return -ENOTSUP;
251
252         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
253                 roc_nix_inl_cb_register(cn10k_eth_sec_sso_work_cb, NULL);
254
255         ipsec = &conf->ipsec;
256         crypto = conf->crypto_xform;
257         inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
258         inl_dev = !!dev->inb.inl_dev;
259
260         /* Search if a session already exits */
261         if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
262                 plt_err("%s SA with SPI %u already in use",
263                         inbound ? "Inbound" : "Outbound", ipsec->spi);
264                 return -EEXIST;
265         }
266
267         if (rte_mempool_get(mempool, (void **)&eth_sec)) {
268                 plt_err("Could not allocate security session private data");
269                 return -ENOMEM;
270         }
271
272         memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
273         sess_priv.u64 = 0;
274
275         /* Acquire lock on inline dev for inbound */
276         if (inbound && inl_dev)
277                 roc_nix_inl_dev_lock();
278
279         if (inbound) {
280                 struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
281                 struct cn10k_inb_priv_data *inb_priv;
282                 uintptr_t sa;
283
284                 PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
285                                   ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
286
287                 /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
288                 sa = roc_nix_inl_inb_sa_get(&dev->nix, inl_dev, ipsec->spi);
289                 if (!sa && dev->inb.inl_dev) {
290                         plt_err("Failed to create ingress sa, inline dev "
291                                 "not found or spi not in range");
292                         rc = -ENOTSUP;
293                         goto mempool_put;
294                 } else if (!sa) {
295                         plt_err("Failed to create ingress sa");
296                         rc = -EFAULT;
297                         goto mempool_put;
298                 }
299
300                 inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;
301
302                 /* Check if SA is already in use */
303                 if (inb_sa->w2.s.valid) {
304                         plt_err("Inbound SA with SPI %u already in use",
305                                 ipsec->spi);
306                         rc = -EBUSY;
307                         goto mempool_put;
308                 }
309
310                 inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
311                 memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
312
313                 /* Fill inbound sa params */
314                 rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto);
315                 if (rc) {
316                         plt_err("Failed to init inbound sa, rc=%d", rc);
317                         goto mempool_put;
318                 }
319
320                 inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
321                 /* Back pointer to get eth_sec */
322                 inb_priv->eth_sec = eth_sec;
323                 /* Save userdata in inb private area */
324                 inb_priv->userdata = conf->userdata;
325
326                 /* Save SA index/SPI in cookie for now */
327                 inb_sa_dptr->w1.s.cookie = rte_cpu_to_be_32(ipsec->spi);
328
329                 /* Prepare session priv */
330                 sess_priv.inb_sa = 1;
331                 sess_priv.sa_idx = ipsec->spi;
332
333                 /* Pointer from eth_sec -> inb_sa */
334                 eth_sec->sa = inb_sa;
335                 eth_sec->sess = sess;
336                 eth_sec->sa_idx = ipsec->spi;
337                 eth_sec->spi = ipsec->spi;
338                 eth_sec->inl_dev = !!dev->inb.inl_dev;
339                 eth_sec->inb = true;
340
341                 TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
342                 dev->inb.nb_sess++;
343                 /* Sync session in context cache */
344                 rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
345                                            eth_sec->inb,
346                                            sizeof(struct roc_ot_ipsec_inb_sa));
347                 if (rc)
348                         goto mempool_put;
349         } else {
350                 struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
351                 struct cn10k_outb_priv_data *outb_priv;
352                 struct cnxk_ipsec_outb_rlens *rlens;
353                 uint64_t sa_base = dev->outb.sa_base;
354                 uint32_t sa_idx;
355
356                 PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
357                                   ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
358
359                 /* Alloc an sa index */
360                 rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
361                 if (rc)
362                         goto mempool_put;
363
364                 outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
365                 outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
366                 rlens = &outb_priv->rlens;
367
368                 outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
369                 memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
370
371                 /* Fill outbound sa params */
372                 rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
373                 if (rc) {
374                         plt_err("Failed to init outbound sa, rc=%d", rc);
375                         rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
376                         goto mempool_put;
377                 }
378
379                 /* Save userdata */
380                 outb_priv->userdata = conf->userdata;
381                 outb_priv->sa_idx = sa_idx;
382                 outb_priv->eth_sec = eth_sec;
383
384                 /* Save rlen info */
385                 cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
386
387                 /* Prepare session priv */
388                 sess_priv.sa_idx = outb_priv->sa_idx;
389                 sess_priv.roundup_byte = rlens->roundup_byte;
390                 sess_priv.roundup_len = rlens->roundup_len;
391                 sess_priv.partial_len = rlens->partial_len;
392                 sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
393                 sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
394
395                 /* Pointer from eth_sec -> outb_sa */
396                 eth_sec->sa = outb_sa;
397                 eth_sec->sess = sess;
398                 eth_sec->sa_idx = sa_idx;
399                 eth_sec->spi = ipsec->spi;
400
401                 TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
402                 dev->outb.nb_sess++;
403                 /* Sync session in context cache */
404                 rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
405                                            eth_sec->inb,
406                                            sizeof(struct roc_ot_ipsec_outb_sa));
407                 if (rc)
408                         goto mempool_put;
409         }
410         if (inbound && inl_dev)
411                 roc_nix_inl_dev_unlock();
412
413         plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
414                     inbound ? "inbound" : "outbound", eth_sec->spi,
415                     eth_sec->sa_idx, eth_sec->inl_dev);
416         /*
417          * Update fast path info in priv area.
418          */
419         set_sec_session_private_data(sess, (void *)sess_priv.u64);
420
421         return 0;
422 mempool_put:
423         if (inbound && inl_dev)
424                 roc_nix_inl_dev_unlock();
425         rte_mempool_put(mempool, eth_sec);
426         return rc;
427 }
428
/* Destroy an inline IPsec security session.
 *
 * Looks up the session, overwrites the hardware SA with a freshly
 * initialized (invalid) one via the scratch dptr, unlinks the session
 * from the per-direction list and returns the private object to its
 * mempool.  Outbound additionally releases the SA index.
 *
 * @param device Ethdev (struct rte_eth_dev *) owning the session.
 * @param sess   Security session handle to tear down.
 * @return 0 on success, -ENOENT if the session is unknown.
 */
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cnxk_eth_sec_sess *eth_sec;
        struct rte_mempool *mp;
        void *sa_dptr;

        eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
        if (!eth_sec)
                return -ENOENT;

        /* Serialize against inline device while touching its SA */
        if (eth_sec->inl_dev)
                roc_nix_inl_dev_lock();

        if (eth_sec->inb) {
                /* Disable SA */
                sa_dptr = dev->inb.sa_dptr;
                roc_nix_inl_inb_sa_init(sa_dptr);

                /* NOTE(review): ctx_write return value is ignored here;
                 * a failed sync leaves the old SA live in the context
                 * cache — confirm this is acceptable on teardown.
                 */
                roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
                                      eth_sec->inb,
                                      sizeof(struct roc_ot_ipsec_inb_sa));
                TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
                dev->inb.nb_sess--;
        } else {
                /* Disable SA */
                sa_dptr = dev->outb.sa_dptr;
                roc_nix_inl_outb_sa_init(sa_dptr);

                /* NOTE(review): return value ignored — see inbound path */
                roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
                                      eth_sec->inb,
                                      sizeof(struct roc_ot_ipsec_outb_sa));
                /* Release Outbound SA index */
                cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
                TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
                dev->outb.nb_sess--;
        }
        if (eth_sec->inl_dev)
                roc_nix_inl_dev_unlock();

        plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
                    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
                    eth_sec->sa_idx, eth_sec->inl_dev);

        /* Put eth_sec object back to pool */
        mp = rte_mempool_from_obj(eth_sec);
        set_sec_session_private_data(sess, NULL);
        rte_mempool_put(mp, eth_sec);
        return 0;
}
481
482 static const struct rte_security_capability *
483 cn10k_eth_sec_capabilities_get(void *device __rte_unused)
484 {
485         return cn10k_eth_sec_capabilities;
486 }
487
488 void
489 cn10k_eth_sec_ops_override(void)
490 {
491         static int init_once;
492
493         if (init_once)
494                 return;
495         init_once = 1;
496
497         /* Update platform specific ops */
498         cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
499         cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
500         cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
501 }