net/octeontx2: update max packet length for inline IPsec
drivers/net/octeontx2/otx2_ethdev_sec.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_common.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_ipsec_fp.h"
#include "otx2_sec_idev.h"

#define AH_HDR_LEN      12
#define AES_GCM_IV_LEN  8
#define AES_GCM_MAC_LEN 16
#define AES_CBC_IV_LEN  16
#define SHA1_HMAC_LEN   12

#define AES_GCM_ROUNDUP_BYTE_LEN        4
#define AES_CBC_ROUNDUP_BYTE_LEN        16

struct eth_sec_tag_const {
        RTE_STD_C11
        union {
                struct {
                        uint32_t rsvd_11_0  : 12;
                        uint32_t port       : 8;
                        uint32_t event_type : 4;
                        uint32_t rsvd_31_24 : 8;
                };
                uint32_t u32;
        };
};

static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
        {       /* AES GCM */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
                        {.aead = {
                                .algo = RTE_CRYPTO_AEAD_AES_GCM,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                .digest_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                },
                                .aad_size = {
                                        .min = 8,
                                        .max = 12,
                                        .increment = 4
                                },
                                .iv_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* AES CBC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
                        {.cipher = {
                                .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                .iv_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* SHA1 HMAC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
                        {.auth = {
                                .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                                .block_size = 64,
                                .key_size = {
                                        .min = 20,
                                        .max = 64,
                                        .increment = 1
                                },
                                .digest_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                },
                        }, }
                }, }
        },
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
        {       /* IPsec Inline Protocol ESP Tunnel Ingress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = otx2_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Tunnel Egress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = otx2_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {
                .action = RTE_SECURITY_ACTION_TYPE_NONE
        }
};

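/* Free the per-port SPI-to-SA pointer table kept in the shared fastpath
 * lookup memzone.
 */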
static void
lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
{
        static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
        uint16_t port = eth_dev->data->port_id;
        const struct rte_memzone *mz;
        uint64_t **sa_tbl;
        uint8_t *mem;

        mz = rte_memzone_lookup(name);
        if (mz == NULL)
                return;

        mem = mz->addr;

        sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
        if (sa_tbl[port] == NULL)
                return;

        rte_free(sa_tbl[port]);
        sa_tbl[port] = NULL;
}

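/* Record the SA pointer against its SPI so that the Rx fastpath can map an
 * incoming SPI to its inbound SA.
 */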
static int
lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa)
{
        static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        const struct rte_memzone *mz;
        uint64_t **sa_tbl;
        uint8_t *mem;

        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                otx2_err("Could not find fastpath lookup table");
                return -EINVAL;
        }

        mem = mz->addr;

        sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);

        if (sa_tbl[port] == NULL) {
                sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
                                          sizeof(uint64_t), 0);
                if (sa_tbl[port] == NULL)
                        return -ENOMEM;
        }

        sa_tbl[port][spi] = (uint64_t)sa;

        return 0;
}

static inline void
in_sa_mz_name_get(char *name, int size, uint16_t port)
{
        snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
}

static struct otx2_ipsec_fp_in_sa *
in_sa_get(uint16_t port, int sa_index)
{
        char name[RTE_MEMZONE_NAMESIZE];
        struct otx2_ipsec_fp_in_sa *sa;
        const struct rte_memzone *mz;

        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                otx2_err("Could not get the memzone reserved for IN SA DB");
                return NULL;
        }

        sa = mz->addr;

        return sa + sa_index;
}

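/* Precompute per-SA constants: the fixed header/trailer overhead added by
 * IPsec encapsulation (partial_len) and the cipher padding granularity
 * (roundup) used when sizing the encapsulated packet.
 */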
static int
ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
                   struct rte_crypto_sym_xform *xform,
                   struct otx2_sec_session_ipsec_ip *sess)
{
        struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

        sess->partial_len = sizeof(struct rte_ipv4_hdr);

        if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
                sess->partial_len += sizeof(struct rte_esp_hdr);
                sess->roundup_len = sizeof(struct rte_esp_tail);
        } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
                sess->partial_len += AH_HDR_LEN;
        } else {
                return -EINVAL;
        }

        if (ipsec->options.udp_encap)
                sess->partial_len += sizeof(struct rte_udp_hdr);

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
                        sess->partial_len += AES_GCM_IV_LEN;
                        sess->partial_len += AES_GCM_MAC_LEN;
                        sess->roundup_byte = AES_GCM_ROUNDUP_BYTE_LEN;
                }
                return 0;
        }

        if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
                cipher_xform = xform;
                auth_xform = xform->next;
        } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
                auth_xform = xform;
                cipher_xform = xform->next;
        } else {
                return -EINVAL;
        }
        if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
                sess->partial_len += AES_CBC_IV_LEN;
                sess->roundup_byte = AES_CBC_ROUNDUP_BYTE_LEN;
        } else {
                return -EINVAL;
        }

        if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
                sess->partial_len += SHA1_HMAC_LEN;
        else
                return -EINVAL;

        return 0;
}

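/* Derive the HMAC ipad/opad from the authentication key by submitting a
 * WRITE_HMAC_IPAD_OPAD instruction to the CPT and copy the result into the
 * SA's hmac_key area.
 */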
static int
hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
          const uint8_t *auth_key, int len, uint8_t *hmac_key)
{
        struct inst_data {
                struct otx2_cpt_res cpt_res;
                uint8_t buffer[64];
        } *md;

        volatile struct otx2_cpt_res *res;
        uint64_t timeout, lmt_status;
        struct otx2_cpt_inst_s inst;
        rte_iova_t md_iova;
        int ret;

        memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));

        md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
        if (md == NULL)
                return -ENOMEM;

        memcpy(md->buffer, auth_key, len);

        md_iova = rte_malloc_virt2iova(md);
        if (md_iova == RTE_BAD_IOVA) {
                ret = -EINVAL;
                goto free_md;
        }

        inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
        inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
        inst.param2 = ctl->auth_type;
        inst.dlen = len;
        inst.dptr = md_iova + offsetof(struct inst_data, buffer);
        inst.rptr = inst.dptr;
        inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;

        md->cpt_res.compcode = 0;
        md->cpt_res.uc_compcode = 0xff;

        timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();

        rte_cio_wmb();

        do {
                otx2_lmt_mov(qp->lmtline, &inst, 2);
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);

        res = (volatile struct otx2_cpt_res *)&md->cpt_res;

        /* Wait until instruction completes or times out */
        while (res->uc_compcode == 0xff) {
                if (rte_get_timer_cycles() > timeout)
                        break;
        }

        if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
                ret = -EIO;
                goto free_md;
        }

        /* Retrieve the ipad and opad from rptr */
        memcpy(hmac_key, md->buffer, 48);

        ret = 0;

free_md:
        rte_free(md);
        return ret;
}

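/* Create an outbound (egress) inline IPsec session: populate the outbound SA,
 * reserve a CPT queue pair for the port and prepare the constant part of the
 * CPT instruction used by the Tx path.
 */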
static int
eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
                              struct rte_security_ipsec_xform *ipsec,
                              struct rte_crypto_sym_xform *crypto_xform,
                              struct rte_security_session *sec_sess)
{
        struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
        struct otx2_sec_session_ipsec_ip *sess;
        uint16_t port = eth_dev->data->port_id;
        int cipher_key_len, auth_key_len, ret;
        const uint8_t *cipher_key, *auth_key;
        struct otx2_ipsec_fp_sa_ctl *ctl;
        struct otx2_ipsec_fp_out_sa *sa;
        struct otx2_sec_session *priv;
        struct otx2_cpt_inst_s inst;
        struct otx2_cpt_qp *qp;

        priv = get_sec_session_private_data(sec_sess);
        sess = &priv->ipsec.ip;

        sa = &sess->out_sa;
        ctl = &sa->ctl;
        if (ctl->valid) {
                otx2_err("SA already registered");
                return -EINVAL;
        }

        memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));

        sess->seq = 1;

        ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
        if (ret < 0)
                return ret;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
                memcpy(sa->nonce, &ipsec->salt, 4);

        if (ipsec->options.udp_encap == 1) {
                sa->udp_src = 4500;
                sa->udp_dst = 4500;
        }

        if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
                /* Start ip id from 1 */
                sess->ip_id = 1;

                if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
                        memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
                               sizeof(struct in_addr));
                        memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
                               sizeof(struct in_addr));
                } else {
                        return -EINVAL;
                }
        } else {
                return -EINVAL;
        }

        cipher_xform = crypto_xform;
        auth_xform = crypto_xform->next;

        cipher_key_len = 0;
        auth_key_len = 0;
        auth_key = NULL;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                cipher_key = crypto_xform->aead.key.data;
                cipher_key_len = crypto_xform->aead.key.length;
        } else {
                cipher_key = cipher_xform->cipher.key.data;
                cipher_key_len = cipher_xform->cipher.key.length;
                auth_key = auth_xform->auth.key.data;
                auth_key_len = auth_xform->auth.key.length;
        }

        if (cipher_key_len != 0)
                memcpy(sa->cipher_key, cipher_key, cipher_key_len);
        else
                return -EINVAL;

        /* Determine word 7 of CPT instruction */
        inst.u64[7] = 0;
        inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
        inst.cptr = rte_mempool_virt2iova(sa);
        sess->inst_w7 = inst.u64[7];

        /* Get CPT QP to be used for this SA */
        ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
        if (ret)
                return ret;

        sess->qp = qp;

        sess->cpt_lmtline = qp->lmtline;
        sess->cpt_nq_reg = qp->lf_nq_reg;

        /* Populate control word */
        ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
        if (ret)
                goto cpt_put;

        if (auth_key_len && auth_key) {
                ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
                if (ret)
                        goto cpt_put;
        }

        return 0;
cpt_put:
        otx2_sec_idev_tx_cpt_qp_put(sess->qp);
        return ret;
}

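/* Create an inbound (ingress) inline IPsec session: program the SA entry
 * indexed by SPI in the inbound SA DB and update the fastpath SPI lookup
 * table.
 */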
static int
eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
                             struct rte_security_ipsec_xform *ipsec,
                             struct rte_crypto_sym_xform *crypto_xform,
                             struct rte_security_session *sec_sess)
{
        struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_sec_session_ipsec_ip *sess;
        uint16_t port = eth_dev->data->port_id;
        int cipher_key_len, auth_key_len, ret;
        const uint8_t *cipher_key, *auth_key;
        struct otx2_ipsec_fp_sa_ctl *ctl;
        struct otx2_ipsec_fp_in_sa *sa;
        struct otx2_sec_session *priv;
        struct otx2_cpt_qp *qp;

        if (ipsec->spi >= dev->ipsec_in_max_spi) {
                otx2_err("SPI exceeds max supported");
                return -EINVAL;
        }

        sa = in_sa_get(port, ipsec->spi);
        ctl = &sa->ctl;

        priv = get_sec_session_private_data(sec_sess);
        sess = &priv->ipsec.ip;

        if (ctl->valid) {
                otx2_err("SA already registered");
                return -EINVAL;
        }

        memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));

        auth_xform = crypto_xform;
        cipher_xform = crypto_xform->next;

        cipher_key_len = 0;
        auth_key_len = 0;
        auth_key = NULL;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
                        memcpy(sa->nonce, &ipsec->salt, 4);
                cipher_key = crypto_xform->aead.key.data;
                cipher_key_len = crypto_xform->aead.key.length;
        } else {
                cipher_key = cipher_xform->cipher.key.data;
                cipher_key_len = cipher_xform->cipher.key.length;
                auth_key = auth_xform->auth.key.data;
                auth_key_len = auth_xform->auth.key.length;
        }

        if (cipher_key_len != 0)
                memcpy(sa->cipher_key, cipher_key, cipher_key_len);
        else
                return -EINVAL;

        sess->in_sa = sa;

        sa->userdata = priv->userdata;

        if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa))
                return -EINVAL;

        ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
        if (ret)
                return ret;

        if (auth_key_len && auth_key) {
                /* Get a queue pair for HMAC init */
                ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
                if (ret)
                        return ret;
                ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
                otx2_sec_idev_tx_cpt_qp_put(qp);
        }
        return ret;
}

static int
eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
                          struct rte_security_ipsec_xform *ipsec,
                          struct rte_crypto_sym_xform *crypto_xform,
                          struct rte_security_session *sess)
{
        int ret;

        ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
        if (ret)
                return ret;

        if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
                return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
                                                    crypto_xform, sess);
        else
                return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
                                                     crypto_xform, sess);
}

static int
otx2_eth_sec_session_create(void *device,
                            struct rte_security_session_conf *conf,
                            struct rte_security_session *sess,
                            struct rte_mempool *mempool)
{
        struct otx2_sec_session *priv;
        int ret;

        if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
                return -ENOTSUP;

        if (rte_mempool_get(mempool, (void **)&priv)) {
                otx2_err("Could not allocate security session private data");
                return -ENOMEM;
        }

        set_sec_session_private_data(sess, priv);

        /*
         * Save userdata provided by the application. For ingress packets, this
         * could be used to identify the SA.
         */
        priv->userdata = conf->userdata;

        if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
                ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
                                                conf->crypto_xform,
                                                sess);
        else
                ret = -ENOTSUP;

        if (ret)
                goto mempool_put;

        return 0;

mempool_put:
        rte_mempool_put(mempool, priv);
        set_sec_session_private_data(sess, NULL);
        return ret;
}

static int
otx2_eth_sec_session_destroy(void *device __rte_unused,
                             struct rte_security_session *sess)
{
        struct otx2_sec_session_ipsec_ip *sess_ip;
        struct otx2_sec_session *priv;
        struct rte_mempool *sess_mp;
        int ret;

        priv = get_sec_session_private_data(sess);
        if (priv == NULL)
                return -EINVAL;

        sess_ip = &priv->ipsec.ip;

        /* Release CPT LF used for this session */
        if (sess_ip->qp != NULL) {
                ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
                if (ret)
                        return ret;
        }

        sess_mp = rte_mempool_from_obj(priv);

        set_sec_session_private_data(sess, NULL);
        rte_mempool_put(sess_mp, priv);

        return 0;
}

static unsigned int
otx2_eth_sec_session_get_size(void *device __rte_unused)
{
        return sizeof(struct otx2_sec_session);
}

static int
otx2_eth_sec_set_pkt_mdata(void *device __rte_unused,
                           struct rte_security_session *session,
                           struct rte_mbuf *m, void *params __rte_unused)
{
        /* Set security session as the pkt metadata */
        m->udata64 = (uint64_t)session;

        return 0;
}

static int
otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md,
                          void **userdata)
{
        /* Retrieve userdata */
        *userdata = (void *)md;

        return 0;
}

static const struct rte_security_capability *
otx2_eth_sec_capabilities_get(void *device __rte_unused)
{
        return otx2_eth_sec_capabilities;
}

static struct rte_security_ops otx2_eth_sec_ops = {
        .session_create         = otx2_eth_sec_session_create,
        .session_destroy        = otx2_eth_sec_session_destroy,
        .session_get_size       = otx2_eth_sec_session_get_size,
        .set_pkt_metadata       = otx2_eth_sec_set_pkt_mdata,
        .get_userdata           = otx2_eth_sec_get_userdata,
        .capabilities_get       = otx2_eth_sec_capabilities_get
};

int
otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
{
        struct rte_security_ctx *ctx;
        int ret;

        ctx = rte_malloc("otx2_eth_sec_ctx",
                         sizeof(struct rte_security_ctx), 0);
        if (ctx == NULL)
                return -ENOMEM;

        ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
        if (ret) {
                rte_free(ctx);
                return ret;
        }

        /* Populate ctx */

        ctx->device = eth_dev;
        ctx->ops = &otx2_eth_sec_ops;
        ctx->sess_cnt = 0;

        eth_dev->security_ctx = ctx;

        return 0;
}

void
otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
{
        rte_free(eth_dev->security_ctx);
}

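/* Configure the NIX LF for inline IPsec through the mailbox: SA base address,
 * SA size, event tag constant, SPI index width and the maximum frame length
 * (lenm1_max) allowed for inline processing.
 */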
static int
eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        struct nix_inline_ipsec_lf_cfg *req;
        struct otx2_mbox *mbox = dev->mbox;
        struct eth_sec_tag_const tag_const;
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        mz = rte_memzone_lookup(name);
        if (mz == NULL)
                return -EINVAL;

        req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        req->enable = 1;
        req->sa_base_addr = mz->iova;

        req->ipsec_cfg0.tt = tt;

        tag_const.u32 = 0;
        tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
        tag_const.port = port;
        req->ipsec_cfg0.tag_const = tag_const.u32;

        req->ipsec_cfg0.sa_pow2_size =
                        rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
        req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1;

        req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
        req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;

        return otx2_mbox_process(mbox);
}

int
otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_rsp *rsp;
        struct nix_aq_enq_req *aq;
        int ret;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = 0; /* Read RQ:0 context */
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_READ;

        ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (ret < 0) {
                otx2_err("Could not read RQ context");
                return ret;
        }

        /* Update tag type */
        ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
        if (ret < 0)
                otx2_err("Could not update sec eth tag type");

        return ret;
}

int
otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
{
        const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        int mz_sz, ret;
        uint16_t nb_sa;

        RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
                         !RTE_IS_POWER_OF_2(sa_width));

        if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
                return 0;

        nb_sa = dev->ipsec_in_max_spi;
        mz_sz = nb_sa * sa_width;
        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
                                         RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);

        if (mz == NULL) {
                otx2_err("Could not allocate inbound SA DB");
                return -ENOMEM;
        }

        memset(mz->addr, 0, mz_sz);

        ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
        if (ret < 0) {
                otx2_err("Could not configure inline IPsec");
                goto sec_fini;
        }

        return 0;

sec_fini:
        otx2_err("Could not configure device for security");
        otx2_eth_sec_fini(eth_dev);
        return ret;
}

void
otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        char name[RTE_MEMZONE_NAMESIZE];

        if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
                return;

        lookup_mem_sa_tbl_clear(eth_dev);

        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        rte_memzone_free(rte_memzone_lookup(name));
}