/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_common.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_ipsec_fp.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

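/*
 * Tag constant programmed into the NIX inline IPsec Rx configuration: it
 * encodes the event type and port so that inline-processed packets can be
 * matched back to the originating ethdev.
 */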
struct eth_sec_tag_const {
        RTE_STD_C11
        union {
                struct {
                        uint32_t rsvd_11_0  : 12;
                        uint32_t port       : 8;
                        uint32_t event_type : 4;
                        uint32_t rsvd_31_24 : 8;
                };
                uint32_t u32;
        };
};

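/* Crypto capabilities supported for inline IPsec: AES-GCM (AEAD),
 * AES-CBC cipher and SHA1-HMAC auth.
 */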
static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
        {       /* AES GCM */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
                        {.aead = {
                                .algo = RTE_CRYPTO_AEAD_AES_GCM,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                .digest_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                },
                                .aad_size = {
                                        .min = 8,
                                        .max = 12,
                                        .increment = 4
                                },
                                .iv_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* AES CBC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
                        {.cipher = {
                                .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
                                .iv_size = {
                                        .min = 16,
                                        .max = 16,
                                        .increment = 0
                                }
                        }, }
                }, }
        },
        {       /* SHA1 HMAC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
                        {.auth = {
                                .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                                .block_size = 64,
                                .key_size = {
                                        .min = 20,
                                        .max = 64,
                                        .increment = 1
                                },
                                .digest_size = {
                                        .min = 12,
                                        .max = 12,
                                        .increment = 0
                                },
                        }, }
                }, }
        },
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
        {       /* IPsec Inline Protocol ESP Tunnel Ingress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = otx2_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {       /* IPsec Inline Protocol ESP Tunnel Egress */
                .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                        .options = { 0 }
                },
                .crypto_capabilities = otx2_eth_sec_crypto_caps,
                .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
        },
        {
                .action = RTE_SECURITY_ACTION_TYPE_NONE
        }
};

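/* Free the per-port SPI->SA lookup table referenced from the shared
 * fastpath lookup memzone, if it was allocated.
 */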
static void
lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
{
        static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
        uint16_t port = eth_dev->data->port_id;
        const struct rte_memzone *mz;
        uint64_t **sa_tbl;
        uint8_t *mem;

        mz = rte_memzone_lookup(name);
        if (mz == NULL)
                return;

        mem = mz->addr;

        sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
        if (sa_tbl[port] == NULL)
                return;

        rte_free(sa_tbl[port]);
        sa_tbl[port] = NULL;
}

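/* Record the SA pointer for the given SPI in the per-port table used by
 * the Rx fastpath, allocating the table on first use.
 */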
static int
lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa)
{
        static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        const struct rte_memzone *mz;
        uint64_t **sa_tbl;
        uint8_t *mem;

        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                otx2_err("Could not find fastpath lookup table");
                return -EINVAL;
        }

        mem = mz->addr;

        sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);

        if (sa_tbl[port] == NULL) {
                sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
                                          sizeof(uint64_t), 0);
                if (sa_tbl[port] == NULL)
                        return -ENOMEM;
        }

        sa_tbl[port][spi] = (uint64_t)sa;

        return 0;
}

static inline void
in_sa_mz_name_get(char *name, int size, uint16_t port)
{
        snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
}

static struct otx2_ipsec_fp_in_sa *
in_sa_get(uint16_t port, int sa_index)
{
        char name[RTE_MEMZONE_NAMESIZE];
        struct otx2_ipsec_fp_in_sa *sa;
        const struct rte_memzone *mz;

        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                otx2_err("Could not get the memzone reserved for IN SA DB");
                return NULL;
        }

        sa = mz->addr;

        return sa + sa_index;
}

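/* Precompute the fixed per-packet overhead (partial_len) and the padding
 * roundup constants for the SA, based on the IPsec protocol, UDP
 * encapsulation and the crypto algorithms in use. The xform chain is
 * expected to be cipher followed by auth for egress, and auth followed
 * by cipher for ingress.
 */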
static int
ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
                   struct rte_crypto_sym_xform *xform,
                   struct otx2_sec_session_ipsec_ip *sess)
{
        struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

        sess->partial_len = sizeof(struct rte_ipv4_hdr);

        if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
                sess->partial_len += sizeof(struct rte_esp_hdr);
                sess->roundup_len = sizeof(struct rte_esp_tail);
        } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
                sess->partial_len += OTX2_SEC_AH_HDR_LEN;
        } else {
                return -EINVAL;
        }

        if (ipsec->options.udp_encap)
                sess->partial_len += sizeof(struct rte_udp_hdr);

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
                        sess->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
                        sess->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
                        sess->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
                }
                return 0;
        }

        if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
                cipher_xform = xform;
                auth_xform = xform->next;
        } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
                auth_xform = xform;
                cipher_xform = xform->next;
        } else {
                return -EINVAL;
        }

        if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
                sess->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
                sess->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
        } else {
                return -EINVAL;
        }

        if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
                sess->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
        else
                return -EINVAL;

        return 0;
}

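/* Derive the HMAC ipad/opad for the given authentication key by issuing
 * an OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD instruction to the CPT and polling
 * for completion (5 second timeout).
 */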
static int
hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
          const uint8_t *auth_key, int len, uint8_t *hmac_key)
{
        struct inst_data {
                struct otx2_cpt_res cpt_res;
                uint8_t buffer[64];
        } *md;

        volatile struct otx2_cpt_res *res;
        uint64_t timeout, lmt_status;
        struct otx2_cpt_inst_s inst;
        rte_iova_t md_iova;
        int ret;

        memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));

        md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
        if (md == NULL)
                return -ENOMEM;

        memcpy(md->buffer, auth_key, len);

        md_iova = rte_malloc_virt2iova(md);
        if (md_iova == RTE_BAD_IOVA) {
                ret = -EINVAL;
                goto free_md;
        }

        inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
        inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
        inst.param2 = ctl->auth_type;
        inst.dlen = len;
        inst.dptr = md_iova + offsetof(struct inst_data, buffer);
        inst.rptr = inst.dptr;
        inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;

        md->cpt_res.compcode = 0;
        md->cpt_res.uc_compcode = 0xff;

        timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();

        rte_io_wmb();

        do {
                otx2_lmt_mov(qp->lmtline, &inst, 2);
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);

        res = (volatile struct otx2_cpt_res *)&md->cpt_res;

        /* Wait until instruction completes or times out */
        while (res->uc_compcode == 0xff) {
                if (rte_get_timer_cycles() > timeout)
                        break;
        }

        if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
                ret = -EIO;
                goto free_md;
        }

        /* Retrieve the ipad and opad from rptr */
        memcpy(hmac_key, md->buffer, 48);

        ret = 0;

free_md:
        rte_free(md);
        return ret;
}

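/* Create an outbound inline IPsec session: populate the outbound SA
 * (keys, tunnel addresses, nonce), reserve a CPT queue pair for the SA
 * and precompute the HMAC ipad/opad when an auth key is supplied.
 */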
static int
eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
                              struct rte_security_ipsec_xform *ipsec,
                              struct rte_crypto_sym_xform *crypto_xform,
                              struct rte_security_session *sec_sess)
{
        struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
        struct otx2_sec_session_ipsec_ip *sess;
        uint16_t port = eth_dev->data->port_id;
        int cipher_key_len, auth_key_len, ret;
        const uint8_t *cipher_key, *auth_key;
        struct otx2_ipsec_fp_sa_ctl *ctl;
        struct otx2_ipsec_fp_out_sa *sa;
        struct otx2_sec_session *priv;
        struct otx2_cpt_inst_s inst;
        struct otx2_cpt_qp *qp;

        priv = get_sec_session_private_data(sec_sess);
        sess = &priv->ipsec.ip;

        sa = &sess->out_sa;
        ctl = &sa->ctl;
        if (ctl->valid) {
                otx2_err("SA already registered");
                return -EINVAL;
        }

        memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));

        sess->seq = 1;

        ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
        if (ret < 0)
                return ret;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
                memcpy(sa->nonce, &ipsec->salt, 4);

        if (ipsec->options.udp_encap == 1) {
                sa->udp_src = 4500;
                sa->udp_dst = 4500;
        }

        if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
                /* Start ip id from 1 */
                sess->ip_id = 1;

                if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
                        memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
                               sizeof(struct in_addr));
                        memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
                               sizeof(struct in_addr));
                } else {
                        return -EINVAL;
                }
        } else {
                return -EINVAL;
        }

        cipher_xform = crypto_xform;
        auth_xform = crypto_xform->next;

        cipher_key_len = 0;
        auth_key_len = 0;
        auth_key = NULL;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                cipher_key = crypto_xform->aead.key.data;
                cipher_key_len = crypto_xform->aead.key.length;
        } else {
                cipher_key = cipher_xform->cipher.key.data;
                cipher_key_len = cipher_xform->cipher.key.length;
                auth_key = auth_xform->auth.key.data;
                auth_key_len = auth_xform->auth.key.length;
        }

        if (cipher_key_len != 0)
                memcpy(sa->cipher_key, cipher_key, cipher_key_len);
        else
                return -EINVAL;

        /* Determine word 7 of CPT instruction */
        inst.u64[7] = 0;
        inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
        inst.cptr = rte_mempool_virt2iova(sa);
        sess->inst_w7 = inst.u64[7];

        /* Get CPT QP to be used for this SA */
        ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
        if (ret)
                return ret;

        sess->qp = qp;

        sess->cpt_lmtline = qp->lmtline;
        sess->cpt_nq_reg = qp->lf_nq_reg;

        /* Populate control word */
        ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
        if (ret)
                goto cpt_put;

        if (auth_key_len && auth_key) {
                ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
                if (ret)
                        goto cpt_put;
        }

        return 0;
cpt_put:
        otx2_sec_idev_tx_cpt_qp_put(sess->qp);
        return ret;
}

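/* Create an inbound inline IPsec session: the SA is written into the
 * per-port inbound SA DB, indexed by SPI, and the fastpath SPI->SA
 * lookup table is updated so that Rx can locate the session.
 */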
static int
eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
                             struct rte_security_ipsec_xform *ipsec,
                             struct rte_crypto_sym_xform *crypto_xform,
                             struct rte_security_session *sec_sess)
{
        struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_sec_session_ipsec_ip *sess;
        uint16_t port = eth_dev->data->port_id;
        int cipher_key_len, auth_key_len, ret;
        const uint8_t *cipher_key, *auth_key;
        struct otx2_ipsec_fp_sa_ctl *ctl;
        struct otx2_ipsec_fp_in_sa *sa;
        struct otx2_sec_session *priv;
        struct otx2_cpt_qp *qp;

        if (ipsec->spi >= dev->ipsec_in_max_spi) {
                otx2_err("SPI exceeds max supported");
                return -EINVAL;
        }

        sa = in_sa_get(port, ipsec->spi);
        ctl = &sa->ctl;

        priv = get_sec_session_private_data(sec_sess);
        sess = &priv->ipsec.ip;

        if (ctl->valid) {
                otx2_err("SA already registered");
                return -EINVAL;
        }

        memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));

        auth_xform = crypto_xform;
        cipher_xform = crypto_xform->next;

        cipher_key_len = 0;
        auth_key_len = 0;
        auth_key = NULL;

        if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
                        memcpy(sa->nonce, &ipsec->salt, 4);
                cipher_key = crypto_xform->aead.key.data;
                cipher_key_len = crypto_xform->aead.key.length;
        } else {
                cipher_key = cipher_xform->cipher.key.data;
                cipher_key_len = cipher_xform->cipher.key.length;
                auth_key = auth_xform->auth.key.data;
                auth_key_len = auth_xform->auth.key.length;
        }

        if (cipher_key_len != 0)
                memcpy(sa->cipher_key, cipher_key, cipher_key_len);
        else
                return -EINVAL;

        sess->in_sa = sa;

        sa->userdata = priv->userdata;

        if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa))
                return -EINVAL;

        ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
        if (ret)
                return ret;

        if (auth_key_len && auth_key) {
                /* Get a queue pair for HMAC init */
                ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
                if (ret)
                        return ret;
                ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
                otx2_sec_idev_tx_cpt_qp_put(qp);
        }

        return ret;
}

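/* Verify the IPsec and crypto xforms and dispatch to the inbound or
 * outbound session create routine based on direction.
 */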
static int
eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
                          struct rte_security_ipsec_xform *ipsec,
                          struct rte_crypto_sym_xform *crypto_xform,
                          struct rte_security_session *sess)
{
        int ret;

        ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
        if (ret)
                return ret;

        if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
                return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
                                                    crypto_xform, sess);
        else
                return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
                                                     crypto_xform, sess);
}

static int
otx2_eth_sec_session_create(void *device,
                            struct rte_security_session_conf *conf,
                            struct rte_security_session *sess,
                            struct rte_mempool *mempool)
{
        struct otx2_sec_session *priv;
        int ret;

        if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
                return -ENOTSUP;

        if (rte_mempool_get(mempool, (void **)&priv)) {
                otx2_err("Could not allocate security session private data");
                return -ENOMEM;
        }

        set_sec_session_private_data(sess, priv);

        /*
         * Save userdata provided by the application. For ingress packets, this
         * could be used to identify the SA.
         */
        priv->userdata = conf->userdata;

        if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
                ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
                                                conf->crypto_xform,
                                                sess);
        else
                ret = -ENOTSUP;

        if (ret)
                goto mempool_put;

        return 0;

mempool_put:
        rte_mempool_put(mempool, priv);
        set_sec_session_private_data(sess, NULL);
        return ret;
}

static int
otx2_eth_sec_session_destroy(void *device __rte_unused,
                             struct rte_security_session *sess)
{
        struct otx2_sec_session_ipsec_ip *sess_ip;
        struct otx2_sec_session *priv;
        struct rte_mempool *sess_mp;
        int ret;

        priv = get_sec_session_private_data(sess);
        if (priv == NULL)
                return -EINVAL;

        sess_ip = &priv->ipsec.ip;

        /* Release CPT LF used for this session */
        if (sess_ip->qp != NULL) {
                ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
                if (ret)
                        return ret;
        }

        sess_mp = rte_mempool_from_obj(priv);

        set_sec_session_private_data(sess, NULL);
        rte_mempool_put(sess_mp, priv);

        return 0;
}

static unsigned int
otx2_eth_sec_session_get_size(void *device __rte_unused)
{
        return sizeof(struct otx2_sec_session);
}

static int
otx2_eth_sec_set_pkt_mdata(void *device __rte_unused,
                           struct rte_security_session *session,
                           struct rte_mbuf *m, void *params __rte_unused)
{
        /* Set security session as the pkt metadata */
        m->udata64 = (uint64_t)session;

        return 0;
}

static int
otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md,
                          void **userdata)
{
        /* Retrieve userdata */
        *userdata = (void *)md;

        return 0;
}

static const struct rte_security_capability *
otx2_eth_sec_capabilities_get(void *device __rte_unused)
{
        return otx2_eth_sec_capabilities;
}

static struct rte_security_ops otx2_eth_sec_ops = {
        .session_create         = otx2_eth_sec_session_create,
        .session_destroy        = otx2_eth_sec_session_destroy,
        .session_get_size       = otx2_eth_sec_session_get_size,
        .set_pkt_metadata       = otx2_eth_sec_set_pkt_mdata,
        .get_userdata           = otx2_eth_sec_get_userdata,
        .capabilities_get       = otx2_eth_sec_capabilities_get
};

int
otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
{
        struct rte_security_ctx *ctx;
        int ret;

        ctx = rte_malloc("otx2_eth_sec_ctx",
                         sizeof(struct rte_security_ctx), 0);
        if (ctx == NULL)
                return -ENOMEM;

        ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
        if (ret) {
                rte_free(ctx);
                return ret;
        }

        /* Populate ctx */
        ctx->device = eth_dev;
        ctx->ops = &otx2_eth_sec_ops;
        ctx->sess_cnt = 0;

        eth_dev->security_ctx = ctx;

        return 0;
}

void
otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
{
        rte_free(eth_dev->security_ctx);
}

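/* Enable inline IPsec on the NIX LF via mbox: program the inbound SA
 * base address, SA entry size, tag constant, max frame size and the
 * maximum SA index derived from ipsec_in_max_spi.
 */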
static int
eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        struct nix_inline_ipsec_lf_cfg *req;
        struct otx2_mbox *mbox = dev->mbox;
        struct eth_sec_tag_const tag_const;
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        mz = rte_memzone_lookup(name);
        if (mz == NULL)
                return -EINVAL;

        req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
        req->enable = 1;
        req->sa_base_addr = mz->iova;

        req->ipsec_cfg0.tt = tt;

        tag_const.u32 = 0;
        tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
        tag_const.port = port;
        req->ipsec_cfg0.tag_const = tag_const.u32;

        req->ipsec_cfg0.sa_pow2_size =
                        rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
        req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1;

        req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
        req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;

        return otx2_mbox_process(mbox);
}

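/* Read the SSO tag type from the RQ:0 context and re-program the inline
 * IPsec LF configuration with it, so inline IPsec events carry the same
 * tag type as regular Rx traffic.
 */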
int
otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_rsp *rsp;
        struct nix_aq_enq_req *aq;
        int ret;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = 0; /* Read RQ:0 context */
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_READ;

        ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (ret < 0) {
                otx2_err("Could not read RQ context");
                return ret;
        }

        /* Update tag type */
        ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
        if (ret < 0)
                otx2_err("Could not update sec eth tag type");

        return ret;
}

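/* Set up inline IPsec for the port: reserve the inbound SA DB memzone
 * (ipsec_in_max_spi entries) and enable inline IPsec processing. Skipped
 * when neither Rx nor Tx security offload is requested.
 */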
int
otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
{
        const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
        int mz_sz, ret;
        uint16_t nb_sa;

        RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
                         !RTE_IS_POWER_OF_2(sa_width));

        if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
                return 0;

        nb_sa = dev->ipsec_in_max_spi;
        mz_sz = nb_sa * sa_width;
        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
                                         RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
        if (mz == NULL) {
                otx2_err("Could not allocate inbound SA DB");
                return -ENOMEM;
        }

        memset(mz->addr, 0, mz_sz);

        ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
        if (ret < 0) {
                otx2_err("Could not configure inline IPsec");
                goto sec_fini;
        }

        return 0;

sec_fini:
        otx2_err("Could not configure device for security");
        otx2_eth_sec_fini(eth_dev);
        return ret;
}

void
otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        uint16_t port = eth_dev->data->port_id;
        char name[RTE_MEMZONE_NAMESIZE];

        if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
                return;

        lookup_mem_sa_tbl_clear(eth_dev);

        in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
        rte_memzone_free(rte_memzone_lookup(name));
}