drivers/net/octeontx2/otx2_ethdev_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "otx2_common.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_ethdev.h"
#include "otx2_ethdev_sec.h"
#include "otx2_ipsec_fp.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

#define ERR_STR_SZ 256

struct eth_sec_tag_const {
	RTE_STD_C11
	union {
		struct {
			uint32_t rsvd_11_0  : 12;
			uint32_t port       : 8;
			uint32_t event_type : 4;
			uint32_t rsvd_31_24 : 8;
		};
		uint32_t u32;
	};
};

static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = otx2_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = otx2_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

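/*
 * Free the per-port inbound SA pointer table that was published in the
 * fastpath lookup memzone, if one was allocated for this port.
 */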
static void
lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
{
	static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
	uint16_t port = eth_dev->data->port_id;
	const struct rte_memzone *mz;
	uint64_t **sa_tbl;
	uint8_t *mem;

	mz = rte_memzone_lookup(name);
	if (mz == NULL)
		return;

	mem = mz->addr;

	sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
	if (sa_tbl[port] == NULL)
		return;

	rte_free(sa_tbl[port]);
	sa_tbl[port] = NULL;
}

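/*
 * Publish the SA pointer for the given SPI in the per-port table inside
 * the fastpath lookup memzone, so the Rx fastpath can translate an SPI
 * into its inbound SA. The table is allocated on first use.
 */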
static int
lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa,
			   char *err_str)
{
	static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	const struct rte_memzone *mz;
	uint64_t **sa_tbl;
	uint8_t *mem;

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		snprintf(err_str, ERR_STR_SZ,
			 "Could not find fastpath lookup table");
		return -EINVAL;
	}

	mem = mz->addr;

	sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);

	if (sa_tbl[port] == NULL) {
		sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
					  sizeof(uint64_t), 0);
		if (sa_tbl[port] == NULL) {
			snprintf(err_str, ERR_STR_SZ,
				 "Could not allocate SA index table");
			return -ENOMEM;
		}
	}

	sa_tbl[port][spi] = (uint64_t)sa;

	return 0;
}

static inline void
in_sa_mz_name_get(char *name, int size, uint16_t port)
{
	snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
}

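/*
 * Return a pointer to the inbound SA entry at the given index in the
 * per-port inbound SA database memzone.
 */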
static struct otx2_ipsec_fp_in_sa *
in_sa_get(uint16_t port, int sa_index)
{
	char name[RTE_MEMZONE_NAMESIZE];
	struct otx2_ipsec_fp_in_sa *sa;
	const struct rte_memzone *mz;

	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		otx2_err("Could not get the memzone reserved for IN SA DB");
		return NULL;
	}

	sa = mz->addr;

	return sa + sa_index;
}

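/*
 * Derive the fixed per-packet length constants for the session from the
 * IPsec and crypto transforms: partial_len accumulates the tunnel IP,
 * ESP/AH, optional UDP encapsulation, IV and ICV overhead, while
 * roundup_byte/roundup_len capture the cipher padding granularity.
 */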
static int
ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
		   struct rte_crypto_sym_xform *xform,
		   struct otx2_sec_session_ipsec_ip *sess)
{
	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;

	sess->partial_len = sizeof(struct rte_ipv4_hdr);

	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
		sess->partial_len += sizeof(struct rte_esp_hdr);
		sess->roundup_len = sizeof(struct rte_esp_tail);
	} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
		sess->partial_len += OTX2_SEC_AH_HDR_LEN;
	} else {
		return -EINVAL;
	}

	if (ipsec->options.udp_encap)
		sess->partial_len += sizeof(struct rte_udp_hdr);

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
			sess->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
			sess->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
			sess->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
		}
		return 0;
	}

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = xform;
		auth_xform = xform->next;
	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		auth_xform = xform;
		cipher_xform = xform->next;
	} else {
		return -EINVAL;
	}

	if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
		sess->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
		sess->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
	} else {
		return -EINVAL;
	}

	if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
		sess->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
	else
		return -EINVAL;

	return 0;
}

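/*
 * Pre-compute the HMAC ipad/opad for the authentication key by submitting
 * a WRITE_HMAC_IPAD_OPAD instruction to the CPT engine through the given
 * queue pair, then copy the result into the SA's hmac_key field.
 */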
static int
hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
	  const uint8_t *auth_key, int len, uint8_t *hmac_key)
{
	struct inst_data {
		struct otx2_cpt_res cpt_res;
		uint8_t buffer[64];
	} *md;

	volatile struct otx2_cpt_res *res;
	uint64_t timeout, lmt_status;
	struct otx2_cpt_inst_s inst;
	rte_iova_t md_iova;
	int ret;

	memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));

	md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
	if (md == NULL)
		return -ENOMEM;

	memcpy(md->buffer, auth_key, len);

	md_iova = rte_malloc_virt2iova(md);
	if (md_iova == RTE_BAD_IOVA) {
		ret = -EINVAL;
		goto free_md;
	}

	inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
	inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
	inst.param2 = ctl->auth_type;
	inst.dlen = len;
	inst.dptr = md_iova + offsetof(struct inst_data, buffer);
	inst.rptr = inst.dptr;
	inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;

	md->cpt_res.compcode = 0;
	md->cpt_res.uc_compcode = 0xff;

	timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();

	rte_io_wmb();

	do {
		otx2_lmt_mov(qp->lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
	} while (lmt_status == 0);

	res = (volatile struct otx2_cpt_res *)&md->cpt_res;

	/* Wait until instruction completes or times out */
	while (res->uc_compcode == 0xff) {
		if (rte_get_timer_cycles() > timeout)
			break;
	}

	if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
		ret = -EIO;
		goto free_md;
	}

	/* Retrieve the ipad and opad from rptr */
	memcpy(hmac_key, md->buffer, 48);

	ret = 0;

free_md:
	rte_free(md);
	return ret;
}

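/*
 * Create an egress (outbound) inline IPsec session: fill the outbound SA
 * (tunnel addresses, keys, nonce), pre-compute CPT instruction word 7,
 * acquire a CPT queue pair for this SA and mark the SA control word valid.
 */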
static int
eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
			      struct rte_security_ipsec_xform *ipsec,
			      struct rte_crypto_sym_xform *crypto_xform,
			      struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_sec_session_ipsec_ip *sess;
	uint16_t port = eth_dev->data->port_id;
	int cipher_key_len, auth_key_len, ret;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_ipsec_fp_sa_ctl *ctl;
	struct otx2_ipsec_fp_out_sa *sa;
	struct otx2_sec_session *priv;
	struct otx2_cpt_inst_s inst;
	struct otx2_cpt_qp *qp;

	priv = get_sec_session_private_data(sec_sess);
	priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	sess = &priv->ipsec.ip;

	sa = &sess->out_sa;
	ctl = &sa->ctl;
	if (ctl->valid) {
		otx2_err("SA already registered");
		return -EINVAL;
	}

	memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));

	sess->seq = 1;

	ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
	if (ret < 0)
		return ret;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		memcpy(sa->nonce, &ipsec->salt, 4);

	if (ipsec->options.udp_encap == 1) {
		sa->udp_src = 4500;
		sa->udp_dst = 4500;
	}

	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		/* Start ip id from 1 */
		sess->ip_id = 1;

		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
			       sizeof(struct in_addr));
			memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
			       sizeof(struct in_addr));
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	cipher_xform = crypto_xform;
	auth_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;
	auth_key = NULL;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;
	}

	if (cipher_key_len != 0)
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	else
		return -EINVAL;

	/* Determine word 7 of CPT instruction */
	inst.u64[7] = 0;
	inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
	inst.cptr = rte_mempool_virt2iova(sa);
	sess->inst_w7 = inst.u64[7];

	/* Get CPT QP to be used for this SA */
	ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
	if (ret)
		return ret;

	sess->qp = qp;

	sess->cpt_lmtline = qp->lmtline;
	sess->cpt_nq_reg = qp->lf_nq_reg;

	/* Populate control word */
	ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret)
		goto cpt_put;

	if (auth_key_len && auth_key) {
		ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
		if (ret)
			goto cpt_put;
	}

	rte_io_wmb();
	ctl->valid = 1;

	return 0;
cpt_put:
	otx2_sec_idev_tx_cpt_qp_put(sess->qp);
	return ret;
}

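/*
 * Create an ingress (inbound) inline IPsec session: reserve the SA entry
 * indexed by SPI in the inbound SA database, program keys, anti-replay
 * window and the SPI-to-SA lookup entry, then mark the SA control word
 * valid. Errors are reported via err_str once the table lock is dropped.
 */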
static int
eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
			     struct rte_security_ipsec_xform *ipsec,
			     struct rte_crypto_sym_xform *crypto_xform,
			     struct rte_security_session *sec_sess)
{
	struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_sec_session_ipsec_ip *sess;
	uint16_t port = eth_dev->data->port_id;
	int cipher_key_len, auth_key_len, ret;
	const uint8_t *cipher_key, *auth_key;
	struct otx2_ipsec_fp_sa_ctl *ctl;
	struct otx2_ipsec_fp_in_sa *sa;
	struct otx2_sec_session *priv;
	char err_str[ERR_STR_SZ];
	struct otx2_cpt_qp *qp;

	memset(err_str, 0, ERR_STR_SZ);

	if (ipsec->spi >= dev->ipsec_in_max_spi) {
		otx2_err("SPI exceeds max supported");
		return -EINVAL;
	}

	sa = in_sa_get(port, ipsec->spi);
	if (sa == NULL)
		return -ENOMEM;

	ctl = &sa->ctl;

	priv = get_sec_session_private_data(sec_sess);
	priv->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	sess = &priv->ipsec.ip;

	rte_spinlock_lock(&dev->ipsec_tbl_lock);

	if (ctl->valid) {
		snprintf(err_str, ERR_STR_SZ, "SA already registered");
		ret = -EEXIST;
		goto tbl_unlock;
	}

	memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));

	auth_xform = crypto_xform;
	cipher_xform = crypto_xform->next;

	cipher_key_len = 0;
	auth_key_len = 0;
	auth_key = NULL;

	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			memcpy(sa->nonce, &ipsec->salt, 4);
		cipher_key = crypto_xform->aead.key.data;
		cipher_key_len = crypto_xform->aead.key.length;
	} else {
		cipher_key = cipher_xform->cipher.key.data;
		cipher_key_len = cipher_xform->cipher.key.length;
		auth_key = auth_xform->auth.key.data;
		auth_key_len = auth_xform->auth.key.length;
	}

	if (cipher_key_len != 0) {
		memcpy(sa->cipher_key, cipher_key, cipher_key_len);
	} else {
		snprintf(err_str, ERR_STR_SZ, "Invalid cipher key len");
		ret = -EINVAL;
		goto sa_clear;
	}

	sess->in_sa = sa;

	sa->userdata = priv->userdata;

	sa->replay_win_sz = ipsec->replay_win_sz;

	if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa, err_str)) {
		ret = -EINVAL;
		goto sa_clear;
	}

	ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
	if (ret) {
		snprintf(err_str, ERR_STR_SZ,
			 "Could not set SA CTL word (err: %d)", ret);
		goto sa_clear;
	}

	if (auth_key_len && auth_key) {
		/* Get a queue pair for HMAC init */
		ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
		if (ret) {
			snprintf(err_str, ERR_STR_SZ, "Could not get CPT QP");
			goto sa_clear;
		}

		ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
		otx2_sec_idev_tx_cpt_qp_put(qp);
		if (ret) {
			snprintf(err_str, ERR_STR_SZ,
				 "Could not pre-compute HMAC ipad/opad");
			goto sa_clear;
		}
	}

	if (sa->replay_win_sz) {
		if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
			snprintf(err_str, ERR_STR_SZ,
				 "Replay window size is not supported");
			ret = -ENOTSUP;
			goto sa_clear;
		}
		sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
					 0);
		if (sa->replay == NULL) {
			snprintf(err_str, ERR_STR_SZ,
				 "Could not allocate memory");
			ret = -ENOMEM;
			goto sa_clear;
		}

		rte_spinlock_init(&sa->replay->lock);
		/*
		 * Set window bottom to 1, base and top to size of
		 * window
		 */
		sa->replay->winb = 1;
		sa->replay->wint = sa->replay_win_sz;
		sa->replay->base = sa->replay_win_sz;
		sa->esn_low = 0;
		sa->esn_hi = 0;
	}

	rte_io_wmb();
	ctl->valid = 1;

	rte_spinlock_unlock(&dev->ipsec_tbl_lock);
	return 0;

sa_clear:
	memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));

tbl_unlock:
	rte_spinlock_unlock(&dev->ipsec_tbl_lock);

	otx2_err("%s", err_str);

	return ret;
}

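/*
 * Verify the IPsec/crypto transform combination and dispatch session
 * creation to the ingress or egress handler based on the SA direction.
 */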
static int
eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
			  struct rte_security_ipsec_xform *ipsec,
			  struct rte_crypto_sym_xform *crypto_xform,
			  struct rte_security_session *sess)
{
	int ret;

	ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
	if (ret)
		return ret;

	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
						    crypto_xform, sess);
	else
		return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
						     crypto_xform, sess);
}

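/*
 * rte_security .session_create callback: allocate the session private data
 * from the application-supplied mempool and create an inline protocol
 * IPsec session (the only supported action/protocol combination).
 */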
static int
otx2_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess,
			    struct rte_mempool *mempool)
{
	struct otx2_sec_session *priv;
	int ret;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (rte_mempool_get(mempool, (void **)&priv)) {
		otx2_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	set_sec_session_private_data(sess, priv);

	/*
	 * Save userdata provided by the application. For ingress packets, this
	 * could be used to identify the SA.
	 */
	priv->userdata = conf->userdata;

	if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
		ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
						conf->crypto_xform,
						sess);
	else
		ret = -ENOTSUP;

	if (ret)
		goto mempool_put;

	return 0;

mempool_put:
	rte_mempool_put(mempool, priv);
	set_sec_session_private_data(sess, NULL);
	return ret;
}

static void
otx2_eth_sec_free_anti_replay(struct otx2_ipsec_fp_in_sa *sa)
{
	if (sa != NULL) {
		if (sa->replay_win_sz && sa->replay)
			rte_free(sa->replay);
	}
}

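/*
 * rte_security .session_destroy callback: invalidate the SA, release the
 * anti-replay window and the CPT queue pair held by the session, and
 * return the private data to its mempool.
 */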
static int
otx2_eth_sec_session_destroy(void *device,
			     struct rte_security_session *sess)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(device);
	struct otx2_sec_session_ipsec_ip *sess_ip;
	struct otx2_ipsec_fp_in_sa *sa;
	struct otx2_sec_session *priv;
	struct rte_mempool *sess_mp;
	int ret;

	priv = get_sec_session_private_data(sess);
	if (priv == NULL)
		return -EINVAL;

	sess_ip = &priv->ipsec.ip;

	if (priv->ipsec.dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		rte_spinlock_lock(&dev->ipsec_tbl_lock);
		sa = sess_ip->in_sa;

		/* Release the anti replay window */
		otx2_eth_sec_free_anti_replay(sa);

		/* Clear SA table entry */
		if (sa != NULL) {
			sa->ctl.valid = 0;
			rte_io_wmb();
		}

		rte_spinlock_unlock(&dev->ipsec_tbl_lock);
	}

	/* Release CPT LF used for this session */
	if (sess_ip->qp != NULL) {
		ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
		if (ret)
			return ret;
	}

	sess_mp = rte_mempool_from_obj(priv);

	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(sess_mp, priv);

	return 0;
}

static unsigned int
otx2_eth_sec_session_get_size(void *device __rte_unused)
{
	return sizeof(struct otx2_sec_session);
}

static int
otx2_eth_sec_set_pkt_mdata(void *device __rte_unused,
			   struct rte_security_session *session,
			   struct rte_mbuf *m, void *params __rte_unused)
{
	/* Set security session as the pkt metadata */
	*rte_security_dynfield(m) = (rte_security_dynfield_t)session;

	return 0;
}

static int
otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md,
			  void **userdata)
{
	/* Retrieve userdata */
	*userdata = (void *)md;

	return 0;
}

static const struct rte_security_capability *
otx2_eth_sec_capabilities_get(void *device __rte_unused)
{
	return otx2_eth_sec_capabilities;
}

static struct rte_security_ops otx2_eth_sec_ops = {
	.session_create		= otx2_eth_sec_session_create,
	.session_destroy	= otx2_eth_sec_session_destroy,
	.session_get_size	= otx2_eth_sec_session_get_size,
	.set_pkt_metadata	= otx2_eth_sec_set_pkt_mdata,
	.get_userdata		= otx2_eth_sec_get_userdata,
	.capabilities_get	= otx2_eth_sec_capabilities_get
};

int
otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
{
	struct rte_security_ctx *ctx;
	int ret;

	ctx = rte_malloc("otx2_eth_sec_ctx",
			 sizeof(struct rte_security_ctx), 0);
	if (ctx == NULL)
		return -ENOMEM;

	ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
	if (ret) {
		rte_free(ctx);
		return ret;
	}

	/* Populate ctx */

	ctx->device = eth_dev;
	ctx->ops = &otx2_eth_sec_ops;
	ctx->sess_cnt = 0;

	eth_dev->security_ctx = ctx;

	return 0;
}

void
otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
{
	rte_free(eth_dev->security_ctx);
}

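/*
 * Issue a NIX inline IPsec LF config mailbox request: program the inbound
 * SA base address and SA size, the maximum packet length, the SA index
 * width/maximum derived from ipsec_in_max_spi, and the SSO tag type and
 * tag constant (event type and port) used for inline IPsec packets.
 */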
static int
eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	struct nix_inline_ipsec_lf_cfg *req;
	struct otx2_mbox *mbox = dev->mbox;
	struct eth_sec_tag_const tag_const;
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	mz = rte_memzone_lookup(name);
	if (mz == NULL)
		return -EINVAL;

	req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
	req->enable = 1;
	req->sa_base_addr = mz->iova;

	req->ipsec_cfg0.tt = tt;

	tag_const.u32 = 0;
	tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
	tag_const.port = port;
	req->ipsec_cfg0.tag_const = tag_const.u32;

	req->ipsec_cfg0.sa_pow2_size =
			rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
	req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1;

	req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
	req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;

	return otx2_mbox_process(mbox);
}

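/*
 * Read the RQ:0 context over the AF mailbox to fetch the current SSO tag
 * type (sso_tt) and reprogram the inline IPsec LF configuration with it.
 */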
int
otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	int ret;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = 0; /* Read RQ:0 context */
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_READ;

	ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (ret < 0) {
		otx2_err("Could not read RQ context");
		return ret;
	}

	/* Update tag type */
	ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
	if (ret < 0)
		otx2_err("Could not update sec eth tag type");

	return ret;
}

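/*
 * Per-port security init: when Rx/Tx security offload is enabled, register
 * the security dynfield, reserve the IOVA-contiguous inbound SA database
 * memzone (one SA per possible SPI) and configure the inline IPsec LF.
 */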
int
otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
{
	const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int mz_sz, ret;
	uint16_t nb_sa;

	RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
			 !RTE_IS_POWER_OF_2(sa_width));

	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
		return 0;

	if (rte_security_dynfield_register() < 0)
		return -rte_errno;

	nb_sa = dev->ipsec_in_max_spi;
	mz_sz = nb_sa * sa_width;
	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);

	if (mz == NULL) {
		otx2_err("Could not allocate inbound SA DB");
		return -ENOMEM;
	}

	memset(mz->addr, 0, mz_sz);

	ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
	if (ret < 0) {
		otx2_err("Could not configure inline IPsec");
		goto sec_fini;
	}

	rte_spinlock_init(&dev->ipsec_tbl_lock);

	return 0;

sec_fini:
	otx2_err("Could not configure device for security");
	otx2_eth_sec_fini(eth_dev);
	return ret;
}

void
otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint16_t port = eth_dev->data->port_id;
	char name[RTE_MEMZONE_NAMESIZE];

	if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
	    !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
		return;

	lookup_mem_sa_tbl_clear(eth_dev);

	in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
	rte_memzone_free(rte_memzone_lookup(name));
}