net/cnxk: disable default inner checksum for outbound inline
dpdk.git: drivers/net/cnxk/cn10k_ethdev_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_pmd_cnxk.h>

#include <cn10k_ethdev.h>
#include <cnxk_security.h>
#include <roc_priv.h>

static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 24,
					.max = 24,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 16,
					.increment = 8
				}
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Transport Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

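/* Free a possibly chained mbuf by returning each segment directly to its
 * NPA aura (pool), bypassing the mempool per-lcore cache. Used from the
 * SSO work callback below to drop error packets.
 */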
static inline void
cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
{
	struct rte_mbuf *next;

	if (!mbuf)
		return;
	do {
		next = mbuf->next;
		roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
		mbuf = next;
	} while (mbuf != NULL);
}

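/* SSO work callback for inline IPsec events. Handles inbound packets
 * flagged by the inline device (bad L4), which are simply freed, outbound
 * inline processing errors decoded from the CPT result area located after
 * the packet data, and SA soft-expiry notifications. Errors and expiries
 * are reported to the application via RTE_ETH_EVENT_IPSEC callbacks.
 */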
void
cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
	struct rte_eth_event_ipsec_desc desc;
	struct cn10k_sec_sess_priv sess_priv;
	struct cn10k_outb_priv_data *priv;
	struct roc_ot_ipsec_outb_sa *sa;
	struct cpt_cn10k_res_s *res;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	static uint64_t warn_cnt;
	uint16_t dlen_adj, rlen;
	struct rte_mbuf *mbuf;
	uintptr_t sa_base;
	uintptr_t nixtx;
	uint8_t port;

	RTE_SET_USED(args);

	switch ((gw[0] >> 28) & 0xF) {
	case RTE_EVENT_TYPE_ETHDEV:
		/* Event from inbound inline dev due to IPSEC packet bad L4 */
		mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
		plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
		cnxk_pktmbuf_free_no_cache(mbuf);
		return;
	case RTE_EVENT_TYPE_CPU:
		/* Check for subtype */
		if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
			/* Event from outbound inline error */
			mbuf = (struct rte_mbuf *)gw[1];
			break;
		}
		/* Fall through */
	default:
		if (soft_exp_event & 0x1) {
			sa = (struct roc_ot_ipsec_outb_sa *)args;
			priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
			desc.metadata = (uint64_t)priv->userdata;
			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY;
			eth_dev = &rte_eth_devices[soft_exp_event >> 8];
			rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_IPSEC, &desc);
		} else {
			plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx",
				gw[0], gw[1]);
		}
		return;
	}

	/* Get ethdev port from tag */
	port = gw[0] & 0xFF;
	eth_dev = &rte_eth_devices[port];
	dev = cnxk_eth_pmd_priv(eth_dev);

	sess_priv.u64 = *rte_security_dynfield(mbuf);
	/* Calculate dlen adj */
	dlen_adj = mbuf->pkt_len - mbuf->l2_len;
	rlen = (dlen_adj + sess_priv.roundup_len) +
	       (sess_priv.roundup_byte - 1);
	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
	rlen += sess_priv.partial_len;
	dlen_adj = rlen - dlen_adj;

	/* Find the res area residing on next cacheline after end of data */
	nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
	nixtx += BIT_ULL(7);
	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
	res = (struct cpt_cn10k_res_s *)nixtx;

	plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
		    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);

	sess_priv.u64 = *rte_security_dynfield(mbuf);

	sa_base = dev->outb.sa_base;
	sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
	priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);

	memset(&desc, 0, sizeof(desc));

	switch (res->uc_compcode) {
	case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
		break;
	case ROC_IE_OT_UCC_ERR_PKT_IP:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, bad ip pkt, mbuf %p,"
				 " sa_index %u (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, warn_cnt);
		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
		break;
	default:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, mbuf %p, sa_index %u,"
				 " compcode %x uc %x,"
				 " (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, res->compcode,
				 res->uc_compcode, warn_cnt);
		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
		break;
	}

	desc.metadata = (uint64_t)priv->userdata;
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
	cnxk_pktmbuf_free_no_cache(mbuf);
}

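/* Debug-only helper: override the outbound SA IV with values parsed from a
 * comma-separated string (taken from the CN10K_ETH_SEC_IV_OVR environment
 * variable) and configure the SA to source the IV from the SA context.
 */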
static void
outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
{
	uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
	char *iv_str = strdup(__iv_str);
	char *iv_b = NULL, len = 16;
	char *save;
	int i;

	if (!iv_str)
		return;

	if (outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
	    outb_sa->w2.s.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
		memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
		memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));

		iv_dbg = outb_sa->iv.s.iv_dbg1;
		for (i = 0; i < 4; i++) {
			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

		iv_dbg = outb_sa->iv.s.iv_dbg2;
		for (i = 0; i < 4; i++) {
			iv_b = strtok_r(NULL, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

	} else {
		iv_dbg = outb_sa->iv.iv_dbg;
		memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));

		for (i = 0; i < len; i++) {
			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
		*(uint64_t *)&iv_dbg[8] =
			rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
	}

	/* Update source of IV */
	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
	free(iv_str);
}

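/* When soft byte/packet lifetime limits are requested, point the SA
 * error-control context at the outbound soft-expiry error ring so that
 * expiry notifications are reported through the ring.
 */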
static int
cn10k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix,
				struct roc_ot_ipsec_outb_sa *sa, void *sa_cptr,
				struct rte_security_ipsec_xform *ipsec_xfrm,
				uint32_t sa_idx)
{
	uint64_t *ring_base, ring_addr;

	if (ipsec_xfrm->life.bytes_soft_limit |
	    ipsec_xfrm->life.packets_soft_limit) {
		ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
		if (ring_base == NULL)
			return -ENOTSUP;

		ring_addr = ring_base[sa_idx >>
				      ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
		sa->ctx.err_ctl.s.address = ring_addr >> 3;
		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
	}

	return 0;
}

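/* Create an inline IPsec session. For inbound, the SA slot is looked up by
 * SPI in the NIX/inline-device SA base and filled in place; for outbound, an
 * SA index is allocated and the round-up length info needed by the Tx fast
 * path is cached in the session private data. The prepared SA is flushed to
 * the hardware context cache with roc_nix_inl_ctx_write().
 */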
static int
cn10k_eth_sec_session_create(void *device,
			     struct rte_security_session_conf *conf,
			     struct rte_security_session *sess,
			     struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn10k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	struct roc_nix *nix = &dev->nix;
	bool inbound, inl_dev;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	if (conf->ipsec.options.ip_reassembly_en &&
			dev->reass_dynfield_off < 0) {
		if (rte_eth_ip_reassembly_dynfield_register(&dev->reass_dynfield_off,
					&dev->reass_dynflag_bit) < 0)
			return -rte_errno;
	}

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	inl_dev = !!dev->inb.inl_dev;

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	/* Acquire lock on inline dev for inbound */
	if (inbound && inl_dev)
		roc_nix_inl_dev_lock();

	if (inbound) {
		struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
		struct cn10k_inb_priv_data *inb_priv;
		uint32_t spi_mask;
		uintptr_t sa;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
		sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
		if (!sa && dev->inb.inl_dev) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa, inline dev "
				 "not found or spi not in range");
			rc = -ENOTSUP;
			goto mempool_put;
		} else if (!sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

		/* Check if SA is already in use */
		if (inb_sa->w2.s.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
					       true);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;
		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Save SA index/SPI in cookie for now */
		inb_sa_dptr->w1.s.cookie =
			rte_cpu_to_be_32(ipsec->spi & spi_mask);

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi & spi_mask;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi & spi_mask;
		eth_sec->spi = ipsec->spi;
		eth_sec->inl_dev = !!dev->inb.inl_dev;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_inb_sa));
		if (rc)
			goto mempool_put;

		if (conf->ipsec.options.ip_reassembly_en) {
			inb_priv->reass_dynfield_off = dev->reass_dynfield_off;
			inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
		}

	} else {
		struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
		struct cn10k_outb_priv_data *outb_priv;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint64_t sa_base = dev->outb.sa_base;
		const char *iv_str;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		iv_str = getenv("CN10K_ETH_SEC_IV_OVR");
		if (iv_str)
			outb_dbg_iv_update(outb_sa_dptr, iv_str);

		/* Fill outbound sa misc params */
		rc = cn10k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr,
						     outb_sa, ipsec, sa_idx);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outb sa misc params, rc=%d",
				 rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		/* Prepare session priv */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;
		sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
		sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
		/* Propagate inner checksum enable from SA to fast path */
		sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
				    !ipsec->options.l4_csum_enable);

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_outb_sa));
		if (rc)
			goto mempool_put;
	}
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);
	/*
	 * Update fast path info in priv area.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	rte_mempool_put(mempool, eth_sec);
	if (rc)
		plt_err("%s", tbuf);
	return rc;
}

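/* Destroy an inline IPsec session: re-initialize the SA as invalid through
 * the context write path, release the outbound SA index when applicable,
 * unlink the session from the per-direction list and return the private
 * data object to its mempool.
 */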
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;
	rte_spinlock_t *lock;
	void *sa_dptr;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	if (eth_sec->inl_dev)
		roc_nix_inl_dev_lock();

	if (eth_sec->inb) {
		/* Disable SA */
		sa_dptr = dev->inb.sa_dptr;
		roc_ot_ipsec_inb_sa_init(sa_dptr, true);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_inb_sa));
		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		/* Disable SA */
		sa_dptr = dev->outb.sa_dptr;
		roc_ot_ipsec_outb_sa_init(sa_dptr);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_outb_sa));
		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}
	if (eth_sec->inl_dev)
		roc_nix_inl_dev_unlock();

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);

	/* Put eth_sec object back to pool */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);
	return 0;
}

static const struct rte_security_capability *
cn10k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn10k_eth_sec_capabilities;
}

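/* Update an existing inline IPsec session in place by rebuilding the SA from
 * the new xform and flushing it to hardware with roc_nix_inl_ctx_write().
 */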
static int
cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
			     struct rte_security_session_conf *conf)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_ot_ipsec_inb_sa *inb_sa_dptr;
	struct rte_security_ipsec_xform *ipsec;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	bool inbound;
	int rc;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
	    conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOENT;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	eth_sec->spi = conf->ipsec.spi;

	if (inbound) {
		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
					       true);
		if (rc)
			return -EINVAL;

		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_inb_sa));
		if (rc)
			return -EINVAL;
	} else {
		struct roc_ot_ipsec_outb_sa *outb_sa_dptr;

		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
		if (rc)
			return -EINVAL;
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_outb_sa));
		if (rc)
			return -EINVAL;
	}

	return 0;
}

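/* PMD-specific API: flush the hardware SA context to memory and copy the
 * first 'len' bytes of the SA into the caller's buffer.
 */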
int
rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
			void *data, uint32_t len)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	int rc;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (eth_sec == NULL)
		return -EINVAL;

	rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
			    ROC_NIX_INL_SA_OP_FLUSH);
	if (rc)
		return -EINVAL;
	rte_delay_ms(1);
	memcpy(data, eth_sec->sa, len);

	return 0;
}

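/* PMD-specific API: write 'len' bytes of caller-provided SA data to the
 * session's hardware SA through the context write path.
 */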
int
rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
			 void *data, uint32_t len)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	int rc = -EINVAL;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (eth_sec == NULL)
		return rc;
	rc = roc_nix_inl_ctx_write(&dev->nix, data, eth_sec->sa, eth_sec->inb,
				   len);
	if (rc)
		return rc;

	return 0;
}

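/* Override the common cnxk security ops with the cn10k specific handlers;
 * guarded so the override is applied only once.
 */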
void
cn10k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
	cnxk_eth_sec_ops.session_update = cn10k_eth_sec_session_update;
}