drivers/net/cnxk/cn9k_ethdev_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn9k_ethdev.h>
#include <cnxk_security.h>

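/* Symmetric crypto capabilities advertised for the inline IPsec offload:
 * AES-GCM (AEAD), AES-CBC (cipher) and SHA1-HMAC (auth).
 */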
static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

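/* Security capabilities exposed through rte_security: inline protocol
 * offload of ESP in tunnel mode, for both ingress and egress.
 */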
static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

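/* Initialize anti-replay window state for an inbound SA */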
static inline int
ar_window_init(struct cn9k_inb_priv_data *inb_priv)
{
	if (inb_priv->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
		plt_err("Replay window size:%u is not supported",
			inb_priv->replay_win_sz);
		return -ENOTSUP;
	}

	rte_spinlock_init(&inb_priv->ar.lock);
	/*
	 * Set window bottom to 1, base and top to size of
	 * window
	 */
	inb_priv->ar.winb = 1;
	inb_priv->ar.wint = inb_priv->replay_win_sz;
	inb_priv->ar.base = inb_priv->replay_win_sz;

	return 0;
}

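/* rte_security session_create op: install an inline IPsec SA. Inbound SAs
 * live at the SPI-indexed inbound SA base, outbound SAs use a newly
 * allocated SA index.
 */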
static int
cn9k_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess,
			    struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn9k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	bool inbound;
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	if (inbound) {
		struct cn9k_inb_priv_data *inb_priv;
		struct roc_onf_ipsec_inb_sa *inb_sa;

		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
		 * device always for CN9K.
		 */
		inb_sa = (struct roc_onf_ipsec_inb_sa *)
			 roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
		if (!inb_sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		/* Check if SA is already in use */
		if (inb_sa->ctl.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;

		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		inb_priv->replay_win_sz = ipsec->replay_win_sz;
		if (inb_priv->replay_win_sz) {
			rc = ar_window_init(inb_priv);
			if (rc)
				goto mempool_put;
		}

		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi;
		eth_sec->spi = ipsec->spi;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
	} else {
		struct cn9k_outb_priv_data *outb_priv;
		struct roc_onf_ipsec_outb_sa *outb_sa;
		uintptr_t sa_base = dev->outb.sa_base;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;
		/* Start sequence number with 1 */
		outb_priv->seq = 1;

		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
			outb_priv->copy_salt = 1;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);
	/*
	 * Update fast path info in priv area.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	rte_spinlock_unlock(lock);
	rte_mempool_put(mempool, eth_sec);
	if (rc)
		plt_err("%s", tbuf);
	return rc;
}

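/* rte_security session_destroy op: invalidate the SA, unlink the session
 * from its per-direction list and return the private data to its mempool.
 */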
static int
cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_onf_ipsec_outb_sa *outb_sa;
	struct roc_onf_ipsec_inb_sa *inb_sa;
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;
	rte_spinlock_t *lock;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	if (eth_sec->inb) {
		inb_sa = eth_sec->sa;
		/* Disable SA */
		inb_sa->ctl.valid = 0;

		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		outb_sa = eth_sec->sa;
		/* Disable SA */
		outb_sa->ctl.valid = 0;

		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);

	/* Put eth_sec object back to pool */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);
	return 0;
}

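/* Return the table of security capabilities supported by this driver */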
static const struct rte_security_capability *
cn9k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn9k_eth_sec_capabilities;
}

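/* Plug CN9K specific handlers into the common cnxk security ops */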
void
cn9k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
}