/* Scrape residue from gitweb page header (preserved as a comment):
 * net/cnxk: support telemetry
 * [dpdk.git] / drivers / net / cnxk / cn9k_ethdev_sec.c
 */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_security.h>
#include <rte_security_driver.h>

#include <cn9k_ethdev.h>
#include <cnxk_security.h>

12 static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
13         {       /* AES GCM */
14                 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
15                 {.sym = {
16                         .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
17                         {.aead = {
18                                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
19                                 .block_size = 16,
20                                 .key_size = {
21                                         .min = 16,
22                                         .max = 32,
23                                         .increment = 8
24                                 },
25                                 .digest_size = {
26                                         .min = 16,
27                                         .max = 16,
28                                         .increment = 0
29                                 },
30                                 .aad_size = {
31                                         .min = 8,
32                                         .max = 12,
33                                         .increment = 4
34                                 },
35                                 .iv_size = {
36                                         .min = 12,
37                                         .max = 12,
38                                         .increment = 0
39                                 }
40                         }, }
41                 }, }
42         },
43         {       /* AES CBC */
44                 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
45                 {.sym = {
46                         .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
47                         {.cipher = {
48                                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
49                                 .block_size = 16,
50                                 .key_size = {
51                                         .min = 16,
52                                         .max = 32,
53                                         .increment = 8
54                                 },
55                                 .iv_size = {
56                                         .min = 16,
57                                         .max = 16,
58                                         .increment = 0
59                                 }
60                         }, }
61                 }, }
62         },
63         {       /* SHA1 HMAC */
64                 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
65                 {.sym = {
66                         .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
67                         {.auth = {
68                                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
69                                 .block_size = 64,
70                                 .key_size = {
71                                         .min = 20,
72                                         .max = 64,
73                                         .increment = 1
74                                 },
75                                 .digest_size = {
76                                         .min = 12,
77                                         .max = 12,
78                                         .increment = 0
79                                 },
80                         }, }
81                 }, }
82         },
83         RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
84 };
85
86 static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
87         {       /* IPsec Inline Protocol ESP Tunnel Ingress */
88                 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
89                 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
90                 .ipsec = {
91                         .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
92                         .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
93                         .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
94                         .options = { 0 }
95                 },
96                 .crypto_capabilities = cn9k_eth_sec_crypto_caps,
97                 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
98         },
99         {       /* IPsec Inline Protocol ESP Tunnel Egress */
100                 .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
101                 .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
102                 .ipsec = {
103                         .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
104                         .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
105                         .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
106                         .options = { 0 }
107                 },
108                 .crypto_capabilities = cn9k_eth_sec_crypto_caps,
109                 .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
110         },
111         {
112                 .action = RTE_SECURITY_ACTION_TYPE_NONE
113         }
114 };
115
116 static inline int
117 ar_window_init(struct cn9k_inb_priv_data *inb_priv)
118 {
119         if (inb_priv->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
120                 plt_err("Replay window size:%u is not supported",
121                         inb_priv->replay_win_sz);
122                 return -ENOTSUP;
123         }
124
125         rte_spinlock_init(&inb_priv->ar.lock);
126         /*
127          * Set window bottom to 1, base and top to size of
128          * window
129          */
130         inb_priv->ar.winb = 1;
131         inb_priv->ar.wint = inb_priv->replay_win_sz;
132         inb_priv->ar.base = inb_priv->replay_win_sz;
133
134         return 0;
135 }
136
137 static int
138 cn9k_eth_sec_session_create(void *device,
139                             struct rte_security_session_conf *conf,
140                             struct rte_security_session *sess,
141                             struct rte_mempool *mempool)
142 {
143         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
144         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
145         struct rte_security_ipsec_xform *ipsec;
146         struct cn9k_sec_sess_priv sess_priv;
147         struct rte_crypto_sym_xform *crypto;
148         struct cnxk_eth_sec_sess *eth_sec;
149         bool inbound;
150         int rc = 0;
151
152         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
153                 return -ENOTSUP;
154
155         if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
156                 return -ENOTSUP;
157
158         if (rte_security_dynfield_register() < 0)
159                 return -ENOTSUP;
160
161         ipsec = &conf->ipsec;
162         crypto = conf->crypto_xform;
163         inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
164
165         /* Search if a session already exists */
166         if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
167                 plt_err("%s SA with SPI %u already in use",
168                         inbound ? "Inbound" : "Outbound", ipsec->spi);
169                 return -EEXIST;
170         }
171
172         if (rte_mempool_get(mempool, (void **)&eth_sec)) {
173                 plt_err("Could not allocate security session private data");
174                 return -ENOMEM;
175         }
176
177         memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
178         sess_priv.u64 = 0;
179
180         if (inbound) {
181                 struct cn9k_inb_priv_data *inb_priv;
182                 struct roc_onf_ipsec_inb_sa *inb_sa;
183
184                 PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
185                                   ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);
186
187                 /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
188                  * device always for CN9K.
189                  */
190                 inb_sa = (struct roc_onf_ipsec_inb_sa *)
191                         roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
192                 if (!inb_sa) {
193                         plt_err("Failed to create ingress sa");
194                         rc = -EFAULT;
195                         goto mempool_put;
196                 }
197
198                 /* Check if SA is already in use */
199                 if (inb_sa->ctl.valid) {
200                         plt_err("Inbound SA with SPI %u already in use",
201                                 ipsec->spi);
202                         rc = -EBUSY;
203                         goto mempool_put;
204                 }
205
206                 memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));
207
208                 /* Fill inbound sa params */
209                 rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
210                 if (rc) {
211                         plt_err("Failed to init inbound sa, rc=%d", rc);
212                         goto mempool_put;
213                 }
214
215                 inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
216                 /* Back pointer to get eth_sec */
217                 inb_priv->eth_sec = eth_sec;
218
219                 /* Save userdata in inb private area */
220                 inb_priv->userdata = conf->userdata;
221
222                 inb_priv->replay_win_sz = ipsec->replay_win_sz;
223                 if (inb_priv->replay_win_sz) {
224                         rc = ar_window_init(inb_priv);
225                         if (rc)
226                                 goto mempool_put;
227                 }
228
229                 /* Prepare session priv */
230                 sess_priv.inb_sa = 1;
231                 sess_priv.sa_idx = ipsec->spi;
232
233                 /* Pointer from eth_sec -> inb_sa */
234                 eth_sec->sa = inb_sa;
235                 eth_sec->sess = sess;
236                 eth_sec->sa_idx = ipsec->spi;
237                 eth_sec->spi = ipsec->spi;
238                 eth_sec->inb = true;
239
240                 TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
241                 dev->inb.nb_sess++;
242         } else {
243                 struct cn9k_outb_priv_data *outb_priv;
244                 struct roc_onf_ipsec_outb_sa *outb_sa;
245                 uintptr_t sa_base = dev->outb.sa_base;
246                 struct cnxk_ipsec_outb_rlens *rlens;
247                 uint32_t sa_idx;
248
249                 PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
250                                   ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);
251
252                 /* Alloc an sa index */
253                 rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
254                 if (rc)
255                         goto mempool_put;
256
257                 outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
258                 outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
259                 rlens = &outb_priv->rlens;
260
261                 memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));
262
263                 /* Fill outbound sa params */
264                 rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
265                 if (rc) {
266                         plt_err("Failed to init outbound sa, rc=%d", rc);
267                         rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
268                         goto mempool_put;
269                 }
270
271                 /* Save userdata */
272                 outb_priv->userdata = conf->userdata;
273                 outb_priv->sa_idx = sa_idx;
274                 outb_priv->eth_sec = eth_sec;
275                 /* Start sequence number with 1 */
276                 outb_priv->seq = 1;
277
278                 memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
279                 if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
280                         outb_priv->copy_salt = 1;
281
282                 /* Save rlen info */
283                 cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
284
285                 sess_priv.sa_idx = outb_priv->sa_idx;
286                 sess_priv.roundup_byte = rlens->roundup_byte;
287                 sess_priv.roundup_len = rlens->roundup_len;
288                 sess_priv.partial_len = rlens->partial_len;
289
290                 /* Pointer from eth_sec -> outb_sa */
291                 eth_sec->sa = outb_sa;
292                 eth_sec->sess = sess;
293                 eth_sec->sa_idx = sa_idx;
294                 eth_sec->spi = ipsec->spi;
295
296                 TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
297                 dev->outb.nb_sess++;
298         }
299
300         /* Sync SA content */
301         plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
302
303         plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
304                     inbound ? "inbound" : "outbound", eth_sec->spi,
305                     eth_sec->sa_idx);
306         /*
307          * Update fast path info in priv area.
308          */
309         set_sec_session_private_data(sess, (void *)sess_priv.u64);
310
311         return 0;
312 mempool_put:
313         rte_mempool_put(mempool, eth_sec);
314         return rc;
315 }
316
317 static int
318 cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
319 {
320         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
321         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
322         struct roc_onf_ipsec_outb_sa *outb_sa;
323         struct roc_onf_ipsec_inb_sa *inb_sa;
324         struct cnxk_eth_sec_sess *eth_sec;
325         struct rte_mempool *mp;
326
327         eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
328         if (!eth_sec)
329                 return -ENOENT;
330
331         if (eth_sec->inb) {
332                 inb_sa = eth_sec->sa;
333                 /* Disable SA */
334                 inb_sa->ctl.valid = 0;
335
336                 TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
337                 dev->inb.nb_sess--;
338         } else {
339                 outb_sa = eth_sec->sa;
340                 /* Disable SA */
341                 outb_sa->ctl.valid = 0;
342
343                 /* Release Outbound SA index */
344                 cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
345                 TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
346                 dev->outb.nb_sess--;
347         }
348
349         /* Sync SA content */
350         plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
351
352         plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
353                     eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
354                     eth_sec->sa_idx);
355
356         /* Put eth_sec object back to pool */
357         mp = rte_mempool_from_obj(eth_sec);
358         set_sec_session_private_data(sess, NULL);
359         rte_mempool_put(mp, eth_sec);
360         return 0;
361 }
362
363 static const struct rte_security_capability *
364 cn9k_eth_sec_capabilities_get(void *device __rte_unused)
365 {
366         return cn9k_eth_sec_capabilities;
367 }
368
369 void
370 cn9k_eth_sec_ops_override(void)
371 {
372         static int init_once;
373
374         if (init_once)
375                 return;
376         init_once = 1;
377
378         /* Update platform specific ops */
379         cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
380         cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
381         cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
382 }