net/cnxk: support Rx security offload on cn10k
[dpdk.git] / drivers / net / cnxk / cn9k_ethdev_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include <rte_cryptodev.h>
6 #include <rte_security.h>
7 #include <rte_security_driver.h>
8
9 #include <cn9k_ethdev.h>
10 #include <cnxk_security.h>
11
/* Crypto capabilities advertised for CN9K inline IPsec: AES-GCM AEAD only.
 * Referenced by the security capability entries below via
 * crypto_capabilities.
 */
static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					/* 16/24/32 bytes: AES-128/192/256 */
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					/* Full 16-byte GCM tag only */
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					/* 8 or 12 bytes */
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					/* Fixed 12-byte IV */
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
45
/* Security capabilities for CN9K: inline-protocol ESP in tunnel mode,
 * one entry each for ingress and egress.  The list is terminated by an
 * ACTION_TYPE_NONE entry, per rte_security convention.
 */
static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{	/* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.options = { 0 }
		},
		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};
75
/**
 * rte_security session_create op for CN9K inline IPsec.
 *
 * Accepts only INLINE_PROTOCOL + IPSEC configurations.  Sets up either an
 * inbound SA (indexed directly by SPI in the NIX inbound SA base) or an
 * outbound SA (index allocated from the device's outbound SA pool), links
 * a tracking object (cnxk_eth_sec_sess) from @mempool into the per-device
 * inb/outb lists, and stores the packed fast-path word (sess_priv.u64) as
 * the session private data.
 *
 * @param device	rte_eth_dev pointer passed by the security layer.
 * @param conf		Session configuration (action, ipsec xform, crypto xform).
 * @param sess		Security session to attach fast-path private data to.
 * @param mempool	Pool supplying cnxk_eth_sec_sess tracking objects.
 * @return 0 on success, negative errno on failure.
 */
static int
cn9k_eth_sec_session_create(void *device,
			    struct rte_security_session_conf *conf,
			    struct rte_security_session *sess,
			    struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn9k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	bool inbound;
	int rc = 0;

	/* Only inline-protocol IPsec is supported on this path. */
	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	/* Fast path stores per-packet metadata in the mbuf dynfield. */
	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	/* sess_priv.u64 is the packed word handed to the fast path below. */
	sess_priv.u64 = 0;

	if (inbound) {
		struct cn9k_inb_priv_data *inb_priv;
		struct roc_onf_ipsec_inb_sa *inb_sa;

		/* Private data must fit in the SW-reserved area of the SA. */
		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
		 * device always for CN9K.
		 */
		inb_sa = (struct roc_onf_ipsec_inb_sa *)
			roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
		if (!inb_sa) {
			plt_err("Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		/* Check if SA is already in use */
		if (inb_sa->ctl.valid) {
			plt_err("Inbound SA with SPI %u already in use",
				ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;

		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Inbound SAs are addressed by SPI, so SPI is the SA index. */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi;
		eth_sec->spi = ipsec->spi;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
	} else {
		struct cn9k_outb_priv_data *outb_priv;
		struct roc_onf_ipsec_outb_sa *outb_sa;
		uintptr_t sa_base = dev->outb.sa_base;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint32_t sa_idx;

		/* Private data must fit in the SW-reserved area of the SA. */
		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
		if (rc) {
			plt_err("Failed to init outbound sa, rc=%d", rc);
			/* Return the allocated SA index before bailing out. */
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;
		/* Start sequence number with 1 */
		outb_priv->seq = 1;

		/* Keep the 4-byte salt from the SA nonce; copy_salt is only
		 * set for AES-GCM encryption.
		 */
		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
			outb_priv->copy_salt = 1;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		/* Pack the length/rounding info the Tx fast path needs to
		 * size the ESP-encapsulated packet.
		 */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);
	/*
	 * Update fast path info in priv area.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	rte_mempool_put(mempool, eth_sec);
	return rc;
}
247
/**
 * rte_security session_destroy op for CN9K inline IPsec.
 *
 * Looks up the tracking object for @sess, invalidates the HW SA (clears
 * ctl.valid), unlinks it from the per-device list, releases the outbound
 * SA index if applicable, and returns the tracking object to its mempool.
 *
 * @param device	rte_eth_dev pointer passed by the security layer.
 * @param sess		Security session previously created on this port.
 * @return 0 on success, -ENOENT if @sess is unknown to this device.
 */
static int
cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_onf_ipsec_outb_sa *outb_sa;
	struct roc_onf_ipsec_inb_sa *inb_sa;
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	if (eth_sec->inb) {
		inb_sa = eth_sec->sa;
		/* Disable SA */
		inb_sa->ctl.valid = 0;

		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		outb_sa = eth_sec->sa;
		/* Disable SA */
		outb_sa->ctl.valid = 0;

		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}

	/* Sync SA content */
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx);

	/* Put eth_sec object back to pool */
	/* The object's pool is recovered from the object itself, so no
	 * mempool reference needs to be stored per session.
	 */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);
	return 0;
}
293
294 static const struct rte_security_capability *
295 cn9k_eth_sec_capabilities_get(void *device __rte_unused)
296 {
297         return cn9k_eth_sec_capabilities;
298 }
299
300 void
301 cn9k_eth_sec_ops_override(void)
302 {
303         static int init_once;
304
305         if (init_once)
306                 return;
307         init_once = 1;
308
309         /* Update platform specific ops */
310         cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
311         cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
312         cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
313 }