75f05ee558aa4d9e75e1b6674a6d63d7e2de1e49
[dpdk.git] / drivers / net / iavf / iavf_ipsec_crypto.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4
5 #include <rte_cryptodev.h>
6 #include <rte_ethdev.h>
7 #include <rte_security_driver.h>
8 #include <rte_security.h>
9
10 #include "iavf.h"
11 #include "iavf_rxtx.h"
12 #include "iavf_log.h"
13 #include "iavf_generic_flow.h"
14
15 #include "iavf_ipsec_crypto.h"
16 #include "iavf_ipsec_crypto_capabilities.h"
17
/**
 * iAVF IPsec Crypto Security Context
 *
 * Per-adapter state for the rte_security instance: back-pointer to the
 * owning adapter, the offset of the IPsec metadata in the packet, and the
 * crypto capability table used to validate session configurations.
 */
struct iavf_security_ctx {
        struct iavf_adapter *adapter;       /* owning device */
        int pkt_md_offset;                  /* offset of IPsec crypto pkt metadata */
        struct rte_cryptodev_capabilities *crypto_capabilities; /* device capability table */
};
26
/**
 * iAVF IPsec Crypto Security Session Parameters
 *
 * Per-SA state cached by the PMD for datapath use after the SA has been
 * programmed into hardware via virtchnl.
 */
struct iavf_security_session {
        struct iavf_adapter *adapter; /* owning device; used to validate sessions */

        enum rte_security_ipsec_sa_mode mode;        /* transport or tunnel */
        enum rte_security_ipsec_tunnel_type type;    /* outer IP type (tunnel mode only) */
        enum rte_security_ipsec_sa_direction direction; /* ingress or egress */

        struct {
                uint32_t spi; /* Security Parameter Index */
                uint32_t hw_idx; /* SA Index in hardware table */
        } sa;

        /* Extended Sequence Number state */
        struct {
                uint8_t enabled :1;
                union {
                        uint64_t value;
                        struct {
                                /* NOTE(review): 'hi' is declared before 'low',
                                 * so on a little-endian host 'hi' overlays the
                                 * LOW 32 bits of 'value' - confirm intended */
                                uint32_t hi;
                                uint32_t low;
                        };
                };
        } esn;

        /* UDP encapsulation (NAT-T) enable flag */
        struct {
                uint8_t enabled :1;
        } udp_encap;

        size_t iv_sz;    /* per-packet IV size in bytes (excludes salt) */
        size_t icv_sz;   /* integrity check value (digest) size in bytes */
        size_t block_sz; /* cipher block size in bytes */

        /* pre-computed Tx metadata applied to every packet on this SA */
        struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
};
63 /**
64  *  IV Length field in IPsec Tx Desc uses the following encoding:
65  *
66  *  0B - 0
67  *  4B - 1
68  *  8B - 2
69  *  16B - 3
70  *
71  * but we also need the IV Length for TSO to correctly calculate the total
72  * header length so placing it in the upper 6-bits here for easier retrieval.
73  */
74 static inline uint8_t
75 calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
76 {
77         uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
78
79         switch (iv_sz) {
80         case 4:
81                 iv_length = IAVF_IPSEC_IV_LEN_DW;
82                 break;
83         case 8:
84                 iv_length = IAVF_IPSEC_IV_LEN_DDW;
85                 break;
86         case 16:
87                 iv_length = IAVF_IPSEC_IV_LEN_QDW;
88                 break;
89         }
90
91         return (iv_sz << 2) | iv_length;
92 }
93
/**
 * rte_security op: report the size of the PMD's private session data so
 * the security library can size its session mempool objects.
 */
static unsigned int
iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
{
        return sizeof(struct iavf_security_session);
}
99
100 static const struct rte_cryptodev_symmetric_capability *
101 get_capability(struct iavf_security_ctx *iavf_sctx,
102         uint32_t algo, uint32_t type)
103 {
104         const struct rte_cryptodev_capabilities *capability;
105         int i = 0;
106
107         capability = &iavf_sctx->crypto_capabilities[i];
108
109         while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
110                 if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
111                         (uint32_t)capability->sym.xform_type == type &&
112                         (uint32_t)capability->sym.cipher.algo == algo)
113                         return &capability->sym;
114                 /** try next capability */
115                 capability = &iavf_crypto_capabilities[i++];
116         }
117
118         return NULL;
119 }
120
/** Look up the device capability for an authentication algorithm. */
static const struct rte_cryptodev_symmetric_capability *
get_auth_capability(struct iavf_security_ctx *iavf_sctx,
        enum rte_crypto_auth_algorithm algo)
{
        return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
}
127
/** Look up the device capability for a cipher algorithm. */
static const struct rte_cryptodev_symmetric_capability *
get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
        enum rte_crypto_cipher_algorithm algo)
{
        return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
}
/** Look up the device capability for an AEAD algorithm. */
static const struct rte_cryptodev_symmetric_capability *
get_aead_capability(struct iavf_security_ctx *iavf_sctx,
        enum rte_crypto_aead_algorithm algo)
{
        return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
}
140
141 static uint16_t
142 get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
143         enum rte_crypto_cipher_algorithm algo)
144 {
145         const struct rte_cryptodev_symmetric_capability *capability;
146
147         capability = get_cipher_capability(iavf_sctx, algo);
148         if (capability == NULL)
149                 return 0;
150
151         return capability->cipher.block_size;
152 }
153
154 static uint16_t
155 get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
156         enum rte_crypto_aead_algorithm algo)
157 {
158         const struct rte_cryptodev_symmetric_capability *capability;
159
160         capability = get_aead_capability(iavf_sctx, algo);
161         if (capability == NULL)
162                 return 0;
163
164         return capability->cipher.block_size;
165 }
166
167 static uint16_t
168 get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
169         enum rte_crypto_auth_algorithm algo)
170 {
171         const struct rte_cryptodev_symmetric_capability *capability;
172
173         capability = get_auth_capability(iavf_sctx, algo);
174         if (capability == NULL)
175                 return 0;
176
177         return capability->auth.block_size;
178 }
179
/**
 * Encode a cipher block size for the Tx context descriptor IPsec params:
 * 8 bytes -> 0x2, 16 bytes -> 0x3, anything else -> 0x0.
 */
static uint8_t
calc_context_desc_cipherblock_sz(size_t len)
{
	if (len == 8)
		return 0x2;
	if (len == 16)
		return 0x3;
	return 0x0;
}
192
193 static int
194 valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
195 {
196         if (len < min || len > max)
197                 return false;
198
199         if (increment == 0)
200                 return true;
201
202         if ((len - min) % increment)
203                 return false;
204
205         /* make sure it fits in the key array */
206         if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
207                 return false;
208
209         return true;
210 }
211
212 static int
213 valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
214         struct rte_crypto_auth_xform *auth)
215 {
216         const struct rte_cryptodev_symmetric_capability *capability;
217
218         capability = get_auth_capability(iavf_sctx, auth->algo);
219         if (capability == NULL)
220                 return false;
221
222         /* verify key size */
223         if (!valid_length(auth->key.length,
224                 capability->auth.key_size.min,
225                 capability->auth.key_size.max,
226                 capability->aead.key_size.increment))
227                 return false;
228
229         return true;
230 }
231
232 static int
233 valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
234         struct rte_crypto_cipher_xform *cipher)
235 {
236         const struct rte_cryptodev_symmetric_capability *capability;
237
238         capability = get_cipher_capability(iavf_sctx, cipher->algo);
239         if (capability == NULL)
240                 return false;
241
242         /* verify key size */
243         if (!valid_length(cipher->key.length,
244                 capability->cipher.key_size.min,
245                 capability->cipher.key_size.max,
246                 capability->cipher.key_size.increment))
247                 return false;
248
249         return true;
250 }
251
252 static int
253 valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
254         struct rte_crypto_aead_xform *aead)
255 {
256         const struct rte_cryptodev_symmetric_capability *capability;
257
258         capability = get_aead_capability(iavf_sctx, aead->algo);
259         if (capability == NULL)
260                 return false;
261
262         /* verify key size */
263         if (!valid_length(aead->key.length,
264                 capability->aead.key_size.min,
265                 capability->aead.key_size.max,
266                 capability->aead.key_size.increment))
267                 return false;
268
269         return true;
270 }
271
272 static int
273 iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
274         struct rte_security_session_conf *conf)
275 {
276         /** validate security action/protocol selection */
277         if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
278                 conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
279                 PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
280                 return -EINVAL;
281         }
282
283         /** validate IPsec protocol selection */
284         if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
285                 PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
286                 return -EINVAL;
287         }
288
289         /** validate selected options */
290         if (conf->ipsec.options.copy_dscp ||
291                 conf->ipsec.options.copy_flabel ||
292                 conf->ipsec.options.copy_df ||
293                 conf->ipsec.options.dec_ttl ||
294                 conf->ipsec.options.ecn ||
295                 conf->ipsec.options.stats) {
296                 PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
297                 return -EINVAL;
298         }
299
300         /**
301          * Validate crypto xforms parameters.
302          *
303          * AEAD transforms can be used for either inbound/outbound IPsec SAs,
304          * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
305          * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
306          */
307         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
308                 if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
309                         PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
310                         return -EINVAL;
311                 }
312         } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
313                 conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
314                 conf->crypto_xform->next &&
315                 conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
316                 if (!valid_cipher_xform(iavf_sctx,
317                                 &conf->crypto_xform->cipher)) {
318                         PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
319                         return -EINVAL;
320                 }
321
322                 if (!valid_auth_xform(iavf_sctx,
323                                 &conf->crypto_xform->next->auth)) {
324                         PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
325                         return -EINVAL;
326                 }
327         } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
328                 conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
329                 conf->crypto_xform->next &&
330                 conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
331                 if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
332                         PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
333                         return -EINVAL;
334                 }
335
336                 if (!valid_cipher_xform(iavf_sctx,
337                                 &conf->crypto_xform->next->cipher)) {
338                         PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
339                         return -EINVAL;
340                 }
341         }
342
343         return 0;
344 }
345
346 static void
347 sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
348         struct rte_crypto_aead_xform *aead, uint32_t salt)
349 {
350         cfg->crypto_type = VIRTCHNL_AEAD;
351
352         switch (aead->algo) {
353         case RTE_CRYPTO_AEAD_AES_CCM:
354                 cfg->algo_type = VIRTCHNL_AES_CCM; break;
355         case RTE_CRYPTO_AEAD_AES_GCM:
356                 cfg->algo_type = VIRTCHNL_AES_GCM; break;
357         case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
358                 cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
359         default:
360                 PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
361                 break;
362         }
363
364         cfg->key_len = aead->key.length;
365         cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
366         cfg->digest_len = aead->digest_length;
367         cfg->salt = salt;
368
369         memcpy(cfg->key_data, aead->key.data, cfg->key_len);
370 }
371
372 static void
373 sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
374         struct rte_crypto_cipher_xform *cipher, uint32_t salt)
375 {
376         cfg->crypto_type = VIRTCHNL_CIPHER;
377
378         switch (cipher->algo) {
379         case RTE_CRYPTO_CIPHER_AES_CBC:
380                 cfg->algo_type = VIRTCHNL_AES_CBC; break;
381         case RTE_CRYPTO_CIPHER_3DES_CBC:
382                 cfg->algo_type = VIRTCHNL_3DES_CBC; break;
383         case RTE_CRYPTO_CIPHER_NULL:
384                 cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
385         case RTE_CRYPTO_CIPHER_AES_CTR:
386                 cfg->algo_type = VIRTCHNL_AES_CTR;
387                 cfg->salt = salt;
388                 break;
389         default:
390                 PMD_DRV_LOG(ERR, "Invalid cipher parameters");
391                 break;
392         }
393
394         cfg->key_len = cipher->key.length;
395         cfg->iv_len = cipher->iv.length;
396         cfg->salt = salt;
397
398         memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
399 }
400
401 static void
402 sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
403         struct rte_crypto_auth_xform *auth, uint32_t salt)
404 {
405         cfg->crypto_type = VIRTCHNL_AUTH;
406
407         switch (auth->algo) {
408         case RTE_CRYPTO_AUTH_NULL:
409                 cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
410         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
411                 cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
412         case RTE_CRYPTO_AUTH_AES_CMAC:
413                 cfg->algo_type = VIRTCHNL_AES_CMAC; break;
414         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
415                 cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
416         case RTE_CRYPTO_AUTH_MD5_HMAC:
417                 cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
418         case RTE_CRYPTO_AUTH_SHA1_HMAC:
419                 cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
420         case RTE_CRYPTO_AUTH_SHA224_HMAC:
421                 cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
422         case RTE_CRYPTO_AUTH_SHA256_HMAC:
423                 cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
424         case RTE_CRYPTO_AUTH_SHA384_HMAC:
425                 cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
426         case RTE_CRYPTO_AUTH_SHA512_HMAC:
427                 cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
428         case RTE_CRYPTO_AUTH_AES_GMAC:
429                 cfg->algo_type = VIRTCHNL_AES_GMAC;
430                 cfg->salt = salt;
431                 break;
432         default:
433                 PMD_DRV_LOG(ERR, "Invalid auth parameters");
434                 break;
435         }
436
437         cfg->key_len = auth->key.length;
438         /* special case for RTE_CRYPTO_AUTH_AES_GMAC */
439         if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
440                 cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
441         else
442                 cfg->iv_len = auth->iv.length;
443         cfg->digest_len = auth->digest_length;
444
445         memcpy(cfg->key_data, auth->key.data, cfg->key_len);
446 }
447
/**
 * Send SA add virtual channel request to Inline IPsec driver.
 *
 * Inline IPsec driver expects SPI and destination IP address to be in host
 * order, but DPDK APIs are network order, therefore we need to do a htonl
 * conversion of these parameters.
 *
 * On success returns the hardware SA handle reported by the PF; on failure
 * returns a negative errno.
 *
 * NOTE(review): the return type is uint32_t, yet negative errno values are
 * returned through it. The caller stores the result in a signed int and
 * tests "< 0", so this works in practice, but a signed return type would be
 * clearer - confirm before changing.
 */
static uint32_t
iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
        struct rte_security_session_conf *conf)
{
        struct inline_ipsec_msg *request = NULL, *response = NULL;
        struct virtchnl_ipsec_sa_cfg *sa_cfg;
        size_t request_len, response_len;

        int rc;

        request_len = sizeof(struct inline_ipsec_msg) +
                        sizeof(struct virtchnl_ipsec_sa_cfg);

        request = rte_malloc("iavf-sad-add-request", request_len, 0);
        if (request == NULL) {
                rc = -ENOMEM;
                goto update_cleanup;
        }

        response_len = sizeof(struct inline_ipsec_msg) +
                        sizeof(struct virtchnl_ipsec_sa_cfg_resp);
        response = rte_malloc("iavf-sad-add-response", response_len, 0);
        if (response == NULL) {
                rc = -ENOMEM;
                goto update_cleanup;
        }

        /* set msg header params */
        request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
        request->req_id = (uint16_t)0xDEADBEEF;

        /* set SA configuration params; payload immediately follows header */
        sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);

        sa_cfg->spi = conf->ipsec.spi;
        sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
        sa_cfg->virtchnl_direction =
                conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
                        VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;

        /* extended sequence number state, if enabled */
        if (conf->ipsec.options.esn) {
                sa_cfg->esn_enabled = 1;
                sa_cfg->esn_hi = conf->ipsec.esn.hi;
                sa_cfg->esn_low = conf->ipsec.esn.low;
        }

        if (conf->ipsec.options.udp_encap)
                sa_cfg->udp_encap_enabled = 1;

        /* Set outer IP params */
        if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
                sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;

                *((uint32_t *)sa_cfg->dst_addr) =
                        htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
        } else {
                uint32_t *v6_dst_addr =
                        (uint32_t *)conf->ipsec.tunnel.ipv6.dst_addr.s6_addr;

                sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;

                /* convert each 32-bit word of the v6 address to host order */
                ((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
                ((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
                ((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
                ((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
        }

        /* set crypto params */
        if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
                        &conf->crypto_xform->aead, conf->ipsec.salt);

        } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                /* egress CIPHER/AUTH chain: cipher first, chained auth next */
                sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
                        &conf->crypto_xform->cipher, conf->ipsec.salt);
                sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
                        &conf->crypto_xform->next->auth, conf->ipsec.salt);

        } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
                        &conf->crypto_xform->auth, conf->ipsec.salt);
                /* AES-GMAC is authentication-only: no chained cipher xform */
                if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
                        sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
                        &conf->crypto_xform->next->cipher, conf->ipsec.salt);
        }

        /* send virtual channel request to add SA to hardware database */
        rc = iavf_ipsec_crypto_request(adapter,
                        (uint8_t *)request, request_len,
                        (uint8_t *)response, response_len);
        if (rc)
                goto update_cleanup;

        /* verify response id */
        if (response->ipsec_opcode != request->ipsec_opcode ||
                response->req_id != request->req_id)
                rc = -EFAULT;
        else
                rc = response->ipsec_data.sa_cfg_resp->sa_handle;
update_cleanup:
        rte_free(response);
        rte_free(request);

        return rc;
}
560
561 static void
562 set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
563         struct iavf_security_session *sess)
564 {
565         template->sa_idx = sess->sa.hw_idx;
566
567         if (sess->udp_encap.enabled)
568                 template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
569
570         if (sess->esn.enabled)
571                 template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
572
573         template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
574         template->ctx_desc_ipsec_params =
575                         calc_context_desc_cipherblock_sz(sess->block_sz) |
576                         ((uint8_t)(sess->icv_sz >> 2) << 3);
577 }
578
/**
 * Cache the datapath-relevant SA parameters in the driver's private session
 * and build the per-packet metadata template.
 *
 * @param iavf_sctx security context (supplies adapter and capabilities)
 * @param sess      session object to populate
 * @param conf      user-supplied session configuration
 * @param sa_idx    hardware SA table index returned by the SA-add request
 */
static void
set_session_parameter(struct iavf_security_ctx *iavf_sctx,
        struct iavf_security_session *sess,
        struct rte_security_session_conf *conf, uint32_t sa_idx)
{
        sess->adapter = iavf_sctx->adapter;

        sess->mode = conf->ipsec.mode;
        sess->direction = conf->ipsec.direction;

        /* outer IP type is only meaningful in tunnel mode */
        if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
                sess->type = conf->ipsec.tunnel.type;

        sess->sa.spi = conf->ipsec.spi;
        sess->sa.hw_idx = sa_idx;

        if (conf->ipsec.options.esn) {
                sess->esn.enabled = 1;
                sess->esn.value = conf->ipsec.esn.value;
        }

        if (conf->ipsec.options.udp_encap)
                sess->udp_encap.enabled = 1;

        /* derive block/IV/ICV sizes from the crypto transform chain */
        if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                sess->block_sz = get_aead_blocksize(iavf_sctx,
                        conf->crypto_xform->aead.algo);
                sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
                sess->icv_sz = conf->crypto_xform->aead.digest_length;
        } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                /* egress CIPHER/AUTH chain: digest comes from chained auth */
                sess->block_sz = get_cipher_blocksize(iavf_sctx,
                        conf->crypto_xform->cipher.algo);
                sess->iv_sz = conf->crypto_xform->cipher.iv.length;
                sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
        } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
                        /* AES-GMAC: authentication-only, no chained cipher */
                        sess->block_sz = get_auth_blocksize(iavf_sctx,
                                conf->crypto_xform->auth.algo);
                        sess->iv_sz = sizeof(uint64_t); /* iv len inc. salt */
                        sess->icv_sz = conf->crypto_xform->auth.digest_length;
                } else {
                        /* ingress AUTH/CIPHER chain: sizes from chained cipher */
                        sess->block_sz = get_cipher_blocksize(iavf_sctx,
                                conf->crypto_xform->next->cipher.algo);
                        sess->iv_sz =
                                conf->crypto_xform->next->cipher.iv.length;
                        sess->icv_sz = conf->crypto_xform->auth.digest_length;
                }
        }

        set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
}
630
/**
 * Create IPsec Security Association for inline IPsec Crypto offload.
 *
 * 1. validate session configuration parameters
 * 2. allocate session memory from mempool
 * 3. add SA to hardware database
 * 4. set session parameters
 * 5. create packet metadata template for datapath
 *
 * @return 0 on success; -EINVAL for invalid configuration, -ENOMEM when the
 *         session mempool is exhausted, -EFAULT when the hardware SA add
 *         request fails.
 */
static int
iavf_ipsec_crypto_session_create(void *device,
                                 struct rte_security_session_conf *conf,
                                 struct rte_security_session *session,
                                 struct rte_mempool *mempool)
{
        struct rte_eth_dev *ethdev = device;
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
        struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
        struct iavf_security_session *iavf_session = NULL;
        int sa_idx;
        int ret = 0;

        /* validate that all SA parameters are valid for device */
        ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
        if (ret)
                return ret;

        /* allocate session context */
        if (rte_mempool_get(mempool, (void **)&iavf_session)) {
                PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
                return -ENOMEM;
        }

        /* add SA to hardware database */
        sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
        if (sa_idx < 0) {
                PMD_DRV_LOG(ERR,
                        "Failed to add SA (spi: %d, mode: %s, direction: %s)",
                        conf->ipsec.spi,
                        conf->ipsec.mode ==
                                RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
                                "transport" : "tunnel",
                        conf->ipsec.direction ==
                                RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
                                "inbound" : "outbound");

                /* return session object to the pool on failure */
                rte_mempool_put(mempool, iavf_session);
                return -EFAULT;
        }

        /* save data plane required session parameters */
        set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);

        /* save to security session private data */
        set_sec_session_private_data(session, iavf_session);

        return 0;
}
690
691 /**
692  * Check if valid ipsec crypto action.
693  * SPI must be non-zero and SPI in session must match SPI value
694  * passed into function.
695  *
696  * returns: 0 if invalid session or SPI value equal zero
697  * returns: 1 if valid
698  */
699 uint32_t
700 iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
701         const struct rte_security_session *session, uint32_t spi)
702 {
703         struct iavf_adapter *adapter =
704                 IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
705         struct iavf_security_session *sess = session->sess_private_data;
706
707         /* verify we have a valid session and that it belong to this adapter */
708         if (unlikely(sess == NULL || sess->adapter != adapter))
709                 return false;
710
711         /* SPI value must be non-zero */
712         if (spi == 0)
713                 return false;
714         /* Session SPI must patch flow SPI*/
715         else if (sess->sa.spi == spi) {
716                 return true;
717                 /**
718                  * TODO: We should add a way of tracking valid hw SA indices to
719                  * make validation less brittle
720                  */
721         }
722
723                 return true;
724 }
725
/**
 * Send virtual channel security policy add request to IES driver.
 *
 * IES driver expects SPI and destination IP address to be in host
 * order, but DPDK APIs are network order, therefore we need to do a htonl
 * conversion of these parameters.
 *
 * @param adapter     device the policy is programmed on
 * @param esp_spi     ESP SPI in network byte order
 * @param is_v4       non-zero selects the IPv4 SPD table, else IPv6
 * @param v4_dst_addr IPv4 destination (network order); used when is_v4
 * @param v6_dst_addr IPv6 destination (16 bytes); used when !is_v4
 * @param drop        non-zero installs a drop rule
 * @param is_udp      policy matches UDP-encapsulated (NAT-T) traffic
 * @param udp_port    UDP port (host order; converted to network order here)
 *
 * @return non-negative hardware rule id on success, negative errno on
 *         failure.
 */
int
iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
        uint32_t esp_spi,
        uint8_t is_v4,
        rte_be32_t v4_dst_addr,
        uint8_t *v6_dst_addr,
        uint8_t drop,
        bool is_udp,
        uint16_t udp_port)
{
        struct inline_ipsec_msg *request = NULL, *response = NULL;
        size_t request_len, response_len;
        int rc = 0;

        request_len = sizeof(struct inline_ipsec_msg) +
                        sizeof(struct virtchnl_ipsec_sp_cfg);
        request = rte_malloc("iavf-inbound-security-policy-add-request",
                                request_len, 0);
        if (request == NULL) {
                rc = -ENOMEM;
                goto update_cleanup;
        }

        /* set msg header params */
        request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
        request->req_id = (uint16_t)0xDEADBEEF;

        /* ESP SPI */
        request->ipsec_data.sp_cfg->spi = htonl(esp_spi);

        /* Destination IP  */
        if (is_v4) {
                request->ipsec_data.sp_cfg->table_id =
                                VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
                request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
        } else {
                request->ipsec_data.sp_cfg->table_id =
                                VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
                /* convert each 32-bit word of the v6 address to host order */
                request->ipsec_data.sp_cfg->dip[0] =
                                htonl(((uint32_t *)v6_dst_addr)[0]);
                request->ipsec_data.sp_cfg->dip[1] =
                                htonl(((uint32_t *)v6_dst_addr)[1]);
                request->ipsec_data.sp_cfg->dip[2] =
                                htonl(((uint32_t *)v6_dst_addr)[2]);
                request->ipsec_data.sp_cfg->dip[3] =
                                htonl(((uint32_t *)v6_dst_addr)[3]);
        }

        request->ipsec_data.sp_cfg->drop = drop;

        /** Traffic Class/Congestion Domain currently not support */
        request->ipsec_data.sp_cfg->set_tc = 0;
        request->ipsec_data.sp_cfg->cgd = 0;
        request->ipsec_data.sp_cfg->is_udp = is_udp;
        request->ipsec_data.sp_cfg->udp_port = htons(udp_port);

        response_len = sizeof(struct inline_ipsec_msg) +
                        sizeof(struct virtchnl_ipsec_sp_cfg_resp);
        response = rte_malloc("iavf-inbound-security-policy-add-response",
                                response_len, 0);
        if (response == NULL) {
                rc = -ENOMEM;
                goto update_cleanup;
        }

        /* send virtual channel request to add SA to hardware database */
        rc = iavf_ipsec_crypto_request(adapter,
                        (uint8_t *)request, request_len,
                        (uint8_t *)response, response_len);
        if (rc)
                goto update_cleanup;

        /* verify response */
        if (response->ipsec_opcode != request->ipsec_opcode ||
                response->req_id != request->req_id)
                rc = -EFAULT;
        else
                rc = response->ipsec_data.sp_cfg_resp->rule_id;

update_cleanup:
        rte_free(request);
        rte_free(response);

        return rc;
}
818
/**
 * Push the session's current ESN high word to the hardware SA via a
 * virtchnl SA-update request.
 *
 * @return the PF's response code on success, negative errno on failure.
 * NOTE(review): return type is uint32_t but negative errno values are
 * returned through it - same pattern as the SA-add helper; confirm callers
 * treat the result as signed.
 */
static uint32_t
iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
        struct iavf_security_session *sess)
{
        struct inline_ipsec_msg *request = NULL, *response = NULL;
        size_t request_len, response_len;
        int rc = 0;

        request_len = sizeof(struct inline_ipsec_msg) +
                        sizeof(struct virtchnl_ipsec_sa_update);
        request = rte_malloc("iavf-sa-update-request", request_len, 0);
        if (request == NULL) {
                rc = -ENOMEM;
                goto update_cleanup;
        }

        response_len = sizeof(struct inline_ipsec_msg) +
                        sizeof(struct virtchnl_ipsec_resp);
        response = rte_malloc("iavf-sa-update-response", response_len, 0);
        if (response == NULL) {
                rc = -ENOMEM;
                goto update_cleanup;
        }

        /* set msg header params */
        request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
        request->req_id = (uint16_t)0xDEADBEEF;

        /* set request params: target SA and the new ESN high word */
        request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
        request->ipsec_data.sa_update->esn_hi = sess->esn.hi;

        /* send virtual channel request to add SA to hardware database */
        rc = iavf_ipsec_crypto_request(adapter,
                        (uint8_t *)request, request_len,
                        (uint8_t *)response, response_len);
        if (rc)
                goto update_cleanup;

        /* verify response */
        if (response->ipsec_opcode != request->ipsec_opcode ||
                response->req_id != request->req_id)
                rc = -EFAULT;
        else
                rc = response->ipsec_data.ipsec_resp->resp;

update_cleanup:
        rte_free(request);
        rte_free(response);

        return rc;
}
871
872 static int
873 iavf_ipsec_crypto_session_update(void *device,
874                 struct rte_security_session *session,
875                 struct rte_security_session_conf *conf)
876 {
877         struct iavf_adapter *adapter = NULL;
878         struct iavf_security_session *iavf_sess = NULL;
879         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
880         int rc = 0;
881
882         adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
883         iavf_sess = (struct iavf_security_session *)session->sess_private_data;
884
885         /* verify we have a valid session and that it belong to this adapter */
886         if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
887                 return -EINVAL;
888
889         /* update esn hi 32-bits */
890         if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
891                 /**
892                  * Update ESN in hardware for inbound SA. Store in
893                  * iavf_security_session for outbound SA for use
894                  * in *iavf_ipsec_crypto_pkt_metadata_set* function.
895                  */
896                 if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
897                         rc = iavf_ipsec_crypto_sa_update_esn(adapter,
898                                         iavf_sess);
899                 else
900                         iavf_sess->esn.hi = conf->ipsec.esn.hi;
901         }
902
903         return rc;
904 }
905
/**
 * rte_security session-statistics callback. Per-session statistics are not
 * implemented by this driver, so the call always reports -EOPNOTSUPP.
 */
static int
iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
		struct rte_security_session *session __rte_unused,
		struct rte_security_stats *stats __rte_unused)
{
	return -EOPNOTSUPP;
}
913
914 int
915 iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
916         uint8_t is_v4, uint32_t flow_id)
917 {
918         struct inline_ipsec_msg *request = NULL, *response = NULL;
919         size_t request_len, response_len;
920         int rc = 0;
921
922         request_len = sizeof(struct inline_ipsec_msg) +
923                         sizeof(struct virtchnl_ipsec_sp_destroy);
924         request = rte_malloc("iavf-sp-del-request", request_len, 0);
925         if (request == NULL) {
926                 rc = -ENOMEM;
927                 goto update_cleanup;
928         }
929
930         response_len = sizeof(struct inline_ipsec_msg) +
931                         sizeof(struct virtchnl_ipsec_resp);
932         response = rte_malloc("iavf-sp-del-response", response_len, 0);
933         if (response == NULL) {
934                 rc = -ENOMEM;
935                 goto update_cleanup;
936         }
937
938         /* set msg header params */
939         request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
940         request->req_id = (uint16_t)0xDEADBEEF;
941
942         /* set security policy params */
943         request->ipsec_data.sp_destroy->table_id = is_v4 ?
944                         VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
945                         VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
946         request->ipsec_data.sp_destroy->rule_id = flow_id;
947
948         /* send virtual channel request to add SA to hardware database */
949         rc = iavf_ipsec_crypto_request(adapter,
950                         (uint8_t *)request, request_len,
951                         (uint8_t *)response, response_len);
952         if (rc)
953                 goto update_cleanup;
954
955         /* verify response */
956         if (response->ipsec_opcode != request->ipsec_opcode ||
957                 response->req_id != request->req_id)
958                 rc = -EFAULT;
959         else
960                 return response->ipsec_data.ipsec_status->status;
961
962 update_cleanup:
963         rte_free(request);
964         rte_free(response);
965
966         return rc;
967 }
968
969 static uint32_t
970 iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
971         struct iavf_security_session *sess)
972 {
973         struct inline_ipsec_msg *request = NULL, *response = NULL;
974         size_t request_len, response_len;
975
976         int rc = 0;
977
978         request_len = sizeof(struct inline_ipsec_msg) +
979                         sizeof(struct virtchnl_ipsec_sa_destroy);
980
981         request = rte_malloc("iavf-sa-del-request", request_len, 0);
982         if (request == NULL) {
983                 rc = -ENOMEM;
984                 goto update_cleanup;
985         }
986
987         response_len = sizeof(struct inline_ipsec_msg) +
988                         sizeof(struct virtchnl_ipsec_resp);
989
990         response = rte_malloc("iavf-sa-del-response", response_len, 0);
991         if (response == NULL) {
992                 rc = -ENOMEM;
993                 goto update_cleanup;
994         }
995
996         /* set msg header params */
997         request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
998         request->req_id = (uint16_t)0xDEADBEEF;
999
1000         /**
1001          * SA delete supports deletion of 1-8 specified SA's or if the flag
1002          * field is zero, all SA's associated with VF will be deleted.
1003          */
1004         if (sess) {
1005                 request->ipsec_data.sa_destroy->flag = 0x1;
1006                 request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
1007         } else {
1008                 request->ipsec_data.sa_destroy->flag = 0x0;
1009         }
1010
1011         /* send virtual channel request to add SA to hardware database */
1012         rc = iavf_ipsec_crypto_request(adapter,
1013                         (uint8_t *)request, request_len,
1014                         (uint8_t *)response, response_len);
1015         if (rc)
1016                 goto update_cleanup;
1017
1018         /* verify response */
1019         if (response->ipsec_opcode != request->ipsec_opcode ||
1020                 response->req_id != request->req_id)
1021                 rc = -EFAULT;
1022
1023         /**
1024          * Delete status will be the same bitmask as sa_destroy request flag if
1025          * deletes successful
1026          */
1027         if (request->ipsec_data.sa_destroy->flag !=
1028                         response->ipsec_data.ipsec_status->status)
1029                 rc = -EFAULT;
1030
1031 update_cleanup:
1032         rte_free(response);
1033         rte_free(request);
1034
1035         return rc;
1036 }
1037
1038 static int
1039 iavf_ipsec_crypto_session_destroy(void *device,
1040                 struct rte_security_session *session)
1041 {
1042         struct iavf_adapter *adapter = NULL;
1043         struct iavf_security_session *iavf_sess = NULL;
1044         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1045         int ret;
1046
1047         adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1048         iavf_sess = (struct iavf_security_session *)session->sess_private_data;
1049
1050         /* verify we have a valid session and that it belong to this adapter */
1051         if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
1052                 return -EINVAL;
1053
1054         ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
1055         rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
1056         return ret;
1057 }
1058
1059 /**
1060  * Get ESP trailer from packet as well as calculate the total ESP trailer
1061  * length, which include padding, ESP trailer footer and the ICV
1062  */
1063 static inline struct rte_esp_tail *
1064 iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
1065         struct iavf_security_session *s, uint16_t *esp_trailer_length)
1066 {
1067         struct rte_esp_tail *esp_trailer;
1068
1069         uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
1070         uint16_t offset = 0;
1071
1072         /**
1073          * The ICV will not be present in TSO packets as this is appended by
1074          * hardware during segment generation
1075          */
1076         if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
1077                 length -=  s->icv_sz;
1078
1079         *esp_trailer_length = length;
1080
1081         /**
1082          * Calculate offset in packet to ESP trailer header, this should be
1083          * total packet length less the size of the ESP trailer plus the ICV
1084          * length if it is present
1085          */
1086         offset = rte_pktmbuf_pkt_len(m) - length;
1087
1088         if (m->nb_segs > 1) {
1089                 /* find segment which esp trailer is located */
1090                 while (m->data_len < offset) {
1091                         offset -= m->data_len;
1092                         m = m->next;
1093                 }
1094         }
1095
1096         esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
1097
1098         *esp_trailer_length += esp_trailer->pad_len;
1099
1100         return esp_trailer;
1101 }
1102
/**
 * Compute the L4 payload length of an egress IPsec packet: total packet
 * length minus outer L2/L3 headers, optional NAT-T UDP header, ESP header
 * plus IV, inner L3/L4 headers, and the ESP trailer (esp_tlen).
 */
static inline uint16_t
iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
	struct iavf_security_session *s, uint16_t esp_tlen)
{
	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
	uint16_t ol4_len = 0;		/* UDP NATT */
	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;

	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
		ol3_len = m->outer_l3_len;
		/**<
		 * application provided l3len assumed to include length of
		 * ipv4/6 hdr + ext hdrs
		 */

	if (s->udp_encap.enabled) {
		/* NAT-T: m->l3_len presumably includes the encapsulating
		 * UDP header, which is peeled off into ol4_len here —
		 * TODO confirm against the Tx-offload contract.
		 */
		ol4_len = sizeof(struct rte_udp_hdr);
		l3_len = m->l3_len - ol4_len;
		/*
		 * NOTE(review): l4_len is copied from l3_len rather than
		 * m->l4_len in this branch — looks like a copy/paste slip;
		 * confirm the intended NAT-T header accounting before
		 * changing it.
		 */
		l4_len = l3_len;
	} else {
		l3_len = m->l3_len;
		l4_len = m->l4_len;
	}

	/* payload = pkt_len - (all headers preceding the payload + trailer) */
	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
			esp_hlen + l3_len + l4_len + esp_tlen);
}
1133
1134 static int
1135 iavf_ipsec_crypto_pkt_metadata_set(void *device,
1136                          struct rte_security_session *session,
1137                          struct rte_mbuf *m, void *params)
1138 {
1139         struct rte_eth_dev *ethdev = device;
1140         struct iavf_adapter *adapter =
1141                         IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
1142         struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1143         struct iavf_security_session *iavf_sess = session->sess_private_data;
1144         struct iavf_ipsec_crypto_pkt_metadata *md;
1145         struct rte_esp_tail *esp_tail;
1146         uint64_t *sqn = params;
1147         uint16_t esp_trailer_length;
1148
1149         /* Check we have valid session and is associated with this device */
1150         if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
1151                 return -EINVAL;
1152
1153         /* Get dynamic metadata location from mbuf */
1154         md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
1155                 struct iavf_ipsec_crypto_pkt_metadata *);
1156
1157         /* Set immutable metadata values from session template */
1158         memcpy(md, &iavf_sess->pkt_metadata_template,
1159                 sizeof(struct iavf_ipsec_crypto_pkt_metadata));
1160
1161         esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
1162                         &esp_trailer_length);
1163
1164         /* Set per packet mutable metadata values */
1165         md->esp_trailer_len = esp_trailer_length;
1166         md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
1167                                 iavf_sess, esp_trailer_length);
1168         md->next_proto = esp_tail->next_proto;
1169
1170         /* If Extended SN in use set the upper 32-bits in metadata */
1171         if (iavf_sess->esn.enabled && sqn != NULL)
1172                 md->esn = (uint32_t)(*sqn >> 32);
1173
1174         return 0;
1175 }
1176
1177 static int
1178 iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
1179                 struct virtchnl_ipsec_cap *capability)
1180 {
1181         /* Perform pf-vf comms */
1182         struct inline_ipsec_msg *request = NULL, *response = NULL;
1183         size_t request_len, response_len;
1184         int rc;
1185
1186         request_len = sizeof(struct inline_ipsec_msg);
1187
1188         request = rte_malloc("iavf-device-capability-request", request_len, 0);
1189         if (request == NULL) {
1190                 rc = -ENOMEM;
1191                 goto update_cleanup;
1192         }
1193
1194         response_len = sizeof(struct inline_ipsec_msg) +
1195                         sizeof(struct virtchnl_ipsec_cap);
1196         response = rte_malloc("iavf-device-capability-response",
1197                         response_len, 0);
1198         if (response == NULL) {
1199                 rc = -ENOMEM;
1200                 goto update_cleanup;
1201         }
1202
1203         /* set msg header params */
1204         request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
1205         request->req_id = (uint16_t)0xDEADBEEF;
1206
1207         /* send virtual channel request to add SA to hardware database */
1208         rc = iavf_ipsec_crypto_request(adapter,
1209                         (uint8_t *)request, request_len,
1210                         (uint8_t *)response, response_len);
1211         if (rc)
1212                 goto update_cleanup;
1213
1214         /* verify response id */
1215         if (response->ipsec_opcode != request->ipsec_opcode ||
1216                 response->req_id != request->req_id){
1217                 rc = -EFAULT;
1218                 goto update_cleanup;
1219         }
1220         memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
1221
1222 update_cleanup:
1223         rte_free(response);
1224         rte_free(request);
1225
1226         return rc;
1227 }
1228
/**
 * Translation table from virtchnl hash/HMAC algorithm identifiers to the
 * corresponding rte_crypto auth algorithm identifiers; indexed directly by
 * the virtchnl value in update_auth_capabilities().
 */
enum rte_crypto_auth_algorithm auth_maptbl[] = {
	/* Hash Algorithm */
	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
};
1247
1248 static void
1249 update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
1250                 struct virtchnl_algo_cap *acap)
1251 {
1252         struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
1253
1254         scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1255
1256         capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
1257
1258         capability->auth.algo = auth_maptbl[acap->algo_type];
1259         capability->auth.block_size = acap->block_size;
1260
1261         capability->auth.key_size.min = acap->min_key_size;
1262         capability->auth.key_size.max = acap->max_key_size;
1263         capability->auth.key_size.increment = acap->inc_key_size;
1264
1265         capability->auth.digest_size.min = acap->min_digest_size;
1266         capability->auth.digest_size.max = acap->max_digest_size;
1267         capability->auth.digest_size.increment = acap->inc_digest_size;
1268 }
1269
/**
 * Translation table from virtchnl cipher algorithm identifiers to the
 * corresponding rte_crypto cipher algorithm identifiers; indexed directly
 * by the virtchnl value in update_cipher_capabilities().
 */
enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
	/* Cipher Algorithm */
	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
};
1277
1278 static void
1279 update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
1280         struct virtchnl_algo_cap *acap)
1281 {
1282         struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
1283
1284         scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1285
1286         capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1287
1288         capability->cipher.algo = cipher_maptbl[acap->algo_type];
1289
1290         capability->cipher.block_size = acap->block_size;
1291
1292         capability->cipher.key_size.min = acap->min_key_size;
1293         capability->cipher.key_size.max = acap->max_key_size;
1294         capability->cipher.key_size.increment = acap->inc_key_size;
1295
1296         capability->cipher.iv_size.min = acap->min_iv_size;
1297         capability->cipher.iv_size.max = acap->max_iv_size;
1298         capability->cipher.iv_size.increment = acap->inc_iv_size;
1299 }
1300
/**
 * Translation table from virtchnl AEAD algorithm identifiers to the
 * corresponding rte_crypto AEAD algorithm identifiers; indexed directly
 * by the virtchnl value in update_aead_capabilities().
 */
enum rte_crypto_aead_algorithm aead_maptbl[] = {
	/* AEAD Algorithm */
	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
};
1307
1308 static void
1309 update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
1310         struct virtchnl_algo_cap *acap)
1311 {
1312         struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
1313
1314         scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1315
1316         capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
1317
1318         capability->aead.algo = aead_maptbl[acap->algo_type];
1319
1320         capability->aead.block_size = acap->block_size;
1321
1322         capability->aead.key_size.min = acap->min_key_size;
1323         capability->aead.key_size.max = acap->max_key_size;
1324         capability->aead.key_size.increment = acap->inc_key_size;
1325
1326         capability->aead.aad_size.min = acap->min_aad_size;
1327         capability->aead.aad_size.max = acap->max_aad_size;
1328         capability->aead.aad_size.increment = acap->inc_aad_size;
1329
1330         capability->aead.iv_size.min = acap->min_iv_size;
1331         capability->aead.iv_size.max = acap->max_iv_size;
1332         capability->aead.iv_size.increment = acap->inc_iv_size;
1333
1334         capability->aead.digest_size.min = acap->min_digest_size;
1335         capability->aead.digest_size.max = acap->max_digest_size;
1336         capability->aead.digest_size.increment = acap->inc_digest_size;
1337 }
1338
1339 /**
1340  * Dynamically set crypto capabilities based on virtchannel IPsec
1341  * capabilities structure.
1342  */
1343 int
1344 iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
1345                 *iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
1346 {
1347         struct rte_cryptodev_capabilities *capabilities;
1348         int i, j, number_of_capabilities = 0, ci = 0;
1349
1350         /* Count the total number of crypto algorithms supported */
1351         for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
1352                 number_of_capabilities += vch_cap->cap[i].algo_cap_num;
1353
1354         /**
1355          * Allocate cryptodev capabilities structure for
1356          * *number_of_capabilities* items plus one item to null terminate the
1357          * array
1358          */
1359         capabilities = rte_zmalloc("crypto_cap",
1360                 sizeof(struct rte_cryptodev_capabilities) *
1361                 (number_of_capabilities + 1), 0);
1362         if (!capabilities)
1363                 return -ENOMEM;
1364         capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
1365
1366         /**
1367          * Iterate over each virtchnl crypto capability by crypto type and
1368          * algorithm.
1369          */
1370         for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
1371                 for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
1372                         switch (vch_cap->cap[i].crypto_type) {
1373                         case VIRTCHNL_AUTH:
1374                                 update_auth_capabilities(&capabilities[ci],
1375                                         &vch_cap->cap[i].algo_cap_list[j]);
1376                                 break;
1377                         case VIRTCHNL_CIPHER:
1378                                 update_cipher_capabilities(&capabilities[ci],
1379                                         &vch_cap->cap[i].algo_cap_list[j]);
1380                                 break;
1381                         case VIRTCHNL_AEAD:
1382                                 update_aead_capabilities(&capabilities[ci],
1383                                         &vch_cap->cap[i].algo_cap_list[j]);
1384                                 break;
1385                         default:
1386                                 capabilities[ci].op =
1387                                                 RTE_CRYPTO_OP_TYPE_UNDEFINED;
1388                                 break;
1389                         }
1390                 }
1391         }
1392
1393         iavf_sctx->crypto_capabilities = capabilities;
1394         return 0;
1395 }
1396
1397 /**
1398  * Get security capabilities for device
1399  */
1400 static const struct rte_security_capability *
1401 iavf_ipsec_crypto_capabilities_get(void *device)
1402 {
1403         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1404         struct iavf_adapter *adapter =
1405                 IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1406         struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1407         unsigned int i;
1408
1409         static struct rte_security_capability iavf_security_capabilities[] = {
1410                 { /* IPsec Inline Crypto ESP Tunnel Egress */
1411                         .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1412                         .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1413                         .ipsec = {
1414                                 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1415                                 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
1416                                 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
1417                                 .options = { .udp_encap = 1,
1418                                                 .stats = 1, .esn = 1 },
1419                         },
1420                         .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
1421                 },
1422                 { /* IPsec Inline Crypto ESP Tunnel Ingress */
1423                         .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1424                         .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1425                         .ipsec = {
1426                                 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1427                                 .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
1428                                 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1429                                 .options = { .udp_encap = 1,
1430                                                 .stats = 1, .esn = 1 },
1431                         },
1432                         .ol_flags = 0
1433                 },
1434                 { /* IPsec Inline Crypto ESP Transport Egress */
1435                         .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1436                         .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1437                         .ipsec = {
1438                                 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1439                                 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
1440                                 .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
1441                                 .options = { .udp_encap = 1, .stats = 1,
1442                                                 .esn = 1 },
1443                         },
1444                         .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
1445                 },
1446                 { /* IPsec Inline Crypto ESP Transport Ingress */
1447                         .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1448                         .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1449                         .ipsec = {
1450                                 .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1451                                 .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
1452                                 .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1453                                 .options = { .udp_encap = 1, .stats = 1,
1454                                                 .esn = 1 }
1455                         },
1456                         .ol_flags = 0
1457                 },
1458                 {
1459                         .action = RTE_SECURITY_ACTION_TYPE_NONE
1460                 }
1461         };
1462
1463         /**
1464          * Update the security capabilities struct with the runtime discovered
1465          * crypto capabilities, except for last element of the array which is
1466          * the null termination
1467          */
1468         for (i = 0; i < ((sizeof(iavf_security_capabilities) /
1469                         sizeof(iavf_security_capabilities[0])) - 1); i++) {
1470                 iavf_security_capabilities[i].crypto_capabilities =
1471                         iavf_sctx->crypto_capabilities;
1472         }
1473
1474         return iavf_security_capabilities;
1475 }
1476
/**
 * rte_security operations implemented by the iavf inline IPsec driver.
 * Statistics retrieval is a stub (-EOPNOTSUPP) and userdata lookup is
 * not provided.
 */
static struct rte_security_ops iavf_ipsec_crypto_ops = {
	.session_get_size		= iavf_ipsec_crypto_session_size_get,
	.session_create			= iavf_ipsec_crypto_session_create,
	.session_update			= iavf_ipsec_crypto_session_update,
	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
	.session_destroy		= iavf_ipsec_crypto_session_destroy,
	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
	.get_userdata			= NULL,
	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
};
1487
1488 int
1489 iavf_security_ctx_create(struct iavf_adapter *adapter)
1490 {
1491         struct rte_security_ctx *sctx;
1492
1493         sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
1494         if (sctx == NULL)
1495                 return -ENOMEM;
1496
1497         sctx->device = adapter->vf.eth_dev;
1498         sctx->ops = &iavf_ipsec_crypto_ops;
1499         sctx->sess_cnt = 0;
1500
1501         adapter->vf.eth_dev->security_ctx = sctx;
1502
1503         if (adapter->security_ctx == NULL) {
1504                 adapter->security_ctx = rte_malloc("iavf_security_ctx",
1505                                 sizeof(struct iavf_security_ctx), 0);
1506                 if (adapter->security_ctx == NULL)
1507                         return -ENOMEM;
1508         }
1509
1510         return 0;
1511 }
1512
1513 int
1514 iavf_security_init(struct iavf_adapter *adapter)
1515 {
1516         struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1517         struct rte_mbuf_dynfield pkt_md_dynfield = {
1518                 .name = "iavf_ipsec_crypto_pkt_metadata",
1519                 .size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
1520                 .align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
1521         };
1522         struct virtchnl_ipsec_cap capabilities;
1523         int rc;
1524
1525         iavf_sctx->adapter = adapter;
1526
1527         iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
1528         if (iavf_sctx->pkt_md_offset < 0)
1529                 return iavf_sctx->pkt_md_offset;
1530
1531         /* Get device capabilities from Inline IPsec driver over PF-VF comms */
1532         rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
1533         if (rc)
1534                 return rc;
1535
1536         return  iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
1537                         &capabilities);
1538 }
1539
1540 int
1541 iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
1542 {
1543         struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1544
1545         return iavf_sctx->pkt_md_offset;
1546 }
1547
1548 int
1549 iavf_security_ctx_destroy(struct iavf_adapter *adapter)
1550 {
1551         struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
1552         struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1553
1554         if (iavf_sctx == NULL)
1555                 return -ENODEV;
1556
1557         /* free and reset security data structures */
1558         rte_free(iavf_sctx);
1559         rte_free(sctx);
1560
1561         adapter->security_ctx = NULL;
1562         adapter->vf.eth_dev->security_ctx = NULL;
1563
1564         return 0;
1565 }
1566
1567 static int
1568 iavf_ipsec_crypto_status_get(struct iavf_adapter *adapter,
1569                 struct virtchnl_ipsec_status *status)
1570 {
1571         /* Perform pf-vf comms */
1572         struct inline_ipsec_msg *request = NULL, *response = NULL;
1573         size_t request_len, response_len;
1574         int rc;
1575
1576         request_len = sizeof(struct inline_ipsec_msg);
1577
1578         request = rte_malloc("iavf-device-status-request", request_len, 0);
1579         if (request == NULL) {
1580                 rc = -ENOMEM;
1581                 goto update_cleanup;
1582         }
1583
1584         response_len = sizeof(struct inline_ipsec_msg) +
1585                         sizeof(struct virtchnl_ipsec_cap);
1586         response = rte_malloc("iavf-device-status-response",
1587                         response_len, 0);
1588         if (response == NULL) {
1589                 rc = -ENOMEM;
1590                 goto update_cleanup;
1591         }
1592
1593         /* set msg header params */
1594         request->ipsec_opcode = INLINE_IPSEC_OP_GET_STATUS;
1595         request->req_id = (uint16_t)0xDEADBEEF;
1596
1597         /* send virtual channel request to add SA to hardware database */
1598         rc = iavf_ipsec_crypto_request(adapter,
1599                         (uint8_t *)request, request_len,
1600                         (uint8_t *)response, response_len);
1601         if (rc)
1602                 goto update_cleanup;
1603
1604         /* verify response id */
1605         if (response->ipsec_opcode != request->ipsec_opcode ||
1606                 response->req_id != request->req_id){
1607                 rc = -EFAULT;
1608                 goto update_cleanup;
1609         }
1610         memcpy(status, response->ipsec_data.ipsec_status, sizeof(*status));
1611
1612 update_cleanup:
1613         rte_free(response);
1614         rte_free(request);
1615
1616         return rc;
1617 }
1618
1619
1620 int
1621 iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
1622 {
1623         struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
1624         int crypto_supported = false;
1625
1626         /** Capability check for IPsec Crypto */
1627         if (resources && (resources->vf_cap_flags &
1628                 VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)) {
1629                 struct virtchnl_ipsec_status status;
1630                 int rc = iavf_ipsec_crypto_status_get(adapter, &status);
1631                 if (rc == 0 && status.status == INLINE_IPSEC_STATUS_AVAILABLE)
1632                         crypto_supported = true;
1633         }
1634
1635         /* Clear the VF flag to return faster next call */
1636         if (resources && !crypto_supported)
1637                 resources->vf_cap_flags &=
1638                                 ~(VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO);
1639
1640         return crypto_supported;
1641 }
1642
/* Input-set masks the flow parser requires for each supported pattern */
#define IAVF_IPSEC_INSET_ESP (\
	IAVF_INSET_ESP_SPI)

#define IAVF_IPSEC_INSET_AH (\
	IAVF_INSET_AH_SPI)

/* NAT-T (UDP-encapsulated ESP) additionally matches the outer IP addresses */
#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
	IAVF_INSET_ESP_SPI)

#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
	IAVF_INSET_ESP_SPI)

/* Security protocol identified by a flow pattern (values start at 1 so
 * that 0 can never be a valid packed pattern meta).
 */
enum iavf_ipsec_flow_pt_type {
	IAVF_PATTERN_ESP = 1,
	IAVF_PATTERN_AH,
	IAVF_PATTERN_UDP_ESP,
};
/* Outer IP version of a flow pattern */
enum iavf_ipsec_flow_pt_ip_ver {
	IAVF_PATTERN_IPV4 = 1,
	IAVF_PATTERN_IPV6,
};

/* Pack protocol type (low nibble) and IP version (high nibble) into the
 * pattern table's opaque meta pointer; the two accessor macros below
 * decode it again in the parser.
 */
#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
1670
/* Supported IPsec flow patterns: {pattern, required input set, packed
 * protocol/IP-version meta (see IAVF_PATTERN())}.
 */
static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
};
1685
/* Parsed representation of one inbound IPsec flow rule */
struct iavf_ipsec_flow_item {
	uint64_t id;		/* policy id returned by the PF on add */
	uint8_t is_ipv4;	/* outer IP version: non-zero for IPv4 */
	uint32_t spi;		/* SPI from the ESP/AH item (network order) */
	struct rte_ether_hdr eth_hdr;	/* outer MAC addresses, if given */
	union {
		struct rte_ipv4_hdr ipv4_hdr;	/* valid when is_ipv4 */
		struct rte_ipv6_hdr ipv6_hdr;	/* valid when !is_ipv4 */
	};
	struct rte_udp_hdr udp_hdr;	/* NAT-T ports, valid when is_udp */
	uint8_t is_udp;		/* non-zero for UDP-encapsulated ESP */
};
1698
1699 static void
1700 parse_eth_item(const struct rte_flow_item_eth *item,
1701                 struct rte_ether_hdr *eth)
1702 {
1703         memcpy(eth->src_addr.addr_bytes,
1704                         item->src.addr_bytes, sizeof(eth->src_addr));
1705         memcpy(eth->dst_addr.addr_bytes,
1706                         item->dst.addr_bytes, sizeof(eth->dst_addr));
1707 }
1708
1709 static void
1710 parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
1711                 struct rte_ipv4_hdr *ipv4)
1712 {
1713         ipv4->src_addr = item->hdr.src_addr;
1714         ipv4->dst_addr = item->hdr.dst_addr;
1715 }
1716
1717 static void
1718 parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
1719                 struct rte_ipv6_hdr *ipv6)
1720 {
1721         memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
1722         memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
1723 }
1724
1725 static void
1726 parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
1727 {
1728         udp->dst_port = item->hdr.dst_port;
1729         udp->src_port = item->hdr.src_port;
1730 }
1731
1732 static int
1733 has_security_action(const struct rte_flow_action actions[],
1734         const void **session)
1735 {
1736         /* only {SECURITY; END} supported */
1737         if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
1738                 actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
1739                 *session = actions[0].conf;
1740                 return true;
1741         }
1742         return false;
1743 }
1744
1745 static struct iavf_ipsec_flow_item *
1746 iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
1747                 const struct rte_flow_item pattern[],
1748                 const struct rte_flow_action actions[],
1749                 uint32_t type)
1750 {
1751         const void *session;
1752         struct iavf_ipsec_flow_item
1753                 *ipsec_flow = rte_malloc("security-flow-rule",
1754                 sizeof(struct iavf_ipsec_flow_item), 0);
1755         enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
1756         enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
1757
1758         if (ipsec_flow == NULL)
1759                 return NULL;
1760
1761         ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
1762
1763         if (pattern[0].spec)
1764                 parse_eth_item((const struct rte_flow_item_eth *)
1765                                 pattern[0].spec, &ipsec_flow->eth_hdr);
1766
1767         switch (p_type) {
1768         case IAVF_PATTERN_ESP:
1769                 if (ipsec_flow->is_ipv4) {
1770                         parse_ipv4_item((const struct rte_flow_item_ipv4 *)
1771                                         pattern[1].spec,
1772                                         &ipsec_flow->ipv4_hdr);
1773                 } else {
1774                         parse_ipv6_item((const struct rte_flow_item_ipv6 *)
1775                                         pattern[1].spec,
1776                                         &ipsec_flow->ipv6_hdr);
1777                 }
1778                 ipsec_flow->spi =
1779                         ((const struct rte_flow_item_esp *)
1780                                         pattern[2].spec)->hdr.spi;
1781                 break;
1782         case IAVF_PATTERN_AH:
1783                 if (ipsec_flow->is_ipv4) {
1784                         parse_ipv4_item((const struct rte_flow_item_ipv4 *)
1785                                         pattern[1].spec,
1786                                         &ipsec_flow->ipv4_hdr);
1787                 } else {
1788                         parse_ipv6_item((const struct rte_flow_item_ipv6 *)
1789                                         pattern[1].spec,
1790                                         &ipsec_flow->ipv6_hdr);
1791                 }
1792                 ipsec_flow->spi =
1793                         ((const struct rte_flow_item_ah *)
1794                                         pattern[2].spec)->spi;
1795                 break;
1796         case IAVF_PATTERN_UDP_ESP:
1797                 if (ipsec_flow->is_ipv4) {
1798                         parse_ipv4_item((const struct rte_flow_item_ipv4 *)
1799                                         pattern[1].spec,
1800                                         &ipsec_flow->ipv4_hdr);
1801                 } else {
1802                         parse_ipv6_item((const struct rte_flow_item_ipv6 *)
1803                                         pattern[1].spec,
1804                                         &ipsec_flow->ipv6_hdr);
1805                 }
1806                 parse_udp_item((const struct rte_flow_item_udp *)
1807                                 pattern[2].spec,
1808                         &ipsec_flow->udp_hdr);
1809                 ipsec_flow->is_udp = true;
1810                 ipsec_flow->spi =
1811                         ((const struct rte_flow_item_esp *)
1812                                         pattern[3].spec)->hdr.spi;
1813                 break;
1814         default:
1815                 goto flow_cleanup;
1816         }
1817
1818         if (!has_security_action(actions, &session))
1819                 goto flow_cleanup;
1820
1821         if (!iavf_ipsec_crypto_action_valid(ethdev, session,
1822                         ipsec_flow->spi))
1823                 goto flow_cleanup;
1824
1825         return ipsec_flow;
1826
1827 flow_cleanup:
1828         rte_free(ipsec_flow);
1829         return NULL;
1830 }
1831
1832
1833 static struct iavf_flow_parser iavf_ipsec_flow_parser;
1834
1835 static int
1836 iavf_ipsec_flow_init(struct iavf_adapter *ad)
1837 {
1838         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
1839         struct iavf_flow_parser *parser;
1840
1841         if (!vf->vf_res)
1842                 return -EINVAL;
1843
1844         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
1845                 parser = &iavf_ipsec_flow_parser;
1846         else
1847                 return -ENOTSUP;
1848
1849         return iavf_register_parser(parser, ad);
1850 }
1851
/* Unregister the IPsec flow parser registered by iavf_ipsec_flow_init(). */
static void
iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
{
	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
}
1857
/* Create an inbound IPsec flow: program the parsed security policy
 * (from iavf_ipsec_flow_parse() via @meta) into the PF and attach the
 * resulting policy id to @flow.
 *
 * Returns 0 on success, -rte_errno on a NULL rule or PF add failure.
 */
static int
iavf_ipsec_flow_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error)
{
	struct iavf_ipsec_flow_item *ipsec_flow = meta;
	if (!ipsec_flow) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"NULL rule.");
		return -rte_errno;
	}

	/* IPv4 and IPv6 rules pass the destination address through
	 * different parameters of the policy-add call; the other address
	 * slot is NULL/0. NOTE(review): udp_hdr.dst_port is passed even
	 * when is_udp is false — presumably ignored by the callee in that
	 * case, but it relies on the parser having initialized the field.
	 */
	if (ipsec_flow->is_ipv4) {
		ipsec_flow->id =
			iavf_ipsec_crypto_inbound_security_policy_add(ad,
			ipsec_flow->spi,
			1,
			ipsec_flow->ipv4_hdr.dst_addr,
			NULL,
			0,
			ipsec_flow->is_udp,
			ipsec_flow->udp_hdr.dst_port);
	} else {
		ipsec_flow->id =
			iavf_ipsec_crypto_inbound_security_policy_add(ad,
			ipsec_flow->spi,
			0,
			0,
			ipsec_flow->ipv6_hdr.dst_addr,
			0,
			ipsec_flow->is_udp,
			ipsec_flow->udp_hdr.dst_port);
	}

	/* a valid policy id is >= 1; anything else is a failed add */
	if (ipsec_flow->id < 1) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to add SA.");
		return -rte_errno;
	}

	flow->rule = ipsec_flow;

	return 0;
}
1905
1906 static int
1907 iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
1908                 struct rte_flow *flow,
1909                 struct rte_flow_error *error)
1910 {
1911         struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
1912         if (!ipsec_flow) {
1913                 rte_flow_error_set(error, EINVAL,
1914                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1915                                 "NULL rule.");
1916                 return -rte_errno;
1917         }
1918
1919         iavf_ipsec_crypto_security_policy_delete(ad,
1920                         ipsec_flow->is_ipv4, ipsec_flow->id);
1921         rte_free(ipsec_flow);
1922         return 0;
1923 }
1924
/* Flow engine vtable registered with the generic iavf flow framework */
static struct iavf_flow_engine iavf_ipsec_flow_engine = {
	.init = iavf_ipsec_flow_init,
	.uninit = iavf_ipsec_flow_uninit,
	.create = iavf_ipsec_flow_create,
	.destroy = iavf_ipsec_flow_destroy,
	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
};
1932
1933 static int
1934 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
1935                        struct iavf_pattern_match_item *array,
1936                        uint32_t array_len,
1937                        const struct rte_flow_item pattern[],
1938                        const struct rte_flow_action actions[],
1939                        void **meta,
1940                        struct rte_flow_error *error)
1941 {
1942         struct iavf_pattern_match_item *item = NULL;
1943         int ret = -1;
1944
1945         item = iavf_search_pattern_match_item(pattern, array, array_len, error);
1946         if (item && item->meta) {
1947                 uint32_t type = (uint64_t)(item->meta);
1948                 struct iavf_ipsec_flow_item *fi =
1949                                 iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
1950                                                 pattern, actions, type);
1951                 if (fi && meta) {
1952                         *meta = fi;
1953                         ret = 0;
1954                 }
1955         }
1956         return ret;
1957 }
1958
/* Parser descriptor tying the IPsec pattern table to the engine above */
static struct iavf_flow_parser iavf_ipsec_flow_parser = {
	.engine = &iavf_ipsec_flow_engine,
	.array = iavf_ipsec_flow_pattern,
	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
	.parse_pattern_action = iavf_ipsec_flow_parse,
	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
};
1966
/* Constructor: register the IPsec flow engine with the iavf flow
 * framework at shared-object load time.
 */
RTE_INIT(iavf_ipsec_flow_engine_register)
{
	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
}