1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
5 #include <rte_cryptodev.h>
6 #include <rte_ethdev.h>
7 #include <rte_security_driver.h>
8 #include <rte_security.h>
11 #include "iavf_rxtx.h"
13 #include "iavf_generic_flow.h"
15 #include "iavf_ipsec_crypto.h"
16 #include "iavf_ipsec_crypto_capabilities.h"
/**
 * iAVF IPsec Crypto Security Context
 *
 * Per-adapter security context: holds the dynamic mbuf metadata field
 * offset and the capability table advertised to the rte_security layer.
 */
struct iavf_security_ctx {
	struct iavf_adapter *adapter;
	/* offset of the IPsec metadata dynfield in the mbuf; registered at
	 * init and read in iavf_ipsec_crypto_pkt_metadata_set()
	 */
	int pkt_md_offset;
	struct rte_cryptodev_capabilities *crypto_capabilities;
};
28 * iAVF IPsec Crypto Security Session Parameters
30 struct iavf_security_session {
31 struct iavf_adapter *adapter;
33 enum rte_security_ipsec_sa_mode mode;
34 enum rte_security_ipsec_tunnel_type type;
35 enum rte_security_ipsec_sa_direction direction;
38 uint32_t spi; /* Security Parameter Index */
39 uint32_t hw_idx; /* SA Index in hardware table */
61 struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
64 * IV Length field in IPsec Tx Desc uses the following encoding:
71 * but we also need the IV Length for TSO to correctly calculate the total
72 * header length so placing it in the upper 6-bits here for easier retrieval.
75 calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
77 uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
81 iv_length = IAVF_IPSEC_IV_LEN_DW;
84 iv_length = IAVF_IPSEC_IV_LEN_DDW;
87 iv_length = IAVF_IPSEC_IV_LEN_QDW;
91 return (iv_sz << 2) | iv_length;
95 iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
97 return sizeof(struct iavf_security_session);
100 static const struct rte_cryptodev_symmetric_capability *
101 get_capability(struct iavf_security_ctx *iavf_sctx,
102 uint32_t algo, uint32_t type)
104 const struct rte_cryptodev_capabilities *capability;
107 capability = &iavf_sctx->crypto_capabilities[i];
109 while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
110 if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
111 (uint32_t)capability->sym.xform_type == type &&
112 (uint32_t)capability->sym.cipher.algo == algo)
113 return &capability->sym;
114 /** try next capability */
115 capability = &iavf_crypto_capabilities[i++];
121 static const struct rte_cryptodev_symmetric_capability *
122 get_auth_capability(struct iavf_security_ctx *iavf_sctx,
123 enum rte_crypto_auth_algorithm algo)
125 return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
128 static const struct rte_cryptodev_symmetric_capability *
129 get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
130 enum rte_crypto_cipher_algorithm algo)
132 return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
134 static const struct rte_cryptodev_symmetric_capability *
135 get_aead_capability(struct iavf_security_ctx *iavf_sctx,
136 enum rte_crypto_aead_algorithm algo)
138 return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
142 get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
143 enum rte_crypto_cipher_algorithm algo)
145 const struct rte_cryptodev_symmetric_capability *capability;
147 capability = get_cipher_capability(iavf_sctx, algo);
148 if (capability == NULL)
151 return capability->cipher.block_size;
155 get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
156 enum rte_crypto_aead_algorithm algo)
158 const struct rte_cryptodev_symmetric_capability *capability;
160 capability = get_aead_capability(iavf_sctx, algo);
161 if (capability == NULL)
164 return capability->cipher.block_size;
168 get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
169 enum rte_crypto_auth_algorithm algo)
171 const struct rte_cryptodev_symmetric_capability *capability;
173 capability = get_auth_capability(iavf_sctx, algo);
174 if (capability == NULL)
177 return capability->auth.block_size;
/* Encodes the ESP cipher block size into the Tx context descriptor field.
 * NOTE(review): only the signature survives in this extraction — the body
 * mapping block sizes to descriptor encodings is truncated; restore it from
 * the original source before relying on this function.
 */
181 calc_context_desc_cipherblock_sz(size_t len)
194 valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
196 if (len < min || len > max)
202 if ((len - min) % increment)
205 /* make sure it fits in the key array */
206 if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
213 valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
214 struct rte_crypto_auth_xform *auth)
216 const struct rte_cryptodev_symmetric_capability *capability;
218 capability = get_auth_capability(iavf_sctx, auth->algo);
219 if (capability == NULL)
222 /* verify key size */
223 if (!valid_length(auth->key.length,
224 capability->auth.key_size.min,
225 capability->auth.key_size.max,
226 capability->aead.key_size.increment))
233 valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
234 struct rte_crypto_cipher_xform *cipher)
236 const struct rte_cryptodev_symmetric_capability *capability;
238 capability = get_cipher_capability(iavf_sctx, cipher->algo);
239 if (capability == NULL)
242 /* verify key size */
243 if (!valid_length(cipher->key.length,
244 capability->cipher.key_size.min,
245 capability->cipher.key_size.max,
246 capability->cipher.key_size.increment))
253 valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
254 struct rte_crypto_aead_xform *aead)
256 const struct rte_cryptodev_symmetric_capability *capability;
258 capability = get_aead_capability(iavf_sctx, aead->algo);
259 if (capability == NULL)
262 /* verify key size */
263 if (!valid_length(aead->key.length,
264 capability->aead.key_size.min,
265 capability->aead.key_size.max,
266 capability->aead.key_size.increment))
273 iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
274 struct rte_security_session_conf *conf)
276 /** validate security action/protocol selection */
277 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
278 conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
279 PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
283 /** validate IPsec protocol selection */
284 if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
285 PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
289 /** validate selected options */
290 if (conf->ipsec.options.copy_dscp ||
291 conf->ipsec.options.copy_flabel ||
292 conf->ipsec.options.copy_df ||
293 conf->ipsec.options.dec_ttl ||
294 conf->ipsec.options.ecn ||
295 conf->ipsec.options.stats) {
296 PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
301 * Validate crypto xforms parameters.
303 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
304 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
305 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
307 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
308 if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
309 PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
312 } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
313 conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
314 conf->crypto_xform->next &&
315 conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
316 if (!valid_cipher_xform(iavf_sctx,
317 &conf->crypto_xform->cipher)) {
318 PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
322 if (!valid_auth_xform(iavf_sctx,
323 &conf->crypto_xform->next->auth)) {
324 PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
327 } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
328 conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
329 conf->crypto_xform->next &&
330 conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
331 if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
332 PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
336 if (!valid_cipher_xform(iavf_sctx,
337 &conf->crypto_xform->next->cipher)) {
338 PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
347 sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
348 struct rte_crypto_aead_xform *aead, uint32_t salt)
350 cfg->crypto_type = VIRTCHNL_AEAD;
352 switch (aead->algo) {
353 case RTE_CRYPTO_AEAD_AES_CCM:
354 cfg->algo_type = VIRTCHNL_AES_CCM; break;
355 case RTE_CRYPTO_AEAD_AES_GCM:
356 cfg->algo_type = VIRTCHNL_AES_GCM; break;
357 case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
358 cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
360 PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
364 cfg->key_len = aead->key.length;
365 cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
366 cfg->digest_len = aead->digest_length;
369 memcpy(cfg->key_data, aead->key.data, cfg->key_len);
373 sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
374 struct rte_crypto_cipher_xform *cipher, uint32_t salt)
376 cfg->crypto_type = VIRTCHNL_CIPHER;
378 switch (cipher->algo) {
379 case RTE_CRYPTO_CIPHER_AES_CBC:
380 cfg->algo_type = VIRTCHNL_AES_CBC; break;
381 case RTE_CRYPTO_CIPHER_3DES_CBC:
382 cfg->algo_type = VIRTCHNL_3DES_CBC; break;
383 case RTE_CRYPTO_CIPHER_NULL:
384 cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
385 case RTE_CRYPTO_CIPHER_AES_CTR:
386 cfg->algo_type = VIRTCHNL_AES_CTR;
390 PMD_DRV_LOG(ERR, "Invalid cipher parameters");
394 cfg->key_len = cipher->key.length;
395 cfg->iv_len = cipher->iv.length;
398 memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
402 sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
403 struct rte_crypto_auth_xform *auth, uint32_t salt)
405 cfg->crypto_type = VIRTCHNL_AUTH;
407 switch (auth->algo) {
408 case RTE_CRYPTO_AUTH_NULL:
409 cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
410 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
411 cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
412 case RTE_CRYPTO_AUTH_AES_CMAC:
413 cfg->algo_type = VIRTCHNL_AES_CMAC; break;
414 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
415 cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
416 case RTE_CRYPTO_AUTH_MD5_HMAC:
417 cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
418 case RTE_CRYPTO_AUTH_SHA1_HMAC:
419 cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
420 case RTE_CRYPTO_AUTH_SHA224_HMAC:
421 cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
422 case RTE_CRYPTO_AUTH_SHA256_HMAC:
423 cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
424 case RTE_CRYPTO_AUTH_SHA384_HMAC:
425 cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
426 case RTE_CRYPTO_AUTH_SHA512_HMAC:
427 cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
428 case RTE_CRYPTO_AUTH_AES_GMAC:
429 cfg->algo_type = VIRTCHNL_AES_GMAC;
433 PMD_DRV_LOG(ERR, "Invalid auth parameters");
437 cfg->key_len = auth->key.length;
438 /* special case for RTE_CRYPTO_AUTH_AES_GMAC */
439 if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
440 cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
442 cfg->iv_len = auth->iv.length;
443 cfg->digest_len = auth->digest_length;
445 memcpy(cfg->key_data, auth->key.data, cfg->key_len);
449 * Send SA add virtual channel request to Inline IPsec driver.
451 * Inline IPsec driver expects SPI and destination IP address to be in host
452 * order, but DPDK APIs are network order, therefore we need to do a htonl
453 * conversion of these parameters.
456 iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
457 struct rte_security_session_conf *conf)
459 struct inline_ipsec_msg *request = NULL, *response = NULL;
460 struct virtchnl_ipsec_sa_cfg *sa_cfg;
461 size_t request_len, response_len;
465 request_len = sizeof(struct inline_ipsec_msg) +
466 sizeof(struct virtchnl_ipsec_sa_cfg);
468 request = rte_malloc("iavf-sad-add-request", request_len, 0);
469 if (request == NULL) {
474 response_len = sizeof(struct inline_ipsec_msg) +
475 sizeof(struct virtchnl_ipsec_sa_cfg_resp);
476 response = rte_malloc("iavf-sad-add-response", response_len, 0);
477 if (response == NULL) {
482 /* set msg header params */
483 request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
484 request->req_id = (uint16_t)0xDEADBEEF;
486 /* set SA configuration params */
487 sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
489 sa_cfg->spi = conf->ipsec.spi;
490 sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
491 sa_cfg->virtchnl_direction =
492 conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
493 VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
495 if (conf->ipsec.options.esn) {
496 sa_cfg->esn_enabled = 1;
497 sa_cfg->esn_hi = conf->ipsec.esn.hi;
498 sa_cfg->esn_low = conf->ipsec.esn.low;
501 if (conf->ipsec.options.udp_encap)
502 sa_cfg->udp_encap_enabled = 1;
504 /* Set outer IP params */
505 if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
506 sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
508 *((uint32_t *)sa_cfg->dst_addr) =
509 htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
511 uint32_t *v6_dst_addr =
512 (uint32_t *)conf->ipsec.tunnel.ipv6.dst_addr.s6_addr;
514 sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
516 ((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
517 ((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
518 ((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
519 ((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
522 /* set crypto params */
523 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
524 sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
525 &conf->crypto_xform->aead, conf->ipsec.salt);
527 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
528 sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
529 &conf->crypto_xform->cipher, conf->ipsec.salt);
530 sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
531 &conf->crypto_xform->next->auth, conf->ipsec.salt);
533 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
534 sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
535 &conf->crypto_xform->auth, conf->ipsec.salt);
536 if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
537 sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
538 &conf->crypto_xform->next->cipher, conf->ipsec.salt);
541 /* send virtual channel request to add SA to hardware database */
542 rc = iavf_ipsec_crypto_request(adapter,
543 (uint8_t *)request, request_len,
544 (uint8_t *)response, response_len);
548 /* verify response id */
549 if (response->ipsec_opcode != request->ipsec_opcode ||
550 response->req_id != request->req_id)
553 rc = response->ipsec_data.sa_cfg_resp->sa_handle;
562 set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
563 struct iavf_security_session *sess)
565 template->sa_idx = sess->sa.hw_idx;
567 if (sess->udp_encap.enabled)
568 template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
570 if (sess->esn.enabled)
571 template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
573 template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
574 template->ctx_desc_ipsec_params =
575 calc_context_desc_cipherblock_sz(sess->block_sz) |
576 ((uint8_t)(sess->icv_sz >> 2) << 3);
580 set_session_parameter(struct iavf_security_ctx *iavf_sctx,
581 struct iavf_security_session *sess,
582 struct rte_security_session_conf *conf, uint32_t sa_idx)
584 sess->adapter = iavf_sctx->adapter;
586 sess->mode = conf->ipsec.mode;
587 sess->direction = conf->ipsec.direction;
589 if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
590 sess->type = conf->ipsec.tunnel.type;
592 sess->sa.spi = conf->ipsec.spi;
593 sess->sa.hw_idx = sa_idx;
595 if (conf->ipsec.options.esn) {
596 sess->esn.enabled = 1;
597 sess->esn.value = conf->ipsec.esn.value;
600 if (conf->ipsec.options.udp_encap)
601 sess->udp_encap.enabled = 1;
603 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
604 sess->block_sz = get_aead_blocksize(iavf_sctx,
605 conf->crypto_xform->aead.algo);
606 sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
607 sess->icv_sz = conf->crypto_xform->aead.digest_length;
608 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
609 sess->block_sz = get_cipher_blocksize(iavf_sctx,
610 conf->crypto_xform->cipher.algo);
611 sess->iv_sz = conf->crypto_xform->cipher.iv.length;
612 sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
613 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
614 if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
615 sess->block_sz = get_auth_blocksize(iavf_sctx,
616 conf->crypto_xform->auth.algo);
617 sess->iv_sz = sizeof(uint64_t); /* iv len inc. salt */
618 sess->icv_sz = conf->crypto_xform->auth.digest_length;
620 sess->block_sz = get_cipher_blocksize(iavf_sctx,
621 conf->crypto_xform->next->cipher.algo);
623 conf->crypto_xform->next->cipher.iv.length;
624 sess->icv_sz = conf->crypto_xform->auth.digest_length;
628 set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
632 * Create IPsec Security Association for inline IPsec Crypto offload.
634 * 1. validate session configuration parameters
635 * 2. allocate session memory from mempool
636 * 3. add SA to hardware database
637 * 4. set session parameters
638 * 5. create packet metadata template for datapath
641 iavf_ipsec_crypto_session_create(void *device,
642 struct rte_security_session_conf *conf,
643 struct rte_security_session *session,
644 struct rte_mempool *mempool)
646 struct rte_eth_dev *ethdev = device;
647 struct iavf_adapter *adapter =
648 IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
649 struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
650 struct iavf_security_session *iavf_session = NULL;
654 /* validate that all SA parameters are valid for device */
655 ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
659 /* allocate session context */
660 if (rte_mempool_get(mempool, (void **)&iavf_session)) {
661 PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
665 /* add SA to hardware database */
666 sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
669 "Failed to add SA (spi: %d, mode: %s, direction: %s)",
672 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
673 "transport" : "tunnel",
674 conf->ipsec.direction ==
675 RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
676 "inbound" : "outbound");
678 rte_mempool_put(mempool, iavf_session);
682 /* save data plane required session parameters */
683 set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
685 /* save to security session private data */
686 set_sec_session_private_data(session, iavf_session);
692 * Check if valid ipsec crypto action.
693 * SPI must be non-zero and SPI in session must match SPI value
694 * passed into function.
696 * returns: 0 if invalid session or SPI value equal zero
697 * returns: 1 if valid
700 iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
701 const struct rte_security_session *session, uint32_t spi)
703 struct iavf_adapter *adapter =
704 IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
705 struct iavf_security_session *sess = session->sess_private_data;
707 /* verify we have a valid session and that it belong to this adapter */
708 if (unlikely(sess == NULL || sess->adapter != adapter))
711 /* SPI value must be non-zero */
714 /* Session SPI must patch flow SPI*/
715 else if (sess->sa.spi == spi) {
718 * TODO: We should add a way of tracking valid hw SA indices to
719 * make validation less brittle
727 * Send virtual channel security policy add request to IES driver.
729 * IES driver expects SPI and destination IP address to be in host
730 * order, but DPDK APIs are network order, therefore we need to do a htonl
731 * conversion of these parameters.
734 iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
737 rte_be32_t v4_dst_addr,
738 uint8_t *v6_dst_addr,
743 struct inline_ipsec_msg *request = NULL, *response = NULL;
744 size_t request_len, response_len;
747 request_len = sizeof(struct inline_ipsec_msg) +
748 sizeof(struct virtchnl_ipsec_sp_cfg);
749 request = rte_malloc("iavf-inbound-security-policy-add-request",
751 if (request == NULL) {
756 /* set msg header params */
757 request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
758 request->req_id = (uint16_t)0xDEADBEEF;
761 request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
765 request->ipsec_data.sp_cfg->table_id =
766 VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
767 request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
769 request->ipsec_data.sp_cfg->table_id =
770 VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
771 request->ipsec_data.sp_cfg->dip[0] =
772 htonl(((uint32_t *)v6_dst_addr)[0]);
773 request->ipsec_data.sp_cfg->dip[1] =
774 htonl(((uint32_t *)v6_dst_addr)[1]);
775 request->ipsec_data.sp_cfg->dip[2] =
776 htonl(((uint32_t *)v6_dst_addr)[2]);
777 request->ipsec_data.sp_cfg->dip[3] =
778 htonl(((uint32_t *)v6_dst_addr)[3]);
781 request->ipsec_data.sp_cfg->drop = drop;
783 /** Traffic Class/Congestion Domain currently not support */
784 request->ipsec_data.sp_cfg->set_tc = 0;
785 request->ipsec_data.sp_cfg->cgd = 0;
786 request->ipsec_data.sp_cfg->is_udp = is_udp;
787 request->ipsec_data.sp_cfg->udp_port = htons(udp_port);
789 response_len = sizeof(struct inline_ipsec_msg) +
790 sizeof(struct virtchnl_ipsec_sp_cfg_resp);
791 response = rte_malloc("iavf-inbound-security-policy-add-response",
793 if (response == NULL) {
798 /* send virtual channel request to add SA to hardware database */
799 rc = iavf_ipsec_crypto_request(adapter,
800 (uint8_t *)request, request_len,
801 (uint8_t *)response, response_len);
805 /* verify response */
806 if (response->ipsec_opcode != request->ipsec_opcode ||
807 response->req_id != request->req_id)
810 rc = response->ipsec_data.sp_cfg_resp->rule_id;
820 iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
821 struct iavf_security_session *sess)
823 struct inline_ipsec_msg *request = NULL, *response = NULL;
824 size_t request_len, response_len;
827 request_len = sizeof(struct inline_ipsec_msg) +
828 sizeof(struct virtchnl_ipsec_sa_update);
829 request = rte_malloc("iavf-sa-update-request", request_len, 0);
830 if (request == NULL) {
835 response_len = sizeof(struct inline_ipsec_msg) +
836 sizeof(struct virtchnl_ipsec_resp);
837 response = rte_malloc("iavf-sa-update-response", response_len, 0);
838 if (response == NULL) {
843 /* set msg header params */
844 request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
845 request->req_id = (uint16_t)0xDEADBEEF;
847 /* set request params */
848 request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
849 request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
851 /* send virtual channel request to add SA to hardware database */
852 rc = iavf_ipsec_crypto_request(adapter,
853 (uint8_t *)request, request_len,
854 (uint8_t *)response, response_len);
858 /* verify response */
859 if (response->ipsec_opcode != request->ipsec_opcode ||
860 response->req_id != request->req_id)
863 rc = response->ipsec_data.ipsec_resp->resp;
873 iavf_ipsec_crypto_session_update(void *device,
874 struct rte_security_session *session,
875 struct rte_security_session_conf *conf)
877 struct iavf_adapter *adapter = NULL;
878 struct iavf_security_session *iavf_sess = NULL;
879 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
882 adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
883 iavf_sess = (struct iavf_security_session *)session->sess_private_data;
885 /* verify we have a valid session and that it belong to this adapter */
886 if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
889 /* update esn hi 32-bits */
890 if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
892 * Update ESN in hardware for inbound SA. Store in
893 * iavf_security_session for outbound SA for use
894 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
896 if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
897 rc = iavf_ipsec_crypto_sa_update_esn(adapter,
900 iavf_sess->esn.hi = conf->ipsec.esn.hi;
907 iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
908 struct rte_security_session *session __rte_unused,
909 struct rte_security_stats *stats __rte_unused)
915 iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
916 uint8_t is_v4, uint32_t flow_id)
918 struct inline_ipsec_msg *request = NULL, *response = NULL;
919 size_t request_len, response_len;
922 request_len = sizeof(struct inline_ipsec_msg) +
923 sizeof(struct virtchnl_ipsec_sp_destroy);
924 request = rte_malloc("iavf-sp-del-request", request_len, 0);
925 if (request == NULL) {
930 response_len = sizeof(struct inline_ipsec_msg) +
931 sizeof(struct virtchnl_ipsec_resp);
932 response = rte_malloc("iavf-sp-del-response", response_len, 0);
933 if (response == NULL) {
938 /* set msg header params */
939 request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
940 request->req_id = (uint16_t)0xDEADBEEF;
942 /* set security policy params */
943 request->ipsec_data.sp_destroy->table_id = is_v4 ?
944 VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
945 VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
946 request->ipsec_data.sp_destroy->rule_id = flow_id;
948 /* send virtual channel request to add SA to hardware database */
949 rc = iavf_ipsec_crypto_request(adapter,
950 (uint8_t *)request, request_len,
951 (uint8_t *)response, response_len);
955 /* verify response */
956 if (response->ipsec_opcode != request->ipsec_opcode ||
957 response->req_id != request->req_id)
960 return response->ipsec_data.ipsec_status->status;
970 iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
971 struct iavf_security_session *sess)
973 struct inline_ipsec_msg *request = NULL, *response = NULL;
974 size_t request_len, response_len;
978 request_len = sizeof(struct inline_ipsec_msg) +
979 sizeof(struct virtchnl_ipsec_sa_destroy);
981 request = rte_malloc("iavf-sa-del-request", request_len, 0);
982 if (request == NULL) {
987 response_len = sizeof(struct inline_ipsec_msg) +
988 sizeof(struct virtchnl_ipsec_resp);
990 response = rte_malloc("iavf-sa-del-response", response_len, 0);
991 if (response == NULL) {
996 /* set msg header params */
997 request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
998 request->req_id = (uint16_t)0xDEADBEEF;
1001 * SA delete supports deletion of 1-8 specified SA's or if the flag
1002 * field is zero, all SA's associated with VF will be deleted.
1005 request->ipsec_data.sa_destroy->flag = 0x1;
1006 request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
1008 request->ipsec_data.sa_destroy->flag = 0x0;
1011 /* send virtual channel request to add SA to hardware database */
1012 rc = iavf_ipsec_crypto_request(adapter,
1013 (uint8_t *)request, request_len,
1014 (uint8_t *)response, response_len);
1016 goto update_cleanup;
1018 /* verify response */
1019 if (response->ipsec_opcode != request->ipsec_opcode ||
1020 response->req_id != request->req_id)
1024 * Delete status will be the same bitmask as sa_destroy request flag if
1025 * deletes successful
1027 if (request->ipsec_data.sa_destroy->flag !=
1028 response->ipsec_data.ipsec_status->status)
1039 iavf_ipsec_crypto_session_destroy(void *device,
1040 struct rte_security_session *session)
1042 struct iavf_adapter *adapter = NULL;
1043 struct iavf_security_session *iavf_sess = NULL;
1044 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1047 adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1048 iavf_sess = (struct iavf_security_session *)session->sess_private_data;
1050 /* verify we have a valid session and that it belong to this adapter */
1051 if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
1054 ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
1055 rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
1060 * Get ESP trailer from packet as well as calculate the total ESP trailer
1061 * length, which include padding, ESP trailer footer and the ICV
1063 static inline struct rte_esp_tail *
1064 iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
1065 struct iavf_security_session *s, uint16_t *esp_trailer_length)
1067 struct rte_esp_tail *esp_trailer;
1069 uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
1070 uint16_t offset = 0;
1073 * The ICV will not be present in TSO packets as this is appended by
1074 * hardware during segment generation
1076 if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
1077 length -= s->icv_sz;
1079 *esp_trailer_length = length;
1082 * Calculate offset in packet to ESP trailer header, this should be
1083 * total packet length less the size of the ESP trailer plus the ICV
1084 * length if it is present
1086 offset = rte_pktmbuf_pkt_len(m) - length;
1088 if (m->nb_segs > 1) {
1089 /* find segment which esp trailer is located */
1090 while (m->data_len < offset) {
1091 offset -= m->data_len;
1096 esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
1098 *esp_trailer_length += esp_trailer->pad_len;
1103 static inline uint16_t
1104 iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
1105 struct iavf_security_session *s, uint16_t esp_tlen)
1107 uint16_t ol2_len = m->l2_len; /* MAC + VLAN */
1108 uint16_t ol3_len = 0; /* ipv4/6 + ext hdrs */
1109 uint16_t ol4_len = 0; /* UDP NATT */
1110 uint16_t l3_len = 0; /* IPv4/6 + ext hdrs */
1111 uint16_t l4_len = 0; /* TCP/UDP/STCP hdrs */
1112 uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
1114 if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
1115 ol3_len = m->outer_l3_len;
1117 * application provided l3len assumed to include length of
1118 * ipv4/6 hdr + ext hdrs
1121 if (s->udp_encap.enabled) {
1122 ol4_len = sizeof(struct rte_udp_hdr);
1123 l3_len = m->l3_len - ol4_len;
1130 return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
1131 esp_hlen + l3_len + l4_len + esp_tlen);
1135 iavf_ipsec_crypto_pkt_metadata_set(void *device,
1136 struct rte_security_session *session,
1137 struct rte_mbuf *m, void *params)
1139 struct rte_eth_dev *ethdev = device;
1140 struct iavf_adapter *adapter =
1141 IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
1142 struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1143 struct iavf_security_session *iavf_sess = session->sess_private_data;
1144 struct iavf_ipsec_crypto_pkt_metadata *md;
1145 struct rte_esp_tail *esp_tail;
1146 uint64_t *sqn = params;
1147 uint16_t esp_trailer_length;
1149 /* Check we have valid session and is associated with this device */
1150 if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
1153 /* Get dynamic metadata location from mbuf */
1154 md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
1155 struct iavf_ipsec_crypto_pkt_metadata *);
1157 /* Set immutable metadata values from session template */
1158 memcpy(md, &iavf_sess->pkt_metadata_template,
1159 sizeof(struct iavf_ipsec_crypto_pkt_metadata));
1161 esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
1162 &esp_trailer_length);
1164 /* Set per packet mutable metadata values */
1165 md->esp_trailer_len = esp_trailer_length;
1166 md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
1167 iavf_sess, esp_trailer_length);
1168 md->next_proto = esp_tail->next_proto;
1170 /* If Extended SN in use set the upper 32-bits in metadata */
1171 if (iavf_sess->esn.enabled && sqn != NULL)
1172 md->esn = (uint32_t)(*sqn >> 32);
1178 iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
1179 struct virtchnl_ipsec_cap *capability)
1181 /* Perform pf-vf comms */
1182 struct inline_ipsec_msg *request = NULL, *response = NULL;
1183 size_t request_len, response_len;
1186 request_len = sizeof(struct inline_ipsec_msg);
1188 request = rte_malloc("iavf-device-capability-request", request_len, 0);
1189 if (request == NULL) {
1191 goto update_cleanup;
1194 response_len = sizeof(struct inline_ipsec_msg) +
1195 sizeof(struct virtchnl_ipsec_cap);
1196 response = rte_malloc("iavf-device-capability-response",
1198 if (response == NULL) {
1200 goto update_cleanup;
1203 /* set msg header params */
1204 request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
1205 request->req_id = (uint16_t)0xDEADBEEF;
1207 /* send virtual channel request to add SA to hardware database */
1208 rc = iavf_ipsec_crypto_request(adapter,
1209 (uint8_t *)request, request_len,
1210 (uint8_t *)response, response_len);
1212 goto update_cleanup;
1214 /* verify response id */
1215 if (response->ipsec_opcode != request->ipsec_opcode ||
1216 response->req_id != request->req_id){
1218 goto update_cleanup;
1220 memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
/*
 * Map table: virtchnl hash/auth algorithm IDs -> rte_crypto auth algorithm
 * IDs, indexed directly by the VIRTCHNL_* enumerator value.
 */
1229 enum rte_crypto_auth_algorithm auth_maptbl[] = {
1230 	/* Hash Algorithm */
1231 	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
1232 	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
1233 	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
1234 	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
1235 	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
1236 	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
1237 	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
1238 	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
1239 	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
1240 	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
1241 	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
1242 	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
1243 	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
1244 	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
1245 	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
/**
 * Translate one virtchnl auth algorithm capability (acap) into an
 * rte_cryptodev symmetric AUTH capability entry (scap).
 *
 * NOTE(review): no bounds check on acap->algo_type before indexing
 * auth_maptbl is visible here — assumed validated by the PF; confirm.
 */
1249 update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
1250 	struct virtchnl_algo_cap *acap)
1252 	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
1254 	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1256 	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
1258 	capability->auth.algo = auth_maptbl[acap->algo_type];
1259 	capability->auth.block_size = acap->block_size;
1261 	capability->auth.key_size.min = acap->min_key_size;
1262 	capability->auth.key_size.max = acap->max_key_size;
1263 	capability->auth.key_size.increment = acap->inc_key_size;
1265 	capability->auth.digest_size.min = acap->min_digest_size;
1266 	capability->auth.digest_size.max = acap->max_digest_size;
1267 	capability->auth.digest_size.increment = acap->inc_digest_size;
/*
 * Map table: virtchnl cipher algorithm IDs -> rte_crypto cipher algorithm
 * IDs, indexed directly by the VIRTCHNL_* enumerator value.
 */
1270 enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
1271 	/* Cipher Algorithm */
1272 	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
1273 	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
1274 	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
1275 	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
/**
 * Translate one virtchnl cipher algorithm capability (acap) into an
 * rte_cryptodev symmetric CIPHER capability entry (scap).
 */
1279 update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
1280 	struct virtchnl_algo_cap *acap)
1282 	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
1284 	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1286 	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1288 	capability->cipher.algo = cipher_maptbl[acap->algo_type];
1290 	capability->cipher.block_size = acap->block_size;
1292 	capability->cipher.key_size.min = acap->min_key_size;
1293 	capability->cipher.key_size.max = acap->max_key_size;
1294 	capability->cipher.key_size.increment = acap->inc_key_size;
1296 	capability->cipher.iv_size.min = acap->min_iv_size;
1297 	capability->cipher.iv_size.max = acap->max_iv_size;
1298 	capability->cipher.iv_size.increment = acap->inc_iv_size;
/*
 * Map table: virtchnl AEAD algorithm IDs -> rte_crypto AEAD algorithm IDs,
 * indexed directly by the VIRTCHNL_* enumerator value.
 */
1301 enum rte_crypto_aead_algorithm aead_maptbl[] = {
1302 	/* AEAD Algorithm */
1303 	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
1304 	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
1305 	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
/**
 * Translate one virtchnl AEAD algorithm capability (acap) into an
 * rte_cryptodev symmetric AEAD capability entry (scap), covering key,
 * AAD, IV and digest size ranges.
 */
1309 update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
1310 	struct virtchnl_algo_cap *acap)
1312 	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
1314 	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1316 	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
1318 	capability->aead.algo = aead_maptbl[acap->algo_type];
1320 	capability->aead.block_size = acap->block_size;
1322 	capability->aead.key_size.min = acap->min_key_size;
1323 	capability->aead.key_size.max = acap->max_key_size;
1324 	capability->aead.key_size.increment = acap->inc_key_size;
1326 	capability->aead.aad_size.min = acap->min_aad_size;
1327 	capability->aead.aad_size.max = acap->max_aad_size;
1328 	capability->aead.aad_size.increment = acap->inc_aad_size;
1330 	capability->aead.iv_size.min = acap->min_iv_size;
1331 	capability->aead.iv_size.max = acap->max_iv_size;
1332 	capability->aead.iv_size.increment = acap->inc_iv_size;
1334 	capability->aead.digest_size.min = acap->min_digest_size;
1335 	capability->aead.digest_size.max = acap->max_digest_size;
1336 	capability->aead.digest_size.increment = acap->inc_digest_size;
1340  * Dynamically set crypto capabilities based on virtchannel IPsec
1341  * capabilities structure.
 *
 * Builds a NULL-terminated rte_cryptodev_capabilities array from the
 * per-type (auth/cipher/AEAD) algorithm lists reported by the PF and
 * stores it in iavf_sctx->crypto_capabilities.
 *
 * NOTE(review): lines are missing from this extract (e.g. the rte_zmalloc
 * failure check and some case labels); comments cover visible code only.
1344 iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
1345 	*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
1347 	struct rte_cryptodev_capabilities *capabilities;
1348 	int i, j, number_of_capabilities = 0, ci = 0;
1350 	/* Count the total number of crypto algorithms supported */
1351 	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
1352 		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
1355 	 * Allocate cryptodev capabilities structure for
1356 	 * *number_of_capabilities* items plus one item to null terminate the
1359 	capabilities = rte_zmalloc("crypto_cap",
1360 		sizeof(struct rte_cryptodev_capabilities) *
1361 		(number_of_capabilities + 1), 0);
	/* Last entry is the UNDEFINED-op terminator consumers iterate to. */
1364 	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
1367 	 * Iterate over each virtchnl crypto capability by crypto type and
1370 	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
1371 		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
1372 			switch (vch_cap->cap[i].crypto_type) {
1374 				update_auth_capabilities(&capabilities[ci],
1375 					&vch_cap->cap[i].algo_cap_list[j]);
1377 			case VIRTCHNL_CIPHER:
1378 				update_cipher_capabilities(&capabilities[ci],
1379 					&vch_cap->cap[i].algo_cap_list[j]);
1382 				update_aead_capabilities(&capabilities[ci],
1383 					&vch_cap->cap[i].algo_cap_list[j]);
			/* Unknown type: mark slot as undefined/terminator. */
1386 				capabilities[ci].op =
1387 						RTE_CRYPTO_OP_TYPE_UNDEFINED;
1393 	iavf_sctx->crypto_capabilities = capabilities;
1398  * Get security capabilities for device
 *
 * rte_security capabilities_get callback: returns a static table of the
 * four supported inline-crypto ESP combinations (tunnel/transport x
 * egress/ingress), with each entry's crypto_capabilities pointer patched
 * at runtime to the dynamically discovered per-device list.
1400 static const struct rte_security_capability *
1401 iavf_ipsec_crypto_capabilities_get(void *device)
1403 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
1404 	struct iavf_adapter *adapter =
1405 		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
1406 	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
	/* Static table shared by all devices; crypto_capabilities fields are
	 * overwritten below with this device's discovered list. NOTE(review):
	 * being static, concurrent devices would share/overwrite these
	 * pointers — confirm single-writer assumption. */
1409 	static struct rte_security_capability iavf_security_capabilities[] = {
1410 		{ /* IPsec Inline Crypto ESP Tunnel Egress */
1411 			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1412 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1414 				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1415 				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
1416 				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
1417 				.options = { .udp_encap = 1,
1418 						.stats = 1, .esn = 1 },
1420 			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
1422 		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
1423 			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1424 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1426 				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1427 				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
1428 				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1429 				.options = { .udp_encap = 1,
1430 						.stats = 1, .esn = 1 },
1434 		{ /* IPsec Inline Crypto ESP Transport Egress */
1435 			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1436 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1438 				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1439 				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
1440 				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
1441 				.options = { .udp_encap = 1, .stats = 1,
1444 			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
1446 		{ /* IPsec Inline Crypto ESP Transport Ingress */
1447 			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
1448 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
1450 				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
1451 				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
1452 				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1453 			.options = { .udp_encap = 1, .stats = 1,
		/* ACTION_TYPE_NONE entry terminates the capability array. */
1459 			.action = RTE_SECURITY_ACTION_TYPE_NONE
1464 	 * Update the security capabilities struct with the runtime discovered
1465 	 * crypto capabilities, except for last element of the array which is
1466 	 * the null termination
1468 	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
1469 			sizeof(iavf_security_capabilities[0])) - 1); i++) {
1470 		iavf_security_capabilities[i].crypto_capabilities =
1471 			iavf_sctx->crypto_capabilities;
1474 	return iavf_security_capabilities;
/*
 * rte_security operations vtable for the iavf inline IPsec crypto
 * offload; installed on the device's security context at create time.
 */
1477 static struct rte_security_ops iavf_ipsec_crypto_ops = {
1478 	.session_get_size = iavf_ipsec_crypto_session_size_get,
1479 	.session_create = iavf_ipsec_crypto_session_create,
1480 	.session_update = iavf_ipsec_crypto_session_update,
1481 	.session_stats_get = iavf_ipsec_crypto_session_stats_get,
1482 	.session_destroy = iavf_ipsec_crypto_session_destroy,
1483 	.set_pkt_metadata = iavf_ipsec_crypto_pkt_metadata_set,
1484 	.get_userdata = NULL,
1485 	.capabilities_get = iavf_ipsec_crypto_capabilities_get,
/**
 * Allocate and attach the rte_security context to the eth device, then
 * lazily allocate the driver-private iavf_security_ctx on the adapter.
 *
 * NOTE(review): error-return lines and closing braces are missing from
 * this extract; visible allocations appear unchecked only because the
 * checks fall on elided lines.
 */
1489 iavf_security_ctx_create(struct iavf_adapter *adapter)
1491 	struct rte_security_ctx *sctx;
1493 	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
1497 	sctx->device = adapter->vf.eth_dev;
1498 	sctx->ops = &iavf_ipsec_crypto_ops;
1501 	adapter->vf.eth_dev->security_ctx = sctx;
	/* Only allocate the driver-private context once per adapter. */
1503 	if (adapter->security_ctx == NULL) {
1504 		adapter->security_ctx = rte_malloc("iavf_security_ctx",
1505 				sizeof(struct iavf_security_ctx), 0);
1506 		if (adapter->security_ctx == NULL)
/**
 * Initialize inline IPsec support: register the per-packet metadata
 * mbuf dynfield, fetch device capabilities from the PF, and build the
 * security capability tables from them.
 */
1514 iavf_security_init(struct iavf_adapter *adapter)
1516 	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1517 	struct rte_mbuf_dynfield pkt_md_dynfield = {
1518 		.name = "iavf_ipsec_crypto_pkt_metadata",
1519 		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
1520 		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
1522 	struct virtchnl_ipsec_cap capabilities;
1525 	iavf_sctx->adapter = adapter;
	/* Negative offset from the registration call doubles as errno. */
1527 	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
1528 	if (iavf_sctx->pkt_md_offset < 0)
1529 		return iavf_sctx->pkt_md_offset;
1531 	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
1532 	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
1536 	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
/* Accessor: return the registered mbuf dynfield offset used to locate the
 * per-packet IPsec metadata on a tx mbuf. */
1541 iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
1543 	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1545 	return iavf_sctx->pkt_md_offset;
/**
 * Tear down security state: free the driver-private context and clear
 * both the adapter's and the eth device's security context pointers.
 *
 * NOTE(review): the rte_security_ctx (sctx) free is not visible in this
 * extract — presumably on an elided line; confirm no leak.
 */
1549 iavf_security_ctx_destroy(struct iavf_adapter *adapter)
1551 	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
1552 	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
1554 	if (iavf_sctx == NULL)
1557 	/* free and reset security data structures */
1558 	rte_free(iavf_sctx);
	/* Null the dangling pointers so later checks see "not initialized". */
1561 	adapter->security_ctx = NULL;
1562 	adapter->vf.eth_dev->security_ctx = NULL;
/**
 * Query the PF for the inline IPsec engine status over the PF-VF
 * virtchnl channel and copy the result into *status.
 */
1568 iavf_ipsec_crypto_status_get(struct iavf_adapter *adapter,
1569 	struct virtchnl_ipsec_status *status)
1571 	/* Perform pf-vf comms */
1572 	struct inline_ipsec_msg *request = NULL, *response = NULL;
1573 	size_t request_len, response_len;
1576 	request_len = sizeof(struct inline_ipsec_msg);
1578 	request = rte_malloc("iavf-device-status-request", request_len, 0);
1579 	if (request == NULL) {
1581 		goto update_cleanup;
	/* NOTE(review): response sized with virtchnl_ipsec_cap rather than
	 * virtchnl_ipsec_status — over-allocation, harmless if cap is the
	 * larger struct, but looks copy-pasted; confirm intent. */
1584 	response_len = sizeof(struct inline_ipsec_msg) +
1585 			sizeof(struct virtchnl_ipsec_cap);
1586 	response = rte_malloc("iavf-device-status-response",
1588 	if (response == NULL) {
1590 		goto update_cleanup;
1593 	/* set msg header params */
1594 	request->ipsec_opcode = INLINE_IPSEC_OP_GET_STATUS;
/* Fixed sentinel request id, echoed back by the PF and checked below. */
1595 	request->req_id = (uint16_t)0xDEADBEEF;
1597 	/* send virtual channel request to add SA to hardware database */
1598 	rc = iavf_ipsec_crypto_request(adapter,
1599 			(uint8_t *)request, request_len,
1600 			(uint8_t *)response, response_len);
1602 		goto update_cleanup;
1604 	/* verify response id */
1605 	if (response->ipsec_opcode != request->ipsec_opcode ||
1606 	    response->req_id != request->req_id){
1608 		goto update_cleanup;
1610 	memcpy(status, response->ipsec_data.ipsec_status, sizeof(*status));
/**
 * Determine whether inline IPsec crypto is supported and available:
 * requires both the VF capability flag and an AVAILABLE status from the
 * PF. On failure, clears the capability flag so subsequent calls return
 * quickly without re-querying the PF.
 */
1621 iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
1623 	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
1624 	int crypto_supported = false;
1626 	/** Capability check for IPsec Crypto */
1627 	if (resources && (resources->vf_cap_flags &
1628 		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)) {
1629 		struct virtchnl_ipsec_status status;
1630 		int rc = iavf_ipsec_crypto_status_get(adapter, &status);
1631 		if (rc == 0 && status.status == INLINE_IPSEC_STATUS_AVAILABLE)
1632 			crypto_supported = true;
1635 	/* Clear the VF flag to return faster next call */
1636 	if (resources && !crypto_supported)
1637 		resources->vf_cap_flags &=
1638 				~(VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO);
1640 	return crypto_supported;
/*
 * rte_flow input-set masks for the supported IPsec patterns (ESP, AH and
 * NAT-T UDP-encapsulated ESP over IPv4/IPv6). NOTE(review): macro
 * continuation lines are missing from this extract.
 */
1643 #define IAVF_IPSEC_INSET_ESP (\
1646 #define IAVF_IPSEC_INSET_AH (\
1649 #define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
1650 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
1653 #define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
1654 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
/* Pattern protocol type (low nibble) and IP version (high nibble),
 * packed into a single void* meta value via IAVF_PATTERN(). */
1657 enum iavf_ipsec_flow_pt_type {
1658 	IAVF_PATTERN_ESP = 1,
1660 	IAVF_PATTERN_UDP_ESP,
1662 enum iavf_ipsec_flow_pt_ip_ver {
1663 	IAVF_PATTERN_IPV4 = 1,
/* Encode/decode helpers for the packed pattern-type meta value. */
1667 #define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
1668 #define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
1669 #define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
/*
 * Supported flow patterns: {pattern template, input-set mask, packed
 * pattern-type meta} for ESP, AH and UDP-encapsulated ESP, each in IPv4
 * and IPv6 variants.
 */
1671 static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
1672 	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
1673 			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
1674 	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
1675 			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
1676 	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
1677 			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
1678 	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
1679 			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
1680 	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
1681 			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
1682 	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
1683 			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
/*
 * Parsed representation of one IPsec flow rule: captured L2/L3/L4
 * headers used to program the inbound security policy. NOTE(review):
 * several members (id, is_ipv4, is_udp, spi) referenced elsewhere are on
 * lines missing from this extract.
 */
1686 struct iavf_ipsec_flow_item {
1690 	struct rte_ether_hdr eth_hdr;
1692 		struct rte_ipv4_hdr ipv4_hdr;
1693 		struct rte_ipv6_hdr ipv6_hdr;
1695 	struct rte_udp_hdr udp_hdr;
/* Copy src/dst MAC addresses from a flow-item spec into an ether header. */
1700 parse_eth_item(const struct rte_flow_item_eth *item,
1701 		struct rte_ether_hdr *eth)
1703 	memcpy(eth->src_addr.addr_bytes,
1704 			item->src.addr_bytes, sizeof(eth->src_addr));
1705 	memcpy(eth->dst_addr.addr_bytes,
1706 			item->dst.addr_bytes, sizeof(eth->dst_addr));
/* Copy src/dst IPv4 addresses from a flow-item spec into an IPv4 header. */
1710 parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
1711 		struct rte_ipv4_hdr *ipv4)
1713 	ipv4->src_addr = item->hdr.src_addr;
1714 	ipv4->dst_addr = item->hdr.dst_addr;
/* Copy src/dst IPv6 addresses (16 bytes each) from a flow-item spec. */
1718 parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
1719 		struct rte_ipv6_hdr *ipv6)
1721 	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
1722 	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
/* Copy src/dst UDP ports (kept in network byte order) from a flow-item
 * spec into a UDP header. */
1726 parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
1728 	udp->dst_port = item->hdr.dst_port;
1729 	udp->src_port = item->hdr.src_port;
/**
 * Check the action list for the exact {SECURITY, END} pair; on match,
 * return the SECURITY action's conf (the security session) via *session.
 */
1733 has_security_action(const struct rte_flow_action actions[],
1734 		const void **session)
1736 	/* only {SECURITY; END} supported */
1737 	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
1738 			actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
1739 		*session = actions[0].conf;
/**
 * Allocate and populate an iavf_ipsec_flow_item from a matched rte_flow
 * pattern/action pair. The packed 'type' meta selects the protocol
 * (ESP/AH/UDP-ESP) and IP version, which determine which pattern slots
 * hold the IP, UDP and ESP/AH specs. Returns NULL on allocation failure
 * or invalid security action (freeing the item on the error path).
 *
 * NOTE(review): lines are missing from this extract (some pattern[n].spec
 * arguments, spi assignments' left-hand sides, break statements);
 * comments describe visible structure only.
 */
1745 static struct iavf_ipsec_flow_item *
1746 iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
1747 		const struct rte_flow_item pattern[],
1748 		const struct rte_flow_action actions[],
1751 	const void *session;
1752 	struct iavf_ipsec_flow_item
1753 		*ipsec_flow = rte_malloc("security-flow-rule",
1754 		sizeof(struct iavf_ipsec_flow_item), 0);
1755 	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
1756 	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
1758 	if (ipsec_flow == NULL)
1761 	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
	/* pattern[0] is always the ETH item; spec is optional. */
1763 	if (pattern[0].spec)
1764 		parse_eth_item((const struct rte_flow_item_eth *)
1765 				pattern[0].spec, &ipsec_flow->eth_hdr);
1768 	case IAVF_PATTERN_ESP:
1769 		if (ipsec_flow->is_ipv4) {
1770 			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
1772 					&ipsec_flow->ipv4_hdr);
1774 			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
1776 					&ipsec_flow->ipv6_hdr);
		/* ESP: SPI comes from pattern slot 2 (ETH/IP/ESP). */
1779 			((const struct rte_flow_item_esp *)
1780 					pattern[2].spec)->hdr.spi;
1782 	case IAVF_PATTERN_AH:
1783 		if (ipsec_flow->is_ipv4) {
1784 			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
1786 					&ipsec_flow->ipv4_hdr);
1788 			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
1790 					&ipsec_flow->ipv6_hdr);
		/* AH: SPI comes from pattern slot 2 (ETH/IP/AH). */
1793 			((const struct rte_flow_item_ah *)
1794 					pattern[2].spec)->spi;
1796 	case IAVF_PATTERN_UDP_ESP:
1797 		if (ipsec_flow->is_ipv4) {
1798 			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
1800 					&ipsec_flow->ipv4_hdr);
1802 			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
1804 					&ipsec_flow->ipv6_hdr);
1806 		parse_udp_item((const struct rte_flow_item_udp *)
1808 				&ipsec_flow->udp_hdr);
1809 		ipsec_flow->is_udp = true;
		/* NAT-T: SPI comes from pattern slot 3 (ETH/IP/UDP/ESP). */
1811 			((const struct rte_flow_item_esp *)
1812 					pattern[3].spec)->hdr.spi;
1818 	if (!has_security_action(actions, &session))
1821 	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
	/* Error path: discard the partially built item. */
1828 	rte_free(ipsec_flow);
/* Forward declaration; parser is fully defined at the bottom of the file. */
1833 static struct iavf_flow_parser iavf_ipsec_flow_parser;
/**
 * Flow-engine init hook: register the IPsec flow parser, but only when
 * the VF reports the inline IPsec crypto offload capability.
 */
1836 iavf_ipsec_flow_init(struct iavf_adapter *ad)
1838 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
1839 	struct iavf_flow_parser *parser;
1844 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
1845 		parser = &iavf_ipsec_flow_parser;
1849 	return iavf_register_parser(parser, ad);
/* Flow-engine uninit hook: unregister the IPsec flow parser. */
1853 iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
1855 	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
/**
 * Flow-engine create hook: program the inbound security policy in
 * hardware from the parsed flow item (meta) and attach the item to the
 * rte_flow handle. An id < 1 from the policy-add call indicates failure.
 */
1859 iavf_ipsec_flow_create(struct iavf_adapter *ad,
1860 		struct rte_flow *flow,
1862 		struct rte_flow_error *error)
1864 	struct iavf_ipsec_flow_item *ipsec_flow = meta;
1866 		rte_flow_error_set(error, EINVAL,
1867 				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
	/* IPv4 and IPv6 take different address arguments to the policy add. */
1872 	if (ipsec_flow->is_ipv4) {
1874 			iavf_ipsec_crypto_inbound_security_policy_add(ad,
1877 			ipsec_flow->ipv4_hdr.dst_addr,
1881 			ipsec_flow->udp_hdr.dst_port);
1884 			iavf_ipsec_crypto_inbound_security_policy_add(ad,
1888 			ipsec_flow->ipv6_hdr.dst_addr,
1891 			ipsec_flow->udp_hdr.dst_port);
1894 	if (ipsec_flow->id < 1) {
1895 		rte_flow_error_set(error, EINVAL,
1896 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1897 				"Failed to add SA.");
	/* Success: hand ownership of the parsed item to the flow handle. */
1901 	flow->rule = ipsec_flow;
/**
 * Flow-engine destroy hook: remove the security policy from hardware and
 * free the parsed flow item owned by the rte_flow handle.
 */
1907 iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
1908 		struct rte_flow *flow,
1909 		struct rte_flow_error *error)
1911 	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
1913 		rte_flow_error_set(error, EINVAL,
1914 				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1919 	iavf_ipsec_crypto_security_policy_delete(ad,
1920 			ipsec_flow->is_ipv4, ipsec_flow->id);
1921 	rte_free(ipsec_flow);
/* Flow engine descriptor registered with the iavf generic flow framework. */
1925 static struct iavf_flow_engine iavf_ipsec_flow_engine = {
1926 	.init = iavf_ipsec_flow_init,
1927 	.uninit = iavf_ipsec_flow_uninit,
1928 	.create = iavf_ipsec_flow_create,
1929 	.destroy = iavf_ipsec_flow_destroy,
1930 	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
/**
 * Parser hook: match the incoming pattern against the IPsec pattern
 * table; on match, decode the packed pattern-type meta and build the
 * parsed flow item consumed later by iavf_ipsec_flow_create().
 */
1934 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
1935 		       struct iavf_pattern_match_item *array,
1937 		       const struct rte_flow_item pattern[],
1938 		       const struct rte_flow_action actions[],
1940 		       struct rte_flow_error *error)
1942 	struct iavf_pattern_match_item *item = NULL;
1945 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
1946 	if (item && item->meta) {
		/* meta is a small packed integer stored in a void*; the
		 * uint64_t->uint32_t narrowing is safe for values built by
		 * IAVF_PATTERN() (fits in one byte). */
1947 		uint32_t type = (uint64_t)(item->meta);
1948 		struct iavf_ipsec_flow_item *fi =
1949 				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
1950 						pattern, actions, type);
/* Parser descriptor tying the pattern table and parse callback to the
 * IPsec crypto flow stage. */
1959 static struct iavf_flow_parser iavf_ipsec_flow_parser = {
1960 	.engine = &iavf_ipsec_flow_engine,
1961 	.array = iavf_ipsec_flow_pattern,
1962 	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
1963 	.parse_pattern_action = iavf_ipsec_flow_parse,
1964 	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
/* Constructor: register the IPsec flow engine with the framework at
 * shared-object load time. */
1967 RTE_INIT(iavf_ipsec_flow_engine_register)
1969 	iavf_register_flow_engine(&iavf_ipsec_flow_engine);