* 16B - 3
*
* but we also need the IV Length for TSO to correctly calculate the total
- * header length so placing it in the upper 6-bits here for easier reterival.
+ * header length so placing it in the upper 6-bits here for easier retrieval.
*/
static inline uint8_t
calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
/**
* Send SA add virtual channel request to Inline IPsec driver.
*
- * Inline IPsec driver expects SPI and destination IP adderss to be in host
+ * Inline IPsec driver expects SPI and destination IP address to be in host
* order, but DPDK APIs are network order, therefore we need to do a htonl
* conversion of these parameters.
*/
if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
sess->block_sz = get_auth_blocksize(iavf_sctx,
conf->crypto_xform->auth.algo);
- sess->iv_sz = conf->crypto_xform->auth.iv.length;
+ sess->iv_sz = sizeof(uint64_t); /* iv len inc. salt */
sess->icv_sz = conf->crypto_xform->auth.digest_length;
} else {
sess->block_sz = get_cipher_blocksize(iavf_sctx,
/**
* Send virtual channel security policy add request to IES driver.
*
- * IES driver expects SPI and destination IP adderss to be in host
+ * IES driver expects SPI and destination IP address to be in host
* order, but DPDK APIs are network order, therefore we need to do a htonl
* conversion of these parameters.
*/
uint8_t is_v4,
rte_be32_t v4_dst_addr,
uint8_t *v6_dst_addr,
- uint8_t drop)
+ uint8_t drop,
+ bool is_udp,
+ uint16_t udp_port)
{
struct inline_ipsec_msg *request = NULL, *response = NULL;
size_t request_len, response_len;
	/** Traffic Class/Congestion Domain currently not supported */
request->ipsec_data.sp_cfg->set_tc = 0;
request->ipsec_data.sp_cfg->cgd = 0;
+ request->ipsec_data.sp_cfg->is_udp = is_udp;
+ request->ipsec_data.sp_cfg->udp_port = htons(udp_port);
response_len = sizeof(struct inline_ipsec_msg) +
sizeof(struct virtchnl_ipsec_sp_cfg_resp);
request->req_id = (uint16_t)0xDEADBEEF;
/**
- * SA delete supports deletetion of 1-8 specified SA's or if the flag
+ * SA delete supports deletion of 1-8 specified SAs or if the flag
 * field is zero, all SAs associated with VF will be deleted.
*/
if (sess) {
md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
struct iavf_ipsec_crypto_pkt_metadata *);
- /* Set immutatable metadata values from session template */
+ /* Set immutable metadata values from session template */
memcpy(md, &iavf_sess->pkt_metadata_template,
sizeof(struct iavf_ipsec_crypto_pkt_metadata));
capabilities = rte_zmalloc("crypto_cap",
sizeof(struct rte_cryptodev_capabilities) *
(number_of_capabilities + 1), 0);
+ if (!capabilities)
+ return -ENOMEM;
capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
/**
- * Iterate over each virtchl crypto capability by crypto type and
+ * Iterate over each virtchnl crypto capability by crypto type and
* algorithm.
*/
for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
/**
* Update the security capabilities struct with the runtime discovered
* crypto capabilities, except for last element of the array which is
- * the null terminatation
+ * the null termination
*/
for (i = 0; i < ((sizeof(iavf_security_capabilities) /
sizeof(iavf_security_capabilities[0])) - 1); i++) {
rte_free(iavf_sctx);
rte_free(sctx);
- iavf_sctx = NULL;
- sctx = NULL;
+ adapter->security_ctx = NULL;
+ adapter->vf.eth_dev->security_ctx = NULL;
return 0;
}
struct rte_ipv6_hdr ipv6_hdr;
};
struct rte_udp_hdr udp_hdr;
+ uint8_t is_udp;
};
static void
parse_udp_item((const struct rte_flow_item_udp *)
pattern[2].spec,
&ipsec_flow->udp_hdr);
+ ipsec_flow->is_udp = true;
ipsec_flow->spi =
((const struct rte_flow_item_esp *)
pattern[3].spec)->hdr.spi;
1,
ipsec_flow->ipv4_hdr.dst_addr,
NULL,
- 0);
+ 0,
+ ipsec_flow->is_udp,
+ ipsec_flow->udp_hdr.dst_port);
} else {
ipsec_flow->id =
iavf_ipsec_crypto_inbound_security_policy_add(ad,
0,
0,
ipsec_flow->ipv6_hdr.dst_addr,
- 0);
+ 0,
+ ipsec_flow->is_udp,
+ ipsec_flow->udp_hdr.dst_port);
}
if (ipsec_flow->id < 1) {