do {
/* Copy CPT command to LMTLINE */
- roc_lmt_mov((void *)lmtline, &inst, 2);
+ roc_lmt_mov64((void *)lmtline, &inst);
lmt_status = roc_lmt_submit_ldeor(io_addr);
} while (lmt_status == 0);
struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
int aes_key_len = 0;
- if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
- ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
- cipher_xform = crypto_xform;
- auth_xform = crypto_xform->next;
- } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
- ctl->direction = ROC_IE_SA_DIR_INBOUND;
+ if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = crypto_xform;
cipher_xform = crypto_xform->next;
} else {
- return -EINVAL;
+ cipher_xform = crypto_xform;
+ auth_xform = crypto_xform->next;
}
+ if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+ ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
+ else
+ ctl->direction = ROC_IE_SA_DIR_INBOUND;
+
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
return -ENOTSUP;
}
} else {
- switch (cipher_xform->cipher.algo) {
- case RTE_CRYPTO_CIPHER_NULL:
- ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
- aes_key_len = cipher_xform->cipher.key.length;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
- aes_key_len = cipher_xform->cipher.key.length;
- break;
- default:
- plt_err("Unsupported cipher algorithm");
- return -ENOTSUP;
+ if (cipher_xform != NULL) {
+ switch (cipher_xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
+ aes_key_len = cipher_xform->cipher.key.length;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
+ aes_key_len = cipher_xform->cipher.key.length;
+ break;
+ default:
+ plt_err("Unsupported cipher algorithm");
+ return -ENOTSUP;
+ }
}
switch (auth_xform->auth.algo) {
if (ipsec->options.udp_encap == 1)
ctl->encap_type = ROC_IE_ON_SA_ENCAP_UDP;
+ ctl->copy_df = ipsec->options.copy_df;
+
ctl->spi = rte_cpu_to_be_32(ipsec->spi);
rte_io_wmb();
int cipher_key_len = 0;
int ret;
+ ret = ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
+ if (ret)
+ return ret;
+
+ if (ipsec->esn.value) {
+ common_sa->esn_low = ipsec->esn.low;
+ common_sa->esn_hi = ipsec->esn.hi;
+ }
+
+ if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+ return 0;
+
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
cipher_xform = crypto_xform->next;
else
cipher_xform = crypto_xform;
- ret = ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
- if (ret)
- return ret;
-
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
struct rte_crypto_sym_xform *crypto_xform,
struct rte_security_session *sec_sess)
{
- struct rte_crypto_sym_xform *auth_xform = crypto_xform->next;
struct roc_ie_on_ip_template *template = NULL;
struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
+ struct rte_crypto_sym_xform *auth_xform;
union roc_on_ipsec_outb_param1 param1;
struct cnxk_cpt_inst_tmpl *inst_tmpl;
struct roc_ie_on_outb_sa *out_sa;
sa->seq_lo = 1;
sa->seq_hi = 0;
+ if (ipsec->esn.value)
+ sa->esn = ipsec->esn.value;
+
+ if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ auth_xform = crypto_xform;
+ else
+ auth_xform = crypto_xform->next;
+
ret = fill_ipsec_common_sa(ipsec, crypto_xform, &out_sa->common_sa);
if (ret)
return ret;
template->ip4.udp_src = rte_be_to_cpu_16(4500);
template->ip4.udp_dst = rte_be_to_cpu_16(4500);
} else {
- ip4->next_proto_id = IPPROTO_ESP;
+ if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+ ip4->next_proto_id = IPPROTO_AH;
+ else
+ ip4->next_proto_id = IPPROTO_ESP;
}
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+ uint16_t frag_off = 0;
ctx_len += sizeof(template->ip4);
ip4->version_ihl = RTE_IPV4_VHL_DEF;
ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
if (ipsec->tunnel.ipv4.df)
- ip4->fragment_offset = BIT(14);
+ frag_off |= RTE_IPV4_HDR_DF_FLAG;
+ ip4->fragment_offset = rte_cpu_to_be_16(frag_off);
+
memcpy(&ip4->src_addr, &ipsec->tunnel.ipv4.src_ip,
sizeof(struct in_addr));
memcpy(&ip4->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
ctx_len += RTE_ALIGN_CEIL(ctx_len, 8);
- if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- sa->cipher_iv_off = crypto_xform->aead.iv.offset;
- sa->cipher_iv_len = crypto_xform->aead.iv.length;
- } else {
- sa->cipher_iv_off = crypto_xform->cipher.iv.offset;
- sa->cipher_iv_len = crypto_xform->cipher.iv.length;
-
+ if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
auth_key = auth_xform->auth.key.data;
auth_key_len = auth_xform->auth.key.length;
param1.u16 = 0;
param1.s.ikev2 = 1;
- param1.s.per_pkt_iv = 1;
+
+ sa->custom_hdr_len = sizeof(struct roc_ie_on_outb_hdr) -
+ ROC_IE_ON_MAX_IV_LEN;
+
+#ifdef LA_IPSEC_DEBUG
+ /* Use IV from application in debug mode */
+ if (ipsec->options.iv_gen_disable == 1) {
+ param1.s.per_pkt_iv = ROC_IE_ON_IV_SRC_FROM_DPTR;
+ sa->custom_hdr_len = sizeof(struct roc_ie_on_outb_hdr);
+
+ if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ sa->cipher_iv_off = crypto_xform->aead.iv.offset;
+ sa->cipher_iv_len = crypto_xform->aead.iv.length;
+ } else if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ sa->cipher_iv_off = crypto_xform->cipher.iv.offset;
+ sa->cipher_iv_len = crypto_xform->cipher.iv.length;
+ }
+ }
+#else
+ if (ipsec->options.iv_gen_disable != 0) {
+ plt_err("Application provided IV is not supported");
+ return -ENOTSUP;
+ }
+#endif
+
w4.s.param1 = param1.u16;
inst_tmpl->w4 = w4.u64;
sa->ar.wint = sa->replay_win_sz;
sa->ar.base = sa->replay_win_sz;
- in_sa->common_sa.esn_low = 0;
- in_sa->common_sa.esn_hi = 0;
+ in_sa->common_sa.esn_low = sa->seq_lo;
+ in_sa->common_sa.esn_hi = sa->seq_hi;
}
return cn9k_cpt_enq_sa_write(
ipsec->life.packets_soft_limit != 0)
return -ENOTSUP;
- if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+ if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT &&
+ ipsec->proto != RTE_SECURITY_IPSEC_SA_PROTO_AH) {
enum rte_crypto_sym_xform_type type = crypto->type;
if (type == RTE_CRYPTO_SYM_XFORM_AEAD) {
return sizeof(struct cn9k_sec_session);
}
+/*
+ * Update an already-created security session with a new configuration.
+ *
+ * Re-verifies the IPsec and crypto transforms and rewrites the outbound
+ * SA for @sec_sess via cn9k_ipsec_outb_sa_create(). Only egress (outbound)
+ * sessions can be updated; ingress update returns -ENOTSUP. Returns -EPERM
+ * when no queue pair is configured, since the SA write is submitted through
+ * a CPT queue. Returns 0 on success or a negative errno on failure.
+ */
+static int
+cn9k_sec_session_update(void *device, struct rte_security_session *sec_sess,
+			struct rte_security_session_conf *conf)
+{
+	struct rte_cryptodev *crypto_dev = device;
+	struct cnxk_cpt_qp *qp;
+	int ret;
+
+	/* SA rewrite is enqueued through CPT, so queue pair 0 must exist. */
+	qp = crypto_dev->data->queue_pairs[0];
+	if (qp == NULL) {
+		plt_err("CPT queue pairs need to be setup for updating security"
+			" session");
+		return -EPERM;
+	}
+
+	/* Only outbound session update is supported. */
+	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+		return -ENOTSUP;
+
+	/* Common cnxk-level transform validation. */
+	ret = cnxk_ipsec_xform_verify(&conf->ipsec, conf->crypto_xform);
+	if (ret)
+		return ret;
+
+	/* cn9k-specific capability checks. */
+	ret = cn9k_ipsec_xform_verify(&conf->ipsec, conf->crypto_xform);
+	if (ret)
+		return ret;
+
+	/* Rebuild the outbound SA in place for this session. */
+	return cn9k_ipsec_outb_sa_create(qp, &conf->ipsec, conf->crypto_xform,
+					sec_sess);
+}
+
/* Update platform specific security ops */
void
cn9k_sec_ops_override(void)
cnxk_sec_ops.session_create = cn9k_sec_session_create;
cnxk_sec_ops.session_destroy = cn9k_sec_session_destroy;
cnxk_sec_ops.session_get_size = cn9k_sec_session_get_size;
+ cnxk_sec_ops.session_update = cn9k_sec_session_update;
}