X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fipsec.c;h=7b85330774b77b080de65edbecf8d9d17d324603;hb=e482e0fa6a106c548afe9c52e71abf3a70848d46;hp=5fb5bc16e7bf08011d2143717106deff14617f7e;hpb=6138c2daece4e763db54fa1320ef95963d3aad00;p=dpdk.git

diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 5fb5bc16e7..7b85330774 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -36,9 +36,10 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
 		}
 		/* TODO support for Transport and IPV6 tunnel */
 	}
+	ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
 }
 
-static inline int
+int
 create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 {
 	struct rte_cryptodev_info cdev_info;
@@ -101,7 +102,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 			set_ipsec_conf(sa, &(sess_conf.ipsec));
 
 			sa->sec_session = rte_security_session_create(ctx,
-					&sess_conf, ipsec_ctx->session_pool);
+					&sess_conf, ipsec_ctx->session_priv_pool);
 			if (sa->sec_session == NULL) {
 				RTE_LOG(ERR, IPSEC,
 				"SEC Session init failed: err: %d\n", ret);
@@ -116,7 +117,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 			int ret = 0;
 
 			sa->sec_session = rte_security_session_create(ctx,
-					&sess_conf, ipsec_ctx->session_pool);
+					&sess_conf, ipsec_ctx->session_priv_pool);
 			if (sa->sec_session == NULL) {
 				RTE_LOG(ERR, IPSEC,
 				"SEC Session init failed: err: %d\n", ret);
@@ -186,14 +187,8 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 				.rss_key_len = 40,
 			};
 			struct rte_eth_dev *eth_dev;
-			union {
-				struct rte_flow_action_rss rss;
-				struct {
-					const struct rte_eth_rss_conf *rss_conf;
-					uint16_t num;
-					uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
-				} local;
-			} action_rss;
+			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+			struct rte_flow_action_rss action_rss;
 			unsigned int i;
 			unsigned int j;
 
@@ -207,9 +202,14 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 
 			for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues; ++i)
 				if (eth_dev->data->rx_queues[i])
-					action_rss.local.queue[j++] = i;
-			action_rss.local.num = j;
-			action_rss.local.rss_conf = &rss_conf;
+					queue[j++] = i;
+			action_rss = (struct rte_flow_action_rss){
+				.types = rss_conf.rss_hf,
+				.key_len = rss_conf.rss_key_len,
+				.queue_num = j,
+				.key = rss_key,
+				.queue = queue,
+			};
 			ret = rte_flow_validate(sa->portid, &sa->attr,
 						sa->pattern, sa->action,
 						&err);
@@ -270,11 +270,14 @@ flow_create_failure:
 			 * the packet is received, this userdata will be
 			 * retrieved using the metadata from the packet.
 			 *
-			 * This is required only for inbound SAs.
+			 * The PMD is expected to set similar metadata for other
+			 * operations, like rte_eth_event, which are tied to
+			 * security session. In such cases, the userdata could
+			 * be obtained to uniquely identify the security
+			 * parameters denoted.
 			 */
 
-			if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
-				sess_conf.userdata = (void *) sa;
+			sess_conf.userdata = (void *) sa;
 
 			sa->sec_session = rte_security_session_create(ctx,
 					&sess_conf, ipsec_ctx->session_pool);
@@ -320,49 +323,45 @@ flow_create_failure:
 				ipsec_ctx->session_pool);
 		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
 				sa->crypto_session, sa->xforms,
-				ipsec_ctx->session_pool);
+				ipsec_ctx->session_priv_pool);
 
 		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
 				&cdev_info);
-		if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
-			ret = rte_cryptodev_queue_pair_attach_sym_session(
-				ipsec_ctx->tbl[cdev_id_qp].id,
-				ipsec_ctx->tbl[cdev_id_qp].qp,
-				sa->crypto_session);
-			if (ret < 0) {
-				RTE_LOG(ERR, IPSEC,
-					"Session cannot be attached to qp %u\n",
-					ipsec_ctx->tbl[cdev_id_qp].qp);
-				return -1;
-			}
-		}
 	}
 	sa->cdev_id_qp = cdev_id_qp;
 
 	return 0;
 }
 
+/*
+ * queue crypto-ops into PMD queue.
+ */
+void
+enqueue_cop_burst(struct cdev_qp *cqp)
+{
+	uint32_t i, len, ret;
+
+	len = cqp->len;
+	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
+	if (ret < len) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+			" enqueued %u crypto ops out of %u\n",
+			cqp->id, cqp->qp, ret, len);
+		/* drop packets that we fail to enqueue */
+		for (i = ret; i < len; i++)
+			rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
+	}
+	cqp->in_flight += ret;
+	cqp->len = 0;
+}
+
 static inline void
 enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
 {
-	int32_t ret, i;
-
 	cqp->buf[cqp->len++] = cop;
 
-	if (cqp->len == MAX_PKT_BURST) {
-		ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
-				cqp->buf, cqp->len);
-		if (ret < cqp->len) {
-			RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
-					" enqueued %u crypto ops out of %u\n",
-					cqp->id, cqp->qp,
-					ret, cqp->len);
-			for (i = ret; i < cqp->len; i++)
-				rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
-		}
-		cqp->in_flight += ret;
-		cqp->len = 0;
-	}
+	if (cqp->len == MAX_PKT_BURST)
+		enqueue_cop_burst(cqp);
 }
 
 static inline void
@@ -476,6 +475,32 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 	}
 }
 
+static inline int32_t
+ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+		struct rte_mbuf *pkts[], uint16_t max_pkts)
+{
+	int32_t nb_pkts, ret;
+	struct ipsec_mbuf_metadata *priv;
+	struct ipsec_sa *sa;
+	struct rte_mbuf *pkt;
+
+	nb_pkts = 0;
+	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
+		rte_prefetch0(pkt);
+		priv = get_priv(pkt);
+		sa = priv->sa;
+		ret = xform_func(pkt, sa, &priv->cop);
+		if (unlikely(ret)) {
+			rte_pktmbuf_free(pkt);
+			continue;
+		}
+		pkts[nb_pkts++] = pkt;
+	}
+
+	return nb_pkts;
+}
+
 static inline int
 ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		struct rte_mbuf *pkts[], uint16_t max_pkts)
@@ -486,30 +511,15 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 	struct ipsec_sa *sa;
 	struct rte_mbuf *pkt;
 
-	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts;) {
+	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
 		struct cdev_qp *cqp;
 
-		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp];
-		while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
-			pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
-			rte_prefetch0(pkt);
-			priv = get_priv(pkt);
-			sa = priv->sa;
-			ret = xform_func(pkt, sa, &priv->cop);
-			if (unlikely(ret)) {
-				rte_pktmbuf_free(pkt);
-				continue;
-			}
-			pkts[nb_pkts++] = pkt;
-		}
+		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
+		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
+			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
 
-		if (cqp->in_flight == 0) {
-			ipsec_ctx->last_qp++;
-			if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
-				ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
-			i++;
+		if (cqp->in_flight == 0)
 			continue;
-		}
 
 		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
 				cops, max_pkts - nb_pkts);
@@ -533,12 +543,6 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 				}
 			}
 			pkts[nb_pkts++] = pkt;
-			if (cqp->in_flight < max_pkts) {
-				ipsec_ctx->last_qp++;
-				if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
-					ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
-				i++;
-			}
 		}
 	}
 
@@ -556,6 +560,13 @@ ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
 
 	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
 
+	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
+}
+
+uint16_t
+ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+		uint16_t len)
+{
 	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
 }
 
@@ -569,5 +580,12 @@ ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
 
 	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
 
+	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
+}
+
+uint16_t
+ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+		uint16_t len)
+{
 	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
 }
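
The userdata comment in the hunk at line 270 describes how the SA pointer stored in sess_conf.userdata is recovered on the receive path. The fragment below is a minimal sketch of that lookup, not code from this patch: it assumes the DPDK APIs of this era (PKT_RX_SEC_OFFLOAD, the mbuf udata64 field, rte_eth_dev_get_sec_ctx() and rte_security_get_userdata()) plus the example's own struct ipsec_sa, and the helper name pkt_to_sa() is invented for illustration.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

#include "ipsec.h"	/* struct ipsec_sa from the example application */

/* Hypothetical RX-side helper: map an inline-offloaded packet back to the
 * ipsec_sa whose address was registered through sess_conf.userdata. */
static inline struct ipsec_sa *
pkt_to_sa(struct rte_mbuf *pkt)
{
	struct rte_security_ctx *sec_ctx;

	if ((pkt->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
		return NULL;	/* not processed by the inline device */

	/* security context of the ethdev that received the packet */
	sec_ctx = rte_eth_dev_get_sec_ctx(pkt->port);

	/* the device-written metadata in udata64 is translated back to the
	 * registered userdata, i.e. the SA pointer */
	return (struct ipsec_sa *)rte_security_get_userdata(sec_ctx,
			pkt->udata64);
}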
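
Likewise, splitting ipsec_inbound()/ipsec_outbound() (which now return only the inline-path results) from the new ipsec_inbound_cqp_dequeue()/ipsec_outbound_cqp_dequeue() helpers implies that the caller drains the crypto-device queues as a separate step. The loop below is a rough, hypothetical caller-side sketch, not the actual ipsec-secgw.c code: it assumes the example's ipsec.h declares the prototypes, MAX_PKT_BURST and struct ipsec_ctx, that ipsec_inbound() takes (ctx, pkts, nb_pkts, len) as suggested by the hunk at line 556, and it leaves the forwarding step as a comment.

#include <rte_common.h>
#include <rte_mbuf.h>

#include "ipsec.h"	/* prototypes, MAX_PKT_BURST, struct ipsec_ctx */

/* Hypothetical fragment of the RX processing loop. */
static void
handle_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[], uint16_t nb_rx)
{
	struct rte_mbuf *cqp_pkts[MAX_PKT_BURST];
	uint16_t n;

	/* Enqueue the fresh packets for IPsec processing; inline-crypto
	 * results come back immediately in pkts[]. */
	n = ipsec_inbound(ctx, pkts, nb_rx, MAX_PKT_BURST);
	/* ... route/forward pkts[0..n-1] ... */

	/* Independently poll the crypto devices for lookaside completions. */
	n = ipsec_inbound_cqp_dequeue(ctx, cqp_pkts, MAX_PKT_BURST);
	/* ... route/forward cqp_pkts[0..n-1] ... */
	RTE_SET_USED(n);
}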