/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;

		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
}
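/*
 * Create a lookaside session for the SA and register the per-lcore
 * cryptodev queue pairs that will handle it. A minimal usage sketch
 * (hypothetical caller; error handling elided):
 *
 *	struct rte_ipsec_session *ips = ipsec_get_primary_session(sa);
 *
 *	if (create_lookaside_session(ipsec_ctx_lcore, skt_ctx, sa, ips) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to init SA spi %u\n", sa->spi);
 */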
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
	struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
	struct rte_cryptodev_info cdev_info;
	unsigned long cdev_id_qp = 0;
	struct cdev_key key = { 0 };
	struct ipsec_ctx *ipsec_ctx;
	uint32_t lcore_id;
	int32_t ret = 0;

	RTE_LCORE_FOREACH(lcore_id) {
		ipsec_ctx = ipsec_ctx_lcore[lcore_id];

		/* Core is not bound to any cryptodev, skip it */
		if (ipsec_ctx->cdev_map == NULL)
			continue;

		/* Look for a cryptodev that can handle this SA */
		key.lcore_id = (uint8_t)lcore_id;
		key.cipher_algo = (uint8_t)sa->cipher_algo;
		key.auth_algo = (uint8_t)sa->auth_algo;
		key.aead_algo = (uint8_t)sa->aead_algo;

		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
				(void **)&cdev_id_qp);
		if (ret == -ENOENT) {
			RTE_LOG(ERR, IPSEC,
				"No cryptodev: core %u, cipher_algo %u, "
				"auth_algo %u, aead_algo %u\n",
				key.lcore_id, key.cipher_algo,
				key.auth_algo, key.aead_algo);
			continue;
		}
		/* Verify that all cores are using the same cryptodev for the
		 * algorithm combination required by this SA.
		 * The current cryptodev mapping process maps an SA to the
		 * first cryptodev that matches its requirements, so this is
		 * a double check, not an additional restriction.
		 */
		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
			RTE_LOG(ERR, IPSEC,
				"SA mapping to multiple cryptodevs is "
				"not supported!\n");
			return -EINVAL;
		}

		/* Store per-core queue pair information */
		sa->cqp[lcore_id] = &ipsec_ctx->tbl[cdev_id_qp];
	}
	if (cdev_id == RTE_CRYPTO_MAX_DEVS) {
		RTE_LOG(WARNING, IPSEC, "No cores found to handle SA\n");
		return 0;
	}
	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u\n", sa->spi, cdev_id);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(cdev_id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, skt_ctx->session_pool,
					skt_ctx->session_priv_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed: err: %d\n",
					ret);
				return -1;
			}
			ips->security.ctx = ctx;
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			struct rte_cryptodev_info info;

			rte_cryptodev_info_get(cdev_id, &info);
			if (!(info.feature_flags &
					RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;
		}

		ips->crypto.dev_id = cdev_id;
		ips->crypto.ses = rte_cryptodev_sym_session_create(
				skt_ctx->session_pool);
		rte_cryptodev_sym_session_init(cdev_id,
				ips->crypto.ses, sa->xforms,
				skt_ctx->session_priv_pool);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
	}

	return 0;
}
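/*
 * Inline sessions below are bound to an ethdev security context rather
 * than a cryptodev. For RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO an rte_flow
 * rule is also installed so that matching traffic reaches the crypto
 * engine; conceptually (illustrative notation, not a real rule syntax):
 *
 *	pattern: eth / ipv4|ipv6 {sa->src, sa->dst} [/ udp] / esp spi=sa->spi
 *	action:  security / rss|queue|end
 */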
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};
	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}
	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				" rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);

		/* Iterate until an ESP tunnel capability is found */
		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
					RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
				sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create flow if default flow is created */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));
		/* Save the SA as userdata for the security session. When
		 * a packet is received, this userdata is retrieved using
		 * the metadata provided with the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to the
		 * security session. In such cases, the userdata can be
		 * obtained to uniquely identify the security parameters
		 * involved.
		 */
		sess_conf.userdata = (void *) sa;
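		/*
		 * A minimal sketch of the receive-side recovery, assuming
		 * the PMD hands back device-specific metadata 'md' with the
		 * packet (illustrative, not part of this application):
		 *
		 *	struct ipsec_sa *s =
		 *		rte_security_get_userdata(sec_ctx, md);
		 */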
		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);
		if (sec_cap == NULL) {
			RTE_LOG(ERR, IPSEC,
				"No capabilities registered\n");
			return -1;
		}

		/* Iterate until an ESP capability matching the session
		 * is found.
		 */
		while (sec_cap->action !=
				RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
					RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
					sess_conf.ipsec.mode &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
	}

	return 0;
}
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err = {};

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
			.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
			&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
			sa->action, &err);
	if (sa->flow == NULL) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}
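/*
 * The rule created above steers ingress ESP traffic for one SA to a
 * dedicated RX queue; conceptually (illustrative notation only):
 *
 *	pattern: eth / ipv4|ipv6 {sa->src, sa->dst} / esp spi=sa->spi
 *	action:  queue index=sa->fdir_qid
 */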
/*
 * Queue crypto-ops into the PMD queue.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* Drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}
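/*
 * Ops accumulate in cqp->buf and are only flushed once a full burst of
 * MAX_PKT_BURST is ready, so callers must also drain partially filled
 * queues periodically. A minimal drain sketch (assumed caller, mirroring
 * this application's drain logic):
 *
 *	if (cqp->len > 0)
 *		enqueue_cop_burst(cqp);
 */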
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);

		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->security.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			/* Drop ESP-in-UDP packets if the SA does not
			 * expect UDP encapsulation.
			 */
			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
					ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
					" legacy mode\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->crypto.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;

		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
					RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;

		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
					RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
	}
}
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
					RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}
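/*
 * Note: ipsec_dequeue() polls the lcore's queue pairs round-robin,
 * resuming from last_qp on each call, so no single queue pair can starve
 * the others when more than max_pkts ops are in flight.
 */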
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}
uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
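/*
 * The lookaside path is split in two stages: ipsec_inbound() and
 * ipsec_outbound() enqueue crypto ops and return only packets that are
 * already processed (inline paths), while the *_cqp_dequeue() helpers
 * later collect completed lookaside ops. A minimal per-lcore sketch
 * (illustrative, after this application's main loop):
 *
 *	nb = ipsec_inbound(ctx, pkts, nb_rx, MAX_PKT_BURST);
 *	... route nb packets ...
 *	nb = ipsec_inbound_cqp_dequeue(ctx, pkts, MAX_PKT_BURST);
 *	... route nb packets ...
 */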
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}
uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}