+ sec_cap = rte_security_capabilities_get(sec_ctx);
+
+ /* Walk the capability list (terminated by ACTION_TYPE_NONE) until an
+  * entry matches this session's action type, the IPsec protocol,
+  * tunnel mode and the SA's direction.
+  */
+ while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (sec_cap->action == ips->type &&
+ sec_cap->protocol ==
+ RTE_SECURITY_PROTOCOL_IPSEC &&
+ sec_cap->ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sec_cap->ipsec.direction == sa->direction)
+ break;
+ sec_cap++;
+ }
+
+ if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+ RTE_LOG(ERR, IPSEC,
+ "No suitable security capability found\n");
+ return -1;
+ }
+
+ ips->security.ol_flags = sec_cap->ol_flags;
+ ips->security.ctx = sec_ctx;
+
+ /* Build the flow pattern: ETH / IPv4|IPv6(src,dst) / ESP(spi) / END. */
+ sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+ if (IS_IP6(sa->flags)) {
+ sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
+ sa->pattern[1].spec = &sa->ipv6_spec;
+
+ memcpy(sa->ipv6_spec.hdr.dst_addr,
+ sa->dst.ip.ip6.ip6_b, 16);
+ memcpy(sa->ipv6_spec.hdr.src_addr,
+ sa->src.ip.ip6.ip6_b, 16);
+ } else if (IS_IP4(sa->flags)) {
+ sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ sa->pattern[1].spec = &sa->ipv4_spec;
+
+ sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
+ sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
+ }
+
+ sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+ sa->pattern[2].spec = &sa->esp_spec;
+ sa->pattern[2].mask = &rte_flow_item_esp_mask;
+ sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
+
+ sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ sa->action[0].conf = ips->security.ses;
+
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ sa->attr.egress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
+ sa->attr.ingress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
+ if (sa->attr.ingress) {
+ uint8_t rss_key[40];
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = rss_key,
+ .rss_key_len = 40,
+ };
+ struct rte_eth_dev_info dev_info;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ struct rte_flow_action_rss action_rss;
+ unsigned int i;
+ unsigned int j;
+
+ ret = rte_eth_dev_info_get(sa->portid, &dev_info);
+ if (ret != 0) {
+ RTE_LOG(ERR, IPSEC,
+ "Error during getting device (port %u) info: %s\n",
+ sa->portid, strerror(-ret));
+ return ret;
+ }
+
+ /* Ingress: validate the richest action the device accepts,
+  * falling back RSS -> single Queue -> no extra action (End).
+  */
+ sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
+ /* Try RSS. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+ sa->action[1].conf = &action_rss;
+ ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
+ &rss_conf);
+ if (ret != 0) {
+ RTE_LOG(ERR, IPSEC,
+ "rte_eth_dev_rss_hash_conf_get:ret=%d\n",
+ ret);
+ return -1;
+ }
+ for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
+ queue[j++] = i;
+
+ action_rss = (struct rte_flow_action_rss){
+ .types = rss_conf.rss_hf,
+ .key_len = rss_conf.rss_key_len,
+ .queue_num = j,
+ .key = rss_key,
+ .queue = queue,
+ };
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ if (!ret)
+ goto flow_create;
+ /* Try Queue. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ sa->action[1].conf =
+ &(struct rte_flow_action_queue){
+ .index = 0,
+ };
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ /* Queue action accepted: create the flow with it instead of
+  * silently discarding the result and degrading to End.
+  */
+ if (!ret)
+ goto flow_create;
+ /* Try End. */
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ sa->action[1].conf = NULL;
+ ret = rte_flow_validate(sa->portid, &sa->attr,
+ sa->pattern, sa->action,
+ &err);
+ if (ret)
+ goto flow_create_failure;
+ } else if (sa->attr.egress &&
+ (ips->security.ol_flags &
+ RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
+ sa->action[1].type =
+ RTE_FLOW_ACTION_TYPE_PASSTHRU;
+ sa->action[2].type =
+ RTE_FLOW_ACTION_TYPE_END;