* END
 * other members in mask and spec should be set to 0x00.
* item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
*/
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+#ifdef RTE_LIB_SECURITY
+	/**
+	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		const void *conf = act->conf;
+		/* check if the next not void action is END */
+		act = next_no_void_action(actions, act);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			return -rte_errno;
+		}
+
+		/* get the IP pattern */
+		item = next_no_void_pattern(pattern, NULL);
+		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (item->last ||
+					item->type == RTE_FLOW_ITEM_TYPE_END) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "IP pattern missing.");
+				return -rte_errno;
+			}
+			item = next_no_void_pattern(pattern, item);
+		}
+
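+		/* mark the filter as an ESP flow so callers can skip ntuple programming */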
+		filter->proto = IPPROTO_ESP;
+		return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+				item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	}
+#endif
+
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);
	if (ret)
		return ret;
+#ifdef RTE_LIB_SECURITY
+	/* ESP flow is not really a flow */
+	if (filter->proto == IPPROTO_ESP)
+		return 0;
+#endif
+
	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
+#ifdef RTE_LIB_SECURITY
+	/* ESP flow is not really a flow */
+	if (ntuple_filter.proto == IPPROTO_ESP)
+		return flow;
+#endif
+
	if (!ret) {
		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
	return 0;
}
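+/*
+ * Extract the IP addresses from the flow item spec and program the
+ * ingress SA for authenticated-decryption sessions.
+ */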
+int
+txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+		const void *ip_spec,
+		uint8_t is_ipv6)
+{
+	struct txgbe_crypto_session *ic_session =
+		get_sec_session_private_data(sess);
+
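+	/* only ingress (authenticated decryption) sessions need an SA entry */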
+	if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		if (is_ipv6) {
+			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+			ic_session->src_ip.type = IPv6;
+			ic_session->dst_ip.type = IPv6;
+			rte_memcpy(ic_session->src_ip.ipv6,
+					ipv6->hdr.src_addr, 16);
+			rte_memcpy(ic_session->dst_ip.ipv6,
+					ipv6->hdr.dst_addr, 16);
+		} else {
+			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+			ic_session->src_ip.type = IPv4;
+			ic_session->dst_ip.type = IPv4;
+			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+		}
+		return txgbe_crypto_add_sa(ic_session);
+	}
+
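+	/* nothing to program for egress (encryption) sessions */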
+	return 0;
+}
+
static struct rte_security_ops txgbe_security_ops = {
	.session_create = txgbe_crypto_create_session,
	.session_get_size = txgbe_crypto_session_get_size,