#include <rte_flow.h>
#include <rte_hexdump.h>
#include <rte_vxlan.h>
+#include <rte_gre.h>
+#include <rte_mpls.h>
+#include <rte_gtp.h>
+#include <rte_geneve.h>
#include "testpmd.h"
PORT_ID,
GROUP_ID,
PRIORITY_LEVEL,
+ SHARED_ACTION_ID,
/* Top-level command. */
SET,
/* Top-level command. */
FLOW,
/* Sub-level commands. */
+ SHARED_ACTION,
VALIDATE,
CREATE,
DESTROY,
LIST,
AGED,
ISOLATE,
+ TUNNEL,
+
+ /* Tunnel arguments. */
+ TUNNEL_CREATE,
+ TUNNEL_CREATE_TYPE,
+ TUNNEL_LIST,
+ TUNNEL_DESTROY,
+ TUNNEL_DESTROY_ID,
/* Destroy arguments. */
DESTROY_RULE,
INGRESS,
EGRESS,
TRANSFER,
+ TUNNEL_SET,
+ TUNNEL_MATCH,
+
+ /* Shared action arguments */
+ SHARED_ACTION_CREATE,
+ SHARED_ACTION_UPDATE,
+ SHARED_ACTION_DESTROY,
+ SHARED_ACTION_QUERY,
+
+ /* Shared action create arguments */
+ SHARED_ACTION_CREATE_ID,
+ SHARED_ACTION_INGRESS,
+ SHARED_ACTION_EGRESS,
+ SHARED_ACTION_TRANSFER,
+ SHARED_ACTION_SPEC,
+
+ /* Shared action destroy arguments */
+ SHARED_ACTION_DESTROY_ID,
/* Validate/create pattern. */
PATTERN,
ITEM_ETH_DST,
ITEM_ETH_SRC,
ITEM_ETH_TYPE,
+ ITEM_ETH_HAS_VLAN,
ITEM_VLAN,
ITEM_VLAN_TCI,
ITEM_VLAN_PCP,
ITEM_VLAN_DEI,
ITEM_VLAN_VID,
ITEM_VLAN_INNER_TYPE,
+ ITEM_VLAN_HAS_MORE_VLAN,
ITEM_IPV4,
ITEM_IPV4_TOS,
+ ITEM_IPV4_FRAGMENT_OFFSET,
ITEM_IPV4_TTL,
ITEM_IPV4_PROTO,
ITEM_IPV4_SRC,
ITEM_IPV6_HOP,
ITEM_IPV6_SRC,
ITEM_IPV6_DST,
+ ITEM_IPV6_HAS_FRAG_EXT,
ITEM_ICMP,
ITEM_ICMP_TYPE,
ITEM_ICMP_CODE,
ITEM_ARP_ETH_IPV4_TPA,
ITEM_IPV6_EXT,
ITEM_IPV6_EXT_NEXT_HDR,
+ ITEM_IPV6_FRAG_EXT,
+ ITEM_IPV6_FRAG_EXT_NEXT_HDR,
+ ITEM_IPV6_FRAG_EXT_FRAG_DATA,
ITEM_ICMP6,
ITEM_ICMP6_TYPE,
ITEM_ICMP6_CODE,
ACTION_SAMPLE_RATIO,
ACTION_SAMPLE_INDEX,
ACTION_SAMPLE_INDEX_VALUE,
+ ACTION_SHARED,
+ SHARED_ACTION_ID2PTR,
};
/** Maximum size for pattern in struct rte_flow_item_raw. */
enum index command; /**< Flow command. */
portid_t port; /**< Affected port ID. */
union {
+ struct {
+ uint32_t *action_id;
+ uint32_t action_id_n;
+ } sa_destroy; /**< Shared action destroy arguments. */
+ struct {
+ uint32_t action_id;
+ } sa; /* Shared action query arguments */
struct {
struct rte_flow_attr attr;
+ struct tunnel_ops tunnel_ops;
struct rte_flow_item *pattern;
struct rte_flow_action *actions;
uint32_t pattern_n;
.size = s, \
})
+/* Tokens accepted after "flow shared_action {port} create". */
+static const enum index next_sa_create_attr[] = {
+	SHARED_ACTION_CREATE_ID,
+	SHARED_ACTION_INGRESS,
+	SHARED_ACTION_EGRESS,
+	SHARED_ACTION_TRANSFER,
+	SHARED_ACTION_SPEC,
+	ZERO,
+};
+
+/* Sub-commands of the top-level "shared_action" command. */
+static const enum index next_sa_subcmd[] = {
+	SHARED_ACTION_CREATE,
+	SHARED_ACTION_UPDATE,
+	SHARED_ACTION_DESTROY,
+	SHARED_ACTION_QUERY,
+	ZERO,
+};
+
static const enum index next_vc_attr[] = {
	GROUP,
	PRIORITY,
	INGRESS,
	EGRESS,
	TRANSFER,
+	/* Tunnel offload variants of validate/create rules. */
+	TUNNEL_SET,
+	TUNNEL_MATCH,
	PATTERN,
	ZERO,
};
ZERO,
};
+/* Tokens accepted after "shared_action {port} destroy". */
+static const enum index next_sa_destroy_attr[] = {
+	SHARED_ACTION_DESTROY_ID,
+	END,
+	ZERO,
+};
+
static const enum index item_param[] = {
ITEM_PARAM_IS,
ITEM_PARAM_SPEC,
ITEM_VXLAN_GPE,
ITEM_ARP_ETH_IPV4,
ITEM_IPV6_EXT,
+ ITEM_IPV6_FRAG_EXT,
ITEM_ICMP6,
ITEM_ICMP6_ND_NS,
ITEM_ICMP6_ND_NA,
ITEM_ETH_DST,
ITEM_ETH_SRC,
ITEM_ETH_TYPE,
+ ITEM_ETH_HAS_VLAN,
ITEM_NEXT,
ZERO,
};
ITEM_VLAN_DEI,
ITEM_VLAN_VID,
ITEM_VLAN_INNER_TYPE,
+ ITEM_VLAN_HAS_MORE_VLAN,
ITEM_NEXT,
ZERO,
};
static const enum index item_ipv4[] = {
ITEM_IPV4_TOS,
+ ITEM_IPV4_FRAGMENT_OFFSET,
ITEM_IPV4_TTL,
ITEM_IPV4_PROTO,
ITEM_IPV4_SRC,
ITEM_IPV6_HOP,
ITEM_IPV6_SRC,
ITEM_IPV6_DST,
+ ITEM_IPV6_HAS_FRAG_EXT,
ITEM_NEXT,
ZERO,
};
ZERO,
};
+/* Fields of the IPv6 fragment extension header item. */
+static const enum index item_ipv6_frag_ext[] = {
+	ITEM_IPV6_FRAG_EXT_NEXT_HDR,
+	ITEM_IPV6_FRAG_EXT_FRAG_DATA,
+	ITEM_NEXT,
+	ZERO,
+};
+
static const enum index item_icmp6[] = {
ITEM_ICMP6_TYPE,
ITEM_ICMP6_CODE,
ACTION_SET_IPV6_DSCP,
ACTION_AGE,
ACTION_SAMPLE,
+ ACTION_SHARED,
ZERO,
};
static int parse_isolate(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
+static int parse_tunnel(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
static int parse_int(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
static int parse_port(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
+static int parse_sa(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_sa_destroy(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size);
+static int parse_sa_id2ptr(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len, void *buf,
+ unsigned int size);
static int comp_none(struct context *, const struct token *,
unsigned int, char *, unsigned int);
static int comp_boolean(struct context *, const struct token *,
.call = parse_int,
.comp = comp_none,
},
+ [SHARED_ACTION_ID] = {
+ .name = "{shared_action_id}",
+ .type = "SHARED_ACTION_ID",
+ .help = "shared action id",
+ .call = parse_int,
+ .comp = comp_none,
+ },
/* Top-level command. */
[FLOW] = {
.name = "flow",
.type = "{command} {port_id} [{arg} [...]]",
.help = "manage ingress/egress flow rules",
.next = NEXT(NEXT_ENTRY
- (VALIDATE,
+ (SHARED_ACTION,
+ VALIDATE,
CREATE,
DESTROY,
FLUSH,
LIST,
AGED,
QUERY,
- ISOLATE)),
+ ISOLATE,
+ TUNNEL)),
.call = parse_init,
},
+ /* Top-level command. */
+ [SHARED_ACTION] = {
+ .name = "shared_action",
+ .type = "{command} {port_id} [{arg} [...]]",
+ .help = "manage shared actions",
+ .next = NEXT(next_sa_subcmd, NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_sa,
+ },
/* Sub-level commands. */
+ [SHARED_ACTION_CREATE] = {
+ .name = "create",
+ .help = "create shared action",
+ .next = NEXT(next_sa_create_attr),
+ .call = parse_sa,
+ },
+ [SHARED_ACTION_UPDATE] = {
+ .name = "update",
+ .help = "update shared action",
+ .next = NEXT(NEXT_ENTRY(SHARED_ACTION_SPEC),
+ NEXT_ENTRY(SHARED_ACTION_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
+ .call = parse_sa,
+ },
+ [SHARED_ACTION_DESTROY] = {
+ .name = "destroy",
+ .help = "destroy shared action",
+ .next = NEXT(NEXT_ENTRY(SHARED_ACTION_DESTROY_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_sa_destroy,
+ },
+ [SHARED_ACTION_QUERY] = {
+ .name = "query",
+ .help = "query shared action",
+ .next = NEXT(NEXT_ENTRY(END), NEXT_ENTRY(SHARED_ACTION_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.sa.action_id)),
+ .call = parse_sa,
+ },
[VALIDATE] = {
.name = "validate",
.help = "check whether a flow rule can be created",
ARGS_ENTRY(struct buffer, port)),
.call = parse_isolate,
},
+ [TUNNEL] = {
+ .name = "tunnel",
+ .help = "new tunnel API",
+ .next = NEXT(NEXT_ENTRY
+ (TUNNEL_CREATE, TUNNEL_LIST, TUNNEL_DESTROY)),
+ .call = parse_tunnel,
+ },
+ /* Tunnel arguments. */
+ [TUNNEL_CREATE] = {
+ .name = "create",
+ .help = "create new tunnel object",
+ .next = NEXT(NEXT_ENTRY(TUNNEL_CREATE_TYPE),
+ NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_tunnel,
+ },
+ [TUNNEL_CREATE_TYPE] = {
+ .name = "type",
+ .help = "create new tunnel",
+ .next = NEXT(NEXT_ENTRY(FILE_PATH)),
+ .args = ARGS(ARGS_ENTRY(struct tunnel_ops, type)),
+ .call = parse_tunnel,
+ },
+	[TUNNEL_DESTROY] = {
+		.name = "destroy",
+		/* Fixed typo: "tunel" -> "tunnel". */
+		.help = "destroy tunnel",
+		.next = NEXT(NEXT_ENTRY(TUNNEL_DESTROY_ID),
+			     NEXT_ENTRY(PORT_ID)),
+		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
+		.call = parse_tunnel,
+	},
+	[TUNNEL_DESTROY_ID] = {
+		.name = "id",
+		/* Fixed typo: "testroy" -> "destroy". */
+		.help = "tunnel identifier to destroy",
+		.next = NEXT(NEXT_ENTRY(UNSIGNED)),
+		.args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
+		.call = parse_tunnel,
+	},
+ [TUNNEL_LIST] = {
+ .name = "list",
+ .help = "list existing tunnels",
+ .next = NEXT(NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_tunnel,
+ },
/* Destroy arguments. */
[DESTROY_RULE] = {
.name = "rule",
.next = NEXT(next_vc_attr),
.call = parse_vc,
},
+ [TUNNEL_SET] = {
+ .name = "tunnel_set",
+ .help = "tunnel steer rule",
+ .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
+ .call = parse_vc,
+ },
+ [TUNNEL_MATCH] = {
+ .name = "tunnel_match",
+ .help = "tunnel match rule",
+ .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
+ .call = parse_vc,
+ },
/* Validate/create pattern. */
[PATTERN] = {
.name = "pattern",
.next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
},
+ [ITEM_ETH_HAS_VLAN] = {
+ .name = "has_vlan",
+ .help = "packet header contains VLAN",
+ .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_eth,
+ has_vlan, 1)),
+ },
[ITEM_VLAN] = {
.name = "vlan",
.help = "match 802.1Q/ad VLAN tag",
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
inner_type)),
},
+ [ITEM_VLAN_HAS_MORE_VLAN] = {
+ .name = "has_more_vlan",
+ .help = "packet header contains another VLAN",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_vlan,
+ has_more_vlan, 1)),
+ },
[ITEM_IPV4] = {
.name = "ipv4",
.help = "match IPv4 header",
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
hdr.type_of_service)),
},
+ [ITEM_IPV4_FRAGMENT_OFFSET] = {
+ .name = "fragment_offset",
+ .help = "fragmentation flags and fragment offset",
+ .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
+ hdr.fragment_offset)),
+ },
[ITEM_IPV4_TTL] = {
.name = "ttl",
.help = "time to live",
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
hdr.dst_addr)),
},
+ [ITEM_IPV6_HAS_FRAG_EXT] = {
+ .name = "has_frag_ext",
+ .help = "fragment packet attribute",
+ .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_ipv6,
+ has_frag_ext, 1)),
+ },
[ITEM_ICMP] = {
.name = "icmp",
.help = "match ICMP header",
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
next_hdr)),
},
+ [ITEM_IPV6_FRAG_EXT] = {
+ .name = "ipv6_frag_ext",
+ .help = "match presence of IPv6 fragment extension header",
+ .priv = PRIV_ITEM(IPV6_FRAG_EXT,
+ sizeof(struct rte_flow_item_ipv6_frag_ext)),
+ .next = NEXT(item_ipv6_frag_ext),
+ .call = parse_vc,
+ },
+ [ITEM_IPV6_FRAG_EXT_NEXT_HDR] = {
+ .name = "next_hdr",
+ .help = "next header",
+ .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
+ item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_ipv6_frag_ext,
+ hdr.next_header)),
+ },
+ [ITEM_IPV6_FRAG_EXT_FRAG_DATA] = {
+ .name = "frag_data",
+ .help = "Fragment flags and offset",
+ .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_frag_ext,
+ hdr.frag_data)),
+ },
[ITEM_ICMP6] = {
.name = "icmp6",
.help = "match any ICMPv6 header",
[ITEM_ECPRI_MSG_IQ_DATA_PCID] = {
.name = "pc_id",
.help = "Physical Channel ID",
- .next = NEXT(item_ecpri, NEXT_ENTRY(UNSIGNED), item_param),
+ .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_IQ_DATA_PCID,
+ ITEM_ECPRI_COMMON, ITEM_NEXT),
+ NEXT_ENTRY(UNSIGNED), item_param),
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
hdr.type0.pc_id)),
},
[ITEM_ECPRI_MSG_RTC_CTRL_RTCID] = {
.name = "rtc_id",
.help = "Real-Time Control Data ID",
- .next = NEXT(item_ecpri, NEXT_ENTRY(UNSIGNED), item_param),
+ .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
+ ITEM_ECPRI_COMMON, ITEM_NEXT),
+ NEXT_ENTRY(UNSIGNED), item_param),
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
hdr.type2.rtc_id)),
},
[ITEM_ECPRI_MSG_DLY_MSR_MSRID] = {
.name = "msr_id",
.help = "Measurement ID",
- .next = NEXT(item_ecpri, NEXT_ENTRY(UNSIGNED), item_param),
+ .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_DLY_MSR_MSRID,
+ ITEM_ECPRI_COMMON, ITEM_NEXT),
+ NEXT_ENTRY(UNSIGNED), item_param),
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
hdr.type5.msr_id)),
},
.call = parse_vc_action_sample_index,
.comp = comp_set_sample_index,
},
+ /* Shared action destroy arguments. */
+ [SHARED_ACTION_DESTROY_ID] = {
+ .name = "action_id",
+ .help = "specify a shared action id to destroy",
+ .next = NEXT(next_sa_destroy_attr,
+ NEXT_ENTRY(SHARED_ACTION_ID)),
+ .args = ARGS(ARGS_ENTRY_PTR(struct buffer,
+ args.sa_destroy.action_id)),
+ .call = parse_sa_destroy,
+ },
+ /* Shared action create arguments. */
+ [SHARED_ACTION_CREATE_ID] = {
+ .name = "action_id",
+ .help = "specify a shared action id to create",
+ .next = NEXT(next_sa_create_attr,
+ NEXT_ENTRY(SHARED_ACTION_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
+ },
+ [ACTION_SHARED] = {
+ .name = "shared",
+ .help = "apply shared action by id",
+ .priv = PRIV_ACTION(SHARED, 0),
+ .next = NEXT(NEXT_ENTRY(SHARED_ACTION_ID2PTR)),
+ .args = ARGS(ARGS_ENTRY_ARB(0, sizeof(uint32_t))),
+ .call = parse_vc,
+ },
+ [SHARED_ACTION_ID2PTR] = {
+ .name = "{action_id}",
+ .type = "SHARED_ACTION_ID",
+ .help = "shared action id",
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_sa_id2ptr,
+ .comp = comp_none,
+ },
+ [SHARED_ACTION_INGRESS] = {
+ .name = "ingress",
+ .help = "affect rule to ingress",
+ .next = NEXT(next_sa_create_attr),
+ .call = parse_sa,
+ },
+ [SHARED_ACTION_EGRESS] = {
+ .name = "egress",
+ .help = "affect rule to egress",
+ .next = NEXT(next_sa_create_attr),
+ .call = parse_sa,
+ },
+ [SHARED_ACTION_TRANSFER] = {
+ .name = "transfer",
+ .help = "affect rule to transfer",
+ .next = NEXT(next_sa_create_attr),
+ .call = parse_sa,
+ },
+ [SHARED_ACTION_SPEC] = {
+ .name = "action",
+ .help = "specify action to share",
+ .next = NEXT(next_action),
+ },
};
/** Remove and return last entry from argument stack. */
return len;
}
+/** Parse tokens for shared action commands. */
+static int
+parse_sa(struct context *ctx, const struct token *token,
+	 const char *str, unsigned int len,
+	 void *buf, unsigned int size)
+{
+	struct buffer *out = buf;
+
+	/* Token name must match. */
+	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+		return -1;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return len;
+	if (!out->command) {
+		/* First invocation: only the top-level SHARED_ACTION token
+		 * may initialize the output buffer.
+		 */
+		if (ctx->curr != SHARED_ACTION)
+			return -1;
+		if (sizeof(*out) > size)
+			return -1;
+		out->command = ctx->curr;
+		ctx->objdata = 0;
+		ctx->object = out;
+		ctx->objmask = NULL;
+		/* Variable-length data is stored past the end of *out. */
+		out->args.vc.data = (uint8_t *)out + size;
+		return len;
+	}
+	switch (ctx->curr) {
+	case SHARED_ACTION_CREATE:
+	case SHARED_ACTION_UPDATE:
+		/* Action list is stored right after the buffer,
+		 * double-aligned.
+		 */
+		out->args.vc.actions =
+			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
+					       sizeof(double));
+		/* attr.group doubles as the shared action id (see the
+		 * SHARED_ACTION_CREATE_ID token args); UINT32_MAX means
+		 * "id not specified".
+		 */
+		out->args.vc.attr.group = UINT32_MAX;
+		/* fallthrough */
+	case SHARED_ACTION_QUERY:
+		out->command = ctx->curr;
+		ctx->objdata = 0;
+		ctx->object = out;
+		ctx->objmask = NULL;
+		return len;
+	case SHARED_ACTION_EGRESS:
+		out->args.vc.attr.egress = 1;
+		return len;
+	case SHARED_ACTION_INGRESS:
+		out->args.vc.attr.ingress = 1;
+		return len;
+	case SHARED_ACTION_TRANSFER:
+		out->args.vc.attr.transfer = 1;
+		return len;
+	default:
+		return -1;
+	}
+}
+
+
+/** Parse tokens for shared action destroy command. */
+static int
+parse_sa_destroy(struct context *ctx, const struct token *token,
+		 const char *str, unsigned int len,
+		 void *buf, unsigned int size)
+{
+	struct buffer *out = buf;
+	uint32_t *action_id;
+
+	/* Token name must match. */
+	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+		return -1;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return len;
+	if (!out->command || out->command == SHARED_ACTION) {
+		/* First invocation: set up the id array right after the
+		 * buffer, double-aligned.
+		 */
+		if (ctx->curr != SHARED_ACTION_DESTROY)
+			return -1;
+		if (sizeof(*out) > size)
+			return -1;
+		out->command = ctx->curr;
+		ctx->objdata = 0;
+		ctx->object = out;
+		ctx->objmask = NULL;
+		out->args.sa_destroy.action_id =
+			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
+					       sizeof(double));
+		return len;
+	}
+	/* Subsequent "action_id" tokens each append one id slot. */
+	action_id = out->args.sa_destroy.action_id
+		    + out->args.sa_destroy.action_id_n++;
+	/* NOTE(review): a slot starting exactly at out + size passes this
+	 * check yet is written past the buffer by the integer parser —
+	 * confirm the buffer size accounts for this (pattern matches the
+	 * existing destroy-rule parser).
+	 */
+	if ((uint8_t *)action_id > (uint8_t *)out + size)
+		return -1;
+	ctx->objdata = 0;
+	ctx->object = action_id;
+	ctx->objmask = NULL;
+	return len;
+}
+
/** Parse tokens for validate/create commands. */
static int
parse_vc(struct context *ctx, const struct token *token,
return len;
}
ctx->objdata = 0;
- ctx->object = &out->args.vc.attr;
+ switch (ctx->curr) {
+ default:
+ ctx->object = &out->args.vc.attr;
+ break;
+ case TUNNEL_SET:
+ case TUNNEL_MATCH:
+ ctx->object = &out->args.vc.tunnel_ops;
+ break;
+ }
ctx->objmask = NULL;
switch (ctx->curr) {
case GROUP:
case PRIORITY:
return len;
+ case TUNNEL_SET:
+ out->args.vc.tunnel_ops.enabled = 1;
+ out->args.vc.tunnel_ops.actions = 1;
+ return len;
+ case TUNNEL_MATCH:
+ out->args.vc.tunnel_ops.enabled = 1;
+ out->args.vc.tunnel_ops.items = 1;
+ return len;
case INGRESS:
out->args.vc.attr.ingress = 1;
return len;
.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
.level = 0,
.types = rss_hf,
- .key_len = sizeof(action_rss_data->key),
+ .key_len = 0,
.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
- .key = action_rss_data->key,
+ .key = NULL,
.queue = action_rss_data->queue,
},
- .key = "testpmd's default RSS hash key, "
- "override it for better balancing",
.queue = { 0 },
};
for (i = 0; i < action_rss_data->conf.queue_num; ++i)
action_rss_data->queue[i] = i;
- if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
- ctx->port != (portid_t)RTE_PORT_ALL) {
- struct rte_eth_dev_info info;
- int ret2;
-
- ret2 = rte_eth_dev_info_get(ctx->port, &info);
- if (ret2 != 0)
- return ret2;
-
- action_rss_data->conf.key_len =
- RTE_MIN(sizeof(action_rss_data->key),
- info.hash_key_size);
- }
action->conf = &action_rss_data->conf;
return ret;
}
return len;
}
+/** Parse tokens for the "flow tunnel" command and its sub-commands. */
+static int
+parse_tunnel(struct context *ctx, const struct token *token,
+	     const char *str, unsigned int len,
+	     void *buf, unsigned int size)
+{
+	struct buffer *out = buf;
+
+	/* Token name must match. */
+	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+		return -1;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return len;
+	if (!out->command) {
+		/* First invocation: only the TUNNEL token may initialize. */
+		if (ctx->curr != TUNNEL)
+			return -1;
+		if (sizeof(*out) > size)
+			return -1;
+		out->command = ctx->curr;
+		ctx->objdata = 0;
+		ctx->object = out;
+		ctx->objmask = NULL;
+	} else {
+		switch (ctx->curr) {
+		default:
+			break;
+		case TUNNEL_CREATE:
+		case TUNNEL_DESTROY:
+		case TUNNEL_LIST:
+			/* Record the sub-command for cmd_flow_parsed(). */
+			out->command = ctx->curr;
+			break;
+		case TUNNEL_CREATE_TYPE:
+		case TUNNEL_DESTROY_ID:
+			/* Argument tokens write into tunnel_ops. */
+			ctx->object = &out->args.vc.tunnel_ops;
+			break;
+		}
+	}
+
+	return len;
+}
+
/**
* Parse signed/unsigned integers 8 to 64-bit long.
*
return ret;
}
+/** Parse a shared action id and resolve it to the action's pointer. */
+static int
+parse_sa_id2ptr(struct context *ctx, const struct token *token,
+		const char *str, unsigned int len,
+		void *buf, unsigned int size)
+{
+	struct rte_flow_action *action = ctx->object;
+	uint32_t id;
+	int ret;
+
+	(void)buf;
+	(void)size;
+	/* Temporarily point the parser context at the local id. */
+	ctx->objdata = 0;
+	ctx->object = &id;
+	ctx->objmask = NULL;
+	ret = parse_int(ctx, token, str, len, ctx->object, sizeof(id));
+	/* Restore the action object regardless of the parse outcome. */
+	ctx->object = action;
+	if (ret != (int)len)
+		return ret;
+	/* set shared action */
+	if (action) {
+		/* Fail the token when the id resolves to no action. */
+		action->conf = port_shared_action_get_by_id(ctx->port, id);
+		ret = (action->conf) ? ret : -1;
+	}
+	return ret;
+}
+
/** Parse set command, initialize output buffer for subsequent tokens. */
static int
parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
cmd_flow_parsed(const struct buffer *in)
{
switch (in->command) {
+ case SHARED_ACTION_CREATE:
+ port_shared_action_create(
+ in->port, in->args.vc.attr.group,
+ &((const struct rte_flow_shared_action_conf) {
+ .ingress = in->args.vc.attr.ingress,
+ .egress = in->args.vc.attr.egress,
+ .transfer = in->args.vc.attr.transfer,
+ }),
+ in->args.vc.actions);
+ break;
+ case SHARED_ACTION_DESTROY:
+ port_shared_action_destroy(in->port,
+ in->args.sa_destroy.action_id_n,
+ in->args.sa_destroy.action_id);
+ break;
+ case SHARED_ACTION_UPDATE:
+ port_shared_action_update(in->port, in->args.vc.attr.group,
+ in->args.vc.actions);
+ break;
+ case SHARED_ACTION_QUERY:
+ port_shared_action_query(in->port, in->args.sa.action_id);
+ break;
case VALIDATE:
port_flow_validate(in->port, &in->args.vc.attr,
- in->args.vc.pattern, in->args.vc.actions);
+ in->args.vc.pattern, in->args.vc.actions,
+ &in->args.vc.tunnel_ops);
break;
case CREATE:
port_flow_create(in->port, &in->args.vc.attr,
- in->args.vc.pattern, in->args.vc.actions);
+ in->args.vc.pattern, in->args.vc.actions,
+ &in->args.vc.tunnel_ops);
break;
case DESTROY:
port_flow_destroy(in->port, in->args.destroy.rule_n,
case AGED:
port_flow_aged(in->port, in->args.aged.destroy);
break;
+ case TUNNEL_CREATE:
+ port_flow_tunnel_create(in->port, &in->args.vc.tunnel_ops);
+ break;
+ case TUNNEL_DESTROY:
+ port_flow_tunnel_destroy(in->port, in->args.vc.tunnel_ops.id);
+ break;
+ case TUNNEL_LIST:
+ port_flow_tunnel_list(in->port);
+ break;
default:
break;
}
static void
update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
{
- struct rte_flow_item_ipv4 *ipv4;
- struct rte_flow_item_eth *eth;
- struct rte_flow_item_ipv6 *ipv6;
- struct rte_flow_item_vxlan *vxlan;
- struct rte_flow_item_vxlan_gpe *gpe;
+ struct rte_ipv4_hdr *ipv4;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv6_hdr *ipv6;
+ struct rte_vxlan_hdr *vxlan;
+ struct rte_vxlan_gpe_hdr *gpe;
struct rte_flow_item_nvgre *nvgre;
uint32_t ipv6_vtc_flow;
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth = (struct rte_flow_item_eth *)buf;
+ eth = (struct rte_ether_hdr *)buf;
if (next_proto)
- eth->type = rte_cpu_to_be_16(next_proto);
+ eth->ether_type = rte_cpu_to_be_16(next_proto);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- ipv4 = (struct rte_flow_item_ipv4 *)buf;
- ipv4->hdr.version_ihl = 0x45;
- if (next_proto && ipv4->hdr.next_proto_id == 0)
- ipv4->hdr.next_proto_id = (uint8_t)next_proto;
+ ipv4 = (struct rte_ipv4_hdr *)buf;
+ ipv4->version_ihl = 0x45;
+ if (next_proto && ipv4->next_proto_id == 0)
+ ipv4->next_proto_id = (uint8_t)next_proto;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- ipv6 = (struct rte_flow_item_ipv6 *)buf;
- if (next_proto && ipv6->hdr.proto == 0)
- ipv6->hdr.proto = (uint8_t)next_proto;
- ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
+ ipv6 = (struct rte_ipv6_hdr *)buf;
+ if (next_proto && ipv6->proto == 0)
+ ipv6->proto = (uint8_t)next_proto;
+ ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->vtc_flow);
ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
- ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
+ ipv6->vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan = (struct rte_flow_item_vxlan *)buf;
- vxlan->flags = 0x08;
+ vxlan = (struct rte_vxlan_hdr *)buf;
+ vxlan->vx_flags = 0x08;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- gpe = (struct rte_flow_item_vxlan_gpe *)buf;
- gpe->flags = 0x0C;
+ gpe = (struct rte_vxlan_gpe_hdr *)buf;
+ gpe->vx_flags = 0x0C;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
nvgre = (struct rte_flow_item_nvgre *)buf;
uint16_t upper_layer = 0;
uint16_t proto = 0;
uint16_t idx = in->port; /* We borrow port field as index */
+ int gtp_psc = -1; /* GTP PSC option index. */
if (in->command == SET_SAMPLE_ACTIONS)
return cmd_set_raw_parsed_sample(in);
/* process hdr from upper layer to low layer (L3/L4 -> L2). */
data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
for (i = n - 1 ; i >= 0; --i) {
+ const struct rte_flow_item_gtp *gtp;
+
item = in->args.vc.pattern + i;
if (item->spec == NULL)
item->spec = flow_item_default_mask(item);
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- size = sizeof(struct rte_flow_item_eth);
+ size = sizeof(struct rte_ether_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- size = sizeof(struct rte_flow_item_vlan);
+ size = sizeof(struct rte_vlan_hdr);
proto = RTE_ETHER_TYPE_VLAN;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- size = sizeof(struct rte_flow_item_ipv4);
+ size = sizeof(struct rte_ipv4_hdr);
proto = RTE_ETHER_TYPE_IPV4;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- size = sizeof(struct rte_flow_item_ipv6);
+ size = sizeof(struct rte_ipv6_hdr);
proto = RTE_ETHER_TYPE_IPV6;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- size = sizeof(struct rte_flow_item_udp);
+ size = sizeof(struct rte_udp_hdr);
proto = 0x11;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- size = sizeof(struct rte_flow_item_tcp);
+ size = sizeof(struct rte_tcp_hdr);
proto = 0x06;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- size = sizeof(struct rte_flow_item_vxlan);
+ size = sizeof(struct rte_vxlan_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- size = sizeof(struct rte_flow_item_vxlan_gpe);
+ size = sizeof(struct rte_vxlan_gpe_hdr);
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- size = sizeof(struct rte_flow_item_gre);
+ size = sizeof(struct rte_gre_hdr);
proto = 0x2F;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
proto = 0x0;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
- size = sizeof(struct rte_flow_item_mpls);
+ size = sizeof(struct rte_mpls_hdr);
proto = 0x0;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
proto = 0x2F;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- size = sizeof(struct rte_flow_item_geneve);
+ size = sizeof(struct rte_geneve_hdr);
break;
case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
- size = sizeof(struct rte_flow_item_l2tpv3oip);
+ size = sizeof(rte_be32_t);
proto = 0x73;
break;
case RTE_FLOW_ITEM_TYPE_ESP:
- size = sizeof(struct rte_flow_item_esp);
+ size = sizeof(struct rte_esp_hdr);
proto = 0x32;
break;
case RTE_FLOW_ITEM_TYPE_AH:
proto = 0x33;
break;
case RTE_FLOW_ITEM_TYPE_GTP:
- size = sizeof(struct rte_flow_item_gtp);
+ if (gtp_psc < 0) {
+ size = sizeof(struct rte_gtp_hdr);
+ break;
+ }
+ if (gtp_psc != i + 1) {
+ printf("Error - GTP PSC does not follow GTP\n");
+ goto error;
+ }
+ gtp = item->spec;
+ if ((gtp->v_pt_rsv_flags & 0x07) != 0x04) {
+ /* Only E flag should be set. */
+ printf("Error - GTP unsupported flags\n");
+ goto error;
+ } else {
+ struct rte_gtp_hdr_ext_word ext_word = {
+ .next_ext = 0x85
+ };
+
+ /* We have to add GTP header extra word. */
+ *total_size += sizeof(ext_word);
+ rte_memcpy(data_tail - (*total_size),
+ &ext_word, sizeof(ext_word));
+ }
+ size = sizeof(struct rte_gtp_hdr);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ if (gtp_psc >= 0) {
+ printf("Error - Multiple GTP PSC items\n");
+ goto error;
+ } else {
+ const struct rte_flow_item_gtp_psc
+ *opt = item->spec;
+ struct {
+ uint8_t len;
+ uint8_t pdu_type;
+ uint8_t qfi;
+ uint8_t next;
+ } psc;
+
+ if (opt->pdu_type & 0x0F) {
+ /* Support the minimal option only. */
+ printf("Error - GTP PSC option with "
+ "extra fields not supported\n");
+ goto error;
+ }
+ psc.len = sizeof(psc);
+ psc.pdu_type = opt->pdu_type;
+ psc.qfi = opt->qfi;
+ psc.next = 0;
+ *total_size += sizeof(psc);
+ rte_memcpy(data_tail - (*total_size),
+ &psc, sizeof(psc));
+ gtp_psc = i;
+ size = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_PFCP:
size = sizeof(struct rte_flow_item_pfcp);
break;
default:
printf("Error - Not supported item\n");
- *total_size = 0;
- memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
- return;
+ goto error;
}
*total_size += size;
rte_memcpy(data_tail - (*total_size), item->spec, size);
printf("total data size is %zu\n", (*total_size));
RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
memmove(data, (data_tail - (*total_size)), *total_size);
+ return;
+
+error:
+ *total_size = 0;
+ memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
}
/** Populate help strings for current token (cmdline API). */
cmd_what, "raw_encap#raw_decap");
cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
- cmd_index, UINT16);
+ cmd_index, RTE_UINT16);
cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
cmd_all, "all");