ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
ITEM_ICMP6_ND_OPT_TLA_ETH,
ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
+ ITEM_META,
+ ITEM_META_DATA,
/* Validate/create actions. */
ACTIONS,
ACTION_VXLAN_DECAP,
ACTION_NVGRE_ENCAP,
ACTION_NVGRE_DECAP,
+ ACTION_L2_ENCAP,
+ ACTION_L2_DECAP,
+ ACTION_MPLSOGRE_ENCAP,
+ ACTION_MPLSOGRE_DECAP,
+ ACTION_MPLSOUDP_ENCAP,
+ ACTION_MPLSOUDP_DECAP,
ACTION_SET_IPV4_SRC,
ACTION_SET_IPV4_SRC_IPV4_SRC,
ACTION_SET_IPV4_DST,
struct rte_flow_item_nvgre item_nvgre;
};
+/** Maximum data size in struct rte_flow_action_raw_encap. */
+#define ACTION_RAW_ENCAP_MAX_DATA 128
+
+/** Storage for struct rte_flow_action_raw_encap including external data. */
+struct action_raw_encap_data {
+ /* Public action configuration; conf.data points at the buffer below. */
+ struct rte_flow_action_raw_encap conf;
+ /* Raw header bytes to prepend; conf.size covers the used prefix. */
+ uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
+ /* Preserve-mask storage; only zero-filled by the parsers in this file. */
+ uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
+};
+
+/** Storage for struct rte_flow_action_raw_decap including external data. */
+struct action_raw_decap_data {
+ /* Public action configuration; conf.data points at the buffer below. */
+ struct rte_flow_action_raw_decap conf;
+ /* Header bytes assembled by the decap parsers; conf.size is the used length. */
+ uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
+};
+
/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
ITEM_ICMP6_ND_OPT,
ITEM_ICMP6_ND_OPT_SLA_ETH,
ITEM_ICMP6_ND_OPT_TLA_ETH,
+ ITEM_META,
ZERO,
};
ZERO,
};
+/** Tokens that may follow the "meta" pattern item (ZERO-terminated). */
+static const enum index item_meta[] = {
+ ITEM_META_DATA,
+ ITEM_NEXT,
+ ZERO,
+};
+
static const enum index next_action[] = {
ACTION_END,
ACTION_VOID,
ACTION_VXLAN_DECAP,
ACTION_NVGRE_ENCAP,
ACTION_NVGRE_DECAP,
+ ACTION_L2_ENCAP,
+ ACTION_L2_DECAP,
+ ACTION_MPLSOGRE_ENCAP,
+ ACTION_MPLSOGRE_DECAP,
+ ACTION_MPLSOUDP_ENCAP,
+ ACTION_MPLSOUDP_DECAP,
ACTION_SET_IPV4_SRC,
ACTION_SET_IPV4_DST,
ACTION_SET_IPV6_SRC,
static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
const char *, unsigned int, void *,
unsigned int);
+static int parse_vc_action_l2_encap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_l2_decap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_mplsogre_encap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
+static int parse_vc_action_mplsogre_decap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
+static int parse_vc_action_mplsoudp_encap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
+static int parse_vc_action_mplsoudp_decap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
static int parse_destroy(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
.args = ARGS(ARGS_ENTRY_HTON
(struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
},
+ [ITEM_META] = {
+ .name = "meta",
+ .help = "match metadata header",
+ .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
+ .next = NEXT(item_meta),
+ .call = parse_vc,
+ },
+ [ITEM_META_DATA] = {
+ .name = "data",
+ .help = "metadata value",
+ .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
+ data, "\xff\xff\xff\xff")),
+ },
/* Validate/create actions. */
[ACTIONS] = {
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc,
},
+ [ACTION_L2_ENCAP] = {
+ .name = "l2_encap",
+ .help = "l2 encap, uses configuration set by"
+ " \"set l2_encap\"",
+ .priv = PRIV_ACTION(RAW_ENCAP,
+ sizeof(struct action_raw_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_l2_encap,
+ },
+ [ACTION_L2_DECAP] = {
+ .name = "l2_decap",
+ .help = "l2 decap, uses configuration set by"
+ " \"set l2_decap\"",
+ .priv = PRIV_ACTION(RAW_DECAP,
+ sizeof(struct action_raw_decap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_l2_decap,
+ },
+ [ACTION_MPLSOGRE_ENCAP] = {
+ .name = "mplsogre_encap",
+ .help = "mplsogre encapsulation, uses configuration set by"
+ " \"set mplsogre_encap\"",
+ .priv = PRIV_ACTION(RAW_ENCAP,
+ sizeof(struct action_raw_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsogre_encap,
+ },
+ [ACTION_MPLSOGRE_DECAP] = {
+ .name = "mplsogre_decap",
+ .help = "mplsogre decapsulation, uses configuration set by"
+ " \"set mplsogre_decap\"",
+ .priv = PRIV_ACTION(RAW_DECAP,
+ sizeof(struct action_raw_decap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsogre_decap,
+ },
+ [ACTION_MPLSOUDP_ENCAP] = {
+ .name = "mplsoudp_encap",
+ .help = "mplsoudp encapsulation, uses configuration set by"
+ " \"set mplsoudp_encap\"",
+ .priv = PRIV_ACTION(RAW_ENCAP,
+ sizeof(struct action_raw_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsoudp_encap,
+ },
+ [ACTION_MPLSOUDP_DECAP] = {
+ .name = "mplsoudp_decap",
+ .help = "mplsoudp decapsulation, uses configuration set by"
+ " \"set mplsoudp_decap\"",
+ .priv = PRIV_ACTION(RAW_DECAP,
+ sizeof(struct action_raw_decap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsoudp_decap,
+ },
[ACTION_SET_IPV4_SRC] = {
.name = "set_ipv4_src",
.help = "Set a new IPv4 source address in the outermost"
if (!vxlan_encap_conf.select_vlan)
action_vxlan_encap_data->items[1].type =
RTE_FLOW_ITEM_TYPE_VOID;
+ if (vxlan_encap_conf.select_tos_ttl) {
+ if (vxlan_encap_conf.select_ipv4) {
+ static struct rte_flow_item_ipv4 ipv4_mask_tos;
+
+ memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
+ sizeof(ipv4_mask_tos));
+ ipv4_mask_tos.hdr.type_of_service = 0xff;
+ ipv4_mask_tos.hdr.time_to_live = 0xff;
+ action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
+ vxlan_encap_conf.ip_tos;
+ action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
+ vxlan_encap_conf.ip_ttl;
+ action_vxlan_encap_data->items[2].mask =
+ &ipv4_mask_tos;
+ } else {
+ static struct rte_flow_item_ipv6 ipv6_mask_tos;
+
+ memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
+ sizeof(ipv6_mask_tos));
+ ipv6_mask_tos.hdr.vtc_flow |=
+ RTE_BE32(0xfful << IPV6_HDR_TC_SHIFT);
+ ipv6_mask_tos.hdr.hop_limits = 0xff;
+ action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
+ rte_cpu_to_be_32
+ ((uint32_t)vxlan_encap_conf.ip_tos <<
+ IPV6_HDR_TC_SHIFT);
+ action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
+ vxlan_encap_conf.ip_ttl;
+ action_vxlan_encap_data->items[2].mask =
+ &ipv6_mask_tos;
+ }
+ }
memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
RTE_DIM(vxlan_encap_conf.vni));
action->conf = &action_vxlan_encap_data->conf;
return ret;
}
+/**
+ * Parse l2 encap action.
+ *
+ * Emits a RAW_ENCAP action whose raw header (Ethernet, optionally
+ * followed by a VLAN tag) is built from the global l2_encap_conf,
+ * previously filled in by the "set l2_encap" command.
+ *
+ * Returns parse_vc()'s result on success, -1 when no action slot is
+ * available.
+ */
+static int
+parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ /*
+  * NOTE(review): TCI is taken from mplsoudp_encap_conf instead of
+  * l2_encap_conf; this looks copy/pasted from the MPLSoUDP parser.
+  * Confirm whether l2_encap_conf should carry its own TCI.
+  */
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsoudp_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ };
+ header = action_encap_data->data;
+ /* Outer EtherType depends on VLAN presence and inner IP version. */
+ if (l2_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (l2_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ l2_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (l2_encap_conf.select_vlan) {
+ if (l2_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ /* Total raw header length accumulated above. */
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/**
+ * Parse l2 decap action.
+ *
+ * Emits a RAW_DECAP action sized to strip an Ethernet header,
+ * optionally followed by a VLAN tag, according to l2_decap_conf
+ * (filled in by the "set l2_decap" command).
+ *
+ * Returns parse_vc()'s result on success, -1 when no action slot is
+ * available.
+ */
+static int
+parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ /*
+  * NOTE(review): TCI borrowed from mplsoudp_encap_conf (copy/paste?).
+  * Presumably only the header sizes matter for decap — confirm.
+  */
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsoudp_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (l2_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (l2_decap_conf.select_vlan) {
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ /* Total raw header length to strip. */
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
+/* MPLS unicast EtherType, used as the GRE protocol field. */
+#define ETHER_TYPE_MPLS_UNICAST 0x8847
+
+/**
+ * Parse MPLSOGRE encap action.
+ *
+ * Emits a RAW_ENCAP action whose raw header (Ethernet, optional VLAN,
+ * IPv4 or IPv6, GRE, MPLS) is built from the global mplsogre_encap_conf,
+ * previously filled in by the "set mplsogre_encap" command.
+ *
+ * Returns parse_vc()'s result on success, -1 when no action slot is
+ * available.
+ */
+static int
+parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsogre_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .src_addr = mplsogre_encap_conf.ipv4_src,
+ .dst_addr = mplsogre_encap_conf.ipv4_dst,
+ .next_proto_id = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_gre gre = {
+ .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
+ };
+ /*
+  * Zero-initialize: only label_tc_s[] is filled below; without this
+  * the uninitialized ttl byte would be copied into the raw header.
+  */
+ struct rte_flow_item_mpls mpls = { .ttl = 0, };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ .preserve = {},
+ };
+ header = action_encap_data->data;
+ if (mplsogre_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsogre_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsogre_encap_conf.select_vlan) {
+ if (mplsogre_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsogre_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(&ipv6.hdr.src_addr,
+ &mplsogre_encap_conf.ipv6_src,
+ sizeof(mplsogre_encap_conf.ipv6_src));
+ memcpy(&ipv6.hdr.dst_addr,
+ &mplsogre_encap_conf.ipv6_dst,
+ sizeof(mplsogre_encap_conf.ipv6_dst));
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &gre, sizeof(gre));
+ header += sizeof(gre);
+ memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
+ RTE_DIM(mplsogre_encap_conf.label));
+ /* Set the bottom-of-stack bit on the (single) MPLS label. */
+ mpls.label_tc_s[2] |= 0x1;
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/**
+ * Parse MPLSOGRE decap action.
+ *
+ * Emits a RAW_DECAP action sized to strip Ethernet, optional VLAN,
+ * IPv4/IPv6, GRE and MPLS headers.
+ *
+ * NOTE(review): most fields below come from mplsogre_encap_conf while
+ * only the eth.type selection checks mplsogre_decap_conf.select_vlan.
+ * Presumably only the header sizes matter for decap, but the mixed
+ * select_vlan sources could disagree — confirm this is intended.
+ *
+ * Returns parse_vc()'s result on success, -1 when no action slot is
+ * available.
+ */
+static int
+parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .next_proto_id = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_gre gre = {
+ .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
+ };
+ /* All-zero MPLS header; only its size is meaningful here. */
+ struct rte_flow_item_mpls mpls = { .ttl = 0, };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (mplsogre_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsogre_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsogre_encap_conf.select_vlan) {
+ if (mplsogre_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsogre_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &gre, sizeof(gre));
+ header += sizeof(gre);
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
+/**
+ * Parse MPLSOUDP encap action.
+ *
+ * Emits a RAW_ENCAP action whose raw header (Ethernet, optional VLAN,
+ * IPv4 or IPv6, UDP, MPLS) is built from the global mplsoudp_encap_conf,
+ * previously filled in by the "set mplsoudp_encap" command.
+ *
+ * Returns parse_vc()'s result on success, -1 when no action slot is
+ * available.
+ */
+static int
+parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsoudp_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .src_addr = mplsoudp_encap_conf.ipv4_src,
+ .dst_addr = mplsoudp_encap_conf.ipv4_dst,
+ .next_proto_id = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_udp udp = {
+ .hdr = {
+ .src_port = mplsoudp_encap_conf.udp_src,
+ .dst_port = mplsoudp_encap_conf.udp_dst,
+ },
+ };
+ /*
+  * Zero-initialize: only label_tc_s[] is filled below; without this
+  * the uninitialized ttl byte would be copied into the raw header.
+  */
+ struct rte_flow_item_mpls mpls = { .ttl = 0, };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ .preserve = {},
+ };
+ header = action_encap_data->data;
+ if (mplsoudp_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsoudp_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsoudp_encap_conf.select_vlan) {
+ if (mplsoudp_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsoudp_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(&ipv6.hdr.src_addr,
+ &mplsoudp_encap_conf.ipv6_src,
+ sizeof(mplsoudp_encap_conf.ipv6_src));
+ memcpy(&ipv6.hdr.dst_addr,
+ &mplsoudp_encap_conf.ipv6_dst,
+ sizeof(mplsoudp_encap_conf.ipv6_dst));
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &udp, sizeof(udp));
+ header += sizeof(udp);
+ memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
+ RTE_DIM(mplsoudp_encap_conf.label));
+ /* Set the bottom-of-stack bit on the (single) MPLS label. */
+ mpls.label_tc_s[2] |= 0x1;
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/**
+ * Parse MPLSOUDP decap action.
+ *
+ * Emits a RAW_DECAP action sized to strip Ethernet, optional VLAN,
+ * IPv4/IPv6, UDP and MPLS headers.
+ *
+ * NOTE(review): most fields below come from mplsoudp_encap_conf while
+ * only the eth.type selection checks mplsoudp_decap_conf.select_vlan.
+ * Presumably only the header sizes matter for decap, but the mixed
+ * select_vlan sources could disagree — confirm this is intended.
+ *
+ * Returns parse_vc()'s result on success, -1 when no action slot is
+ * available.
+ */
+static int
+parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .next_proto_id = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_udp udp = {
+ .hdr = {
+ /* 6635 is the IANA MPLS-over-UDP port (RFC 7510). */
+ .dst_port = rte_cpu_to_be_16(6635),
+ },
+ };
+ /* All-zero MPLS header; only its size is meaningful here. */
+ struct rte_flow_item_mpls mpls = { .ttl = 0, };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (mplsoudp_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsoudp_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsoudp_encap_conf.select_vlan) {
+ if (mplsoudp_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsoudp_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &udp, sizeof(udp));
+ header += sizeof(udp);
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
/** Parse tokens for destroy command. */
static int
parse_destroy(struct context *ctx, const struct token *token,
}
buf = (uint8_t *)ctx->object + arg->offset;
size = arg->size;
+ if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
+ return -1;
objmask:
switch (size) {
case sizeof(uint8_t):