X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pipeline%2Frte_table_action.c;h=1e85cc9cfb91501c5fccc19e6c90f865ffd4b928;hb=6d13ea8e8e49ab957deae2bba5ecf4a4bfe747d1;hp=791fc20f5e747b36156d7efaba6271af93edc846;hpb=c8ae949197ca64c35181d01ef2943006c8296d8b;p=dpdk.git diff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c index 791fc20f5e..1e85cc9cfb 100644 --- a/lib/librte_pipeline/rte_table_action.c +++ b/lib/librte_pipeline/rte_table_action.c @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2018 Intel Corporation */ - #include <stdlib.h> #include <string.h> @@ -9,8 +8,14 @@ #include <rte_byteorder.h> #include <rte_cycles.h> #include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_ether.h> #include <rte_ip.h> - +#include <rte_tcp.h> +#include <rte_udp.h> +#include <rte_vxlan.h> +#include <rte_cryptodev.h> +#include <rte_cryptodev_pmd.h> #include "rte_table_action.h" @@ -40,6 +45,55 @@ fwd_apply(struct fwd_data *data, return 0; } +/** + * RTE_TABLE_ACTION_LB + */ +static int +lb_cfg_check(struct rte_table_action_lb_config *cfg) +{ + if ((cfg == NULL) || + (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) || + (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) || + (!rte_is_power_of_2(cfg->key_size)) || + (cfg->f_hash == NULL)) + return -1; + + return 0; +} + +struct lb_data { + uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE]; +} __attribute__((__packed__)); + +static int +lb_apply(struct lb_data *data, + struct rte_table_action_lb_params *p) +{ + memcpy(data->out, p->out, sizeof(data->out)); + + return 0; +} + +static __rte_always_inline void +pkt_work_lb(struct rte_mbuf *mbuf, + struct lb_data *data, + struct rte_table_action_lb_config *cfg) +{ + uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset); + uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset); + uint64_t digest, pos; + uint32_t out_val; + + digest = cfg->f_hash(pkt_key, + cfg->key_mask, + cfg->key_size, + cfg->seed); + pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1); + out_val = data->out[pos]; + + *out = out_val; +} + /** * RTE_TABLE_ACTION_MTR */ @@ -53,39 +107,31 @@ mtr_cfg_check(struct rte_table_action_mtr_config *mtr) return 0; } -#define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \ - ((uint16_t)((((uint64_t)(queue)) & 0x3) | \ - ((((uint64_t)(tc)) & 0x3) << 2) | \ - ((((uint64_t)(color)) & 0x3) << 4))) - -#define MBUF_SCHED_COLOR(sched, color) \ - (((sched) & (~0x30LLU)) | ((color) << 4)) - struct mtr_trtcm_data { struct rte_meter_trtcm trtcm; - uint64_t stats[e_RTE_METER_COLORS]; + uint64_t stats[RTE_COLORS]; } __attribute__((__packed__)); #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \ - (((data)->stats[e_RTE_METER_GREEN] & 0xF8LLU) >> 3) + (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3) static void mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data, uint32_t profile_id) { - data->stats[e_RTE_METER_GREEN] &= ~0xF8LLU; - data->stats[e_RTE_METER_GREEN] |= (profile_id % 32) << 3; + data->stats[RTE_COLOR_GREEN] &= ~0xF8LLU; + data->stats[RTE_COLOR_GREEN] |= (profile_id % 32) << 3; } #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\ (((data)->stats[(color)] & 4LLU) >> 2) #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\ - ((enum rte_meter_color)((data)->stats[(color)] & 3LLU)) + ((enum rte_color)((data)->stats[(color)] & 3LLU)) static void mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data, - enum rte_meter_color color, + enum rte_color color, enum rte_table_action_policer action) { if (action == RTE_TABLE_ACTION_POLICER_DROP) { @@ -98,14 +144,14 @@ mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data, static uint64_t
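/*
 * Context for the stats accessors that follow (illustration added for
 * clarity, not part of the patch): mtr_trtcm_data packs per-color
 * metadata into the low 8 bits of each stats[] word and keeps the packet
 * counter in bits 8..63, which is why reads shift right by 8 and resets
 * keep only the low byte. Layout as implied by the macros above, where
 * stats_word stands for data->stats[color]:
 *
 * @code
 * // stats[color] bit layout:
 * //   bits 0..1  - policer output color for this input color
 * //   bit  2     - policer drop flag for this input color
 * //   bits 3..7  - meter profile id (stats[RTE_COLOR_GREEN] only)
 * //   bits 8..63 - packet counter
 * uint64_t counter = stats_word >> 8;    // what stats_get returns
 * uint64_t kept = stats_word & 0xFFLU;   // what stats_reset keeps
 * @endcode
 */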
mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data, - enum rte_meter_color color) + enum rte_color color) { return data->stats[color] >> 8; } static void mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data, - enum rte_meter_color color) + enum rte_color color) { data->stats[color] &= 0xFFLU; } @@ -120,9 +166,9 @@ mtr_data_size(struct rte_table_action_mtr_config *mtr) } struct dscp_table_entry_data { - enum rte_meter_color color; + enum rte_color color; uint16_t tc; - uint16_t queue_tc_color; + uint16_t tc_queue; }; struct dscp_table_data { @@ -241,16 +287,16 @@ mtr_apply(struct mtr_trtcm_data *data, /* Policer actions */ mtr_trtcm_data_policer_action_set(data_tc, - e_RTE_METER_GREEN, - p_tc->policer[e_RTE_METER_GREEN]); + RTE_COLOR_GREEN, + p_tc->policer[RTE_COLOR_GREEN]); mtr_trtcm_data_policer_action_set(data_tc, - e_RTE_METER_YELLOW, - p_tc->policer[e_RTE_METER_YELLOW]); + RTE_COLOR_YELLOW, + p_tc->policer[RTE_COLOR_YELLOW]); mtr_trtcm_data_policer_action_set(data_tc, - e_RTE_METER_RED, - p_tc->policer[e_RTE_METER_RED]); + RTE_COLOR_RED, + p_tc->policer[RTE_COLOR_RED]); } return 0; @@ -265,17 +311,15 @@ pkt_work_mtr(struct rte_mbuf *mbuf, uint32_t dscp, uint16_t total_length) { - uint64_t drop_mask, sched; - uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched; + uint64_t drop_mask; struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp]; - enum rte_meter_color color_in, color_meter, color_policer; + enum rte_color color_in, color_meter, color_policer; uint32_t tc, mp_id; tc = dscp_entry->tc; color_in = dscp_entry->color; data += tc; mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data); - sched = *sched_ptr; /* Meter */ color_meter = rte_meter_trtcm_color_aware_check( @@ -292,77 +336,1846 @@ pkt_work_mtr(struct rte_mbuf *mbuf, drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter); color_policer = MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter); - *sched_ptr = MBUF_SCHED_COLOR(sched, color_policer); + rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer); + + return drop_mask; +} + +/** + * RTE_TABLE_ACTION_TM + */ +static int +tm_cfg_check(struct rte_table_action_tm_config *tm) +{ + if ((tm->n_subports_per_port == 0) || + (rte_is_power_of_2(tm->n_subports_per_port) == 0) || + (tm->n_subports_per_port > UINT16_MAX) || + (tm->n_pipes_per_subport == 0) || + (rte_is_power_of_2(tm->n_pipes_per_subport) == 0)) + return -ENOTSUP; + + return 0; +} + +struct tm_data { + uint32_t queue_id; + uint32_t reserved; +} __attribute__((__packed__)); + +static int +tm_apply_check(struct rte_table_action_tm_params *p, + struct rte_table_action_tm_config *cfg) +{ + if ((p->subport_id >= cfg->n_subports_per_port) || + (p->pipe_id >= cfg->n_pipes_per_subport)) + return -EINVAL; + + return 0; +} + +static int +tm_apply(struct tm_data *data, + struct rte_table_action_tm_params *p, + struct rte_table_action_tm_config *cfg) +{ + int status; + + /* Check input arguments */ + status = tm_apply_check(p, cfg); + if (status) + return status; + + /* Apply */ + data->queue_id = p->subport_id << + (__builtin_ctz(cfg->n_pipes_per_subport) + 4) | + p->pipe_id << 4; + + return 0; +} + +static __rte_always_inline void +pkt_work_tm(struct rte_mbuf *mbuf, + struct tm_data *data, + struct dscp_table_data *dscp_table, + uint32_t dscp) +{ + struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp]; + uint32_t queue_id = data->queue_id | + (dscp_entry->tc << 2) | + dscp_entry->tc_queue; + rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc, + (uint8_t)dscp_entry->color); +} + +/** 
+ * RTE_TABLE_ACTION_ENCAP + */ +static int +encap_valid(enum rte_table_action_encap_type encap) +{ + switch (encap) { + case RTE_TABLE_ACTION_ENCAP_ETHER: + case RTE_TABLE_ACTION_ENCAP_VLAN: + case RTE_TABLE_ACTION_ENCAP_QINQ: + case RTE_TABLE_ACTION_ENCAP_MPLS: + case RTE_TABLE_ACTION_ENCAP_PPPOE: + case RTE_TABLE_ACTION_ENCAP_VXLAN: + case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE: + return 1; + default: + return 0; + } +} + +static int +encap_cfg_check(struct rte_table_action_encap_config *encap) +{ + if ((encap->encap_mask == 0) || + (__builtin_popcountll(encap->encap_mask) != 1)) + return -ENOTSUP; + + return 0; +} + +struct encap_ether_data { + struct rte_ether_hdr ether; +} __attribute__((__packed__)); + +#define VLAN(pcp, dei, vid) \ + ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \ + ((((uint64_t)(dei)) & 0x1LLU) << 12) | \ + (((uint64_t)(vid)) & 0xFFFLLU)) \ + +struct encap_vlan_data { + struct rte_ether_hdr ether; + struct rte_vlan_hdr vlan; +} __attribute__((__packed__)); + +struct encap_qinq_data { + struct rte_ether_hdr ether; + struct rte_vlan_hdr svlan; + struct rte_vlan_hdr cvlan; +} __attribute__((__packed__)); + +#define ETHER_TYPE_MPLS_UNICAST 0x8847 + +#define ETHER_TYPE_MPLS_MULTICAST 0x8848 + +#define MPLS(label, tc, s, ttl) \ + ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\ + ((((uint64_t)(tc)) & 0x7LLU) << 9) | \ + ((((uint64_t)(s)) & 0x1LLU) << 8) | \ + (((uint64_t)(ttl)) & 0xFFLLU))) + +struct encap_mpls_data { + struct rte_ether_hdr ether; + uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX]; + uint32_t mpls_count; +} __attribute__((__packed__)); + +#define PPP_PROTOCOL_IP 0x0021 + +struct pppoe_ppp_hdr { + uint16_t ver_type_code; + uint16_t session_id; + uint16_t length; + uint16_t protocol; +} __attribute__((__packed__)); + +struct encap_pppoe_data { + struct rte_ether_hdr ether; + struct pppoe_ppp_hdr pppoe_ppp; +} __attribute__((__packed__)); + +#define IP_PROTO_UDP 17 + +struct encap_vxlan_ipv4_data { + struct rte_ether_hdr ether; + struct ipv4_hdr ipv4; + struct udp_hdr udp; + struct rte_vxlan_hdr vxlan; +} __attribute__((__packed__)); + +struct encap_vxlan_ipv4_vlan_data { + struct rte_ether_hdr ether; + struct rte_vlan_hdr vlan; + struct ipv4_hdr ipv4; + struct udp_hdr udp; + struct rte_vxlan_hdr vxlan; +} __attribute__((__packed__)); + +struct encap_vxlan_ipv6_data { + struct rte_ether_hdr ether; + struct ipv6_hdr ipv6; + struct udp_hdr udp; + struct rte_vxlan_hdr vxlan; +} __attribute__((__packed__)); + +struct encap_vxlan_ipv6_vlan_data { + struct rte_ether_hdr ether; + struct rte_vlan_hdr vlan; + struct ipv6_hdr ipv6; + struct udp_hdr udp; + struct rte_vxlan_hdr vxlan; +} __attribute__((__packed__)); + +struct encap_qinq_pppoe_data { + struct rte_ether_hdr ether; + struct rte_vlan_hdr svlan; + struct rte_vlan_hdr cvlan; + struct pppoe_ppp_hdr pppoe_ppp; +} __attribute__((__packed__)); + +static size_t +encap_data_size(struct rte_table_action_encap_config *encap) +{ + switch (encap->encap_mask) { + case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER: + return sizeof(struct encap_ether_data); + + case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN: + return sizeof(struct encap_vlan_data); + + case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ: + return sizeof(struct encap_qinq_data); + + case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS: + return sizeof(struct encap_mpls_data); + + case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE: + return sizeof(struct encap_pppoe_data); + + case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN: + if (encap->vxlan.ip_version) + if (encap->vxlan.vlan) + return 
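/*
 * (return expression continues below) Each size returned by
 * encap_data_size() is the byte-exact on-wire template described by the
 * packed structs above; the apply functions fill one such template per
 * table entry, and the data path later copies it in front of the packet
 * (see encap() further down). A hedged sketch of that prepend idiom,
 * with hypothetical names tmpl, payload and n:
 *
 * @code
 * uint8_t *hdr = payload - n;   // headroom must already be reserved
 * rte_memcpy(hdr, tmpl, n);     // template now precedes the payload
 * @endcode
 */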
sizeof(struct encap_vxlan_ipv4_vlan_data); + else + return sizeof(struct encap_vxlan_ipv4_data); + else + if (encap->vxlan.vlan) + return sizeof(struct encap_vxlan_ipv6_vlan_data); + else + return sizeof(struct encap_vxlan_ipv6_data); + + case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE: + return sizeof(struct encap_qinq_pppoe_data); + + default: + return 0; + } +} + +static int +encap_apply_check(struct rte_table_action_encap_params *p, + struct rte_table_action_encap_config *cfg) +{ + if ((encap_valid(p->type) == 0) || + ((cfg->encap_mask & (1LLU << p->type)) == 0)) + return -EINVAL; + + switch (p->type) { + case RTE_TABLE_ACTION_ENCAP_ETHER: + return 0; + + case RTE_TABLE_ACTION_ENCAP_VLAN: + return 0; + + case RTE_TABLE_ACTION_ENCAP_QINQ: + return 0; + + case RTE_TABLE_ACTION_ENCAP_MPLS: + if ((p->mpls.mpls_count == 0) || + (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX)) + return -EINVAL; + + return 0; + + case RTE_TABLE_ACTION_ENCAP_PPPOE: + return 0; + + case RTE_TABLE_ACTION_ENCAP_VXLAN: + return 0; + + case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE: + return 0; + + default: + return -EINVAL; + } +} + +static int +encap_ether_apply(void *data, + struct rte_table_action_encap_params *p, + struct rte_table_action_common_config *common_cfg) +{ + struct encap_ether_data *d = data; + uint16_t ethertype = (common_cfg->ip_version) ? + ETHER_TYPE_IPv4 : + ETHER_TYPE_IPv6; + + /* Ethernet */ + ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ethertype); + + return 0; +} + +static int +encap_vlan_apply(void *data, + struct rte_table_action_encap_params *p, + struct rte_table_action_common_config *common_cfg) +{ + struct encap_vlan_data *d = data; + uint16_t ethertype = (common_cfg->ip_version) ? + ETHER_TYPE_IPv4 : + ETHER_TYPE_IPv6; + + /* Ethernet */ + ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN); + + /* VLAN */ + d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp, + p->vlan.vlan.dei, + p->vlan.vlan.vid)); + d->vlan.eth_proto = rte_htons(ethertype); + + return 0; +} + +static int +encap_qinq_apply(void *data, + struct rte_table_action_encap_params *p, + struct rte_table_action_common_config *common_cfg) +{ + struct encap_qinq_data *d = data; + uint16_t ethertype = (common_cfg->ip_version) ? 
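/*
 * (ternary continues below) common_cfg->ip_version selects the innermost
 * ethertype: nonzero means the payload is IPv4, zero means IPv6. For
 * reference, the QinQ stack this function writes out below, with the
 * standard ethertype values:
 *
 * @code
 * // Ethernet  ether_type = ETHER_TYPE_QINQ (0x88A8)
 * // S-VLAN    eth_proto  = ETHER_TYPE_VLAN (0x8100)
 * // C-VLAN    eth_proto  = 0x0800 (IPv4) or 0x86DD (IPv6)
 * @endcode
 */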
+ ETHER_TYPE_IPv4 : + ETHER_TYPE_IPv6; + + /* Ethernet */ + ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ); + + /* SVLAN */ + d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp, + p->qinq.svlan.dei, + p->qinq.svlan.vid)); + d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN); + + /* CVLAN */ + d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp, + p->qinq.cvlan.dei, + p->qinq.cvlan.vid)); + d->cvlan.eth_proto = rte_htons(ethertype); + + return 0; +} + +static int +encap_qinq_pppoe_apply(void *data, + struct rte_table_action_encap_params *p) +{ + struct encap_qinq_pppoe_data *d = data; + + /* Ethernet */ + ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN); + + /* SVLAN */ + d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp, + p->qinq.svlan.dei, + p->qinq.svlan.vid)); + d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN); + + /* CVLAN */ + d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp, + p->qinq.cvlan.dei, + p->qinq.cvlan.vid)); + d->cvlan.eth_proto = rte_htons(ETHER_TYPE_PPPOE_SESSION); + + /* PPPoE and PPP*/ + d->pppoe_ppp.ver_type_code = rte_htons(0x1100); + d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id); + d->pppoe_ppp.length = 0; /* not pre-computed */ + d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP); + + return 0; +} + +static int +encap_mpls_apply(void *data, + struct rte_table_action_encap_params *p) +{ + struct encap_mpls_data *d = data; + uint16_t ethertype = (p->mpls.unicast) ? + ETHER_TYPE_MPLS_UNICAST : + ETHER_TYPE_MPLS_MULTICAST; + uint32_t i; + + /* Ethernet */ + ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ethertype); + + /* MPLS */ + for (i = 0; i < p->mpls.mpls_count - 1; i++) + d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label, + p->mpls.mpls[i].tc, + 0, + p->mpls.mpls[i].ttl)); + + d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label, + p->mpls.mpls[i].tc, + 1, + p->mpls.mpls[i].ttl)); + + d->mpls_count = p->mpls.mpls_count; + return 0; +} + +static int +encap_pppoe_apply(void *data, + struct rte_table_action_encap_params *p) +{ + struct encap_pppoe_data *d = data; + + /* Ethernet */ + ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION); + + /* PPPoE and PPP*/ + d->pppoe_ppp.ver_type_code = rte_htons(0x1100); + d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id); + d->pppoe_ppp.length = 0; /* not pre-computed */ + d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP); + + return 0; +} + +static int +encap_vxlan_apply(void *data, + struct rte_table_action_encap_params *p, + struct rte_table_action_encap_config *cfg) +{ + if ((p->vxlan.vxlan.vni > 0xFFFFFF) || + (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) || + (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) || + (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) || + (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF))) + return -1; + + if (cfg->vxlan.ip_version) + if (cfg->vxlan.vlan) { + struct encap_vxlan_ipv4_vlan_data *d = data; + + /* Ethernet */ + ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN); + + /* 
VLAN */ + d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp, + p->vxlan.vlan.dei, + p->vxlan.vlan.vid)); + d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4); + + /* IPv4*/ + d->ipv4.version_ihl = 0x45; + d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2; + d->ipv4.total_length = 0; /* not pre-computed */ + d->ipv4.packet_id = 0; + d->ipv4.fragment_offset = 0; + d->ipv4.time_to_live = p->vxlan.ipv4.ttl; + d->ipv4.next_proto_id = IP_PROTO_UDP; + d->ipv4.hdr_checksum = 0; + d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa); + d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da); + + d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4); + + /* UDP */ + d->udp.src_port = rte_htons(p->vxlan.udp.sp); + d->udp.dst_port = rte_htons(p->vxlan.udp.dp); + d->udp.dgram_len = 0; /* not pre-computed */ + d->udp.dgram_cksum = 0; + + /* VXLAN */ + d->vxlan.vx_flags = rte_htonl(0x08000000); + d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8); + + return 0; + } else { + struct encap_vxlan_ipv4_data *d = data; + + /* Ethernet */ + ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4); + + /* IPv4*/ + d->ipv4.version_ihl = 0x45; + d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2; + d->ipv4.total_length = 0; /* not pre-computed */ + d->ipv4.packet_id = 0; + d->ipv4.fragment_offset = 0; + d->ipv4.time_to_live = p->vxlan.ipv4.ttl; + d->ipv4.next_proto_id = IP_PROTO_UDP; + d->ipv4.hdr_checksum = 0; + d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa); + d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da); + + d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4); + + /* UDP */ + d->udp.src_port = rte_htons(p->vxlan.udp.sp); + d->udp.dst_port = rte_htons(p->vxlan.udp.dp); + d->udp.dgram_len = 0; /* not pre-computed */ + d->udp.dgram_cksum = 0; + + /* VXLAN */ + d->vxlan.vx_flags = rte_htonl(0x08000000); + d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8); + + return 0; + } + else + if (cfg->vxlan.vlan) { + struct encap_vxlan_ipv6_vlan_data *d = data; + + /* Ethernet */ + ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN); + + /* VLAN */ + d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp, + p->vxlan.vlan.dei, + p->vxlan.vlan.vid)); + d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6); + + /* IPv6*/ + d->ipv6.vtc_flow = rte_htonl((6 << 28) | + (p->vxlan.ipv6.dscp << 22) | + p->vxlan.ipv6.flow_label); + d->ipv6.payload_len = 0; /* not pre-computed */ + d->ipv6.proto = IP_PROTO_UDP; + d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit; + memcpy(d->ipv6.src_addr, + p->vxlan.ipv6.sa, + sizeof(p->vxlan.ipv6.sa)); + memcpy(d->ipv6.dst_addr, + p->vxlan.ipv6.da, + sizeof(p->vxlan.ipv6.da)); + + /* UDP */ + d->udp.src_port = rte_htons(p->vxlan.udp.sp); + d->udp.dst_port = rte_htons(p->vxlan.udp.dp); + d->udp.dgram_len = 0; /* not pre-computed */ + d->udp.dgram_cksum = 0; + + /* VXLAN */ + d->vxlan.vx_flags = rte_htonl(0x08000000); + d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8); + + return 0; + } else { + struct encap_vxlan_ipv6_data *d = data; + + /* Ethernet */ + ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr); + ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr); + d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6); + + /* IPv6*/ + d->ipv6.vtc_flow = rte_htonl((6 << 28) | + (p->vxlan.ipv6.dscp << 22) | + p->vxlan.ipv6.flow_label); + d->ipv6.payload_len = 0; /* not pre-computed */ + d->ipv6.proto = IP_PROTO_UDP; + 
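/*
 * A note on the vtc_flow assignments above (added for clarity): three
 * IPv6 fields are packed into one 32-bit word before byte swapping; the
 * dscp shift of 22 is 20 (flow label width) plus 2 (ECN bits left zero).
 *
 * @code
 * // bits 31..28  version (6)
 * // bits 27..22  DSCP (traffic class with ECN = 0)
 * // bits 19..0   flow label
 * uint32_t vtc_flow = (6u << 28) | (dscp << 22) | flow_label;
 * @endcode
 */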
d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit; + memcpy(d->ipv6.src_addr, + p->vxlan.ipv6.sa, + sizeof(p->vxlan.ipv6.sa)); + memcpy(d->ipv6.dst_addr, + p->vxlan.ipv6.da, + sizeof(p->vxlan.ipv6.da)); + + /* UDP */ + d->udp.src_port = rte_htons(p->vxlan.udp.sp); + d->udp.dst_port = rte_htons(p->vxlan.udp.dp); + d->udp.dgram_len = 0; /* not pre-computed */ + d->udp.dgram_cksum = 0; + + /* VXLAN */ + d->vxlan.vx_flags = rte_htonl(0x08000000); + d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8); + + return 0; + } +} + +static int +encap_apply(void *data, + struct rte_table_action_encap_params *p, + struct rte_table_action_encap_config *cfg, + struct rte_table_action_common_config *common_cfg) +{ + int status; + + /* Check input arguments */ + status = encap_apply_check(p, cfg); + if (status) + return status; + + switch (p->type) { + case RTE_TABLE_ACTION_ENCAP_ETHER: + return encap_ether_apply(data, p, common_cfg); + + case RTE_TABLE_ACTION_ENCAP_VLAN: + return encap_vlan_apply(data, p, common_cfg); + + case RTE_TABLE_ACTION_ENCAP_QINQ: + return encap_qinq_apply(data, p, common_cfg); + + case RTE_TABLE_ACTION_ENCAP_MPLS: + return encap_mpls_apply(data, p); + + case RTE_TABLE_ACTION_ENCAP_PPPOE: + return encap_pppoe_apply(data, p); + + case RTE_TABLE_ACTION_ENCAP_VXLAN: + return encap_vxlan_apply(data, p, cfg); + + case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE: + return encap_qinq_pppoe_apply(data, p); + + default: + return -EINVAL; + } +} + +static __rte_always_inline uint16_t +encap_vxlan_ipv4_checksum_update(uint16_t cksum0, + uint16_t total_length) +{ + int32_t cksum1; + + cksum1 = cksum0; + cksum1 = ~cksum1 & 0xFFFF; + + /* Add total length (one's complement logic) */ + cksum1 += total_length; + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + + return (uint16_t)(~cksum1); +} + +static __rte_always_inline void * +encap(void *dst, const void *src, size_t n) +{ + dst = ((uint8_t *) dst) - n; + return rte_memcpy(dst, src, n); +} + +static __rte_always_inline void +pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf, + struct encap_vxlan_ipv4_data *vxlan_tbl, + struct rte_table_action_encap_config *cfg) +{ + uint32_t ether_offset = cfg->vxlan.data_offset; + void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset); + struct encap_vxlan_ipv4_data *vxlan_pkt; + uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length; + + ether_length = (uint16_t)mbuf->pkt_len; + ipv4_total_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr) + + sizeof(struct ipv4_hdr)); + ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum, + rte_htons(ipv4_total_length)); + udp_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr)); + + vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl)); + vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length); + vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum; + vxlan_pkt->udp.dgram_len = rte_htons(udp_length); + + mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt)); + mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt); +} + +static __rte_always_inline void +pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf, + struct encap_vxlan_ipv4_vlan_data *vxlan_tbl, + struct rte_table_action_encap_config *cfg) +{ + uint32_t ether_offset = cfg->vxlan.data_offset; + void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset); + struct encap_vxlan_ipv4_vlan_data *vxlan_pkt; + uint16_t ether_length, 
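/*
 * (declaration continues below) encap_vxlan_ipv4_checksum_update(),
 * defined above, is the RFC 1624 incremental rule for a field whose old
 * value was zero: the template checksum was computed with
 * total_length == 0, so the real length can be folded in without a full
 * recomputation. Equivalent sketch:
 *
 * @code
 * // HC' = ~(~HC + new_field), with one's-complement carry folding
 * int32_t sum = ~cksum0 & 0xFFFF;
 * sum += total_length_be;                 // network byte order
 * sum = (sum & 0xFFFF) + (sum >> 16);
 * sum = (sum & 0xFFFF) + (sum >> 16);
 * uint16_t cksum1 = (uint16_t)~sum;
 * @endcode
 */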
ipv4_total_length, ipv4_hdr_cksum, udp_length; + + ether_length = (uint16_t)mbuf->pkt_len; + ipv4_total_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr) + + sizeof(struct ipv4_hdr)); + ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum, + rte_htons(ipv4_total_length)); + udp_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr)); + + vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl)); + vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length); + vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum; + vxlan_pkt->udp.dgram_len = rte_htons(udp_length); + + mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt)); + mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt); +} + +static __rte_always_inline void +pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf, + struct encap_vxlan_ipv6_data *vxlan_tbl, + struct rte_table_action_encap_config *cfg) +{ + uint32_t ether_offset = cfg->vxlan.data_offset; + void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset); + struct encap_vxlan_ipv6_data *vxlan_pkt; + uint16_t ether_length, ipv6_payload_length, udp_length; + + ether_length = (uint16_t)mbuf->pkt_len; + ipv6_payload_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr)); + udp_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr)); + + vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl)); + vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length); + vxlan_pkt->udp.dgram_len = rte_htons(udp_length); + + mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt)); + mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt); +} + +static __rte_always_inline void +pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf, + struct encap_vxlan_ipv6_vlan_data *vxlan_tbl, + struct rte_table_action_encap_config *cfg) +{ + uint32_t ether_offset = cfg->vxlan.data_offset; + void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset); + struct encap_vxlan_ipv6_vlan_data *vxlan_pkt; + uint16_t ether_length, ipv6_payload_length, udp_length; + + ether_length = (uint16_t)mbuf->pkt_len; + ipv6_payload_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr)); + udp_length = ether_length + + (sizeof(struct rte_vxlan_hdr) + + sizeof(struct udp_hdr)); + + vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl)); + vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length); + vxlan_pkt->udp.dgram_len = rte_htons(udp_length); + + mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt)); + mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt); +} + +static __rte_always_inline void +pkt_work_encap(struct rte_mbuf *mbuf, + void *data, + struct rte_table_action_encap_config *cfg, + void *ip, + uint16_t total_length, + uint32_t ip_offset) +{ + switch (cfg->encap_mask) { + case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER: + encap(ip, data, sizeof(struct encap_ether_data)); + mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + + sizeof(struct encap_ether_data)); + mbuf->pkt_len = mbuf->data_len = total_length + + sizeof(struct encap_ether_data); + break; + + case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN: + encap(ip, data, sizeof(struct encap_vlan_data)); + mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + + sizeof(struct encap_vlan_data)); + mbuf->pkt_len = mbuf->data_len = total_length + + sizeof(struct encap_vlan_data); + break; + + case 1LLU << 
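/*
 * (case label continues below) Offsets used by pkt_work_encap(), such as
 * ip_offset, are measured from the start of the rte_mbuf structure
 * itself (see the RTE_MBUF_METADATA_* accessors), which is why the ETHER
 * and VLAN cases just above rebuild data_off as
 * ip_offset - (sizeof(struct rte_mbuf) + header size). Sketch of the
 * bookkeeping when n header bytes are prepended:
 *
 * @code
 * // data_off' = ip_offset - sizeof(struct rte_mbuf) - n
 * // pkt_len'  = data_len' = total_length + n
 * @endcode
 */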
RTE_TABLE_ACTION_ENCAP_QINQ: + encap(ip, data, sizeof(struct encap_qinq_data)); + mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + + sizeof(struct encap_qinq_data)); + mbuf->pkt_len = mbuf->data_len = total_length + + sizeof(struct encap_qinq_data); + break; + + case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS: + { + struct encap_mpls_data *mpls = data; + size_t size = sizeof(struct rte_ether_hdr) + + mpls->mpls_count * 4; + + encap(ip, data, size); + mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size); + mbuf->pkt_len = mbuf->data_len = total_length + size; + break; + } + + case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE: + { + struct encap_pppoe_data *pppoe = + encap(ip, data, sizeof(struct encap_pppoe_data)); + pppoe->pppoe_ppp.length = rte_htons(total_length + 2); + mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + + sizeof(struct encap_pppoe_data)); + mbuf->pkt_len = mbuf->data_len = total_length + + sizeof(struct encap_pppoe_data); + break; + } + + case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE: + { + struct encap_qinq_pppoe_data *qinq_pppoe = + encap(ip, data, sizeof(struct encap_qinq_pppoe_data)); + qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2); + mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + + sizeof(struct encap_qinq_pppoe_data)); + mbuf->pkt_len = mbuf->data_len = total_length + + sizeof(struct encap_qinq_pppoe_data); + break; + } + + case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN: + { + if (cfg->vxlan.ip_version) + if (cfg->vxlan.vlan) + pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg); + else + pkt_work_encap_vxlan_ipv4(mbuf, data, cfg); + else + if (cfg->vxlan.vlan) + pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg); + else + pkt_work_encap_vxlan_ipv6(mbuf, data, cfg); + } + + default: + break; + } +} + +/** + * RTE_TABLE_ACTION_NAT + */ +static int +nat_cfg_check(struct rte_table_action_nat_config *nat) +{ + if ((nat->proto != 0x06) && + (nat->proto != 0x11)) + return -ENOTSUP; + + return 0; +} + +struct nat_ipv4_data { + uint32_t addr; + uint16_t port; +} __attribute__((__packed__)); + +struct nat_ipv6_data { + uint8_t addr[16]; + uint16_t port; +} __attribute__((__packed__)); + +static size_t +nat_data_size(struct rte_table_action_nat_config *nat __rte_unused, + struct rte_table_action_common_config *common) +{ + int ip_version = common->ip_version; + + return (ip_version) ? 
+ sizeof(struct nat_ipv4_data) : + sizeof(struct nat_ipv6_data); +} + +static int +nat_apply_check(struct rte_table_action_nat_params *p, + struct rte_table_action_common_config *cfg) +{ + if ((p->ip_version && (cfg->ip_version == 0)) || + ((p->ip_version == 0) && cfg->ip_version)) + return -EINVAL; + + return 0; +} + +static int +nat_apply(void *data, + struct rte_table_action_nat_params *p, + struct rte_table_action_common_config *cfg) +{ + int status; + + /* Check input arguments */ + status = nat_apply_check(p, cfg); + if (status) + return status; + + /* Apply */ + if (p->ip_version) { + struct nat_ipv4_data *d = data; + + d->addr = rte_htonl(p->addr.ipv4); + d->port = rte_htons(p->port); + } else { + struct nat_ipv6_data *d = data; + + memcpy(d->addr, p->addr.ipv6, sizeof(d->addr)); + d->port = rte_htons(p->port); + } + + return 0; +} + +static __rte_always_inline uint16_t +nat_ipv4_checksum_update(uint16_t cksum0, + uint32_t ip0, + uint32_t ip1) +{ + int32_t cksum1; + + cksum1 = cksum0; + cksum1 = ~cksum1 & 0xFFFF; + + /* Subtract ip0 (one's complement logic) */ + cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + + /* Add ip1 (one's complement logic) */ + cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + + return (uint16_t)(~cksum1); +} + +static __rte_always_inline uint16_t +nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0, + uint32_t ip0, + uint32_t ip1, + uint16_t port0, + uint16_t port1) +{ + int32_t cksum1; + + cksum1 = cksum0; + cksum1 = ~cksum1 & 0xFFFF; + + /* Subtract ip0 and port 0 (one's complement logic) */ + cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0; + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + + /* Add ip1 and port1 (one's complement logic) */ + cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1; + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + + return (uint16_t)(~cksum1); +} + +static __rte_always_inline uint16_t +nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0, + uint16_t *ip0, + uint16_t *ip1, + uint16_t port0, + uint16_t port1) +{ + int32_t cksum1; + + cksum1 = cksum0; + cksum1 = ~cksum1 & 0xFFFF; + + /* Subtract ip0 and port 0 (one's complement logic) */ + cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] + + ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0; + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + + /* Add ip1 and port1 (one's complement logic) */ + cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] + + ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1; + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16); + + return (uint16_t)(~cksum1); +} + +static __rte_always_inline void +pkt_ipv4_work_nat(struct ipv4_hdr *ip, + struct nat_ipv4_data *data, + struct rte_table_action_nat_config *cfg) +{ + if (cfg->source_nat) { + if (cfg->proto == 0x6) { + struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + uint16_t ip_cksum, tcp_cksum; + + ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum, + ip->src_addr, + data->addr); + + tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum, + ip->src_addr, + data->addr, + tcp->src_port, + data->port); + + ip->src_addr = data->addr; + ip->hdr_checksum = ip_cksum; + tcp->src_port = data->port; + tcp->cksum = tcp_cksum; + } else { + struct udp_hdr *udp = (struct udp_hdr *) &ip[1]; + uint16_t ip_cksum, 
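/*
 * (declaration continues below) The nat_*_checksum_update() helpers
 * above follow RFC 1624: un-complement, subtract the old address/port
 * words, add the new ones, folding the carries (twice in the real
 * helpers) after each step, then complement again. Note the IPv4 UDP
 * special case a few lines down: a datagram checksum of 0 means "not
 * computed", so it is left at 0 instead of being updated. Sketch:
 *
 * @code
 * // HC' = ~(~HC - old + new)
 * int32_t sum = ~cksum & 0xFFFF;
 * sum -= old_word;  sum = (sum & 0xFFFF) + (sum >> 16);
 * sum += new_word;  sum = (sum & 0xFFFF) + (sum >> 16);
 * uint16_t cksum1 = (uint16_t)~sum;
 * @endcode
 */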
udp_cksum; + + ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum, + ip->src_addr, + data->addr); + + udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum, + ip->src_addr, + data->addr, + udp->src_port, + data->port); + + ip->src_addr = data->addr; + ip->hdr_checksum = ip_cksum; + udp->src_port = data->port; + if (udp->dgram_cksum) + udp->dgram_cksum = udp_cksum; + } + } else { + if (cfg->proto == 0x6) { + struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + uint16_t ip_cksum, tcp_cksum; + + ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum, + ip->dst_addr, + data->addr); + + tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum, + ip->dst_addr, + data->addr, + tcp->dst_port, + data->port); + + ip->dst_addr = data->addr; + ip->hdr_checksum = ip_cksum; + tcp->dst_port = data->port; + tcp->cksum = tcp_cksum; + } else { + struct udp_hdr *udp = (struct udp_hdr *) &ip[1]; + uint16_t ip_cksum, udp_cksum; + + ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum, + ip->dst_addr, + data->addr); + + udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum, + ip->dst_addr, + data->addr, + udp->dst_port, + data->port); + + ip->dst_addr = data->addr; + ip->hdr_checksum = ip_cksum; + udp->dst_port = data->port; + if (udp->dgram_cksum) + udp->dgram_cksum = udp_cksum; + } + } +} + +static __rte_always_inline void +pkt_ipv6_work_nat(struct ipv6_hdr *ip, + struct nat_ipv6_data *data, + struct rte_table_action_nat_config *cfg) +{ + if (cfg->source_nat) { + if (cfg->proto == 0x6) { + struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + uint16_t tcp_cksum; + + tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum, + (uint16_t *)ip->src_addr, + (uint16_t *)data->addr, + tcp->src_port, + data->port); + + rte_memcpy(ip->src_addr, data->addr, 16); + tcp->src_port = data->port; + tcp->cksum = tcp_cksum; + } else { + struct udp_hdr *udp = (struct udp_hdr *) &ip[1]; + uint16_t udp_cksum; + + udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum, + (uint16_t *)ip->src_addr, + (uint16_t *)data->addr, + udp->src_port, + data->port); + + rte_memcpy(ip->src_addr, data->addr, 16); + udp->src_port = data->port; + udp->dgram_cksum = udp_cksum; + } + } else { + if (cfg->proto == 0x6) { + struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + uint16_t tcp_cksum; + + tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum, + (uint16_t *)ip->dst_addr, + (uint16_t *)data->addr, + tcp->dst_port, + data->port); + + rte_memcpy(ip->dst_addr, data->addr, 16); + tcp->dst_port = data->port; + tcp->cksum = tcp_cksum; + } else { + struct udp_hdr *udp = (struct udp_hdr *) &ip[1]; + uint16_t udp_cksum; + + udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum, + (uint16_t *)ip->dst_addr, + (uint16_t *)data->addr, + udp->dst_port, + data->port); + + rte_memcpy(ip->dst_addr, data->addr, 16); + udp->dst_port = data->port; + udp->dgram_cksum = udp_cksum; + } + } +} + +/** + * RTE_TABLE_ACTION_TTL + */ +static int +ttl_cfg_check(struct rte_table_action_ttl_config *ttl) +{ + if (ttl->drop == 0) + return -ENOTSUP; + + return 0; +} + +struct ttl_data { + uint32_t n_packets; +} __attribute__((__packed__)); + +#define TTL_INIT(data, decrement) \ + ((data)->n_packets = (decrement) ? 
1 : 0) + +#define TTL_DEC_GET(data) \ + ((uint8_t)((data)->n_packets & 1)) + +#define TTL_STATS_RESET(data) \ + ((data)->n_packets = ((data)->n_packets & 1)) + +#define TTL_STATS_READ(data) \ + ((data)->n_packets >> 1) + +#define TTL_STATS_ADD(data, value) \ + ((data)->n_packets = \ + (((((data)->n_packets >> 1) + (value)) << 1) | \ + ((data)->n_packets & 1))) + +static int +ttl_apply(void *data, + struct rte_table_action_ttl_params *p) +{ + struct ttl_data *d = data; + + TTL_INIT(d, p->decrement); + + return 0; +} + +static __rte_always_inline uint64_t +pkt_ipv4_work_ttl(struct ipv4_hdr *ip, + struct ttl_data *data) +{ + uint32_t drop; + uint16_t cksum = ip->hdr_checksum; + uint8_t ttl = ip->time_to_live; + uint8_t ttl_diff = TTL_DEC_GET(data); + + cksum += ttl_diff; + ttl -= ttl_diff; + + ip->hdr_checksum = cksum; + ip->time_to_live = ttl; + + drop = (ttl == 0) ? 1 : 0; + TTL_STATS_ADD(data, drop); + + return drop; +} + +static __rte_always_inline uint64_t +pkt_ipv6_work_ttl(struct ipv6_hdr *ip, + struct ttl_data *data) +{ + uint32_t drop; + uint8_t ttl = ip->hop_limits; + uint8_t ttl_diff = TTL_DEC_GET(data); + + ttl -= ttl_diff; + + ip->hop_limits = ttl; + + drop = (ttl == 0) ? 1 : 0; + TTL_STATS_ADD(data, drop); + + return drop; +} + +/** + * RTE_TABLE_ACTION_STATS + */ +static int +stats_cfg_check(struct rte_table_action_stats_config *stats) +{ + if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0)) + return -EINVAL; + + return 0; +} + +struct stats_data { + uint64_t n_packets; + uint64_t n_bytes; +} __attribute__((__packed__)); + +static int +stats_apply(struct stats_data *data, + struct rte_table_action_stats_params *p) +{ + data->n_packets = p->n_packets; + data->n_bytes = p->n_bytes; + + return 0; +} + +static __rte_always_inline void +pkt_work_stats(struct stats_data *data, + uint16_t total_length) +{ + data->n_packets++; + data->n_bytes += total_length; +} + +/** + * RTE_TABLE_ACTION_TIME + */ +struct time_data { + uint64_t time; +} __attribute__((__packed__)); + +static int +time_apply(struct time_data *data, + struct rte_table_action_time_params *p) +{ + data->time = p->time; + return 0; +} + +static __rte_always_inline void +pkt_work_time(struct time_data *data, + uint64_t time) +{ + data->time = time; +} + + +/** + * RTE_TABLE_ACTION_CRYPTO + */ + +#define CRYPTO_OP_MASK_CIPHER 0x1 +#define CRYPTO_OP_MASK_AUTH 0x2 +#define CRYPTO_OP_MASK_AEAD 0x4 + +struct crypto_op_sym_iv_aad { + struct rte_crypto_op op; + struct rte_crypto_sym_op sym_op; + union { + struct { + uint8_t cipher_iv[ + RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX]; + uint8_t auth_iv[ + RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX]; + } cipher_auth; + + struct { + uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX]; + uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX]; + } aead_iv_aad; + + } iv_aad; +}; + +struct sym_crypto_data { + + union { + struct { + + /** Length of cipher iv. */ + uint16_t cipher_iv_len; + + /** Offset from start of IP header to the cipher iv. */ + uint16_t cipher_iv_data_offset; + + /** Length of cipher iv to be updated in the mbuf. */ + uint16_t cipher_iv_update_len; + + /** Offset from start of IP header to the auth iv. */ + uint16_t auth_iv_data_offset; + + /** Length of auth iv in the mbuf. */ + uint16_t auth_iv_len; + + /** Length of auth iv to be updated in the mbuf. */ + uint16_t auth_iv_update_len; + + } cipher_auth; + struct { + + /** Length of iv. */ + uint16_t iv_len; + + /** Offset from start of IP header to the aead iv. 
*/ + uint16_t iv_data_offset; + + /** Length of iv to be updated in the mbuf. */ + uint16_t iv_update_len; + + /** Length of aad */ + uint16_t aad_len; + + /** Offset from start of IP header to the aad. */ + uint16_t aad_data_offset; + + /** Length of aad to updated in the mbuf. */ + uint16_t aad_update_len; + + } aead; + }; + + /** Offset from start of IP header to the data. */ + uint16_t data_offset; + + /** Digest length. */ + uint16_t digest_len; + + /** block size */ + uint16_t block_size; + + /** Mask of crypto operation */ + uint16_t op_mask; + + /** Session pointer. */ + struct rte_cryptodev_sym_session *session; + + /** Direction of crypto, encrypt or decrypt */ + uint16_t direction; + + /** Private data size to store cipher iv / aad. */ + uint8_t iv_aad_data[32]; + +} __attribute__((__packed__)); + +static int +sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg) +{ + if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id)) + return -EINVAL; + if (cfg->mp_create == NULL || cfg->mp_init == NULL) + return -EINVAL; + + return 0; +} + +static int +get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id) +{ + struct rte_cryptodev_info dev_info; + const struct rte_cryptodev_capabilities *cap; + uint32_t i; + + rte_cryptodev_info_get(cdev_id, &dev_info); + + for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED; + i++) { + cap = &dev_info.capabilities[i]; + + if (cap->sym.xform_type != xform->type) + continue; + + if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (cap->sym.cipher.algo == xform->cipher.algo)) + return cap->sym.cipher.block_size; + + if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) && + (cap->sym.aead.algo == xform->aead.algo)) + return cap->sym.aead.block_size; + + if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED) + break; + } + + return -1; +} + +static int +sym_crypto_apply(struct sym_crypto_data *data, + struct rte_table_action_sym_crypto_config *cfg, + struct rte_table_action_sym_crypto_params *p) +{ + const struct rte_crypto_cipher_xform *cipher_xform = NULL; + const struct rte_crypto_auth_xform *auth_xform = NULL; + const struct rte_crypto_aead_xform *aead_xform = NULL; + struct rte_crypto_sym_xform *xform = p->xform; + struct rte_cryptodev_sym_session *session; + int ret; + + memset(data, 0, sizeof(*data)); + + while (xform) { + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + cipher_xform = &xform->cipher; + + if (cipher_xform->iv.length > + RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) + return -ENOMEM; + if (cipher_xform->iv.offset != + RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET) + return -EINVAL; + + ret = get_block_size(xform, cfg->cryptodev_id); + if (ret < 0) + return -1; + data->block_size = (uint16_t)ret; + data->op_mask |= CRYPTO_OP_MASK_CIPHER; + + data->cipher_auth.cipher_iv_len = + cipher_xform->iv.length; + data->cipher_auth.cipher_iv_data_offset = (uint16_t) + p->cipher_auth.cipher_iv_update.offset; + data->cipher_auth.cipher_iv_update_len = (uint16_t) + p->cipher_auth.cipher_iv_update.length; + + rte_memcpy(data->iv_aad_data, + p->cipher_auth.cipher_iv.val, + p->cipher_auth.cipher_iv.length); + + data->direction = cipher_xform->op; + + } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + auth_xform = &xform->auth; + if (auth_xform->iv.length > + RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) + return -ENOMEM; + data->op_mask |= CRYPTO_OP_MASK_AUTH; + + data->cipher_auth.auth_iv_len = auth_xform->iv.length; + data->cipher_auth.auth_iv_data_offset = (uint16_t) + p->cipher_auth.auth_iv_update.offset; + 
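/*
 * Layout of data->iv_aad_data as established by the rte_memcpy() calls
 * in this function, noted here because the per-packet code below relies
 * on exactly these offsets:
 *
 * @code
 * // cipher + auth:  [ cipher IV | auth IV ]  auth IV at cipher_iv_len
 * // aead:           [ IV        | AAD     ]  AAD at iv_len
 * @endcode
 */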
data->cipher_auth.auth_iv_update_len = (uint16_t) + p->cipher_auth.auth_iv_update.length; + data->digest_len = auth_xform->digest_length; + + data->direction = (auth_xform->op == + RTE_CRYPTO_AUTH_OP_GENERATE) ? + RTE_CRYPTO_CIPHER_OP_ENCRYPT : + RTE_CRYPTO_CIPHER_OP_DECRYPT; + + } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + aead_xform = &xform->aead; + + if ((aead_xform->iv.length > + RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || ( + aead_xform->aad_length > + RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX)) + return -EINVAL; + if (aead_xform->iv.offset != + RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET) + return -EINVAL; + + ret = get_block_size(xform, cfg->cryptodev_id); + if (ret < 0) + return -1; + data->block_size = (uint16_t)ret; + data->op_mask |= CRYPTO_OP_MASK_AEAD; + + data->digest_len = aead_xform->digest_length; + data->aead.iv_len = aead_xform->iv.length; + data->aead.aad_len = aead_xform->aad_length; + + data->aead.iv_data_offset = (uint16_t) + p->aead.iv_update.offset; + data->aead.iv_update_len = (uint16_t) + p->aead.iv_update.length; + data->aead.aad_data_offset = (uint16_t) + p->aead.aad_update.offset; + data->aead.aad_update_len = (uint16_t) + p->aead.aad_update.length; + + rte_memcpy(data->iv_aad_data, + p->aead.iv.val, + p->aead.iv.length); + + rte_memcpy(data->iv_aad_data + p->aead.iv.length, + p->aead.aad.val, + p->aead.aad.length); + + data->direction = (aead_xform->op == + RTE_CRYPTO_AEAD_OP_ENCRYPT) ? + RTE_CRYPTO_CIPHER_OP_ENCRYPT : + RTE_CRYPTO_CIPHER_OP_DECRYPT; + } else + return -EINVAL; + + xform = xform->next; + } + + if (auth_xform && auth_xform->iv.length) { + if (cipher_xform) { + if (auth_xform->iv.offset != + RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET + + cipher_xform->iv.length) + return -EINVAL; + + rte_memcpy(data->iv_aad_data + cipher_xform->iv.length, + p->cipher_auth.auth_iv.val, + p->cipher_auth.auth_iv.length); + } else { + rte_memcpy(data->iv_aad_data, + p->cipher_auth.auth_iv.val, + p->cipher_auth.auth_iv.length); + } + } + + session = rte_cryptodev_sym_session_create(cfg->mp_create); + if (!session) + return -ENOMEM; + + ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session, + p->xform, cfg->mp_init); + if (ret < 0) { + rte_cryptodev_sym_session_free(session); + return ret; + } + + data->data_offset = (uint16_t)p->data_offset; + data->session = session; + + return 0; +} + +static __rte_always_inline uint64_t +pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data, + struct rte_table_action_sym_crypto_config *cfg, + uint16_t ip_offset) +{ + struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *) + RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset); + struct rte_crypto_op *op = &crypto_op->op; + struct rte_crypto_sym_op *sym = op->sym; + uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off; + uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset; + + op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; + op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf); + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + sym->m_src = mbuf; + sym->m_dst = NULL; + sym->session = data->session; + + /** pad the packet */ + if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { + uint32_t append_len = RTE_ALIGN_CEIL(payload_len, + data->block_size) - payload_len; + + if (unlikely(rte_pktmbuf_append(mbuf, append_len + + data->digest_len) == NULL)) + return 1; + + payload_len += append_len; + } else + payload_len -= data->digest_len; + + if (data->op_mask & 
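/*
 * (condition continues below) Just above: on the encrypt path the
 * payload is padded up to the cipher block size and room for the digest
 * is reserved in the same rte_pktmbuf_append() call, dropping the packet
 * if tailroom is insufficient; on the decrypt path the trailing digest
 * is instead excluded from payload_len. Example of the arithmetic:
 *
 * @code
 * // payload_len 61, block_size 16:
 * // append_len = RTE_ALIGN_CEIL(61, 16) - 61 = 3
 * // rte_pktmbuf_append(mbuf, 3 + digest_len)
 * @endcode
 */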
CRYPTO_OP_MASK_CIPHER) { + /** prepare cipher op */ + uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv; + + sym->cipher.data.length = payload_len; + sym->cipher.data.offset = data->data_offset - pkt_offset; + + if (data->cipher_auth.cipher_iv_update_len) { + uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf, + data->cipher_auth.cipher_iv_data_offset + + ip_offset); + + /** For encryption, update the pkt iv field, otherwise + * update the iv_aad_field + **/ + if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + rte_memcpy(pkt_iv, data->iv_aad_data, + data->cipher_auth.cipher_iv_update_len); + else + rte_memcpy(data->iv_aad_data, pkt_iv, + data->cipher_auth.cipher_iv_update_len); + } + + /** write iv */ + rte_memcpy(iv, data->iv_aad_data, + data->cipher_auth.cipher_iv_len); + } + + if (data->op_mask & CRYPTO_OP_MASK_AUTH) { + /** authentication always start from IP header. */ + sym->auth.data.offset = ip_offset - pkt_offset; + sym->auth.data.length = mbuf->data_len - sym->auth.data.offset - + data->digest_len; + sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf, + uint8_t *, rte_pktmbuf_pkt_len(mbuf) - + data->digest_len); + sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, + rte_pktmbuf_pkt_len(mbuf) - data->digest_len); + + if (data->cipher_auth.auth_iv_update_len) { + uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf, + data->cipher_auth.auth_iv_data_offset + + ip_offset); + uint8_t *data_iv = data->iv_aad_data + + data->cipher_auth.cipher_iv_len; + + if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + rte_memcpy(pkt_iv, data_iv, + data->cipher_auth.auth_iv_update_len); + else + rte_memcpy(data_iv, pkt_iv, + data->cipher_auth.auth_iv_update_len); + } + + if (data->cipher_auth.auth_iv_len) { + /** prepare cipher op */ + uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv; + + rte_memcpy(iv, data->iv_aad_data + + data->cipher_auth.cipher_iv_len, + data->cipher_auth.auth_iv_len); + } + } + + if (data->op_mask & CRYPTO_OP_MASK_AEAD) { + uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv; + uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad; + + sym->aead.aad.data = aad; + sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf, + aad - rte_pktmbuf_mtod(mbuf, uint8_t *)); + sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf, + uint8_t *, rte_pktmbuf_pkt_len(mbuf) - + data->digest_len); + sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, + rte_pktmbuf_pkt_len(mbuf) - data->digest_len); + sym->aead.data.offset = data->data_offset - pkt_offset; + sym->aead.data.length = payload_len; + + if (data->aead.iv_update_len) { + uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf, + data->aead.iv_data_offset + ip_offset); + uint8_t *data_iv = data->iv_aad_data; + + if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + rte_memcpy(pkt_iv, data_iv, + data->aead.iv_update_len); + else + rte_memcpy(data_iv, pkt_iv, + data->aead.iv_update_len); + } - return drop_mask; -} + rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len); -/** - * RTE_TABLE_ACTION_TM - */ -static int -tm_cfg_check(struct rte_table_action_tm_config *tm) -{ - if ((tm->n_subports_per_port == 0) || - (rte_is_power_of_2(tm->n_subports_per_port) == 0) || - (tm->n_subports_per_port > UINT16_MAX) || - (tm->n_pipes_per_subport == 0) || - (rte_is_power_of_2(tm->n_pipes_per_subport) == 0)) - return -ENOTSUP; + if (data->aead.aad_update_len) { + uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf, + data->aead.aad_data_offset + ip_offset); + uint8_t *data_aad = data->iv_aad_data + + data->aead.iv_len; + + if (data->direction 
== RTE_CRYPTO_CIPHER_OP_ENCRYPT) + rte_memcpy(pkt_aad, data_aad, + data->aead.iv_update_len); + else + rte_memcpy(data_aad, pkt_aad, + data->aead.iv_update_len); + } + + rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len, + data->aead.aad_len); + } return 0; } -struct tm_data { - uint16_t queue_tc_color; - uint16_t subport; - uint32_t pipe; +/** + * RTE_TABLE_ACTION_TAG + */ +struct tag_data { + uint32_t tag; } __attribute__((__packed__)); static int -tm_apply_check(struct rte_table_action_tm_params *p, - struct rte_table_action_tm_config *cfg) +tag_apply(struct tag_data *data, + struct rte_table_action_tag_params *p) { - if ((p->subport_id >= cfg->n_subports_per_port) || - (p->pipe_id >= cfg->n_pipes_per_subport)) - return -EINVAL; - + data->tag = p->tag; return 0; } -static int -tm_apply(struct tm_data *data, - struct rte_table_action_tm_params *p, - struct rte_table_action_tm_config *cfg) +static __rte_always_inline void +pkt_work_tag(struct rte_mbuf *mbuf, + struct tag_data *data) { - int status; + mbuf->hash.fdir.hi = data->tag; + mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; +} - /* Check input arguments */ - status = tm_apply_check(p, cfg); - if (status) - return status; +static __rte_always_inline void +pkt4_work_tag(struct rte_mbuf *mbuf0, + struct rte_mbuf *mbuf1, + struct rte_mbuf *mbuf2, + struct rte_mbuf *mbuf3, + struct tag_data *data0, + struct tag_data *data1, + struct tag_data *data2, + struct tag_data *data3) +{ + mbuf0->hash.fdir.hi = data0->tag; + mbuf1->hash.fdir.hi = data1->tag; + mbuf2->hash.fdir.hi = data2->tag; + mbuf3->hash.fdir.hi = data3->tag; + + mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; +} - /* Apply */ - data->queue_tc_color = 0; - data->subport = (uint16_t) p->subport_id; - data->pipe = p->pipe_id; +/** + * RTE_TABLE_ACTION_DECAP + */ +struct decap_data { + uint16_t n; +} __attribute__((__packed__)); +static int +decap_apply(struct decap_data *data, + struct rte_table_action_decap_params *p) +{ + data->n = p->n; return 0; } static __rte_always_inline void -pkt_work_tm(struct rte_mbuf *mbuf, - struct tm_data *data, - struct dscp_table_data *dscp_table, - uint32_t dscp) +pkt_work_decap(struct rte_mbuf *mbuf, + struct decap_data *data) { - struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp]; - struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched; - struct tm_data sched; + uint16_t data_off = mbuf->data_off; + uint16_t data_len = mbuf->data_len; + uint32_t pkt_len = mbuf->pkt_len; + uint16_t n = data->n; + + mbuf->data_off = data_off + n; + mbuf->data_len = data_len - n; + mbuf->pkt_len = pkt_len - n; +} - sched = *data; - sched.queue_tc_color = dscp_entry->queue_tc_color; - *sched_ptr = sched; +static __rte_always_inline void +pkt4_work_decap(struct rte_mbuf *mbuf0, + struct rte_mbuf *mbuf1, + struct rte_mbuf *mbuf2, + struct rte_mbuf *mbuf3, + struct decap_data *data0, + struct decap_data *data1, + struct decap_data *data2, + struct decap_data *data3) +{ + uint16_t data_off0 = mbuf0->data_off; + uint16_t data_len0 = mbuf0->data_len; + uint32_t pkt_len0 = mbuf0->pkt_len; + + uint16_t data_off1 = mbuf1->data_off; + uint16_t data_len1 = mbuf1->data_len; + uint32_t pkt_len1 = mbuf1->pkt_len; + + uint16_t data_off2 = mbuf2->data_off; + uint16_t data_len2 = mbuf2->data_len; + uint32_t pkt_len2 = mbuf2->pkt_len; + + uint16_t data_off3 = mbuf3->data_off; + uint16_t data_len3 = 
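/*
 * (initializer continues below) The TAG action above publishes its value
 * through the flow-director mbuf fields, so downstream stages read it
 * with the usual fdir idiom (hedged consumer-side sketch):
 *
 * @code
 * if (mbuf->ol_flags & PKT_RX_FDIR_ID) {
 *         uint32_t tag = mbuf->hash.fdir.hi;
 *         // dispatch on tag ...
 * }
 * @endcode
 */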
mbuf3->data_len; + uint32_t pkt_len3 = mbuf3->pkt_len; + + uint16_t n0 = data0->n; + uint16_t n1 = data1->n; + uint16_t n2 = data2->n; + uint16_t n3 = data3->n; + + mbuf0->data_off = data_off0 + n0; + mbuf0->data_len = data_len0 - n0; + mbuf0->pkt_len = pkt_len0 - n0; + + mbuf1->data_off = data_off1 + n1; + mbuf1->data_len = data_len1 - n1; + mbuf1->pkt_len = pkt_len1 - n1; + + mbuf2->data_off = data_off2 + n2; + mbuf2->data_len = data_len2 - n2; + mbuf2->pkt_len = pkt_len2 - n2; + + mbuf3->data_off = data_off3 + n3; + mbuf3->data_len = data_len3 - n3; + mbuf3->pkt_len = pkt_len3 - n3; } /** @@ -373,8 +2186,17 @@ action_valid(enum rte_table_action_type action) { switch (action) { case RTE_TABLE_ACTION_FWD: + case RTE_TABLE_ACTION_LB: case RTE_TABLE_ACTION_MTR: case RTE_TABLE_ACTION_TM: + case RTE_TABLE_ACTION_ENCAP: + case RTE_TABLE_ACTION_NAT: + case RTE_TABLE_ACTION_TTL: + case RTE_TABLE_ACTION_STATS: + case RTE_TABLE_ACTION_TIME: + case RTE_TABLE_ACTION_SYM_CRYPTO: + case RTE_TABLE_ACTION_TAG: + case RTE_TABLE_ACTION_DECAP: return 1; default: return 0; @@ -387,18 +2209,36 @@ action_valid(enum rte_table_action_type action) struct ap_config { uint64_t action_mask; struct rte_table_action_common_config common; + struct rte_table_action_lb_config lb; struct rte_table_action_mtr_config mtr; struct rte_table_action_tm_config tm; + struct rte_table_action_encap_config encap; + struct rte_table_action_nat_config nat; + struct rte_table_action_ttl_config ttl; + struct rte_table_action_stats_config stats; + struct rte_table_action_sym_crypto_config sym_crypto; }; static size_t action_cfg_size(enum rte_table_action_type action) { switch (action) { + case RTE_TABLE_ACTION_LB: + return sizeof(struct rte_table_action_lb_config); case RTE_TABLE_ACTION_MTR: return sizeof(struct rte_table_action_mtr_config); case RTE_TABLE_ACTION_TM: return sizeof(struct rte_table_action_tm_config); + case RTE_TABLE_ACTION_ENCAP: + return sizeof(struct rte_table_action_encap_config); + case RTE_TABLE_ACTION_NAT: + return sizeof(struct rte_table_action_nat_config); + case RTE_TABLE_ACTION_TTL: + return sizeof(struct rte_table_action_ttl_config); + case RTE_TABLE_ACTION_STATS: + return sizeof(struct rte_table_action_stats_config); + case RTE_TABLE_ACTION_SYM_CRYPTO: + return sizeof(struct rte_table_action_sym_crypto_config); default: return 0; } @@ -409,12 +2249,29 @@ action_cfg_get(struct ap_config *ap_config, enum rte_table_action_type type) { switch (type) { + case RTE_TABLE_ACTION_LB: + return &ap_config->lb; + case RTE_TABLE_ACTION_MTR: return &ap_config->mtr; case RTE_TABLE_ACTION_TM: return &ap_config->tm; + case RTE_TABLE_ACTION_ENCAP: + return &ap_config->encap; + + case RTE_TABLE_ACTION_NAT: + return &ap_config->nat; + + case RTE_TABLE_ACTION_TTL: + return &ap_config->ttl; + + case RTE_TABLE_ACTION_STATS: + return &ap_config->stats; + + case RTE_TABLE_ACTION_SYM_CRYPTO: + return &ap_config->sym_crypto; default: return NULL; } @@ -446,12 +2303,40 @@ action_data_size(enum rte_table_action_type action, case RTE_TABLE_ACTION_FWD: return sizeof(struct fwd_data); + case RTE_TABLE_ACTION_LB: + return sizeof(struct lb_data); + case RTE_TABLE_ACTION_MTR: return mtr_data_size(&ap_config->mtr); case RTE_TABLE_ACTION_TM: return sizeof(struct tm_data); + case RTE_TABLE_ACTION_ENCAP: + return encap_data_size(&ap_config->encap); + + case RTE_TABLE_ACTION_NAT: + return nat_data_size(&ap_config->nat, + &ap_config->common); + + case RTE_TABLE_ACTION_TTL: + return sizeof(struct ttl_data); + + case RTE_TABLE_ACTION_STATS: + return 
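/*
 * (return expression continues below) The sizes reported by
 * action_data_size() appear to fix each action's slot inside the single
 * per-entry data blob, which action_data_get() resolves per packet; this
 * is inferred from the file's structure, not a documented contract.
 *
 * @code
 * // offset(action) = sum of data sizes of enabled lower-numbered actions
 * @endcode
 */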
@@ -409,12 +2249,29 @@ action_cfg_get(struct ap_config *ap_config,
 	enum rte_table_action_type type)
 {
 	switch (type) {
+	case RTE_TABLE_ACTION_LB:
+		return &ap_config->lb;
+
 	case RTE_TABLE_ACTION_MTR:
 		return &ap_config->mtr;
 
 	case RTE_TABLE_ACTION_TM:
 		return &ap_config->tm;
 
+	case RTE_TABLE_ACTION_ENCAP:
+		return &ap_config->encap;
+
+	case RTE_TABLE_ACTION_NAT:
+		return &ap_config->nat;
+
+	case RTE_TABLE_ACTION_TTL:
+		return &ap_config->ttl;
+
+	case RTE_TABLE_ACTION_STATS:
+		return &ap_config->stats;
+
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		return &ap_config->sym_crypto;
 	default:
 		return NULL;
 	}
@@ -446,12 +2303,40 @@ action_data_size(enum rte_table_action_type action,
 	case RTE_TABLE_ACTION_FWD:
 		return sizeof(struct fwd_data);
 
+	case RTE_TABLE_ACTION_LB:
+		return sizeof(struct lb_data);
+
 	case RTE_TABLE_ACTION_MTR:
 		return mtr_data_size(&ap_config->mtr);
 
 	case RTE_TABLE_ACTION_TM:
 		return sizeof(struct tm_data);
 
+	case RTE_TABLE_ACTION_ENCAP:
+		return encap_data_size(&ap_config->encap);
+
+	case RTE_TABLE_ACTION_NAT:
+		return nat_data_size(&ap_config->nat,
+			&ap_config->common);
+
+	case RTE_TABLE_ACTION_TTL:
+		return sizeof(struct ttl_data);
+
+	case RTE_TABLE_ACTION_STATS:
+		return sizeof(struct stats_data);
+
+	case RTE_TABLE_ACTION_TIME:
+		return sizeof(struct time_data);
+
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		return (sizeof(struct sym_crypto_data));
+
+	case RTE_TABLE_ACTION_TAG:
+		return sizeof(struct tag_data);
+
+	case RTE_TABLE_ACTION_DECAP:
+		return sizeof(struct decap_data);
+
 	default:
 		return 0;
 	}
@@ -523,6 +2408,10 @@ rte_table_action_profile_action_register(struct rte_table_action_profile *profil
 		return -EINVAL;
 
 	switch (type) {
+	case RTE_TABLE_ACTION_LB:
+		status = lb_cfg_check(action_config);
+		break;
+
 	case RTE_TABLE_ACTION_MTR:
 		status = mtr_cfg_check(action_config);
 		break;
@@ -531,6 +2420,26 @@ rte_table_action_profile_action_register(struct rte_table_action_profile *profil
 		status = tm_cfg_check(action_config);
 		break;
 
+	case RTE_TABLE_ACTION_ENCAP:
+		status = encap_cfg_check(action_config);
+		break;
+
+	case RTE_TABLE_ACTION_NAT:
+		status = nat_cfg_check(action_config);
+		break;
+
+	case RTE_TABLE_ACTION_TTL:
+		status = ttl_cfg_check(action_config);
+		break;
+
+	case RTE_TABLE_ACTION_STATS:
+		status = stats_cfg_check(action_config);
+		break;
+
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		status = sym_crypto_cfg_check(action_config);
+		break;
+
 	default:
 		status = 0;
 		break;
@@ -641,6 +2550,10 @@ rte_table_action_apply(struct rte_table_action *action,
 		return fwd_apply(action_data,
 			action_params);
 
+	case RTE_TABLE_ACTION_LB:
+		return lb_apply(action_data,
+			action_params);
+
 	case RTE_TABLE_ACTION_MTR:
 		return mtr_apply(action_data,
 			action_params,
@@ -653,6 +2566,42 @@ rte_table_action_apply(struct rte_table_action *action,
 			action_params,
 			&action->cfg.tm);
 
+	case RTE_TABLE_ACTION_ENCAP:
+		return encap_apply(action_data,
+			action_params,
+			&action->cfg.encap,
+			&action->cfg.common);
+
+	case RTE_TABLE_ACTION_NAT:
+		return nat_apply(action_data,
+			action_params,
+			&action->cfg.common);
+
+	case RTE_TABLE_ACTION_TTL:
+		return ttl_apply(action_data,
+			action_params);
+
+	case RTE_TABLE_ACTION_STATS:
+		return stats_apply(action_data,
+			action_params);
+
+	case RTE_TABLE_ACTION_TIME:
+		return time_apply(action_data,
+			action_params);
+
+	case RTE_TABLE_ACTION_SYM_CRYPTO:
+		return sym_crypto_apply(action_data,
+			&action->cfg.sym_crypto,
+			action_params);
+
+	case RTE_TABLE_ACTION_TAG:
+		return tag_apply(action_data,
+			action_params);
+
+	case RTE_TABLE_ACTION_DECAP:
+		return decap_apply(action_data,
+			action_params);
+
 	default:
 		return -EINVAL;
 	}
@@ -678,17 +2627,13 @@ rte_table_action_dscp_table_update(struct rte_table_action *action,
 			&action->dscp_table.entry[i];
 		struct rte_table_action_dscp_table_entry *entry = &table->entry[i];
-		uint16_t queue_tc_color =
-			MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
-				entry->tc_id,
-				entry->color);
 
 		if ((dscp_mask & (1LLU << i)) == 0)
 			continue;
 
 		data->color = entry->color;
 		data->tc = entry->tc_id;
-		data->queue_tc_color = queue_tc_color;
+		data->tc_queue = entry->tc_queue_id;
 	}
 
 	return 0;
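With the packed queue_tc_color word gone, a DSCP table entry now carries the traffic class and the queue within that class as separate fields; the final encoding into the mbuf happens at run time via rte_mbuf_sched_set(). A hedged sketch of populating the 64-entry table, with illustrative entry values:

	#include <rte_table_action.h>

	/* Map all 64 DSCP code points to tc 0, queue 0, green, then remap
	 * DSCP 46 (EF) to a dedicated class/queue. Field names follow
	 * struct rte_table_action_dscp_table_entry in rte_table_action.h. */
	static int
	example_dscp_table_setup(struct rte_table_action *a)
	{
		struct rte_table_action_dscp_table table;
		uint32_t i;

		for (i = 0; i < 64; i++) {
			table.entry[i].tc_id = 0;
			table.entry[i].tc_queue_id = 0;
			table.entry[i].color = RTE_COLOR_GREEN;
		}

		table.entry[46].tc_id = 3;
		table.entry[46].tc_queue_id = 1;

		/* UINT64_MAX selects all 64 entries for update. */
		return rte_table_action_dscp_table_update(a, UINT64_MAX, &table);
	}
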
@@ -786,14 +2731,14 @@ rte_table_action_meter_read(struct rte_table_action *action,
 		if ((tc_mask & (1 << i)) == 0)
 			continue;
 
-		dst->n_packets[e_RTE_METER_GREEN] =
-			mtr_trtcm_data_stats_get(src, e_RTE_METER_GREEN);
+		dst->n_packets[RTE_COLOR_GREEN] =
+			mtr_trtcm_data_stats_get(src, RTE_COLOR_GREEN);
 
-		dst->n_packets[e_RTE_METER_YELLOW] =
-			mtr_trtcm_data_stats_get(src, e_RTE_METER_YELLOW);
+		dst->n_packets[RTE_COLOR_YELLOW] =
+			mtr_trtcm_data_stats_get(src, RTE_COLOR_YELLOW);
 
-		dst->n_packets[e_RTE_METER_RED] =
-			mtr_trtcm_data_stats_get(src, e_RTE_METER_RED);
+		dst->n_packets[RTE_COLOR_RED] =
+			mtr_trtcm_data_stats_get(src, RTE_COLOR_RED);
 
 		dst->n_packets_valid = 1;
 		dst->n_bytes_valid = 0;
@@ -810,15 +2755,120 @@ rte_table_action_meter_read(struct rte_table_action *action,
 		if ((tc_mask & (1 << i)) == 0)
 			continue;
 
-		mtr_trtcm_data_stats_reset(src, e_RTE_METER_GREEN);
-		mtr_trtcm_data_stats_reset(src, e_RTE_METER_YELLOW);
-		mtr_trtcm_data_stats_reset(src, e_RTE_METER_RED);
+		mtr_trtcm_data_stats_reset(src, RTE_COLOR_GREEN);
+		mtr_trtcm_data_stats_reset(src, RTE_COLOR_YELLOW);
+		mtr_trtcm_data_stats_reset(src, RTE_COLOR_RED);
 	}
 
 	return 0;
 }
 
+int
+rte_table_action_ttl_read(struct rte_table_action *action,
+	void *data,
+	struct rte_table_action_ttl_counters *stats,
+	int clear)
+{
+	struct ttl_data *ttl_data;
+
+	/* Check input arguments */
+	if ((action == NULL) ||
+		((action->cfg.action_mask &
+		(1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
+		(data == NULL))
+		return -EINVAL;
+
+	ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
+
+	/* Read */
+	if (stats)
+		stats->n_packets = TTL_STATS_READ(ttl_data);
+
+	/* Clear */
+	if (clear)
+		TTL_STATS_RESET(ttl_data);
+
+	return 0;
+}
+
+int
+rte_table_action_stats_read(struct rte_table_action *action,
+	void *data,
+	struct rte_table_action_stats_counters *stats,
+	int clear)
+{
+	struct stats_data *stats_data;
+
+	/* Check input arguments */
+	if ((action == NULL) ||
+		((action->cfg.action_mask &
+		(1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
+		(data == NULL))
+		return -EINVAL;
+
+	stats_data = action_data_get(data, action,
+		RTE_TABLE_ACTION_STATS);
+
+	/* Read */
+	if (stats) {
+		stats->n_packets = stats_data->n_packets;
+		stats->n_bytes = stats_data->n_bytes;
+		stats->n_packets_valid = 1;
+		stats->n_bytes_valid = 1;
+	}
+
+	/* Clear */
+	if (clear) {
+		stats_data->n_packets = 0;
+		stats_data->n_bytes = 0;
+	}
+
+	return 0;
+}
+
+int
+rte_table_action_time_read(struct rte_table_action *action,
+	void *data,
+	uint64_t *timestamp)
+{
+	struct time_data *time_data;
+
+	/* Check input arguments */
+	if ((action == NULL) ||
+		((action->cfg.action_mask &
+		(1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
+		(data == NULL) ||
+		(timestamp == NULL))
+		return -EINVAL;
+
+	time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
+
+	/* Read */
+	*timestamp = time_data->time;
+
+	return 0;
+}
+
+struct rte_cryptodev_sym_session *
+rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
+	void *data)
+{
+	struct sym_crypto_data *sym_crypto_data;
+
+	/* Check input arguments */
+	if ((action == NULL) ||
+		((action->cfg.action_mask &
+		(1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
+		(data == NULL))
+		return NULL;
+
+	sym_crypto_data = action_data_get(data, action,
+		RTE_TABLE_ACTION_SYM_CRYPTO);
+
+	return sym_crypto_data->session;
+}
+
 static __rte_always_inline uint64_t
 pkt_work(struct rte_mbuf *mbuf,
 	struct rte_pipeline_table_entry *table_entry,
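The new read accessors above all follow the same pattern: verify the action was registered in the profile, locate the per-entry action data, then read and optionally clear. A usage sketch; 'entry' is assumed to be the table entry pointer obtained when the rule was added to the pipeline table:

	#include <inttypes.h>
	#include <stdio.h>
	#include <rte_table_action.h>

	/* Periodic counter poll for one table entry. */
	static void
	example_poll_entry(struct rte_table_action *a, void *entry)
	{
		struct rte_table_action_stats_counters stats;
		struct rte_table_action_ttl_counters ttl;
		uint64_t t;

		/* Read and clear the per-entry packet/byte counters. */
		if (rte_table_action_stats_read(a, entry, &stats, 1) == 0 &&
		    stats.n_packets_valid)
			printf("pkts=%" PRIu64 " bytes=%" PRIu64 "\n",
				stats.n_packets, stats.n_bytes);

		/* Packets dropped by the TTL action, left accumulating. */
		if (rte_table_action_ttl_read(a, entry, &ttl, 0) == 0)
			printf("ttl drops=%" PRIu64 "\n", ttl.n_packets);

		/* Timestamp of the last hit, in TSC cycles (the action
		 * handler stores rte_rdtsc(), see the ah() hunk below). */
		if (rte_table_action_time_read(a, entry, &t) == 0)
			printf("last seen=%" PRIu64 "\n", t);
	}
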
@@ -847,6 +2897,14 @@ pkt_work(struct rte_mbuf *mbuf,
 			rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
 	}
 
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+		void *data =
+			action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
+
+		pkt_work_lb(mbuf,
+			data,
+			&cfg->lb);
+	}
 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
 		void *data =
 			action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
@@ -870,6 +2928,76 @@ pkt_work(struct rte_mbuf *mbuf,
 			dscp);
 	}
 
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+		void *data = action_data_get(table_entry,
+			action,
+			RTE_TABLE_ACTION_DECAP);
+
+		pkt_work_decap(mbuf, data);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+		void *data =
+			action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
+
+		pkt_work_encap(mbuf,
+			data,
+			&cfg->encap,
+			ip,
+			total_length,
+			ip_offset);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+		void *data =
+			action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
+
+		if (cfg->common.ip_version)
+			pkt_ipv4_work_nat(ip, data, &cfg->nat);
+		else
+			pkt_ipv6_work_nat(ip, data, &cfg->nat);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+		void *data =
+			action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
+
+		if (cfg->common.ip_version)
+			drop_mask |= pkt_ipv4_work_ttl(ip, data);
+		else
+			drop_mask |= pkt_ipv6_work_ttl(ip, data);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+		void *data =
+			action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
+
+		pkt_work_stats(data, total_length);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+		void *data =
+			action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
+
+		pkt_work_time(data, time);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+		void *data = action_data_get(table_entry, action,
+			RTE_TABLE_ACTION_SYM_CRYPTO);
+
+		drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
+			ip_offset);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+		void *data = action_data_get(table_entry,
+			action,
+			RTE_TABLE_ACTION_TAG);
+
+		pkt_work_tag(mbuf, data);
+	}
+
 	return drop_mask;
 }
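pkt_work() above folds every action's verdict into a single 0/1 drop_mask for its packet; the four-packet variant below then packs the four verdicts into bits 0..3 of one group mask. A standalone illustration of that packing, mirroring the return statement of pkt4_work():

	#include <stdint.h>

	/* Four 0/1 per-packet drop decisions packed into a group mask. */
	static inline uint64_t
	pack_drop_mask4(uint64_t d0, uint64_t d1, uint64_t d2, uint64_t d3)
	{
		return d0 | (d1 << 1) | (d2 << 2) | (d3 << 3);
	}

	/* pack_drop_mask4(0, 1, 0, 1) == 0xA: packets 1 and 3 dropped. */
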
@@ -940,6 +3068,33 @@ pkt4_work(struct rte_mbuf **mbufs,
 			rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
 	}
 
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+		void *data0 =
+			action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
+		void *data1 =
+			action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
+		void *data2 =
+			action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
+		void *data3 =
+			action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
+
+		pkt_work_lb(mbuf0,
+			data0,
+			&cfg->lb);
+
+		pkt_work_lb(mbuf1,
+			data1,
+			&cfg->lb);
+
+		pkt_work_lb(mbuf2,
+			data2,
+			&cfg->lb);
+
+		pkt_work_lb(mbuf3,
+			data3,
+			&cfg->lb);
+	}
+
 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
 		void *data0 =
 			action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
@@ -1014,6 +3169,179 @@ pkt4_work(struct rte_mbuf **mbufs,
 			dscp3);
 	}
 
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+		void *data0 = action_data_get(table_entry0,
+			action,
+			RTE_TABLE_ACTION_DECAP);
+		void *data1 = action_data_get(table_entry1,
+			action,
+			RTE_TABLE_ACTION_DECAP);
+		void *data2 = action_data_get(table_entry2,
+			action,
+			RTE_TABLE_ACTION_DECAP);
+		void *data3 = action_data_get(table_entry3,
+			action,
+			RTE_TABLE_ACTION_DECAP);
+
+		pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
+			data0, data1, data2, data3);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+		void *data0 =
+			action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
+		void *data1 =
+			action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
+		void *data2 =
+			action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
+		void *data3 =
+			action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
+
+		pkt_work_encap(mbuf0,
+			data0,
+			&cfg->encap,
+			ip0,
+			total_length0,
+			ip_offset);
+
+		pkt_work_encap(mbuf1,
+			data1,
+			&cfg->encap,
+			ip1,
+			total_length1,
+			ip_offset);
+
+		pkt_work_encap(mbuf2,
+			data2,
+			&cfg->encap,
+			ip2,
+			total_length2,
+			ip_offset);
+
+		pkt_work_encap(mbuf3,
+			data3,
+			&cfg->encap,
+			ip3,
+			total_length3,
+			ip_offset);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+		void *data0 =
+			action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
+		void *data1 =
+			action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
+		void *data2 =
+			action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
+		void *data3 =
+			action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
+
+		if (cfg->common.ip_version) {
+			pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
+			pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
+			pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
+			pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
+		} else {
+			pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
+			pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
+			pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
+			pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
+		}
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+		void *data0 =
+			action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
+		void *data1 =
+			action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
+		void *data2 =
+			action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
+		void *data3 =
+			action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
+
+		if (cfg->common.ip_version) {
+			drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
+			drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
+			drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
+			drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
+		} else {
+			drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
+			drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
+			drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
+			drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
+		}
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+		void *data0 =
+			action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
+		void *data1 =
+			action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
+		void *data2 =
+			action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
+		void *data3 =
+			action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
+
+		pkt_work_stats(data0, total_length0);
+		pkt_work_stats(data1, total_length1);
+		pkt_work_stats(data2, total_length2);
+		pkt_work_stats(data3, total_length3);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+		void *data0 =
+			action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
+		void *data1 =
+			action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
+		void *data2 =
+			action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
+		void *data3 =
+			action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
+
+		pkt_work_time(data0, time);
+		pkt_work_time(data1, time);
+		pkt_work_time(data2, time);
+		pkt_work_time(data3, time);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+		void *data0 = action_data_get(table_entry0, action,
+			RTE_TABLE_ACTION_SYM_CRYPTO);
+		void *data1 = action_data_get(table_entry1, action,
+			RTE_TABLE_ACTION_SYM_CRYPTO);
+		void *data2 = action_data_get(table_entry2, action,
+			RTE_TABLE_ACTION_SYM_CRYPTO);
+		void *data3 = action_data_get(table_entry3, action,
+			RTE_TABLE_ACTION_SYM_CRYPTO);
+
+		drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
+			ip_offset);
+		drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
+			ip_offset);
+		drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
+			ip_offset);
+		drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
+			ip_offset);
+	}
+
+	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+		void *data0 = action_data_get(table_entry0,
+			action,
+			RTE_TABLE_ACTION_TAG);
+		void *data1 = action_data_get(table_entry1,
+			action,
+			RTE_TABLE_ACTION_TAG);
+		void *data2 = action_data_get(table_entry2,
+			action,
+			RTE_TABLE_ACTION_TAG);
+		void *data3 = action_data_get(table_entry3,
+			action,
+			RTE_TABLE_ACTION_TAG);
+
+		pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
+			data0, data1, data2, data3);
+	}
+
 	return drop_mask0 |
 		(drop_mask1 << 1) |
 		(drop_mask2 << 2) |
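The action handler in the hunk below only takes the four-at-a-time path when every slot in the burst holds a packet. The existing test (pkts_mask & (pkts_mask + 1)) == 0 is true exactly for masks whose set bits are contiguous from bit 0, because adding 1 then carries through all the trailing ones and clears the mask; any hole stops the carry and leaves a set bit behind. A self-contained check of that identity:

	#include <assert.h>
	#include <stdint.h>

	/* True when mask m is of the form 0...011...1 (including 0). */
	static inline int
	mask_is_contiguous(uint64_t m)
	{
		return (m & (m + 1)) == 0;
	}

	int main(void)
	{
		assert(mask_is_contiguous(0x0F));  /* packets 0..3 valid */
		assert(mask_is_contiguous(0x00));  /* empty burst */
		assert(!mask_is_contiguous(0x0B)); /* hole at bit 2 */
		return 0;
	}
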
@@ -1031,7 +3359,8 @@ ah(struct rte_pipeline *p,
 	uint64_t pkts_drop_mask = 0;
 	uint64_t time = 0;
 
-	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR))
+	if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
+		(1LLU << RTE_TABLE_ACTION_TIME)))
 		time = rte_rdtsc();
 
 	if ((pkts_mask & (pkts_mask + 1)) == 0) {