+ mtr_trtcm_data_policer_action_set(data_tc,
+ e_RTE_METER_RED,
+ p_tc->policer[e_RTE_METER_RED]);
+ }
+
+ return 0;
+}
+
/*
 * Per-packet work for the meter (MTR) action.
 *
 * Looks up the traffic class and input color for the packet's DSCP value,
 * runs the color-aware trTCM meter of that traffic class, bumps the
 * per-output-color statistics counter and applies the policer action
 * (per-color drop decision plus output color re-marking).
 *
 * Returns the drop mask: non-zero when the policer action for the metered
 * color is "drop", zero otherwise.
 */
static __rte_always_inline uint64_t
pkt_work_mtr(struct rte_mbuf *mbuf,
	struct mtr_trtcm_data *data,
	struct dscp_table_data *dscp_table,
	struct meter_profile_data *mp,
	uint64_t time,
	uint32_t dscp,
	uint16_t total_length)
{
	uint64_t drop_mask, sched;
	/* NOTE(review): reinterprets mbuf->hash.sched as a raw 64-bit word;
	 * assumes the field is at least 8 bytes and suitably aligned —
	 * confirm against the rte_mbuf layout in use. */
	uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
	struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
	enum rte_meter_color color_in, color_meter, color_policer;
	uint32_t tc, mp_id;

	tc = dscp_entry->tc;
	color_in = dscp_entry->color;
	data += tc; /* select the trTCM context of this traffic class */
	mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
	sched = *sched_ptr;

	/* Meter */
	color_meter = rte_meter_trtcm_color_aware_check(
		&data->trtcm,
		&mp[mp_id].profile,
		time,
		total_length,
		color_in);

	/* Stats */
	MTR_TRTCM_DATA_STATS_INC(data, color_meter);

	/* Police: drop decision and output color for the metered color */
	drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
	color_policer =
		MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
	*sched_ptr = MBUF_SCHED_COLOR(sched, color_policer);

	return drop_mask;
}
+
+/**
+ * RTE_TABLE_ACTION_TM
+ */
+static int
+tm_cfg_check(struct rte_table_action_tm_config *tm)
+{
+ if ((tm->n_subports_per_port == 0) ||
+ (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
+ (tm->n_subports_per_port > UINT16_MAX) ||
+ (tm->n_pipes_per_subport == 0) ||
+ (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
+ return -ENOTSUP;
+
+ return 0;
+}
+
/* Per-table-entry data for the TM action: the scheduler hint written into
 * mbuf->hash.sched. Packed so it overlays the 64-bit sched field exactly. */
struct tm_data {
	uint16_t queue_tc_color; /* filled per packet from the DSCP table */
	uint16_t subport;
	uint32_t pipe;
} __attribute__((__packed__));
+
+static int
+tm_apply_check(struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ if ((p->subport_id >= cfg->n_subports_per_port) ||
+ (p->pipe_id >= cfg->n_pipes_per_subport))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+tm_apply(struct tm_data *data,
+ struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = tm_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ data->queue_tc_color = 0;
+ data->subport = (uint16_t) p->subport_id;
+ data->pipe = p->pipe_id;
+
+ return 0;
+}
+
/*
 * Per-packet work for the TM action: write the scheduler hint into
 * mbuf->hash.sched, combining the entry's subport/pipe with the
 * queue/tc/color resolved from the packet's DSCP value.
 */
static __rte_always_inline void
pkt_work_tm(struct rte_mbuf *mbuf,
	struct tm_data *data,
	struct dscp_table_data *dscp_table,
	uint32_t dscp)
{
	struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
	/* NOTE(review): overlays struct tm_data on mbuf->hash.sched; assumes
	 * the field is large enough and suitably aligned — confirm. */
	struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
	struct tm_data sched;

	sched = *data;
	sched.queue_tc_color = dscp_entry->queue_tc_color;
	*sched_ptr = sched;
}
+
+/**
+ * RTE_TABLE_ACTION_ENCAP
+ */
+static int
+encap_valid(enum rte_table_action_encap_type encap)
+{
+ switch (encap) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_cfg_check(struct rte_table_action_encap_config *encap)
+{
+ if ((encap->encap_mask == 0) ||
+ (__builtin_popcountll(encap->encap_mask) != 1))
+ return -ENOTSUP;
+
+ return 0;
+}
+
/* Pre-computed header for ETHER encapsulation: Ethernet header only. */
struct encap_ether_data {
	struct ether_hdr ether;
} __attribute__((__packed__));
+
/*
 * Build a VLAN TCI (host byte order) from the PCP, DEI and VID fields.
 *
 * Fixes vs. previous version: the (uint16_t) cast now covers the whole
 * expression (it previously bound only to the PCP term, making the
 * macro's value uint64_t — wrong in varargs/sizeof contexts), and the
 * stray trailing backslash that continued the macro onto the following
 * source line is removed.
 */
#define VLAN(pcp, dei, vid) \
	((uint16_t)(((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
	((((uint64_t)(dei)) & 0x1LLU) << 12) | \
	(((uint64_t)(vid)) & 0xFFFLLU)))
+
/* Pre-computed headers for VLAN encapsulation: Ethernet + one VLAN tag. */
struct encap_vlan_data {
	struct ether_hdr ether;
	struct vlan_hdr vlan;
} __attribute__((__packed__));

/* Pre-computed headers for QinQ encapsulation: Ethernet plus outer
 * (service, ETHER_TYPE_QINQ) and inner (customer, ETHER_TYPE_VLAN) tags. */
struct encap_qinq_data {
	struct ether_hdr ether;
	struct vlan_hdr svlan; /* outer (service) tag */
	struct vlan_hdr cvlan; /* inner (customer) tag */
} __attribute__((__packed__));
+
/* MPLS ethertypes (unicast and multicast label stacks). */
#define ETHER_TYPE_MPLS_UNICAST 0x8847

#define ETHER_TYPE_MPLS_MULTICAST 0x8848

/* Build an MPLS label stack entry (host byte order) from the label,
 * traffic class, bottom-of-stack flag and TTL fields. */
#define MPLS(label, tc, s, ttl) \
	((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) | \
	((((uint64_t)(tc)) & 0x7LLU) << 9) | \
	((((uint64_t)(s)) & 0x1LLU) << 8) | \
	(((uint64_t)(ttl)) & 0xFFLLU)))

/* Pre-computed headers for MPLS encapsulation: Ethernet header plus up to
 * RTE_TABLE_ACTION_MPLS_LABELS_MAX label stack entries (big endian);
 * mpls_count is the number of valid entries in mpls[]. */
struct encap_mpls_data {
	struct ether_hdr ether;
	uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
	uint32_t mpls_count;
} __attribute__((__packed__));
+
/* Ethertype for PPPoE session-stage frames. */
#define ETHER_TYPE_PPPOE_SESSION 0x8864

/* PPP protocol identifier for IPv4 payloads. */
#define PPP_PROTOCOL_IP 0x0021

/* Combined PPPoE + PPP header (fields stored in network byte order). */
struct pppoe_ppp_hdr {
	uint16_t ver_type_code; /* PPPoE version, type and code */
	uint16_t session_id;
	uint16_t length; /* PPPoE payload length; filled in per packet */
	uint16_t protocol; /* PPP protocol (PPP_PROTOCOL_IP) */
} __attribute__((__packed__));

/* Pre-computed headers for PPPoE encapsulation. */
struct encap_pppoe_data {
	struct ether_hdr ether;
	struct pppoe_ppp_hdr pppoe_ppp;
} __attribute__((__packed__));
+
+static size_t
+encap_data_size(struct rte_table_action_encap_config *encap)
+{
+ switch (encap->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ return sizeof(struct encap_ether_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ return sizeof(struct encap_vlan_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ return sizeof(struct encap_qinq_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ return sizeof(struct encap_mpls_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return sizeof(struct encap_pppoe_data);
+
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_apply_check(struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg)
+{
+ if ((encap_valid(p->type) == 0) ||
+ ((cfg->encap_mask & (1LLU << p->type)) == 0))
+ return -EINVAL;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ if ((p->mpls.mpls_count == 0) ||
+ (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
+ return -EINVAL;
+
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
/*
 * Fill the per-entry ETHER encapsulation header: destination/source MAC
 * from the action parameters, ethertype selected by the configured IP
 * version (non-zero -> IPv4, zero -> IPv6).
 */
static int
encap_ether_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_common_config *common_cfg)
{
	struct encap_ether_data *d = data;
	uint16_t ethertype = (common_cfg->ip_version) ?
		ETHER_TYPE_IPv4 :
		ETHER_TYPE_IPv6;

	/* Ethernet */
	ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
	ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(ethertype);

	return 0;
}
+
/*
 * Fill the per-entry VLAN encapsulation headers: Ethernet header with
 * VLAN ethertype, then the VLAN tag carrying the configured PCP/DEI/VID
 * and the payload ethertype selected by the configured IP version.
 */
static int
encap_vlan_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_common_config *common_cfg)
{
	struct encap_vlan_data *d = data;
	uint16_t ethertype = (common_cfg->ip_version) ?
		ETHER_TYPE_IPv4 :
		ETHER_TYPE_IPv6;

	/* Ethernet */
	ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
	ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);

	/* VLAN */
	d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
		p->vlan.vlan.dei,
		p->vlan.vlan.vid));
	d->vlan.eth_proto = rte_htons(ethertype);

	return 0;
}
+
/*
 * Fill the per-entry QinQ encapsulation headers: Ethernet header with
 * QinQ ethertype, outer (service) tag chaining to the VLAN ethertype,
 * and inner (customer) tag chaining to the payload ethertype selected
 * by the configured IP version.
 */
static int
encap_qinq_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_common_config *common_cfg)
{
	struct encap_qinq_data *d = data;
	uint16_t ethertype = (common_cfg->ip_version) ?
		ETHER_TYPE_IPv4 :
		ETHER_TYPE_IPv6;

	/* Ethernet */
	ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
	ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);

	/* SVLAN */
	d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
		p->qinq.svlan.dei,
		p->qinq.svlan.vid));
	d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);

	/* CVLAN */
	d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
		p->qinq.cvlan.dei,
		p->qinq.cvlan.vid));
	d->cvlan.eth_proto = rte_htons(ethertype);

	return 0;
}
+
/*
 * Fill the per-entry MPLS encapsulation headers: Ethernet header with
 * the unicast/multicast MPLS ethertype, followed by the label stack.
 * All labels but the last are written with the S (bottom-of-stack) bit
 * clear; the last label has S set.
 *
 * Relies on mpls_count being in [1, RTE_TABLE_ACTION_MPLS_LABELS_MAX],
 * validated by encap_apply_check() before this is called.
 */
static int
encap_mpls_apply(void *data,
	struct rte_table_action_encap_params *p)
{
	struct encap_mpls_data *d = data;
	uint16_t ethertype = (p->mpls.unicast) ?
		ETHER_TYPE_MPLS_UNICAST :
		ETHER_TYPE_MPLS_MULTICAST;
	uint32_t i;

	/* Ethernet */
	ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
	ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(ethertype);

	/* MPLS: non-bottom labels (S = 0), then bottom-of-stack (S = 1) */
	for (i = 0; i < p->mpls.mpls_count - 1; i++)
		d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
			p->mpls.mpls[i].tc,
			0,
			p->mpls.mpls[i].ttl));

	d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
		p->mpls.mpls[i].tc,
		1,
		p->mpls.mpls[i].ttl));

	d->mpls_count = p->mpls.mpls_count;
	return 0;
}
+
/*
 * Fill the per-entry PPPoE encapsulation headers: Ethernet header with
 * the PPPoE session ethertype, then the PPPoE + PPP header. The PPPoE
 * length field is left zero here and computed per packet.
 */
static int
encap_pppoe_apply(void *data,
	struct rte_table_action_encap_params *p)
{
	struct encap_pppoe_data *d = data;

	/* Ethernet */
	ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
	ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);

	/* PPPoE and PPP: 0x1100 = version 1, type 1, code 0 (session data) */
	d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
	d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
	d->pppoe_ppp.length = 0; /* not pre-computed */
	d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);

	return 0;
}
+
+static int
+encap_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg,
+ struct rte_table_action_common_config *common_cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = encap_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return encap_ether_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return encap_vlan_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return encap_qinq_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ return encap_mpls_apply(data, p);
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return encap_pppoe_apply(data, p);
+
+ default:
+ return -EINVAL;
+ }
+}
+
/*
 * Prepend @n bytes of pre-computed header @src immediately before @dst
 * and return the new start-of-data pointer (dst - n). The caller must
 * guarantee at least @n bytes of headroom before @dst.
 */
static __rte_always_inline void *
encap(void *dst, const void *src, size_t n)
{
	dst = ((uint8_t *) dst) - n;
	return rte_memcpy(dst, src, n);
}
+
/*
 * Per-packet work for the encap action: copy the entry's pre-computed
 * header(s) in front of the packet's IP header and update the mbuf
 * data offset and lengths.
 *
 * @ip points to the IP header inside the mbuf data area; @ip_offset is
 * its offset from the start of the mbuf structure, hence the new
 * data_off is ip_offset - sizeof(struct rte_mbuf) - header size.
 * @total_length is the IP packet length (header + payload).
 */
static __rte_always_inline void
pkt_work_encap(struct rte_mbuf *mbuf,
	void *data,
	struct rte_table_action_encap_config *cfg,
	void *ip,
	uint16_t total_length,
	uint32_t ip_offset)
{
	switch (cfg->encap_mask) {
	case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
		encap(ip, data, sizeof(struct encap_ether_data));
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_ether_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_ether_data);
		break;

	case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
		encap(ip, data, sizeof(struct encap_vlan_data));
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_vlan_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_vlan_data);
		break;

	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
		encap(ip, data, sizeof(struct encap_qinq_data));
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_qinq_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_qinq_data);
		break;

	case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
	{
		/* Only Ethernet + the valid labels (4 bytes each) are
		 * copied, not the whole encap_mpls_data structure. */
		struct encap_mpls_data *mpls = data;
		size_t size = sizeof(struct ether_hdr) +
			mpls->mpls_count * 4;

		encap(ip, data, size);
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
		mbuf->pkt_len = mbuf->data_len = total_length + size;
		break;
	}

	case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
	{
		struct encap_pppoe_data *pppoe =
			encap(ip, data, sizeof(struct encap_pppoe_data));
		/* PPPoE payload = IP packet + 2-byte PPP protocol field */
		pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_pppoe_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_pppoe_data);
		break;
	}

	default:
		break;
	}
}
+
+/**
+ * RTE_TABLE_ACTION_NAT
+ */
+static int
+nat_cfg_check(struct rte_table_action_nat_config *nat)
+{
+ if ((nat->proto != 0x06) &&
+ (nat->proto != 0x11))
+ return -ENOTSUP;
+
+ return 0;
+}
+
/* Per-entry NAT data for IPv4: translated address and L4 port, both
 * stored in network byte order. */
struct nat_ipv4_data {
	uint32_t addr;
	uint16_t port;
} __attribute__((__packed__));

/* Per-entry NAT data for IPv6: translated address and L4 port, both
 * stored in network byte order. */
struct nat_ipv6_data {
	uint8_t addr[16];
	uint16_t port;
} __attribute__((__packed__));
+
+static size_t
+nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
+ struct rte_table_action_common_config *common)
+{
+ int ip_version = common->ip_version;
+
+ return (ip_version) ?
+ sizeof(struct nat_ipv4_data) :
+ sizeof(struct nat_ipv6_data);
+}
+
+static int
+nat_apply_check(struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ if ((p->ip_version && (cfg->ip_version == 0)) ||
+ ((p->ip_version == 0) && cfg->ip_version))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nat_apply(void *data,
+ struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = nat_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ if (p->ip_version) {
+ struct nat_ipv4_data *d = data;
+
+ d->addr = rte_htonl(p->addr.ipv4);
+ d->port = rte_htons(p->port);
+ } else {
+ struct nat_ipv6_data *d = data;
+
+ memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
+ d->port = rte_htons(p->port);
+ }
+
+ return 0;
+}
+
/*
 * Incrementally update an IPv4 header checksum when the 32-bit address
 * @ip0 is replaced by @ip1 (RFC 1624-style one's complement update).
 * Both addresses and the checksum are taken as stored in the header.
 */
static __rte_always_inline uint16_t
nat_ipv4_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1)
{
	int32_t sum = (~((int32_t)cksum0)) & 0xFFFF;

	/* Remove the old address from the one's complement sum */
	sum -= (int32_t)((ip0 >> 16) + (ip0 & 0xFFFF));
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Account for the new address */
	sum += (int32_t)((ip1 >> 16) + (ip1 & 0xFFFF));
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
+
/*
 * Incrementally update a TCP/UDP checksum over the IPv4 pseudo-header
 * when address @ip0 / port @port0 are replaced by @ip1 / @port1
 * (one's complement arithmetic, RFC 1624 style).
 */
static __rte_always_inline uint16_t
nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t sum = (~((int32_t)cksum0)) & 0xFFFF;

	/* Remove the old address and port from the one's complement sum */
	sum -= (int32_t)((ip0 >> 16) + (ip0 & 0xFFFF) + port0);
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Account for the new address and port */
	sum += (int32_t)((ip1 >> 16) + (ip1 & 0xFFFF) + port1);
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
+
/*
 * Incrementally update a TCP/UDP checksum over the IPv6 pseudo-header
 * when the 128-bit address at @ip0 (eight 16-bit words) and port @port0
 * are replaced by @ip1 / @port1 (one's complement arithmetic).
 */
static __rte_always_inline uint16_t
nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
	uint16_t *ip0,
	uint16_t *ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t sum = (~((int32_t)cksum0)) & 0xFFFF;
	int32_t old_words = port0;
	int32_t new_words = port1;
	int i;

	for (i = 0; i < 8; i++) {
		old_words += ip0[i];
		new_words += ip1[i];
	}

	/* Remove the old address and port from the one's complement sum */
	sum -= old_words;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	/* Account for the new address and port */
	sum += new_words;
	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);

	return (uint16_t)(~sum);
}
+
/*
 * Per-packet NAT work for IPv4: rewrite the source (SNAT) or destination
 * (DNAT) address and L4 port, updating the IPv4 header checksum and the
 * TCP/UDP checksum incrementally.
 *
 * The L4 header is assumed to immediately follow the IPv4 header, i.e.
 * no IPv4 options (see the &ip[1] accesses). A UDP checksum of 0 means
 * "no checksum" and is left untouched. cfg->proto selects TCP (0x6);
 * anything else is treated as UDP (the config check only admits 6/17).
 */
static __rte_always_inline void
pkt_ipv4_work_nat(struct ipv4_hdr *ip,
	struct nat_ipv4_data *data,
	struct rte_table_action_nat_config *cfg)
{
	if (cfg->source_nat) {
		if (cfg->proto == 0x6) {
			struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
			uint16_t ip_cksum, tcp_cksum;

			/* Compute both checksums before modifying fields */
			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->src_addr,
				data->addr);

			tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
				ip->src_addr,
				data->addr,
				tcp->src_port,
				data->port);

			ip->src_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			tcp->src_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
			uint16_t ip_cksum, udp_cksum;

			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->src_addr,
				data->addr);

			udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
				ip->src_addr,
				data->addr,
				udp->src_port,
				data->port);

			ip->src_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			udp->src_port = data->port;
			/* UDP checksum 0 means "not used": keep it 0 */
			if (udp->dgram_cksum)
				udp->dgram_cksum = udp_cksum;
		}
	} else {
		if (cfg->proto == 0x6) {
			struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
			uint16_t ip_cksum, tcp_cksum;

			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->dst_addr,
				data->addr);

			tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
				ip->dst_addr,
				data->addr,
				tcp->dst_port,
				data->port);

			ip->dst_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			tcp->dst_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
			uint16_t ip_cksum, udp_cksum;

			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->dst_addr,
				data->addr);

			udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
				ip->dst_addr,
				data->addr,
				udp->dst_port,
				data->port);

			ip->dst_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			udp->dst_port = data->port;
			/* UDP checksum 0 means "not used": keep it 0 */
			if (udp->dgram_cksum)
				udp->dgram_cksum = udp_cksum;
		}
	}
}
+
/*
 * Per-packet NAT work for IPv6: rewrite the source (SNAT) or destination
 * (DNAT) address and L4 port. IPv6 has no header checksum, but the
 * addresses and ports are part of the TCP/UDP pseudo-header, so the L4
 * checksum is updated incrementally.
 *
 * The L4 header is assumed to immediately follow the 40-byte fixed IPv6
 * header, i.e. no extension headers (see the &ip[1] accesses).
 * NOTE(review): unlike the IPv4 path, a UDP checksum of 0 is not treated
 * specially here — confirm this is intentional for IPv6 (where a zero
 * UDP checksum is not allowed anyway).
 */
static __rte_always_inline void
pkt_ipv6_work_nat(struct ipv6_hdr *ip,
	struct nat_ipv6_data *data,
	struct rte_table_action_nat_config *cfg)
{
	if (cfg->source_nat) {
		if (cfg->proto == 0x6) {
			struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
			uint16_t tcp_cksum;

			/* Compute the checksum before modifying fields */
			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
				(uint16_t *)ip->src_addr,
				(uint16_t *)data->addr,
				tcp->src_port,
				data->port);

			rte_memcpy(ip->src_addr, data->addr, 16);
			tcp->src_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
			uint16_t udp_cksum;

			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
				(uint16_t *)ip->src_addr,
				(uint16_t *)data->addr,
				udp->src_port,
				data->port);

			rte_memcpy(ip->src_addr, data->addr, 16);
			udp->src_port = data->port;
			udp->dgram_cksum = udp_cksum;
		}
	} else {
		if (cfg->proto == 0x6) {
			struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
			uint16_t tcp_cksum;

			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
				(uint16_t *)ip->dst_addr,
				(uint16_t *)data->addr,
				tcp->dst_port,
				data->port);

			rte_memcpy(ip->dst_addr, data->addr, 16);
			tcp->dst_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
			uint16_t udp_cksum;

			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
				(uint16_t *)ip->dst_addr,
				(uint16_t *)data->addr,
				udp->dst_port,
				data->port);

			rte_memcpy(ip->dst_addr, data->addr, 16);
			udp->dst_port = data->port;
			udp->dgram_cksum = udp_cksum;
		}
	}
}
+
+/**
+ * RTE_TABLE_ACTION_TTL
+ */
+static int
+ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
+{
+ if (ttl->drop == 0)
+ return -ENOTSUP;
+
+ return 0;
+}
+
/* TTL action per-entry state: one 32-bit word packing the TTL-decrement
 * enable flag (bit 0) and the expired-packet counter (bits 1..31). */
struct ttl_data {
	uint32_t n_packets;
} __attribute__((__packed__));

/* Initialize: set the decrement flag, zero the counter. */
#define TTL_INIT(data, decrement) \
	((data)->n_packets = (decrement) ? 1 : 0)

/* Amount to subtract from the packet TTL for this entry (0 or 1). */
#define TTL_DEC_GET(data) \
	((uint8_t)((data)->n_packets & 1))

/* Zero the counter while preserving the decrement flag. */
#define TTL_STATS_RESET(data) \
	((data)->n_packets = ((data)->n_packets & 1))

/* Read the expired-packet counter. */
#define TTL_STATS_READ(data) \
	((data)->n_packets >> 1)

/* Add @value to the counter, preserving the decrement flag in bit 0. */
#define TTL_STATS_ADD(data, value) \
	((data)->n_packets = \
		(((((data)->n_packets >> 1) + (value)) << 1) | \
		((data)->n_packets & 1)))
+
+static int
+ttl_apply(void *data,
+ struct rte_table_action_ttl_params *p)
+{
+ struct ttl_data *d = data;
+
+ TTL_INIT(d, p->decrement);
+
+ return 0;
+}
+
/*
 * Per-packet TTL work for IPv4: decrement the TTL (when enabled for
 * this entry) and incrementally patch the header checksum. Returns 1
 * when the resulting TTL is 0 (packet to be dropped), 0 otherwise, and
 * accounts the drop in the entry's TTL stats.
 */
static __rte_always_inline uint64_t
pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
	struct ttl_data *data)
{
	uint32_t drop;
	uint16_t cksum = ip->hdr_checksum;
	uint8_t ttl = ip->time_to_live;
	uint8_t ttl_diff = TTL_DEC_GET(data);

	/* NOTE(review): incremental checksum adjustment for a TTL
	 * decrement of 1; relies on the byte layout of the big-endian
	 * checksum word and does not fold a possible carry — confirm
	 * against RFC 1141-style update semantics. */
	cksum += ttl_diff;
	ttl -= ttl_diff;

	ip->hdr_checksum = cksum;
	ip->time_to_live = ttl;

	drop = (ttl == 0) ? 1 : 0;
	TTL_STATS_ADD(data, drop);

	return drop;
}
+
+static __rte_always_inline uint64_t
+pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint8_t ttl = ip->hop_limits;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
+ ttl -= ttl_diff;
+
+ ip->hop_limits = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+/**
+ * RTE_TABLE_ACTION_STATS
+ */
+static int
+stats_cfg_check(struct rte_table_action_stats_config *stats)
+{
+ if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
+ return -EINVAL;