+/*
+ * Incrementally fold the IPv4 Total Length value into the header checksum
+ * pre-computed for the VXLAN header template (RFC 1624 one's-complement
+ * update).
+ */
+static __rte_always_inline uint16_t
+encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
+ uint16_t total_length)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Add total length (one's complement logic) */
+ cksum1 += total_length;
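+ /* Fold the carry twice: the first fold can itself produce a carry. */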
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
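+/*
+ * Prepend the n-byte header template src in front of dst and return the new
+ * start-of-packet pointer.
+ */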
+static __rte_always_inline void *
+encap(void *dst, const void *src, size_t n)
+{
+ dst = ((uint8_t *) dst) - n;
+ return rte_memcpy(dst, src, n);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
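+ /* Prepend the pre-built header template, then patch the length-dependent
+ * fields.
+ */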
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
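+ /* ether_offset is relative to the mbuf base pointer, hence the
+ * sizeof(struct rte_mbuf) correction when converting it back into a
+ * data_off value (assumes a zero mbuf private area size).
+ */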
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
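+/*
+ * Prepend the encap header template selected for the table entry and fix up
+ * the mbuf data offset and length fields accordingly.
+ */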
+static __rte_always_inline void
+pkt_work_encap(struct rte_mbuf *mbuf,
+ void *data,
+ struct rte_table_action_encap_config *cfg,
+ void *ip,
+ uint16_t total_length,
+ uint32_t ip_offset)
+{
+ switch (cfg->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ encap(ip, data, sizeof(struct encap_ether_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_ether_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_ether_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ encap(ip, data, sizeof(struct encap_vlan_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_vlan_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_vlan_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ encap(ip, data, sizeof(struct encap_qinq_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_qinq_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_qinq_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ {
+ struct encap_mpls_data *mpls = data;
+ size_t size = sizeof(struct rte_ether_hdr) +
+ mpls->mpls_count * 4;
+
+ encap(ip, data, size);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
+ mbuf->pkt_len = mbuf->data_len = total_length + size;
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ {
+ struct encap_pppoe_data *pppoe =
+ encap(ip, data, sizeof(struct encap_pppoe_data));
+ pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_pppoe_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_pppoe_data);
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ {
+ struct encap_qinq_pppoe_data *qinq_pppoe =
+ encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
+ qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_qinq_pppoe_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_qinq_pppoe_data);
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
+ {
+ if (cfg->vxlan.ip_version) {
+ if (cfg->vxlan.vlan)
+ pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
+ else
+ pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
+ } else {
+ if (cfg->vxlan.vlan)
+ pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
+ else
+ pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_NAT
+ */
+static int
+nat_cfg_check(struct rte_table_action_nat_config *nat)
+{
+ if ((nat->proto != 0x06) && /* TCP */
+ (nat->proto != 0x11)) /* UDP */
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct nat_ipv4_data {
+ uint32_t addr;
+ uint16_t port;
+} __attribute__((__packed__));
+
+struct nat_ipv6_data {
+ uint8_t addr[16];
+ uint16_t port;
+} __attribute__((__packed__));
+
+static size_t
+nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
+ struct rte_table_action_common_config *common)
+{
+ int ip_version = common->ip_version;
+
+ return (ip_version) ?
+ sizeof(struct nat_ipv4_data) :
+ sizeof(struct nat_ipv6_data);
+}
+
+static int
+nat_apply_check(struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ if ((p->ip_version && (cfg->ip_version == 0)) ||
+ ((p->ip_version == 0) && cfg->ip_version))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nat_apply(void *data,
+ struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = nat_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ if (p->ip_version) {
+ struct nat_ipv4_data *d = data;
+
+ d->addr = rte_htonl(p->addr.ipv4);
+ d->port = rte_htons(p->port);
+ } else {
+ struct nat_ipv6_data *d = data;
+
+ memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
+ d->port = rte_htons(p->port);
+ }
+
+ return 0;
+}
+
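+/*
+ * RFC 1624 incremental update of the IPv4 header checksum when the source
+ * or destination address changes from ip0 to ip1.
+ */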
+static __rte_always_inline uint16_t
+nat_ipv4_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline uint16_t
+nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline uint16_t
+nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
+ uint16_t *ip0,
+ uint16_t *ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port0 (one's complement logic) */
+ cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
+ ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
+ ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline void
+pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
+ struct nat_ipv4_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->src_addr,
+ data->addr,
+ tcp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->src_addr,
+ data->addr,
+ udp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->src_port = data->port;
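+ /* For UDP over IPv4, a zero checksum means "not computed",
+ * so keep it zero.
+ */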
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->dst_addr,
+ data->addr,
+ tcp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->dst_addr,
+ data->addr,
+ udp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->dst_port = data->port;
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+static __rte_always_inline void
+pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
+ struct nat_ipv6_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ tcp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ udp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ udp->src_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ tcp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ udp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ udp->dst_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_TTL
+ */
+static int
+ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
+{
+ if (ttl->drop == 0)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct ttl_data {
+ uint32_t n_packets;
+} __attribute__((__packed__));
+
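+/*
+ * Bit 0 of n_packets holds the TTL decrement flag; the remaining bits count
+ * the packets dropped due to TTL expiry.
+ */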
+#define TTL_INIT(data, decrement) \
+ ((data)->n_packets = (decrement) ? 1 : 0)
+
+#define TTL_DEC_GET(data) \
+ ((uint8_t)((data)->n_packets & 1))
+
+#define TTL_STATS_RESET(data) \
+ ((data)->n_packets = ((data)->n_packets & 1))
+
+#define TTL_STATS_READ(data) \
+ ((data)->n_packets >> 1)
+
+#define TTL_STATS_ADD(data, value) \
+ ((data)->n_packets = \
+ (((((data)->n_packets >> 1) + (value)) << 1) | \
+ ((data)->n_packets & 1)))
+
+static int
+ttl_apply(void *data,
+ struct rte_table_action_ttl_params *p)
+{
+ struct ttl_data *d = data;
+
+ TTL_INIT(d, p->decrement);
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint16_t cksum = ip->hdr_checksum;
+ uint8_t ttl = ip->time_to_live;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
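+ /* RFC 1141-style incremental checksum adjustment for the TTL
+ * decrement (carry wrap-around ignored by this fast path).
+ */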
+ cksum += ttl_diff;
+ ttl -= ttl_diff;
+
+ ip->hdr_checksum = cksum;
+ ip->time_to_live = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+static __rte_always_inline uint64_t
+pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint8_t ttl = ip->hop_limits;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
+ ttl -= ttl_diff;
+
+ ip->hop_limits = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+/**
+ * RTE_TABLE_ACTION_STATS
+ */
+static int
+stats_cfg_check(struct rte_table_action_stats_config *stats)
+{
+ if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+struct stats_data {
+ uint64_t n_packets;
+ uint64_t n_bytes;
+} __attribute__((__packed__));
+
+static int
+stats_apply(struct stats_data *data,
+ struct rte_table_action_stats_params *p)
+{
+ data->n_packets = p->n_packets;
+ data->n_bytes = p->n_bytes;
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_stats(struct stats_data *data,
+ uint16_t total_length)
+{
+ data->n_packets++;
+ data->n_bytes += total_length;
+}
+
+/**
+ * RTE_TABLE_ACTION_TIME
+ */
+struct time_data {
+ uint64_t time;
+} __attribute__((__packed__));
+
+static int
+time_apply(struct time_data *data,
+ struct rte_table_action_time_params *p)
+{
+ data->time = p->time;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_time(struct time_data *data,
+ uint64_t time)
+{
+ data->time = time;
+}
+
+
+/**
+ * RTE_TABLE_ACTION_CRYPTO
+ */
+
+#define CRYPTO_OP_MASK_CIPHER 0x1
+#define CRYPTO_OP_MASK_AUTH 0x2
+#define CRYPTO_OP_MASK_AEAD 0x4
+
+struct crypto_op_sym_iv_aad {
+ struct rte_crypto_op op;
+ struct rte_crypto_sym_op sym_op;
+ union {
+ struct {
+ uint8_t cipher_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t auth_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ } cipher_auth;
+
+ struct {
+ uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
+ } aead_iv_aad;
+
+ } iv_aad;
+};
+
+struct sym_crypto_data {
+
+ union {
+ struct {
+
+ /** Length of cipher iv. */
+ uint16_t cipher_iv_len;
+
+ /** Offset from start of IP header to the cipher iv. */
+ uint16_t cipher_iv_data_offset;
+
+ /** Length of cipher iv to be updated in the mbuf. */
+ uint16_t cipher_iv_update_len;
+
+ /** Offset from start of IP header to the auth iv. */
+ uint16_t auth_iv_data_offset;
+
+ /** Length of auth iv in the mbuf. */
+ uint16_t auth_iv_len;
+
+ /** Length of auth iv to be updated in the mbuf. */
+ uint16_t auth_iv_update_len;
+
+ } cipher_auth;
+ struct {
+
+ /** Length of iv. */
+ uint16_t iv_len;
+
+ /** Offset from start of IP header to the aead iv. */
+ uint16_t iv_data_offset;
+
+ /** Length of iv to be updated in the mbuf. */
+ uint16_t iv_update_len;
+
+ /** Length of aad. */
+ uint16_t aad_len;
+
+ /** Offset from start of IP header to the aad. */
+ uint16_t aad_data_offset;
+
+ /** Length of aad to be updated in the mbuf. */
+ uint16_t aad_update_len;
+
+ } aead;
+ };
+
+ /** Offset from start of IP header to the data. */
+ uint16_t data_offset;
+
+ /** Digest length. */
+ uint16_t digest_len;
+
+ /** Block size. */
+ uint16_t block_size;
+
+ /** Mask of crypto operations (CRYPTO_OP_MASK_*). */
+ uint16_t op_mask;
+
+ /** Session pointer. */
+ struct rte_cryptodev_sym_session *session;
+
+ /** Direction of crypto, encrypt or decrypt. */
+ uint16_t direction;
+
+ /** Private data to store the cipher IV / AAD. */
+ uint8_t iv_aad_data[32];
+
+} __attribute__((__packed__));
+
+static int
+sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
+{
+ if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
+ return -EINVAL;
+ if (cfg->mp_create == NULL || cfg->mp_init == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
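+/*
+ * Look up the cipher/AEAD block size for the given transform in the crypto
+ * device capabilities; return -1 when the algorithm is not supported.
+ */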
+static int
+get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
+{
+ struct rte_cryptodev_info dev_info;
+ const struct rte_cryptodev_capabilities *cap;
+ uint32_t i;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ i++) {
+ cap = &dev_info.capabilities[i];
+
+ if (cap->sym.xform_type != xform->type)
+ continue;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (cap->sym.cipher.algo == xform->cipher.algo))
+ return cap->sym.cipher.block_size;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
+ (cap->sym.aead.algo == xform->aead.algo))
+ return cap->sym.aead.block_size;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
+ break;
+ }
+
+ return -1;
+}
+
+static int
+sym_crypto_apply(struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ struct rte_table_action_sym_crypto_params *p)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ struct rte_crypto_sym_xform *xform = p->xform;
+ struct rte_cryptodev_sym_session *session;
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+
+ while (xform) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &xform->cipher;
+
+ if (cipher_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ if (cipher_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_CIPHER;
+
+ data->cipher_auth.cipher_iv_len =
+ cipher_xform->iv.length;
+ data->cipher_auth.cipher_iv_data_offset = (uint16_t)
+ p->cipher_auth.cipher_iv_update.offset;
+ data->cipher_auth.cipher_iv_update_len = (uint16_t)
+ p->cipher_auth.cipher_iv_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.cipher_iv.val,
+ p->cipher_auth.cipher_iv.length);
+
+ data->direction = cipher_xform->op;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &xform->auth;
+ if (auth_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ data->op_mask |= CRYPTO_OP_MASK_AUTH;
+
+ data->cipher_auth.auth_iv_len = auth_xform->iv.length;
+ data->cipher_auth.auth_iv_data_offset = (uint16_t)
+ p->cipher_auth.auth_iv_update.offset;
+ data->cipher_auth.auth_iv_update_len = (uint16_t)
+ p->cipher_auth.auth_iv_update.length;
+ data->digest_len = auth_xform->digest_length;
+
+ data->direction = (auth_xform->op ==
+ RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &xform->aead;
+
+ if ((aead_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
+ aead_xform->aad_length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
+ return -EINVAL;
+ if (aead_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_AEAD;
+
+ data->digest_len = aead_xform->digest_length;
+ data->aead.iv_len = aead_xform->iv.length;
+ data->aead.aad_len = aead_xform->aad_length;
+
+ data->aead.iv_data_offset = (uint16_t)
+ p->aead.iv_update.offset;
+ data->aead.iv_update_len = (uint16_t)
+ p->aead.iv_update.length;
+ data->aead.aad_data_offset = (uint16_t)
+ p->aead.aad_update.offset;
+ data->aead.aad_update_len = (uint16_t)
+ p->aead.aad_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->aead.iv.val,
+ p->aead.iv.length);
+
+ rte_memcpy(data->iv_aad_data + p->aead.iv.length,
+ p->aead.aad.val,
+ p->aead.aad.length);
+
+ data->direction = (aead_xform->op ==
+ RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ } else
+ return -EINVAL;
+
+ xform = xform->next;
+ }
+
+ if (auth_xform && auth_xform->iv.length) {
+ if (cipher_xform) {
+ if (auth_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
+ cipher_xform->iv.length)
+ return -EINVAL;
+
+ rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ } else {
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ }
+ }
+
+ session = rte_cryptodev_sym_session_create(cfg->mp_create);
+ if (!session)
+ return -ENOMEM;
+
+ ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
+ p->xform, cfg->mp_init);
+ if (ret < 0) {
+ rte_cryptodev_sym_session_free(session);
+ return ret;
+ }
+
+ data->data_offset = (uint16_t)p->data_offset;
+ data->session = session;
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ uint16_t ip_offset)
+{
+ struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
+ RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
+ struct rte_crypto_op *op = &crypto_op->op;
+ struct rte_crypto_sym_op *sym = op->sym;
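+ /* Offset of the first packet byte, measured from the start of the
+ * mbuf structure.
+ */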
+ uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
+ uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
+
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ sym->m_src = mbuf;
+ sym->m_dst = NULL;
+ sym->session = data->session;
+
+ /** Pad the packet to the cipher block size and reserve room for the
+ * digest.
+ */
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
+ data->block_size) - payload_len;
+
+ if (unlikely(rte_pktmbuf_append(mbuf, append_len +
+ data->digest_len) == NULL))
+ return 1;
+
+ payload_len += append_len;
+ } else
+ payload_len -= data->digest_len;
+
+ if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
+ /** prepare cipher op */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
+
+ sym->cipher.data.length = payload_len;
+ sym->cipher.data.offset = data->data_offset - pkt_offset;
+
+ if (data->cipher_auth.cipher_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.cipher_iv_data_offset
+ + ip_offset);
+
+ /** For encryption, update the packet IV field; otherwise
+ * update the iv_aad_data field.
+ */
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_update_len);
+ else
+ rte_memcpy(data->iv_aad_data, pkt_iv,
+ data->cipher_auth.cipher_iv_update_len);
+ }
+
+ /** write iv */
+ rte_memcpy(iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_len);
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
+ /** Authentication always starts from the IP header. */
+ sym->auth.data.offset = ip_offset - pkt_offset;
+ sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
+ data->digest_len;
+ sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+
+ if (data->cipher_auth.auth_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.auth_iv_data_offset
+ + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->cipher_auth.auth_iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->cipher_auth.auth_iv_update_len);
+ }
+
+ if (data->cipher_auth.auth_iv_len) {
+ /** write the auth IV */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
+
+ rte_memcpy(iv, data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len,
+ data->cipher_auth.auth_iv_len);
+ }
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
+ uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
+ uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
+
+ sym->aead.aad.data = aad;
+ sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
+ sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+ sym->aead.data.offset = data->data_offset - pkt_offset;
+ sym->aead.data.length = payload_len;
+
+ if (data->aead.iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.iv_data_offset + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->aead.iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->aead.iv_update_len);
+ }
+
+ rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
+
+ if (data->aead.aad_update_len) {
+ uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.aad_data_offset + ip_offset);
+ uint8_t *data_aad = data->iv_aad_data +
+ data->aead.iv_len;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_aad, data_aad,
+ data->aead.aad_update_len);
+ else
+ rte_memcpy(data_aad, pkt_aad,
+ data->aead.aad_update_len);
+ }
+
+ rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
+ data->aead.aad_len);
+ }
+
+ return 0;
+}
+
+/**
+ * RTE_TABLE_ACTION_TAG
+ */
+struct tag_data {
+ uint32_t tag;
+} __attribute__((__packed__));
+
+static int
+tag_apply(struct tag_data *data,
+ struct rte_table_action_tag_params *p)