+fwd_apply(struct fwd_data *data,
+ struct rte_table_action_fwd_params *p)
+{
+ data->action = p->action;
+
+ if (p->action == RTE_PIPELINE_ACTION_PORT)
+ data->port_id = p->id;
+
+ if (p->action == RTE_PIPELINE_ACTION_TABLE)
+ data->table_id = p->id;
+
+ return 0;
+}
+
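+/*
+ * Usage sketch (illustrative only; output port 3 is an assumed, valid
+ * pipeline port): build a forward action entry that sends matching
+ * packets to that port.
+ */
+static __rte_unused int
+fwd_apply_example(struct fwd_data *data)
+{
+ struct rte_table_action_fwd_params p = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ .id = 3,
+ };
+
+ return fwd_apply(data, &p);
+}
+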
+/**
+ * RTE_TABLE_ACTION_LB
+ */
+static int
+lb_cfg_check(struct rte_table_action_lb_config *cfg)
+{
+ if ((cfg == NULL) ||
+ (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
+ (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
+ (!rte_is_power_of_2(cfg->key_size)) ||
+ (cfg->f_hash == NULL))
+ return -1;
+
+ return 0;
+}
+
+struct lb_data {
+ uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
+} __rte_packed;
+
+static int
+lb_apply(struct lb_data *data,
+ struct rte_table_action_lb_params *p)
+{
+ memcpy(data->out, p->out, sizeof(data->out));
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_lb(struct rte_mbuf *mbuf,
+ struct lb_data *data,
+ struct rte_table_action_lb_config *cfg)
+{
+ uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
+ uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
+ uint64_t digest, pos;
+ uint32_t out_val;
+
+ digest = cfg->f_hash(pkt_key,
+ cfg->key_mask,
+ cfg->key_size,
+ cfg->seed);
+ pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
+ out_val = data->out[pos];
+
+ *out = out_val;
+}
+
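+/*
+ * Worked sketch: the digest is reduced to a table position with a
+ * power-of-2 mask; e.g. for an 8-entry table, digest 0x2A selects
+ * out[0x2A & 7] = out[2], which pkt_work_lb() then writes to the mbuf
+ * at cfg->out_offset. The hypothetical helper below mirrors that
+ * indexing for a caller-supplied digest.
+ */
+static __rte_unused uint32_t
+lb_out_for_digest(struct lb_data *data, uint64_t digest)
+{
+ return data->out[digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1)];
+}
+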
+/**
+ * RTE_TABLE_ACTION_MTR
+ */
+static int
+mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
+{
+ if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
+ ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
+ (mtr->n_bytes_enabled != 0))
+ return -ENOTSUP;
+ return 0;
+}
+
+struct mtr_trtcm_data {
+ struct rte_meter_trtcm trtcm;
+ uint64_t stats[RTE_COLORS];
+} __rte_packed;
+
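+/*
+ * Layout of each 64-bit stats word, inferred from the accessors below:
+ * bits [1:0] hold the policer output color, bit 2 the policer drop
+ * flag, bits [7:3] the meter profile table index (green word only) and
+ * bits [63:8] the per-color packet counter.
+ */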
+#define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
+ (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)
+
+static void
+mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
+ uint32_t profile_id)
+{
+ data->stats[RTE_COLOR_GREEN] &= ~0xF8LLU;
+ data->stats[RTE_COLOR_GREEN] |= (profile_id % 32) << 3;
+}
+
+#define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
+ (((data)->stats[(color)] & 4LLU) >> 2)
+
+#define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
+ ((enum rte_color)((data)->stats[(color)] & 3LLU))
+
+static void
+mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
+ enum rte_color color,
+ enum rte_table_action_policer action)
+{
+ if (action == RTE_TABLE_ACTION_POLICER_DROP) {
+ data->stats[color] |= 4LLU;
+ } else {
+ data->stats[color] &= ~7LLU;
+ data->stats[color] |= action & 3LLU;
+ }
+}
+
+static uint64_t
+mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
+ enum rte_color color)
+{
+ return data->stats[color] >> 8;
+}
+
+static void
+mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
+ enum rte_color color)
+{
+ data->stats[color] &= 0xFFLU;
+}
+
+#define MTR_TRTCM_DATA_STATS_INC(data, color) \
+ ((data)->stats[(color)] += (1LLU << 8))
+
+static size_t
+mtr_data_size(struct rte_table_action_mtr_config *mtr)
+{
+ return mtr->n_tc * sizeof(struct mtr_trtcm_data);
+}
+
+struct dscp_table_entry_data {
+ enum rte_color color;
+ uint16_t tc;
+ uint16_t tc_queue;
+};
+
+struct dscp_table_data {
+ struct dscp_table_entry_data entry[64];
+};
+
+struct meter_profile_data {
+ struct rte_meter_trtcm_profile profile;
+ uint32_t profile_id;
+ int valid;
+};
+
+static struct meter_profile_data *
+meter_profile_data_find(struct meter_profile_data *mp,
+ uint32_t mp_size,
+ uint32_t profile_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < mp_size; i++) {
+ struct meter_profile_data *mp_data = &mp[i];
+
+ if (mp_data->valid && (mp_data->profile_id == profile_id))
+ return mp_data;
+ }
+
+ return NULL;
+}
+
+static struct meter_profile_data *
+meter_profile_data_find_unused(struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+
+ for (i = 0; i < mp_size; i++) {
+ struct meter_profile_data *mp_data = &mp[i];
+
+ if (!mp_data->valid)
+ return mp_data;
+ }
+
+ return NULL;
+}
+
+static int
+mtr_apply_check(struct rte_table_action_mtr_params *p,
+ struct rte_table_action_mtr_config *cfg,
+ struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+
+ if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
+ return -EINVAL;
+
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
+ struct meter_profile_data *mp_data;
+
+ if ((p->tc_mask & (1LLU << i)) == 0)
+ continue;
+
+ mp_data = meter_profile_data_find(mp,
+ mp_size,
+ p_tc->meter_profile_id);
+ if (!mp_data)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mtr_apply(struct mtr_trtcm_data *data,
+ struct rte_table_action_mtr_params *p,
+ struct rte_table_action_mtr_config *cfg,
+ struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+ int status;
+
+ /* Check input arguments */
+ status = mtr_apply_check(p, cfg, mp, mp_size);
+ if (status)
+ return status;
+
+ /* Apply */
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
+ struct mtr_trtcm_data *data_tc = &data[i];
+ struct meter_profile_data *mp_data;
+
+ if ((p->tc_mask & (1LLU << i)) == 0)
+ continue;
+
+ /* Find profile */
+ mp_data = meter_profile_data_find(mp,
+ mp_size,
+ p_tc->meter_profile_id);
+ if (!mp_data)
+ return -EINVAL;
+
+ memset(data_tc, 0, sizeof(*data_tc));
+
+ /* Meter object */
+ status = rte_meter_trtcm_config(&data_tc->trtcm,
+ &mp_data->profile);
+ if (status)
+ return status;
+
+ /* Meter profile */
+ mtr_trtcm_data_meter_profile_id_set(data_tc,
+ mp_data - mp);
+
+ /* Policer actions */
+ mtr_trtcm_data_policer_action_set(data_tc,
+ RTE_COLOR_GREEN,
+ p_tc->policer[RTE_COLOR_GREEN]);
+
+ mtr_trtcm_data_policer_action_set(data_tc,
+ RTE_COLOR_YELLOW,
+ p_tc->policer[RTE_COLOR_YELLOW]);
+
+ mtr_trtcm_data_policer_action_set(data_tc,
+ RTE_COLOR_RED,
+ p_tc->policer[RTE_COLOR_RED]);
+ }
+
+ return 0;
+}
+
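+/*
+ * Usage sketch (illustrative; meter profile 0 is assumed to have been
+ * installed in mp[] beforehand): meter traffic class 0 with that
+ * profile, pass green and yellow packets through and drop the ones
+ * metered red.
+ */
+static __rte_unused int
+mtr_apply_example(struct mtr_trtcm_data *data,
+ struct rte_table_action_mtr_config *cfg,
+ struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ struct rte_table_action_mtr_params p = {
+ .tc_mask = 1,
+ };
+
+ p.mtr[0].meter_profile_id = 0;
+ p.mtr[0].policer[RTE_COLOR_GREEN] = RTE_TABLE_ACTION_POLICER_COLOR_GREEN;
+ p.mtr[0].policer[RTE_COLOR_YELLOW] = RTE_TABLE_ACTION_POLICER_COLOR_YELLOW;
+ p.mtr[0].policer[RTE_COLOR_RED] = RTE_TABLE_ACTION_POLICER_DROP;
+
+ return mtr_apply(data, &p, cfg, mp, mp_size);
+}
+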
+static __rte_always_inline uint64_t
+pkt_work_mtr(struct rte_mbuf *mbuf,
+ struct mtr_trtcm_data *data,
+ struct dscp_table_data *dscp_table,
+ struct meter_profile_data *mp,
+ uint64_t time,
+ uint32_t dscp,
+ uint16_t total_length)
+{
+ uint64_t drop_mask;
+ struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
+ enum rte_color color_in, color_meter, color_policer;
+ uint32_t tc, mp_id;
+
+ tc = dscp_entry->tc;
+ color_in = dscp_entry->color;
+ data += tc;
+ mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
+
+ /* Meter */
+ color_meter = rte_meter_trtcm_color_aware_check(
+ &data->trtcm,
+ &mp[mp_id].profile,
+ time,
+ total_length,
+ color_in);
+
+ /* Stats */
+ MTR_TRTCM_DATA_STATS_INC(data, color_meter);
+
+ /* Police */
+ drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
+ color_policer =
+ MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
+ rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);
+
+ return drop_mask;
+}
+
+/**
+ * RTE_TABLE_ACTION_TM
+ */
+static int
+tm_cfg_check(struct rte_table_action_tm_config *tm)
+{
+ if ((tm->n_subports_per_port == 0) ||
+ (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
+ (tm->n_subports_per_port > UINT16_MAX) ||
+ (tm->n_pipes_per_subport == 0) ||
+ (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct tm_data {
+ uint32_t queue_id;
+ uint32_t reserved;
+} __rte_packed;
+
+static int
+tm_apply_check(struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ if ((p->subport_id >= cfg->n_subports_per_port) ||
+ (p->pipe_id >= cfg->n_pipes_per_subport))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+tm_apply(struct tm_data *data,
+ struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = tm_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ data->queue_id = p->subport_id <<
+ (__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
+ p->pipe_id << 4;
+
+ return 0;
+}
+
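+/*
+ * Worked example (values assumed): with cfg->n_pipes_per_subport =
+ * 4096, __builtin_ctz() yields 12, so queue_id = (subport_id << 16) |
+ * (pipe_id << 4). The low 4 bits are left clear for the traffic class
+ * queue OR-ed in by pkt_work_tm() below.
+ */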
+static __rte_always_inline void
+pkt_work_tm(struct rte_mbuf *mbuf,
+ struct tm_data *data,
+ struct dscp_table_data *dscp_table,
+ uint32_t dscp)
+{
+ struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
+ uint32_t queue_id = data->queue_id |
+ dscp_entry->tc_queue;
+ rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
+ (uint8_t)dscp_entry->color);
+}
+
+/**
+ * RTE_TABLE_ACTION_ENCAP
+ */
+static int
+encap_valid(enum rte_table_action_encap_type encap)
+{
+ switch (encap) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_cfg_check(struct rte_table_action_encap_config *encap)
+{
+ if ((encap->encap_mask == 0) ||
+ (__builtin_popcountll(encap->encap_mask) != 1))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct encap_ether_data {
+ struct rte_ether_hdr ether;
+};
+
+#define VLAN(pcp, dei, vid) \
+ ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
+ ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
+ (((uint64_t)(vid)) & 0xFFFLLU))
+
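+/*
+ * Worked example: VLAN(5, 0, 100) packs PCP 5 into bits [15:13], DEI 0
+ * into bit 12 and VID 100 into bits [11:0], i.e. 0xA064 in host order,
+ * before the rte_htons() applied by the callers below.
+ */
+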
+struct encap_vlan_data {
+ struct rte_ether_hdr ether;
+ struct rte_vlan_hdr vlan;
+};
+
+struct encap_qinq_data {
+ struct rte_ether_hdr ether;
+ struct rte_vlan_hdr svlan;
+ struct rte_vlan_hdr cvlan;
+};
+
+#define ETHER_TYPE_MPLS_UNICAST 0x8847
+
+#define ETHER_TYPE_MPLS_MULTICAST 0x8848
+
+#define MPLS(label, tc, s, ttl) \
+ ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
+ ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
+ ((((uint64_t)(s)) & 0x1LLU) << 8) | \
+ (((uint64_t)(ttl)) & 0xFFLLU)))
+
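+/*
+ * Worked example: MPLS(100, 0, 1, 64) packs label 100 into bits
+ * [31:12], TC 0 into bits [11:9], bottom-of-stack 1 into bit 8 and TTL
+ * 64 into bits [7:0], i.e. 0x00064140 before the rte_htonl() applied
+ * by encap_mpls_apply() below.
+ */
+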
+struct encap_mpls_data {
+ struct rte_ether_hdr ether;
+ uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
+ uint32_t mpls_count;
+} __rte_packed __rte_aligned(2);
+
+#define PPP_PROTOCOL_IP 0x0021
+
+struct pppoe_ppp_hdr {
+ uint16_t ver_type_code;
+ uint16_t session_id;
+ uint16_t length;
+ uint16_t protocol;
+};
+
+struct encap_pppoe_data {
+ struct rte_ether_hdr ether;
+ struct pppoe_ppp_hdr pppoe_ppp;
+};
+
+#define IP_PROTO_UDP 17
+
+struct encap_vxlan_ipv4_data {
+ struct rte_ether_hdr ether;
+ struct rte_ipv4_hdr ipv4;
+ struct rte_udp_hdr udp;
+ struct rte_vxlan_hdr vxlan;
+} __rte_packed __rte_aligned(2);
+
+struct encap_vxlan_ipv4_vlan_data {
+ struct rte_ether_hdr ether;
+ struct rte_vlan_hdr vlan;
+ struct rte_ipv4_hdr ipv4;
+ struct rte_udp_hdr udp;
+ struct rte_vxlan_hdr vxlan;
+} __rte_packed __rte_aligned(2);
+
+struct encap_vxlan_ipv6_data {
+ struct rte_ether_hdr ether;
+ struct rte_ipv6_hdr ipv6;
+ struct rte_udp_hdr udp;
+ struct rte_vxlan_hdr vxlan;
+} __rte_packed __rte_aligned(2);
+
+struct encap_vxlan_ipv6_vlan_data {
+ struct rte_ether_hdr ether;
+ struct rte_vlan_hdr vlan;
+ struct rte_ipv6_hdr ipv6;
+ struct rte_udp_hdr udp;
+ struct rte_vxlan_hdr vxlan;
+} __rte_packed __rte_aligned(2);
+
+struct encap_qinq_pppoe_data {
+ struct rte_ether_hdr ether;
+ struct rte_vlan_hdr svlan;
+ struct rte_vlan_hdr cvlan;
+ struct pppoe_ppp_hdr pppoe_ppp;
+} __rte_packed __rte_aligned(2);
+
+static size_t
+encap_data_size(struct rte_table_action_encap_config *encap)
+{
+ switch (encap->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ return sizeof(struct encap_ether_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ return sizeof(struct encap_vlan_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ return sizeof(struct encap_qinq_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ return sizeof(struct encap_mpls_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return sizeof(struct encap_pppoe_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
+ if (encap->vxlan.ip_version)
+ if (encap->vxlan.vlan)
+ return sizeof(struct encap_vxlan_ipv4_vlan_data);
+ else
+ return sizeof(struct encap_vxlan_ipv4_data);
+ else
+ if (encap->vxlan.vlan)
+ return sizeof(struct encap_vxlan_ipv6_vlan_data);
+ else
+ return sizeof(struct encap_vxlan_ipv6_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return sizeof(struct encap_qinq_pppoe_data);
+
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_apply_check(struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg)
+{
+ if ((encap_valid(p->type) == 0) ||
+ ((cfg->encap_mask & (1LLU << p->type)) == 0))
+ return -EINVAL;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ if ((p->mpls.mpls_count == 0) ||
+ (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
+ return -EINVAL;
+
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+encap_ether_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_ether_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ RTE_ETHER_TYPE_IPV4 :
+ RTE_ETHER_TYPE_IPV6;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
+ rte_ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_vlan_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_vlan_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ RTE_ETHER_TYPE_IPV4 :
+ RTE_ETHER_TYPE_IPV6;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
+ rte_ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
+ p->vlan.vlan.dei,
+ p->vlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_qinq_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_qinq_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ RTE_ETHER_TYPE_IPV4 :
+ RTE_ETHER_TYPE_IPV6;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
+ rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_QINQ);
+
+ /* SVLAN */
+ d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
+ p->qinq.svlan.dei,
+ p->qinq.svlan.vid));
+ d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
+
+ /* CVLAN */
+ d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
+ p->qinq.cvlan.dei,
+ p->qinq.cvlan.vid));
+ d->cvlan.eth_proto = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_qinq_pppoe_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_qinq_pppoe_data *d = data;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
+ rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
+
+ /* SVLAN */
+ d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
+ p->qinq.svlan.dei,
+ p->qinq.svlan.vid));
+ d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
+
+ /* CVLAN */
+ d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
+ p->qinq.cvlan.dei,
+ p->qinq.cvlan.vid));
+ d->cvlan.eth_proto = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);
+
+ /* PPPoE and PPP */
+ d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
+ d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id);
+ d->pppoe_ppp.length = 0; /* not pre-computed */
+ d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
+
+ return 0;
+}
+
+static int
+encap_mpls_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_mpls_data *d = data;
+ uint16_t ethertype = (p->mpls.unicast) ?
+ ETHER_TYPE_MPLS_UNICAST :
+ ETHER_TYPE_MPLS_MULTICAST;
+ uint32_t i;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
+ rte_ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ethertype);
+
+ /* MPLS */
+ for (i = 0; i < p->mpls.mpls_count - 1; i++)
+ d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
+ p->mpls.mpls[i].tc,
+ 0,
+ p->mpls.mpls[i].ttl));
+
+ d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
+ p->mpls.mpls[i].tc,
+ 1,
+ p->mpls.mpls[i].ttl));
+
+ d->mpls_count = p->mpls.mpls_count;
+ return 0;
+}
+
+static int
+encap_pppoe_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_pppoe_data *d = data;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
+ rte_ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);
+
+ /* PPPoE and PPP */
+ d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
+ d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
+ d->pppoe_ppp.length = 0; /* not pre-computed */
+ d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
+
+ return 0;
+}
+
+static int
+encap_vxlan_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg)
+{
+ if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
+ (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
+ (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
+ (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
+ (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
+ return -1;
+
+ if (cfg->vxlan.ip_version)
+ if (cfg->vxlan.vlan) {
+ struct encap_vxlan_ipv4_vlan_data *d = data;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->vxlan.ether.da,
+ &d->ether.d_addr);
+ rte_ether_addr_copy(&p->vxlan.ether.sa,
+ &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
+ p->vxlan.vlan.dei,
+ p->vxlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV4);
+
+ /* IPv4 */
+ d->ipv4.version_ihl = 0x45;
+ d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
+ d->ipv4.total_length = 0; /* not pre-computed */
+ d->ipv4.packet_id = 0;
+ d->ipv4.fragment_offset = 0;
+ d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
+ d->ipv4.next_proto_id = IP_PROTO_UDP;
+ d->ipv4.hdr_checksum = 0;
+ d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
+ d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
+
+ d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ } else {
+ struct encap_vxlan_ipv4_data *d = data;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->vxlan.ether.da,
+ &d->ether.d_addr);
+ rte_ether_addr_copy(&p->vxlan.ether.sa,
+ &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV4);
+
+ /* IPv4 */
+ d->ipv4.version_ihl = 0x45;
+ d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
+ d->ipv4.total_length = 0; /* not pre-computed */
+ d->ipv4.packet_id = 0;
+ d->ipv4.fragment_offset = 0;
+ d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
+ d->ipv4.next_proto_id = IP_PROTO_UDP;
+ d->ipv4.hdr_checksum = 0;
+ d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
+ d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
+
+ d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ }
+ else
+ if (cfg->vxlan.vlan) {
+ struct encap_vxlan_ipv6_vlan_data *d = data;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->vxlan.ether.da,
+ &d->ether.d_addr);
+ rte_ether_addr_copy(&p->vxlan.ether.sa,
+ &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
+ p->vxlan.vlan.dei,
+ p->vxlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV6);
+
+ /* IPv6 */
+ d->ipv6.vtc_flow = rte_htonl((6 << 28) |
+ (p->vxlan.ipv6.dscp << 22) |
+ p->vxlan.ipv6.flow_label);
+ d->ipv6.payload_len = 0; /* not pre-computed */
+ d->ipv6.proto = IP_PROTO_UDP;
+ d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
+ memcpy(d->ipv6.src_addr,
+ p->vxlan.ipv6.sa,
+ sizeof(p->vxlan.ipv6.sa));
+ memcpy(d->ipv6.dst_addr,
+ p->vxlan.ipv6.da,
+ sizeof(p->vxlan.ipv6.da));
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ } else {
+ struct encap_vxlan_ipv6_data *d = data;
+
+ /* Ethernet */
+ rte_ether_addr_copy(&p->vxlan.ether.da,
+ &d->ether.d_addr);
+ rte_ether_addr_copy(&p->vxlan.ether.sa,
+ &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV6);
+
+ /* IPv6 */
+ d->ipv6.vtc_flow = rte_htonl((6 << 28) |
+ (p->vxlan.ipv6.dscp << 22) |
+ p->vxlan.ipv6.flow_label);
+ d->ipv6.payload_len = 0; /* not pre-computed */
+ d->ipv6.proto = IP_PROTO_UDP;
+ d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
+ memcpy(d->ipv6.src_addr,
+ p->vxlan.ipv6.sa,
+ sizeof(p->vxlan.ipv6.sa));
+ memcpy(d->ipv6.dst_addr,
+ p->vxlan.ipv6.da,
+ sizeof(p->vxlan.ipv6.da));
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ }
+}
+
+static int
+encap_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg,
+ struct rte_table_action_common_config *common_cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = encap_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return encap_ether_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return encap_vlan_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return encap_qinq_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ return encap_mpls_apply(data, p);
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return encap_pppoe_apply(data, p);
+
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ return encap_vxlan_apply(data, p, cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return encap_qinq_pppoe_apply(data, p);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static __rte_always_inline uint16_t
+encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
+ uint16_t total_length)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Add total length (one's complement logic) */
+ cksum1 += total_length;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
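+/*
+ * Prepend helper: the destination pointer is moved back n bytes before
+ * the copy, so the new header lands immediately in front of the current
+ * start of the frame.
+ */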
+static __rte_always_inline void *
+encap(void *dst, const void *src, size_t n)
+{
+ dst = ((uint8_t *) dst) - n;
+ return rte_memcpy(dst, src, n);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
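+ /*
+ * ether_offset is measured from the start of the mbuf structure
+ * (RTE_MBUF_METADATA_* convention), while data_off is relative to
+ * buf_addr, hence the sizeof(struct rte_mbuf) correction; a zero
+ * private data area is assumed. The same conversion appears in the
+ * other encap workers below.
+ */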
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr) +
+ sizeof(struct rte_ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct rte_vxlan_hdr) +
+ sizeof(struct rte_udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap(struct rte_mbuf *mbuf,
+ void *data,
+ struct rte_table_action_encap_config *cfg,
+ void *ip,
+ uint16_t total_length,
+ uint32_t ip_offset)
+{
+ switch (cfg->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ encap(ip, data, sizeof(struct encap_ether_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_ether_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_ether_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ encap(ip, data, sizeof(struct encap_vlan_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_vlan_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_vlan_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ encap(ip, data, sizeof(struct encap_qinq_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_qinq_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_qinq_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ {
+ struct encap_mpls_data *mpls = data;
+ size_t size = sizeof(struct rte_ether_hdr) +
+ mpls->mpls_count * 4;
+
+ encap(ip, data, size);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
+ mbuf->pkt_len = mbuf->data_len = total_length + size;
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ {
+ struct encap_pppoe_data *pppoe =
+ encap(ip, data, sizeof(struct encap_pppoe_data));
+ pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_pppoe_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_pppoe_data);
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ {
+ struct encap_qinq_pppoe_data *qinq_pppoe =
+ encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
+ qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_qinq_pppoe_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_qinq_pppoe_data);
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
+ {
+ if (cfg->vxlan.ip_version)
+ if (cfg->vxlan.vlan)
+ pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
+ else
+ pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
+ else
+ if (cfg->vxlan.vlan)
+ pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
+ else
+ pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_NAT
+ */
+static int
+nat_cfg_check(struct rte_table_action_nat_config *nat)
+{
+ if ((nat->proto != 0x06) &&
+ (nat->proto != 0x11))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct nat_ipv4_data {
+ uint32_t addr;
+ uint16_t port;
+} __rte_packed;
+
+struct nat_ipv6_data {
+ uint8_t addr[16];
+ uint16_t port;
+} __rte_packed;
+
+static size_t
+nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
+ struct rte_table_action_common_config *common)
+{
+ int ip_version = common->ip_version;
+
+ return (ip_version) ?
+ sizeof(struct nat_ipv4_data) :
+ sizeof(struct nat_ipv6_data);
+}
+
+static int
+nat_apply_check(struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ if ((p->ip_version && (cfg->ip_version == 0)) ||
+ ((p->ip_version == 0) && cfg->ip_version))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nat_apply(void *data,
+ struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = nat_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ if (p->ip_version) {
+ struct nat_ipv4_data *d = data;
+
+ d->addr = rte_htonl(p->addr.ipv4);
+ d->port = rte_htons(p->port);
+ } else {
+ struct nat_ipv6_data *d = data;
+
+ memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
+ d->port = rte_htons(p->port);
+ }
+
+ return 0;
+}
+
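+/*
+ * Usage sketch (illustrative address and port): rewrite matching IPv4
+ * flows to 198.51.100.1:4000. p.ip_version must agree with
+ * cfg->ip_version, per nat_apply_check() above; whether the source or
+ * the destination side is rewritten is decided by the nat config, not
+ * by this entry.
+ */
+static __rte_unused int
+nat_apply_example(struct nat_ipv4_data *d,
+ struct rte_table_action_common_config *cfg)
+{
+ struct rte_table_action_nat_params p = {
+ .ip_version = 1,
+ .addr.ipv4 = 0xC6336401, /* 198.51.100.1 */
+ .port = 4000,
+ };
+
+ return nat_apply(d, &p, cfg);
+}
+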
+static __rte_always_inline uint16_t
+nat_ipv4_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
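+/*
+ * The checksum helpers here follow RFC 1624 incremental update
+ * arithmetic: fold the old checksum back into a sum, subtract the old
+ * 16-bit words, add the new ones, propagate the end-around carry twice
+ * and complement the result.
+ */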
+static __rte_always_inline uint16_t
+nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port 0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline uint16_t
+nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
+ uint16_t *ip0,
+ uint16_t *ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port 0 (one's complement logic) */
+ cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
+ ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
+ ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline void
+pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
+ struct nat_ipv4_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->src_addr,
+ data->addr,
+ tcp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->src_addr,
+ data->addr,
+ udp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->src_port = data->port;
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->dst_addr,
+ data->addr,
+ tcp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->dst_addr,
+ data->addr,
+ udp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->dst_port = data->port;
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+static __rte_always_inline void
+pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
+ struct nat_ipv6_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ tcp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ udp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ udp->src_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ tcp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ udp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ udp->dst_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_TTL
+ */
+static int
+ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
+{
+ if (ttl->drop == 0)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct ttl_data {
+ uint32_t n_packets;
+} __rte_packed;
+
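+/*
+ * The single word packs the decrement flag into bit 0 and the dropped
+ * packet counter into bits [31:1], which is why TTL_STATS_ADD() shifts
+ * the counter around the flag bit.
+ */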
+#define TTL_INIT(data, decrement) \
+ ((data)->n_packets = (decrement) ? 1 : 0)
+
+#define TTL_DEC_GET(data) \
+ ((uint8_t)((data)->n_packets & 1))
+
+#define TTL_STATS_RESET(data) \
+ ((data)->n_packets = ((data)->n_packets & 1))
+
+#define TTL_STATS_READ(data) \
+ ((data)->n_packets >> 1)
+
+#define TTL_STATS_ADD(data, value) \
+ ((data)->n_packets = \
+ (((((data)->n_packets >> 1) + (value)) << 1) | \
+ ((data)->n_packets & 1)))
+
+static int
+ttl_apply(void *data,
+ struct rte_table_action_ttl_params *p)
+{
+ struct ttl_data *d = data;
+
+ TTL_INIT(d, p->decrement);
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint16_t cksum = ip->hdr_checksum;
+ uint8_t ttl = ip->time_to_live;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
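+ /*
+ * Incremental checksum update: on a little-endian CPU the first
+ * (most significant in network order) checksum byte is the low
+ * byte of the loaded value, so adding ttl_diff here adds 0x0100
+ * in network order per TTL decrement (RFC 1141 style, ignoring
+ * the rare end-around carry).
+ */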
+ cksum += ttl_diff;
+ ttl -= ttl_diff;
+
+ ip->hdr_checksum = cksum;
+ ip->time_to_live = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+static __rte_always_inline uint64_t
+pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint8_t ttl = ip->hop_limits;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
+ ttl -= ttl_diff;
+
+ ip->hop_limits = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+/**
+ * RTE_TABLE_ACTION_STATS
+ */
+static int
+stats_cfg_check(struct rte_table_action_stats_config *stats)
+{
+ if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+struct stats_data {
+ uint64_t n_packets;
+ uint64_t n_bytes;
+} __rte_packed;
+
+static int
+stats_apply(struct stats_data *data,
+ struct rte_table_action_stats_params *p)
+{
+ data->n_packets = p->n_packets;
+ data->n_bytes = p->n_bytes;
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_stats(struct stats_data *data,
+ uint16_t total_length)
+{
+ data->n_packets++;
+ data->n_bytes += total_length;
+}
+
+/**
+ * RTE_TABLE_ACTION_TIME
+ */
+struct time_data {
+ uint64_t time;
+} __rte_packed;
+
+static int
+time_apply(struct time_data *data,
+ struct rte_table_action_time_params *p)
+{
+ data->time = p->time;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_time(struct time_data *data,
+ uint64_t time)
+{
+ data->time = time;
+}
+
+
+/**
+ * RTE_TABLE_ACTION_CRYPTO
+ */
+
+#define CRYPTO_OP_MASK_CIPHER 0x1
+#define CRYPTO_OP_MASK_AUTH 0x2
+#define CRYPTO_OP_MASK_AEAD 0x4
+
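+/*
+ * Per-packet crypto op layout in the mbuf private metadata: the op, the
+ * symmetric op descriptor and the IV/AAD bytes sit back to back, so a
+ * single cfg->op_offset locates all of them and their physical
+ * addresses can be derived from the mbuf IOVA.
+ */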
+struct crypto_op_sym_iv_aad {
+ struct rte_crypto_op op;
+ struct rte_crypto_sym_op sym_op;
+ union {
+ struct {
+ uint8_t cipher_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t auth_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ } cipher_auth;
+
+ struct {
+ uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
+ } aead_iv_aad;
+
+ } iv_aad;
+};
+
+struct sym_crypto_data {
+
+ union {
+ struct {
+
+ /** Length of cipher iv. */
+ uint16_t cipher_iv_len;
+
+ /** Offset from start of IP header to the cipher iv. */
+ uint16_t cipher_iv_data_offset;
+
+ /** Length of cipher iv to be updated in the mbuf. */
+ uint16_t cipher_iv_update_len;
+
+ /** Offset from start of IP header to the auth iv. */
+ uint16_t auth_iv_data_offset;
+
+ /** Length of auth iv in the mbuf. */
+ uint16_t auth_iv_len;
+
+ /** Length of auth iv to be updated in the mbuf. */
+ uint16_t auth_iv_update_len;
+
+ } cipher_auth;
+ struct {
+
+ /** Length of iv. */
+ uint16_t iv_len;
+
+ /** Offset from start of IP header to the aead iv. */
+ uint16_t iv_data_offset;
+
+ /** Length of iv to be updated in the mbuf. */
+ uint16_t iv_update_len;
+
+ /** Length of aad. */
+ uint16_t aad_len;
+
+ /** Offset from start of IP header to the aad. */
+ uint16_t aad_data_offset;
+
+ /** Length of aad to be updated in the mbuf. */
+ uint16_t aad_update_len;
+
+ } aead;
+ };
+
+ /** Offset from start of IP header to the data. */
+ uint16_t data_offset;
+
+ /** Digest length. */
+ uint16_t digest_len;
+
+ /** Block size. */
+ uint16_t block_size;
+
+ /** Mask of crypto operations. */
+ uint16_t op_mask;
+
+ /** Session pointer. */
+ struct rte_cryptodev_sym_session *session;
+
+ /** Direction of crypto: encrypt or decrypt. */
+ uint16_t direction;
+
+ /** Private data area storing the cipher IV / AAD. */
+ uint8_t iv_aad_data[32];
+
+} __rte_packed;
+
+static int
+sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
+{
+ if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
+ return -EINVAL;
+ if (cfg->mp_create == NULL || cfg->mp_init == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
+{
+ struct rte_cryptodev_info dev_info;
+ const struct rte_cryptodev_capabilities *cap;
+ uint32_t i;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ i++) {
+ cap = &dev_info.capabilities[i];
+
+ if (cap->sym.xform_type != xform->type)
+ continue;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (cap->sym.cipher.algo == xform->cipher.algo))
+ return cap->sym.cipher.block_size;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
+ (cap->sym.aead.algo == xform->aead.algo))
+ return cap->sym.aead.block_size;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
+ break;
+ }
+
+ return -1;
+}
+
+static int
+sym_crypto_apply(struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ struct rte_table_action_sym_crypto_params *p)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ struct rte_crypto_sym_xform *xform = p->xform;
+ struct rte_cryptodev_sym_session *session;
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+
+ while (xform) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &xform->cipher;
+
+ if (cipher_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ if (cipher_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_CIPHER;
+
+ data->cipher_auth.cipher_iv_len =
+ cipher_xform->iv.length;
+ data->cipher_auth.cipher_iv_data_offset = (uint16_t)
+ p->cipher_auth.cipher_iv_update.offset;
+ data->cipher_auth.cipher_iv_update_len = (uint16_t)
+ p->cipher_auth.cipher_iv_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.cipher_iv.val,
+ p->cipher_auth.cipher_iv.length);
+
+ data->direction = cipher_xform->op;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &xform->auth;
+ if (auth_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ data->op_mask |= CRYPTO_OP_MASK_AUTH;
+
+ data->cipher_auth.auth_iv_len = auth_xform->iv.length;
+ data->cipher_auth.auth_iv_data_offset = (uint16_t)
+ p->cipher_auth.auth_iv_update.offset;
+ data->cipher_auth.auth_iv_update_len = (uint16_t)
+ p->cipher_auth.auth_iv_update.length;
+ data->digest_len = auth_xform->digest_length;
+
+ data->direction = (auth_xform->op ==
+ RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &xform->aead;
+
+ if ((aead_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
+ aead_xform->aad_length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
+ return -EINVAL;
+ if (aead_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_AEAD;
+
+ data->digest_len = aead_xform->digest_length;
+ data->aead.iv_len = aead_xform->iv.length;
+ data->aead.aad_len = aead_xform->aad_length;
+
+ data->aead.iv_data_offset = (uint16_t)
+ p->aead.iv_update.offset;
+ data->aead.iv_update_len = (uint16_t)
+ p->aead.iv_update.length;
+ data->aead.aad_data_offset = (uint16_t)
+ p->aead.aad_update.offset;
+ data->aead.aad_update_len = (uint16_t)
+ p->aead.aad_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->aead.iv.val,
+ p->aead.iv.length);
+
+ rte_memcpy(data->iv_aad_data + p->aead.iv.length,
+ p->aead.aad.val,
+ p->aead.aad.length);
+
+ data->direction = (aead_xform->op ==
+ RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ } else
+ return -EINVAL;
+
+ xform = xform->next;
+ }
+
+ if (auth_xform && auth_xform->iv.length) {
+ if (cipher_xform) {
+ if (auth_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
+ cipher_xform->iv.length)
+ return -EINVAL;
+
+ rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ } else {
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ }
+ }
+
+ session = rte_cryptodev_sym_session_create(cfg->mp_create);
+ if (!session)
+ return -ENOMEM;
+
+ ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
+ p->xform, cfg->mp_init);
+ if (ret < 0) {
+ rte_cryptodev_sym_session_free(session);
+ return ret;
+ }
+
+ data->data_offset = (uint16_t)p->data_offset;
+ data->session = session;
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ uint16_t ip_offset)
+{
+ struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
+ RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
+ struct rte_crypto_op *op = &crypto_op->op;
+ struct rte_crypto_sym_op *sym = op->sym;
+ uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
+ uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
+
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ sym->m_src = mbuf;
+ sym->m_dst = NULL;
+ sym->session = data->session;
+
+ /** pad the packet */
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
+ data->block_size) - payload_len;
+
+ if (unlikely(rte_pktmbuf_append(mbuf, append_len +
+ data->digest_len) == NULL))
+ return 1;
+
+ payload_len += append_len;
+ } else
+ payload_len -= data->digest_len;
+
+ if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
+ /** prepare cipher op */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
+
+ sym->cipher.data.length = payload_len;
+ sym->cipher.data.offset = data->data_offset - pkt_offset;
+
+ if (data->cipher_auth.cipher_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.cipher_iv_data_offset
+ + ip_offset);
+
+ /** For encryption, update the packet IV field; otherwise
+ * update the local iv_aad_data copy.
+ */
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_update_len);
+ else
+ rte_memcpy(data->iv_aad_data, pkt_iv,
+ data->cipher_auth.cipher_iv_update_len);
+ }
+
+ /** write iv */
+ rte_memcpy(iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_len);
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
+ /** Authentication always starts from the IP header. */
+ sym->auth.data.offset = ip_offset - pkt_offset;
+ sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
+ data->digest_len;
+ sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+
+ if (data->cipher_auth.auth_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.auth_iv_data_offset
+ + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->cipher_auth.auth_iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->cipher_auth.auth_iv_update_len);
+ }
+
+ if (data->cipher_auth.auth_iv_len) {
+ /** write auth iv */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
+
+ rte_memcpy(iv, data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len,
+ data->cipher_auth.auth_iv_len);
+ }
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
+ uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
+ uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
+
+ sym->aead.aad.data = aad;
+ sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
+ sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+ sym->aead.data.offset = data->data_offset - pkt_offset;
+ sym->aead.data.length = payload_len;
+
+ if (data->aead.iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.iv_data_offset + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->aead.iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->aead.iv_update_len);
+ }
+
+ rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
+
+ if (data->aead.aad_update_len) {
+ uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.aad_data_offset + ip_offset);
+ uint8_t *data_aad = data->iv_aad_data +
+ data->aead.iv_len;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_aad, data_aad,
+ data->aead.aad_update_len);
+ else
+ rte_memcpy(data_aad, pkt_aad,
+ data->aead.aad_update_len);
+ }
+
+ rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
+ data->aead.aad_len);
+ }
+
+ return 0;
+}
+
+/**
+ * RTE_TABLE_ACTION_TAG
+ */
+struct tag_data {
+ uint32_t tag;
+} __rte_packed;
+
+static int
+tag_apply(struct tag_data *data,
+ struct rte_table_action_tag_params *p)
+{
+ data->tag = p->tag;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_tag(struct rte_mbuf *mbuf,
+ struct tag_data *data)
+{
+ mbuf->hash.fdir.hi = data->tag;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
+static __rte_always_inline void
+pkt4_work_tag(struct rte_mbuf *mbuf0,
+ struct rte_mbuf *mbuf1,
+ struct rte_mbuf *mbuf2,
+ struct rte_mbuf *mbuf3,
+ struct tag_data *data0,
+ struct tag_data *data1,
+ struct tag_data *data2,
+ struct tag_data *data3)
+{
+ mbuf0->hash.fdir.hi = data0->tag;
+ mbuf1->hash.fdir.hi = data1->tag;
+ mbuf2->hash.fdir.hi = data2->tag;
+ mbuf3->hash.fdir.hi = data3->tag;
+
+ mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
+/**
+ * RTE_TABLE_ACTION_DECAP
+ */
+struct decap_data {
+ uint16_t n;
+} __rte_packed;
+
+static int
+decap_apply(struct decap_data *data,
+ struct rte_table_action_decap_params *p)