1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
17 #include <rte_cryptodev.h>
18 #include <rte_cryptodev_pmd.h>
20 #include "rte_table_action.h"
22 #define rte_htons rte_cpu_to_be_16
23 #define rte_htonl rte_cpu_to_be_32
25 #define rte_ntohs rte_be_to_cpu_16
26 #define rte_ntohl rte_be_to_cpu_32
29 * RTE_TABLE_ACTION_FWD
31 #define fwd_data rte_pipeline_table_entry
34 fwd_apply(struct fwd_data *data,
35 struct rte_table_action_fwd_params *p)
37 data->action = p->action;
39 if (p->action == RTE_PIPELINE_ACTION_PORT)
40 data->port_id = p->id;
42 if (p->action == RTE_PIPELINE_ACTION_TABLE)
43 data->table_id = p->id;
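/*
 * Note: fwd_data is just an alias for struct rte_pipeline_table_entry
 * (see the #define above), so the FWD action above writes its
 * action/port_id/table_id fields straight into the pipeline table entry
 * and the pipeline consumes them directly.
 */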
52 lb_cfg_check(struct rte_table_action_lb_config *cfg)
55 (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
56 (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
57 (!rte_is_power_of_2(cfg->key_size)) ||
58 (cfg->f_hash == NULL))
65 uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
66 } __attribute__((__packed__));
69 lb_apply(struct lb_data *data,
70 struct rte_table_action_lb_params *p)
72 memcpy(data->out, p->out, sizeof(data->out));
77 static __rte_always_inline void
78 pkt_work_lb(struct rte_mbuf *mbuf,
80 struct rte_table_action_lb_config *cfg)
82 uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
83 uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
87 digest = cfg->f_hash(pkt_key,
91 pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
92 out_val = data->out[pos];
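/*
 * pkt_work_lb() above: hash the cfg->key_size bytes of packet metadata at
 * cfg->key_offset with cfg->f_hash, mask the digest down to an index into
 * the out[] table, and write the selected 32-bit value to the metadata
 * word at cfg->out_offset.
 */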
98 * RTE_TABLE_ACTION_MTR
101 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
103 if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
104 ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
105 (mtr->n_bytes_enabled != 0))
110 #define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
111 ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
112 ((((uint64_t)(tc)) & 0x3) << 2) | \
113 ((((uint64_t)(color)) & 0x3) << 4)))
115 #define MBUF_SCHED_COLOR(sched, color) \
116 (((sched) & (~0x30LLU)) | ((color) << 4))
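/*
 * The two macros above handle the mbuf sched word: MBUF_SCHED_QUEUE_TC_COLOR()
 * packs the queue id into bits 1:0, the traffic class into bits 3:2 and the
 * color into bits 5:4, while MBUF_SCHED_COLOR() rewrites only the color bits
 * of an already built sched word.
 */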
118 struct mtr_trtcm_data {
119 struct rte_meter_trtcm trtcm;
120 uint64_t stats[e_RTE_METER_COLORS];
121 } __attribute__((__packed__));
123 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
124 (((data)->stats[e_RTE_METER_GREEN] & 0xF8LLU) >> 3)
127 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
130 data->stats[e_RTE_METER_GREEN] &= ~0xF8LLU;
131 data->stats[e_RTE_METER_GREEN] |= (profile_id % 32) << 3;
134 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
135 (((data)->stats[(color)] & 4LLU) >> 2)
137 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
138 ((enum rte_meter_color)((data)->stats[(color)] & 3LLU))
141 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
142 enum rte_meter_color color,
143 enum rte_table_action_policer action)
145 if (action == RTE_TABLE_ACTION_POLICER_DROP) {
146 data->stats[color] |= 4LLU;
148 data->stats[color] &= ~7LLU;
149 data->stats[color] |= color & 3LLU;
154 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
155 enum rte_meter_color color)
157 return data->stats[color] >> 8;
161 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
162 enum rte_meter_color color)
164 data->stats[color] &= 0xFFLU;
167 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
168 ((data)->stats[(color)] += (1LLU << 8))
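/*
 * Each per-color stats word of struct mtr_trtcm_data doubles as control
 * state: bits 1:0 hold the policer output color, bit 2 the drop flag,
 * bits 7:3 (GREEN word only) the meter profile id, and bits 63:8 the packet
 * counter, which is why the counter is bumped in steps of 1 << 8 and read
 * with a >> 8.
 */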
171 mtr_data_size(struct rte_table_action_mtr_config *mtr)
173 return mtr->n_tc * sizeof(struct mtr_trtcm_data);
176 struct dscp_table_entry_data {
177 enum rte_meter_color color;
179 uint16_t queue_tc_color;
182 struct dscp_table_data {
183 struct dscp_table_entry_data entry[64];
186 struct meter_profile_data {
187 struct rte_meter_trtcm_profile profile;
192 static struct meter_profile_data *
193 meter_profile_data_find(struct meter_profile_data *mp,
199 for (i = 0; i < mp_size; i++) {
200 struct meter_profile_data *mp_data = &mp[i];
202 if (mp_data->valid && (mp_data->profile_id == profile_id))
209 static struct meter_profile_data *
210 meter_profile_data_find_unused(struct meter_profile_data *mp,
215 for (i = 0; i < mp_size; i++) {
216 struct meter_profile_data *mp_data = &mp[i];
226 mtr_apply_check(struct rte_table_action_mtr_params *p,
227 struct rte_table_action_mtr_config *cfg,
228 struct meter_profile_data *mp,
233 if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
236 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
237 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
238 struct meter_profile_data *mp_data;
240 if ((p->tc_mask & (1LLU << i)) == 0)
243 mp_data = meter_profile_data_find(mp,
245 p_tc->meter_profile_id);
254 mtr_apply(struct mtr_trtcm_data *data,
255 struct rte_table_action_mtr_params *p,
256 struct rte_table_action_mtr_config *cfg,
257 struct meter_profile_data *mp,
263 /* Check input arguments */
264 status = mtr_apply_check(p, cfg, mp, mp_size);
269 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
270 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
271 struct mtr_trtcm_data *data_tc = &data[i];
272 struct meter_profile_data *mp_data;
274 if ((p->tc_mask & (1LLU << i)) == 0)
278 mp_data = meter_profile_data_find(mp,
280 p_tc->meter_profile_id);
284 memset(data_tc, 0, sizeof(*data_tc));
287 status = rte_meter_trtcm_config(&data_tc->trtcm,
293 mtr_trtcm_data_meter_profile_id_set(data_tc,
296 /* Policer actions */
297 mtr_trtcm_data_policer_action_set(data_tc,
299 p_tc->policer[e_RTE_METER_GREEN]);
301 mtr_trtcm_data_policer_action_set(data_tc,
303 p_tc->policer[e_RTE_METER_YELLOW]);
305 mtr_trtcm_data_policer_action_set(data_tc,
307 p_tc->policer[e_RTE_METER_RED]);
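/*
 * Per-packet metering (pkt_work_mtr() below): the DSCP table supplies the
 * input color, the color-aware trTCM check produces the meter output color,
 * the per-color packet counter is incremented, and the policer action stored
 * in the entry yields both the drop mask and the final color written back
 * into the mbuf sched word.
 */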
313 static __rte_always_inline uint64_t
314 pkt_work_mtr(struct rte_mbuf *mbuf,
315 struct mtr_trtcm_data *data,
316 struct dscp_table_data *dscp_table,
317 struct meter_profile_data *mp,
320 uint16_t total_length)
322 uint64_t drop_mask, sched;
323 uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
324 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
325 enum rte_meter_color color_in, color_meter, color_policer;
329 color_in = dscp_entry->color;
331 mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
335 color_meter = rte_meter_trtcm_color_aware_check(
343 MTR_TRTCM_DATA_STATS_INC(data, color_meter);
346 drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
348 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
349 *sched_ptr = MBUF_SCHED_COLOR(sched, color_policer);
355 * RTE_TABLE_ACTION_TM
358 tm_cfg_check(struct rte_table_action_tm_config *tm)
360 if ((tm->n_subports_per_port == 0) ||
361 (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
362 (tm->n_subports_per_port > UINT16_MAX) ||
363 (tm->n_pipes_per_subport == 0) ||
364 (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
371 uint16_t queue_tc_color;
374 } __attribute__((__packed__));
377 tm_apply_check(struct rte_table_action_tm_params *p,
378 struct rte_table_action_tm_config *cfg)
380 if ((p->subport_id >= cfg->n_subports_per_port) ||
381 (p->pipe_id >= cfg->n_pipes_per_subport))
388 tm_apply(struct tm_data *data,
389 struct rte_table_action_tm_params *p,
390 struct rte_table_action_tm_config *cfg)
394 /* Check input arguments */
395 status = tm_apply_check(p, cfg);
400 data->queue_tc_color = 0;
401 data->subport = (uint16_t) p->subport_id;
402 data->pipe = p->pipe_id;
407 static __rte_always_inline void
408 pkt_work_tm(struct rte_mbuf *mbuf,
409 struct tm_data *data,
410 struct dscp_table_data *dscp_table,
413 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
414 struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
415 struct tm_data sched;
418 sched.queue_tc_color = dscp_entry->queue_tc_color;
423 * RTE_TABLE_ACTION_ENCAP
426 encap_valid(enum rte_table_action_encap_type encap)
429 case RTE_TABLE_ACTION_ENCAP_ETHER:
430 case RTE_TABLE_ACTION_ENCAP_VLAN:
431 case RTE_TABLE_ACTION_ENCAP_QINQ:
432 case RTE_TABLE_ACTION_ENCAP_MPLS:
433 case RTE_TABLE_ACTION_ENCAP_PPPOE:
434 case RTE_TABLE_ACTION_ENCAP_VXLAN:
442 encap_cfg_check(struct rte_table_action_encap_config *encap)
444 if ((encap->encap_mask == 0) ||
445 (__builtin_popcountll(encap->encap_mask) != 1))
451 struct encap_ether_data {
452 struct ether_hdr ether;
453 } __attribute__((__packed__));
455 #define VLAN(pcp, dei, vid) \
456 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
457 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
458 (((uint64_t)(vid)) & 0xFFFLLU))
460 struct encap_vlan_data {
461 struct ether_hdr ether;
462 struct vlan_hdr vlan;
463 } __attribute__((__packed__));
465 struct encap_qinq_data {
466 struct ether_hdr ether;
467 struct vlan_hdr svlan;
468 struct vlan_hdr cvlan;
469 } __attribute__((__packed__));
471 #define ETHER_TYPE_MPLS_UNICAST 0x8847
473 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
475 #define MPLS(label, tc, s, ttl) \
476 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
477 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
478 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
479 (((uint64_t)(ttl)) & 0xFFLLU)))
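/*
 * MPLS() above builds one label stack entry in host order: label in bits
 * 31:12, traffic class in bits 11:9, bottom-of-stack flag in bit 8 and TTL
 * in bits 7:0; the result is converted with rte_htonl() when applied.
 */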
481 struct encap_mpls_data {
482 struct ether_hdr ether;
483 uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
485 } __attribute__((__packed__));
487 #define ETHER_TYPE_PPPOE_SESSION 0x8864
489 #define PPP_PROTOCOL_IP 0x0021
491 struct pppoe_ppp_hdr {
492 uint16_t ver_type_code;
496 } __attribute__((__packed__));
498 struct encap_pppoe_data {
499 struct ether_hdr ether;
500 struct pppoe_ppp_hdr pppoe_ppp;
501 } __attribute__((__packed__));
503 #define IP_PROTO_UDP 17
505 struct encap_vxlan_ipv4_data {
506 struct ether_hdr ether;
507 struct ipv4_hdr ipv4;
509 struct vxlan_hdr vxlan;
510 } __attribute__((__packed__));
512 struct encap_vxlan_ipv4_vlan_data {
513 struct ether_hdr ether;
514 struct vlan_hdr vlan;
515 struct ipv4_hdr ipv4;
517 struct vxlan_hdr vxlan;
518 } __attribute__((__packed__));
520 struct encap_vxlan_ipv6_data {
521 struct ether_hdr ether;
522 struct ipv6_hdr ipv6;
524 struct vxlan_hdr vxlan;
525 } __attribute__((__packed__));
527 struct encap_vxlan_ipv6_vlan_data {
528 struct ether_hdr ether;
529 struct vlan_hdr vlan;
530 struct ipv6_hdr ipv6;
532 struct vxlan_hdr vxlan;
533 } __attribute__((__packed__));
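/*
 * All encapsulation headers are stored fully pre-built in the table entry;
 * only the length and checksum fields marked "not pre-computed" are patched
 * per packet by the pkt_work_encap*() routines further below.
 */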
536 encap_data_size(struct rte_table_action_encap_config *encap)
538 switch (encap->encap_mask) {
539 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
540 return sizeof(struct encap_ether_data);
542 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
543 return sizeof(struct encap_vlan_data);
545 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
546 return sizeof(struct encap_qinq_data);
548 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
549 return sizeof(struct encap_mpls_data);
551 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
552 return sizeof(struct encap_pppoe_data);
554 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
555 if (encap->vxlan.ip_version)
556 if (encap->vxlan.vlan)
557 return sizeof(struct encap_vxlan_ipv4_vlan_data);
559 return sizeof(struct encap_vxlan_ipv4_data);
561 if (encap->vxlan.vlan)
562 return sizeof(struct encap_vxlan_ipv6_vlan_data);
564 return sizeof(struct encap_vxlan_ipv6_data);
572 encap_apply_check(struct rte_table_action_encap_params *p,
573 struct rte_table_action_encap_config *cfg)
575 if ((encap_valid(p->type) == 0) ||
576 ((cfg->encap_mask & (1LLU << p->type)) == 0))
580 case RTE_TABLE_ACTION_ENCAP_ETHER:
583 case RTE_TABLE_ACTION_ENCAP_VLAN:
586 case RTE_TABLE_ACTION_ENCAP_QINQ:
589 case RTE_TABLE_ACTION_ENCAP_MPLS:
590 if ((p->mpls.mpls_count == 0) ||
591 (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
596 case RTE_TABLE_ACTION_ENCAP_PPPOE:
599 case RTE_TABLE_ACTION_ENCAP_VXLAN:
608 encap_ether_apply(void *data,
609 struct rte_table_action_encap_params *p,
610 struct rte_table_action_common_config *common_cfg)
612 struct encap_ether_data *d = data;
613 uint16_t ethertype = (common_cfg->ip_version) ?
618 ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
619 ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
620 d->ether.ether_type = rte_htons(ethertype);
626 encap_vlan_apply(void *data,
627 struct rte_table_action_encap_params *p,
628 struct rte_table_action_common_config *common_cfg)
630 struct encap_vlan_data *d = data;
631 uint16_t ethertype = (common_cfg->ip_version) ?
636 ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
637 ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
638 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
641 d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
644 d->vlan.eth_proto = rte_htons(ethertype);
650 encap_qinq_apply(void *data,
651 struct rte_table_action_encap_params *p,
652 struct rte_table_action_common_config *common_cfg)
654 struct encap_qinq_data *d = data;
655 uint16_t ethertype = (common_cfg->ip_version) ?
660 ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
661 ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
662 d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
665 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
668 d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
671 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
674 d->cvlan.eth_proto = rte_htons(ethertype);
680 encap_mpls_apply(void *data,
681 struct rte_table_action_encap_params *p)
683 struct encap_mpls_data *d = data;
684 uint16_t ethertype = (p->mpls.unicast) ?
685 ETHER_TYPE_MPLS_UNICAST :
686 ETHER_TYPE_MPLS_MULTICAST;
690 ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
691 ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
692 d->ether.ether_type = rte_htons(ethertype);
695 for (i = 0; i < p->mpls.mpls_count - 1; i++)
696 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
699 p->mpls.mpls[i].ttl));
701 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
704 p->mpls.mpls[i].ttl));
706 d->mpls_count = p->mpls.mpls_count;
711 encap_pppoe_apply(void *data,
712 struct rte_table_action_encap_params *p)
714 struct encap_pppoe_data *d = data;
717 ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
718 ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
719 d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
722 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
723 d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
724 d->pppoe_ppp.length = 0; /* not pre-computed */
725 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
731 encap_vxlan_apply(void *data,
732 struct rte_table_action_encap_params *p,
733 struct rte_table_action_encap_config *cfg)
735 if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
736 (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
737 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
738 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
739 (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
742 if (cfg->vxlan.ip_version)
743 if (cfg->vxlan.vlan) {
744 struct encap_vxlan_ipv4_vlan_data *d = data;
747 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
748 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
749 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
752 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
755 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);
758 d->ipv4.version_ihl = 0x45;
759 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
760 d->ipv4.total_length = 0; /* not pre-computed */
761 d->ipv4.packet_id = 0;
762 d->ipv4.fragment_offset = 0;
763 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
764 d->ipv4.next_proto_id = IP_PROTO_UDP;
765 d->ipv4.hdr_checksum = 0;
766 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
767 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
769 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
772 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
773 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
774 d->udp.dgram_len = 0; /* not pre-computed */
775 d->udp.dgram_cksum = 0;
778 d->vxlan.vx_flags = rte_htonl(0x08000000);
779 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
783 struct encap_vxlan_ipv4_data *d = data;
786 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
787 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
788 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);
791 d->ipv4.version_ihl = 0x45;
792 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
793 d->ipv4.total_length = 0; /* not pre-computed */
794 d->ipv4.packet_id = 0;
795 d->ipv4.fragment_offset = 0;
796 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
797 d->ipv4.next_proto_id = IP_PROTO_UDP;
798 d->ipv4.hdr_checksum = 0;
799 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
800 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
802 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
805 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
806 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
807 d->udp.dgram_len = 0; /* not pre-computed */
808 d->udp.dgram_cksum = 0;
811 d->vxlan.vx_flags = rte_htonl(0x08000000);
812 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
817 if (cfg->vxlan.vlan) {
818 struct encap_vxlan_ipv6_vlan_data *d = data;
821 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
822 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
823 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
826 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
829 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);
832 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
833 (p->vxlan.ipv6.dscp << 22) |
834 p->vxlan.ipv6.flow_label);
835 d->ipv6.payload_len = 0; /* not pre-computed */
836 d->ipv6.proto = IP_PROTO_UDP;
837 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
838 memcpy(d->ipv6.src_addr,
840 sizeof(p->vxlan.ipv6.sa));
841 memcpy(d->ipv6.dst_addr,
843 sizeof(p->vxlan.ipv6.da));
846 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
847 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
848 d->udp.dgram_len = 0; /* not pre-computed */
849 d->udp.dgram_cksum = 0;
852 d->vxlan.vx_flags = rte_htonl(0x08000000);
853 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
857 struct encap_vxlan_ipv6_data *d = data;
860 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
861 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
862 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);
865 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
866 (p->vxlan.ipv6.dscp << 22) |
867 p->vxlan.ipv6.flow_label);
868 d->ipv6.payload_len = 0; /* not pre-computed */
869 d->ipv6.proto = IP_PROTO_UDP;
870 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
871 memcpy(d->ipv6.src_addr,
873 sizeof(p->vxlan.ipv6.sa));
874 memcpy(d->ipv6.dst_addr,
876 sizeof(p->vxlan.ipv6.da));
879 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
880 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
881 d->udp.dgram_len = 0; /* not pre-computed */
882 d->udp.dgram_cksum = 0;
885 d->vxlan.vx_flags = rte_htonl(0x08000000);
886 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
893 encap_apply(void *data,
894 struct rte_table_action_encap_params *p,
895 struct rte_table_action_encap_config *cfg,
896 struct rte_table_action_common_config *common_cfg)
900 /* Check input arguments */
901 status = encap_apply_check(p, cfg);
906 case RTE_TABLE_ACTION_ENCAP_ETHER:
907 return encap_ether_apply(data, p, common_cfg);
909 case RTE_TABLE_ACTION_ENCAP_VLAN:
910 return encap_vlan_apply(data, p, common_cfg);
912 case RTE_TABLE_ACTION_ENCAP_QINQ:
913 return encap_qinq_apply(data, p, common_cfg);
915 case RTE_TABLE_ACTION_ENCAP_MPLS:
916 return encap_mpls_apply(data, p);
918 case RTE_TABLE_ACTION_ENCAP_PPPOE:
919 return encap_pppoe_apply(data, p);
921 case RTE_TABLE_ACTION_ENCAP_VXLAN:
922 return encap_vxlan_apply(data, p, cfg);
929 static __rte_always_inline uint16_t
930 encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
931 uint16_t total_length)
936 cksum1 = ~cksum1 & 0xFFFF;
938 /* Add total length (one's complement logic) */
939 cksum1 += total_length;
940 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
941 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
943 return (uint16_t)(~cksum1);
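/*
 * encap_vxlan_ipv4_checksum_update() above performs a one's-complement
 * incremental update (in the spirit of RFC 1624): the checksum stored in the
 * table entry was computed with total_length == 0, so at run time only the
 * real total length has to be folded in, avoiding a full header checksum
 * recomputation per packet.
 */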
946 static __rte_always_inline void *
947 encap(void *dst, const void *src, size_t n)
949 dst = ((uint8_t *) dst) - n;
950 return rte_memcpy(dst, src, n);
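/*
 * encap() above grows the packet towards lower addresses: it moves the write
 * pointer n bytes before the current headers, copies the pre-built
 * encapsulation header there and returns the new start of the frame.
 */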
953 static __rte_always_inline void
954 pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
955 struct encap_vxlan_ipv4_data *vxlan_tbl,
956 struct rte_table_action_encap_config *cfg)
958 uint32_t ether_offset = cfg->vxlan.data_offset;
959 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
960 struct encap_vxlan_ipv4_data *vxlan_pkt;
961 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
963 ether_length = (uint16_t)mbuf->pkt_len;
964 ipv4_total_length = ether_length +
965 (sizeof(struct vxlan_hdr) +
966 sizeof(struct udp_hdr) +
967 sizeof(struct ipv4_hdr));
968 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
969 rte_htons(ipv4_total_length));
970 udp_length = ether_length +
971 (sizeof(struct vxlan_hdr) +
972 sizeof(struct udp_hdr));
974 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
975 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
976 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
977 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
979 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
980 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
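/*
 * In the pkt_work_encap_vxlan*() routines, cfg->vxlan.data_offset is an
 * absolute offset from the start of the mbuf structure, so subtracting
 * sizeof(struct rte_mbuf) converts it back into a data_off value (this
 * assumes the data buffer immediately follows the mbuf structure); the size
 * of the prepended encapsulation is subtracted as well, and pkt_len/data_len
 * grow by the same amount. The VLAN and IPv6 variants below follow the same
 * pattern.
 */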
983 static __rte_always_inline void
984 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
985 struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
986 struct rte_table_action_encap_config *cfg)
988 uint32_t ether_offset = cfg->vxlan.data_offset;
989 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
990 struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
991 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
993 ether_length = (uint16_t)mbuf->pkt_len;
994 ipv4_total_length = ether_length +
995 (sizeof(struct vxlan_hdr) +
996 sizeof(struct udp_hdr) +
997 sizeof(struct ipv4_hdr));
998 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
999 rte_htons(ipv4_total_length));
1000 udp_length = ether_length +
1001 (sizeof(struct vxlan_hdr) +
1002 sizeof(struct udp_hdr));
1004 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1005 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1006 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1007 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1009 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1010 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
1013 static __rte_always_inline void
1014 pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
1015 struct encap_vxlan_ipv6_data *vxlan_tbl,
1016 struct rte_table_action_encap_config *cfg)
1018 uint32_t ether_offset = cfg->vxlan.data_offset;
1019 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1020 struct encap_vxlan_ipv6_data *vxlan_pkt;
1021 uint16_t ether_length, ipv6_payload_length, udp_length;
1023 ether_length = (uint16_t)mbuf->pkt_len;
1024 ipv6_payload_length = ether_length +
1025 (sizeof(struct vxlan_hdr) +
1026 sizeof(struct udp_hdr));
1027 udp_length = ether_length +
1028 (sizeof(struct vxlan_hdr) +
1029 sizeof(struct udp_hdr));
1031 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1032 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1033 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1035 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1036 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
1039 static __rte_always_inline void
1040 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
1041 struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
1042 struct rte_table_action_encap_config *cfg)
1044 uint32_t ether_offset = cfg->vxlan.data_offset;
1045 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1046 struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
1047 uint16_t ether_length, ipv6_payload_length, udp_length;
1049 ether_length = (uint16_t)mbuf->pkt_len;
1050 ipv6_payload_length = ether_length +
1051 (sizeof(struct vxlan_hdr) +
1052 sizeof(struct udp_hdr));
1053 udp_length = ether_length +
1054 (sizeof(struct vxlan_hdr) +
1055 sizeof(struct udp_hdr));
1057 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1058 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1059 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1061 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1062 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
1065 static __rte_always_inline void
1066 pkt_work_encap(struct rte_mbuf *mbuf,
1068 struct rte_table_action_encap_config *cfg,
1070 uint16_t total_length,
1073 switch (cfg->encap_mask) {
1074 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
1075 encap(ip, data, sizeof(struct encap_ether_data));
1076 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1077 sizeof(struct encap_ether_data));
1078 mbuf->pkt_len = mbuf->data_len = total_length +
1079 sizeof(struct encap_ether_data);
1082 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
1083 encap(ip, data, sizeof(struct encap_vlan_data));
1084 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1085 sizeof(struct encap_vlan_data));
1086 mbuf->pkt_len = mbuf->data_len = total_length +
1087 sizeof(struct encap_vlan_data);
1090 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
1091 encap(ip, data, sizeof(struct encap_qinq_data));
1092 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1093 sizeof(struct encap_qinq_data));
1094 mbuf->pkt_len = mbuf->data_len = total_length +
1095 sizeof(struct encap_qinq_data);
1098 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
1100 struct encap_mpls_data *mpls = data;
1101 size_t size = sizeof(struct ether_hdr) +
1102 mpls->mpls_count * 4;
1104 encap(ip, data, size);
1105 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
1106 mbuf->pkt_len = mbuf->data_len = total_length + size;
1110 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
1112 struct encap_pppoe_data *pppoe =
1113 encap(ip, data, sizeof(struct encap_pppoe_data));
1114 pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1115 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1116 sizeof(struct encap_pppoe_data));
1117 mbuf->pkt_len = mbuf->data_len = total_length +
1118 sizeof(struct encap_pppoe_data);
1122 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
1124 if (cfg->vxlan.ip_version)
1125 if (cfg->vxlan.vlan)
1126 pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
1128 pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
1130 if (cfg->vxlan.vlan)
1131 pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
1133 pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
1142 * RTE_TABLE_ACTION_NAT
1145 nat_cfg_check(struct rte_table_action_nat_config *nat)
1147 if ((nat->proto != 0x06) &&
1148 (nat->proto != 0x11))
1154 struct nat_ipv4_data {
1157 } __attribute__((__packed__));
1159 struct nat_ipv6_data {
1162 } __attribute__((__packed__));
1165 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
1166 struct rte_table_action_common_config *common)
1168 int ip_version = common->ip_version;
1170 return (ip_version) ?
1171 sizeof(struct nat_ipv4_data) :
1172 sizeof(struct nat_ipv6_data);
1176 nat_apply_check(struct rte_table_action_nat_params *p,
1177 struct rte_table_action_common_config *cfg)
1179 if ((p->ip_version && (cfg->ip_version == 0)) ||
1180 ((p->ip_version == 0) && cfg->ip_version))
1187 nat_apply(void *data,
1188 struct rte_table_action_nat_params *p,
1189 struct rte_table_action_common_config *cfg)
1193 /* Check input arguments */
1194 status = nat_apply_check(p, cfg);
1199 if (p->ip_version) {
1200 struct nat_ipv4_data *d = data;
1202 d->addr = rte_htonl(p->addr.ipv4);
1203 d->port = rte_htons(p->port);
1205 struct nat_ipv6_data *d = data;
1207 memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
1208 d->port = rte_htons(p->port);
1214 static __rte_always_inline uint16_t
1215 nat_ipv4_checksum_update(uint16_t cksum0,
1222 cksum1 = ~cksum1 & 0xFFFF;
1224 /* Subtract ip0 (one's complement logic) */
1225 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
1226 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1227 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1229 /* Add ip1 (one's complement logic) */
1230 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
1231 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1232 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1234 return (uint16_t)(~cksum1);
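/*
 * nat_ipv4_checksum_update() above and the two TCP/UDP variants below all
 * use the same one's-complement incremental update: un-complement the old
 * checksum, subtract the 16-bit words of the old address (and port), add the
 * new ones, fold the carries twice and complement the result.
 */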
1237 static __rte_always_inline uint16_t
1238 nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
1247 cksum1 = ~cksum1 & 0xFFFF;
1249 /* Subtract ip0 and port0 (one's complement logic) */
1250 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
1251 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1252 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1254 /* Add ip1 and port1 (one's complement logic) */
1255 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
1256 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1257 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1259 return (uint16_t)(~cksum1);
1262 static __rte_always_inline uint16_t
1263 nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
1272 cksum1 = ~cksum1 & 0xFFFF;
1274 /* Subtract ip0 and port0 (one's complement logic) */
1275 cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
1276 ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
1277 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1278 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1280 /* Add ip1 and port1 (one's complement logic) */
1281 cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
1282 ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
1283 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1284 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1286 return (uint16_t)(~cksum1);
1289 static __rte_always_inline void
1290 pkt_ipv4_work_nat(struct ipv4_hdr *ip,
1291 struct nat_ipv4_data *data,
1292 struct rte_table_action_nat_config *cfg)
1294 if (cfg->source_nat) {
1295 if (cfg->proto == 0x6) {
1296 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1297 uint16_t ip_cksum, tcp_cksum;
1299 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1303 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1309 ip->src_addr = data->addr;
1310 ip->hdr_checksum = ip_cksum;
1311 tcp->src_port = data->port;
1312 tcp->cksum = tcp_cksum;
1314 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1315 uint16_t ip_cksum, udp_cksum;
1317 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1321 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1327 ip->src_addr = data->addr;
1328 ip->hdr_checksum = ip_cksum;
1329 udp->src_port = data->port;
1330 if (udp->dgram_cksum)
1331 udp->dgram_cksum = udp_cksum;
1334 if (cfg->proto == 0x6) {
1335 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1336 uint16_t ip_cksum, tcp_cksum;
1338 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1342 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1348 ip->dst_addr = data->addr;
1349 ip->hdr_checksum = ip_cksum;
1350 tcp->dst_port = data->port;
1351 tcp->cksum = tcp_cksum;
1353 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1354 uint16_t ip_cksum, udp_cksum;
1356 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1360 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1366 ip->dst_addr = data->addr;
1367 ip->hdr_checksum = ip_cksum;
1368 udp->dst_port = data->port;
1369 if (udp->dgram_cksum)
1370 udp->dgram_cksum = udp_cksum;
1375 static __rte_always_inline void
1376 pkt_ipv6_work_nat(struct ipv6_hdr *ip,
1377 struct nat_ipv6_data *data,
1378 struct rte_table_action_nat_config *cfg)
1380 if (cfg->source_nat) {
1381 if (cfg->proto == 0x6) {
1382 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1385 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1386 (uint16_t *)ip->src_addr,
1387 (uint16_t *)data->addr,
1391 rte_memcpy(ip->src_addr, data->addr, 16);
1392 tcp->src_port = data->port;
1393 tcp->cksum = tcp_cksum;
1395 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1398 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1399 (uint16_t *)ip->src_addr,
1400 (uint16_t *)data->addr,
1404 rte_memcpy(ip->src_addr, data->addr, 16);
1405 udp->src_port = data->port;
1406 udp->dgram_cksum = udp_cksum;
1409 if (cfg->proto == 0x6) {
1410 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1413 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1414 (uint16_t *)ip->dst_addr,
1415 (uint16_t *)data->addr,
1419 rte_memcpy(ip->dst_addr, data->addr, 16);
1420 tcp->dst_port = data->port;
1421 tcp->cksum = tcp_cksum;
1423 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1426 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1427 (uint16_t *)ip->dst_addr,
1428 (uint16_t *)data->addr,
1432 rte_memcpy(ip->dst_addr, data->addr, 16);
1433 udp->dst_port = data->port;
1434 udp->dgram_cksum = udp_cksum;
1440 * RTE_TABLE_ACTION_TTL
1443 ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
1453 } __attribute__((__packed__));
1455 #define TTL_INIT(data, decrement) \
1456 ((data)->n_packets = (decrement) ? 1 : 0)
1458 #define TTL_DEC_GET(data) \
1459 ((uint8_t)((data)->n_packets & 1))
1461 #define TTL_STATS_RESET(data) \
1462 ((data)->n_packets = ((data)->n_packets & 1))
1464 #define TTL_STATS_READ(data) \
1465 ((data)->n_packets >> 1)
1467 #define TTL_STATS_ADD(data, value) \
1468 ((data)->n_packets = \
1469 (((((data)->n_packets >> 1) + (value)) << 1) | \
1470 ((data)->n_packets & 1)))
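/*
 * struct ttl_data packs everything into the single n_packets word: bit 0 is
 * the "decrement TTL" enable flag and bits 63:1 count the packets whose TTL
 * reached zero, hence the shift-by-one in the TTL_STATS_* macros above.
 */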
1473 ttl_apply(void *data,
1474 struct rte_table_action_ttl_params *p)
1476 struct ttl_data *d = data;
1478 TTL_INIT(d, p->decrement);
1483 static __rte_always_inline uint64_t
1484 pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
1485 struct ttl_data *data)
1488 uint16_t cksum = ip->hdr_checksum;
1489 uint8_t ttl = ip->time_to_live;
1490 uint8_t ttl_diff = TTL_DEC_GET(data);
1495 ip->hdr_checksum = cksum;
1496 ip->time_to_live = ttl;
1498 drop = (ttl == 0) ? 1 : 0;
1499 TTL_STATS_ADD(data, drop);
1504 static __rte_always_inline uint64_t
1505 pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
1506 struct ttl_data *data)
1509 uint8_t ttl = ip->hop_limits;
1510 uint8_t ttl_diff = TTL_DEC_GET(data);
1514 ip->hop_limits = ttl;
1516 drop = (ttl == 0) ? 1 : 0;
1517 TTL_STATS_ADD(data, drop);
1523 * RTE_TABLE_ACTION_STATS
1526 stats_cfg_check(struct rte_table_action_stats_config *stats)
1528 if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
1537 } __attribute__((__packed__));
1540 stats_apply(struct stats_data *data,
1541 struct rte_table_action_stats_params *p)
1543 data->n_packets = p->n_packets;
1544 data->n_bytes = p->n_bytes;
1549 static __rte_always_inline void
1550 pkt_work_stats(struct stats_data *data,
1551 uint16_t total_length)
1554 data->n_bytes += total_length;
1558 * RTE_TABLE_ACTION_TIME
1562 } __attribute__((__packed__));
1565 time_apply(struct time_data *data,
1566 struct rte_table_action_time_params *p)
1568 data->time = p->time;
1572 static __rte_always_inline void
1573 pkt_work_time(struct time_data *data,
1581 * RTE_TABLE_ACTION_CRYPTO
1584 #define CRYPTO_OP_MASK_CIPHER 0x1
1585 #define CRYPTO_OP_MASK_AUTH 0x2
1586 #define CRYPTO_OP_MASK_AEAD 0x4
1588 struct crypto_op_sym_iv_aad {
1589 struct rte_crypto_op op;
1590 struct rte_crypto_sym_op sym_op;
1594 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1596 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1600 uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1601 uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
1607 struct sym_crypto_data {
1612 /** Length of cipher iv. */
1613 uint16_t cipher_iv_len;
1615 /** Offset from start of IP header to the cipher iv. */
1616 uint16_t cipher_iv_data_offset;
1618 /** Length of cipher iv to be updated in the mbuf. */
1619 uint16_t cipher_iv_update_len;
1621 /** Offset from start of IP header to the auth iv. */
1622 uint16_t auth_iv_data_offset;
1624 /** Length of auth iv in the mbuf. */
1625 uint16_t auth_iv_len;
1627 /** Length of auth iv to be updated in the mbuf. */
1628 uint16_t auth_iv_update_len;
1633 /** Length of iv. */
1636 /** Offset from start of IP header to the aead iv. */
1637 uint16_t iv_data_offset;
1639 /** Length of iv to be updated in the mbuf. */
1640 uint16_t iv_update_len;
1642 /** Length of aad */
1645 /** Offset from start of IP header to the aad. */
1646 uint16_t aad_data_offset;
1648 /** Length of aad to be updated in the mbuf. */
1649 uint16_t aad_update_len;
1654 /** Offset from start of IP header to the data. */
1655 uint16_t data_offset;
1657 /** Digest length. */
1658 uint16_t digest_len;
1661 uint16_t block_size;
1663 /** Mask of crypto operation */
1666 /** Session pointer. */
1667 struct rte_cryptodev_sym_session *session;
1669 /** Direction of crypto, encrypt or decrypt */
1672 /** Private data area used to store the cipher iv / aad. */
1673 uint8_t iv_aad_data[32];
1675 } __attribute__((__packed__));
1678 sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
1680 if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
1682 if (cfg->mp_create == NULL || cfg->mp_init == NULL)
1689 get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
1691 struct rte_cryptodev_info dev_info;
1692 const struct rte_cryptodev_capabilities *cap;
1695 rte_cryptodev_info_get(cdev_id, &dev_info);
1697 for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1699 cap = &dev_info.capabilities[i];
1701 if (cap->sym.xform_type != xform->type)
1704 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
1705 (cap->sym.cipher.algo == xform->cipher.algo))
1706 return cap->sym.cipher.block_size;
1708 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
1709 (cap->sym.aead.algo == xform->aead.algo))
1710 return cap->sym.aead.block_size;
1712 if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
1720 sym_crypto_apply(struct sym_crypto_data *data,
1721 struct rte_table_action_sym_crypto_config *cfg,
1722 struct rte_table_action_sym_crypto_params *p)
1724 const struct rte_crypto_cipher_xform *cipher_xform = NULL;
1725 const struct rte_crypto_auth_xform *auth_xform = NULL;
1726 const struct rte_crypto_aead_xform *aead_xform = NULL;
1727 struct rte_crypto_sym_xform *xform = p->xform;
1728 struct rte_cryptodev_sym_session *session;
1731 memset(data, 0, sizeof(*data));
1734 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1735 cipher_xform = &xform->cipher;
1737 if (cipher_xform->iv.length >
1738 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
1740 if (cipher_xform->iv.offset !=
1741 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
1744 ret = get_block_size(xform, cfg->cryptodev_id);
1747 data->block_size = (uint16_t)ret;
1748 data->op_mask |= CRYPTO_OP_MASK_CIPHER;
1750 data->cipher_auth.cipher_iv_len =
1751 cipher_xform->iv.length;
1752 data->cipher_auth.cipher_iv_data_offset = (uint16_t)
1753 p->cipher_auth.cipher_iv_update.offset;
1754 data->cipher_auth.cipher_iv_update_len = (uint16_t)
1755 p->cipher_auth.cipher_iv_update.length;
1757 rte_memcpy(data->iv_aad_data,
1758 p->cipher_auth.cipher_iv.val,
1759 p->cipher_auth.cipher_iv.length);
1761 data->direction = cipher_xform->op;
1763 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1764 auth_xform = &xform->auth;
1765 if (auth_xform->iv.length >
1766 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
1768 data->op_mask |= CRYPTO_OP_MASK_AUTH;
1770 data->cipher_auth.auth_iv_len = auth_xform->iv.length;
1771 data->cipher_auth.auth_iv_data_offset = (uint16_t)
1772 p->cipher_auth.auth_iv_update.offset;
1773 data->cipher_auth.auth_iv_update_len = (uint16_t)
1774 p->cipher_auth.auth_iv_update.length;
1775 data->digest_len = auth_xform->digest_length;
1777 data->direction = (auth_xform->op ==
1778 RTE_CRYPTO_AUTH_OP_GENERATE) ?
1779 RTE_CRYPTO_CIPHER_OP_ENCRYPT :
1780 RTE_CRYPTO_CIPHER_OP_DECRYPT;
1782 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1783 aead_xform = &xform->aead;
1785 if ((aead_xform->iv.length >
1786 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
1787 aead_xform->aad_length >
1788 RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
1790 if (aead_xform->iv.offset !=
1791 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
1794 ret = get_block_size(xform, cfg->cryptodev_id);
1797 data->block_size = (uint16_t)ret;
1798 data->op_mask |= CRYPTO_OP_MASK_AEAD;
1800 data->digest_len = aead_xform->digest_length;
1801 data->aead.iv_len = aead_xform->iv.length;
1802 data->aead.aad_len = aead_xform->aad_length;
1804 data->aead.iv_data_offset = (uint16_t)
1805 p->aead.iv_update.offset;
1806 data->aead.iv_update_len = (uint16_t)
1807 p->aead.iv_update.length;
1808 data->aead.aad_data_offset = (uint16_t)
1809 p->aead.aad_update.offset;
1810 data->aead.aad_update_len = (uint16_t)
1811 p->aead.aad_update.length;
1813 rte_memcpy(data->iv_aad_data,
1817 rte_memcpy(data->iv_aad_data + p->aead.iv.length,
1819 p->aead.aad.length);
1821 data->direction = (aead_xform->op ==
1822 RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1823 RTE_CRYPTO_CIPHER_OP_ENCRYPT :
1824 RTE_CRYPTO_CIPHER_OP_DECRYPT;
1828 xform = xform->next;
1831 if (auth_xform && auth_xform->iv.length) {
1833 if (auth_xform->iv.offset !=
1834 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
1835 cipher_xform->iv.length)
1838 rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
1839 p->cipher_auth.auth_iv.val,
1840 p->cipher_auth.auth_iv.length);
1842 rte_memcpy(data->iv_aad_data,
1843 p->cipher_auth.auth_iv.val,
1844 p->cipher_auth.auth_iv.length);
1848 session = rte_cryptodev_sym_session_create(cfg->mp_create);
1852 ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
1853 p->xform, cfg->mp_init);
1855 rte_cryptodev_sym_session_free(session);
1859 data->data_offset = (uint16_t)p->data_offset;
1860 data->session = session;
1865 static __rte_always_inline uint64_t
1866 pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
1867 struct rte_table_action_sym_crypto_config *cfg,
1870 struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
1871 RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
1872 struct rte_crypto_op *op = &crypto_op->op;
1873 struct rte_crypto_sym_op *sym = op->sym;
1874 uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
1875 uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
1877 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1878 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1879 op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
1880 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1883 sym->session = data->session;
1885 /** For encryption, pad the payload up to the cipher block size and reserve room for the digest; for decryption, exclude the digest from the payload length. */
1886 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1887 uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
1888 data->block_size) - payload_len;
1890 if (unlikely(rte_pktmbuf_append(mbuf, append_len +
1891 data->digest_len) == NULL))
1894 payload_len += append_len;
1896 payload_len -= data->digest_len;
1898 if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
1899 /** prepare cipher op */
1900 uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
1902 sym->cipher.data.length = payload_len;
1903 sym->cipher.data.offset = data->data_offset - pkt_offset;
1905 if (data->cipher_auth.cipher_iv_update_len) {
1906 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1907 data->cipher_auth.cipher_iv_data_offset
1910 /** For encryption, update the pkt iv field, otherwise
1911 * update the iv_aad_data field
1913 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1914 rte_memcpy(pkt_iv, data->iv_aad_data,
1915 data->cipher_auth.cipher_iv_update_len);
1917 rte_memcpy(data->iv_aad_data, pkt_iv,
1918 data->cipher_auth.cipher_iv_update_len);
1922 rte_memcpy(iv, data->iv_aad_data,
1923 data->cipher_auth.cipher_iv_len);
1926 if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
1927 /** Authentication always starts from the IP header. */
1928 sym->auth.data.offset = ip_offset - pkt_offset;
1929 sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
1931 sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
1932 uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
1934 sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1935 rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
1937 if (data->cipher_auth.auth_iv_update_len) {
1938 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1939 data->cipher_auth.auth_iv_data_offset
1941 uint8_t *data_iv = data->iv_aad_data +
1942 data->cipher_auth.cipher_iv_len;
1944 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1945 rte_memcpy(pkt_iv, data_iv,
1946 data->cipher_auth.auth_iv_update_len);
1948 rte_memcpy(data_iv, pkt_iv,
1949 data->cipher_auth.auth_iv_update_len);
1952 if (data->cipher_auth.auth_iv_len) {
1953 /** prepare auth iv */
1954 uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
1956 rte_memcpy(iv, data->iv_aad_data +
1957 data->cipher_auth.cipher_iv_len,
1958 data->cipher_auth.auth_iv_len);
1962 if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
1963 uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
1964 uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
1966 sym->aead.aad.data = aad;
1967 sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1968 aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
1969 sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
1970 uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
1972 sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1973 rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
1974 sym->aead.data.offset = data->data_offset - pkt_offset;
1975 sym->aead.data.length = payload_len;
1977 if (data->aead.iv_update_len) {
1978 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1979 data->aead.iv_data_offset + ip_offset);
1980 uint8_t *data_iv = data->iv_aad_data;
1982 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1983 rte_memcpy(pkt_iv, data_iv,
1984 data->aead.iv_update_len);
1986 rte_memcpy(data_iv, pkt_iv,
1987 data->aead.iv_update_len);
1990 rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
1992 if (data->aead.aad_update_len) {
1993 uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1994 data->aead.aad_data_offset + ip_offset);
1995 uint8_t *data_aad = data->iv_aad_data +
1998 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1999 rte_memcpy(pkt_aad, data_aad,
2000 data->aead.aad_update_len);
2002 rte_memcpy(data_aad, pkt_aad,
2003 data->aead.aad_update_len);
2006 rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
2007 data->aead.aad_len);
2014 * RTE_TABLE_ACTION_TAG
2018 } __attribute__((__packed__));
2021 tag_apply(struct tag_data *data,
2022 struct rte_table_action_tag_params *p)
2028 static __rte_always_inline void
2029 pkt_work_tag(struct rte_mbuf *mbuf,
2030 struct tag_data *data)
2032 mbuf->hash.fdir.hi = data->tag;
2033 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2036 static __rte_always_inline void
2037 pkt4_work_tag(struct rte_mbuf *mbuf0,
2038 struct rte_mbuf *mbuf1,
2039 struct rte_mbuf *mbuf2,
2040 struct rte_mbuf *mbuf3,
2041 struct tag_data *data0,
2042 struct tag_data *data1,
2043 struct tag_data *data2,
2044 struct tag_data *data3)
2046 mbuf0->hash.fdir.hi = data0->tag;
2047 mbuf1->hash.fdir.hi = data1->tag;
2048 mbuf2->hash.fdir.hi = data2->tag;
2049 mbuf3->hash.fdir.hi = data3->tag;
2051 mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2052 mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2053 mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2054 mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2058 * RTE_TABLE_ACTION_DECAP
2062 } __attribute__((__packed__));
2065 decap_apply(struct decap_data *data,
2066 struct rte_table_action_decap_params *p)
2072 static __rte_always_inline void
2073 pkt_work_decap(struct rte_mbuf *mbuf,
2074 struct decap_data *data)
2076 uint16_t data_off = mbuf->data_off;
2077 uint16_t data_len = mbuf->data_len;
2078 uint32_t pkt_len = mbuf->pkt_len;
2079 uint16_t n = data->n;
2081 mbuf->data_off = data_off + n;
2082 mbuf->data_len = data_len - n;
2083 mbuf->pkt_len = pkt_len - n;
2086 static __rte_always_inline void
2087 pkt4_work_decap(struct rte_mbuf *mbuf0,
2088 struct rte_mbuf *mbuf1,
2089 struct rte_mbuf *mbuf2,
2090 struct rte_mbuf *mbuf3,
2091 struct decap_data *data0,
2092 struct decap_data *data1,
2093 struct decap_data *data2,
2094 struct decap_data *data3)
2096 uint16_t data_off0 = mbuf0->data_off;
2097 uint16_t data_len0 = mbuf0->data_len;
2098 uint32_t pkt_len0 = mbuf0->pkt_len;
2100 uint16_t data_off1 = mbuf1->data_off;
2101 uint16_t data_len1 = mbuf1->data_len;
2102 uint32_t pkt_len1 = mbuf1->pkt_len;
2104 uint16_t data_off2 = mbuf2->data_off;
2105 uint16_t data_len2 = mbuf2->data_len;
2106 uint32_t pkt_len2 = mbuf2->pkt_len;
2108 uint16_t data_off3 = mbuf3->data_off;
2109 uint16_t data_len3 = mbuf3->data_len;
2110 uint32_t pkt_len3 = mbuf3->pkt_len;
2112 uint16_t n0 = data0->n;
2113 uint16_t n1 = data1->n;
2114 uint16_t n2 = data2->n;
2115 uint16_t n3 = data3->n;
2117 mbuf0->data_off = data_off0 + n0;
2118 mbuf0->data_len = data_len0 - n0;
2119 mbuf0->pkt_len = pkt_len0 - n0;
2121 mbuf1->data_off = data_off1 + n1;
2122 mbuf1->data_len = data_len1 - n1;
2123 mbuf1->pkt_len = pkt_len1 - n1;
2125 mbuf2->data_off = data_off2 + n2;
2126 mbuf2->data_len = data_len2 - n2;
2127 mbuf2->pkt_len = pkt_len2 - n2;
2129 mbuf3->data_off = data_off3 + n3;
2130 mbuf3->data_len = data_len3 - n3;
2131 mbuf3->pkt_len = pkt_len3 - n3;
2138 action_valid(enum rte_table_action_type action)
2141 case RTE_TABLE_ACTION_FWD:
2142 case RTE_TABLE_ACTION_LB:
2143 case RTE_TABLE_ACTION_MTR:
2144 case RTE_TABLE_ACTION_TM:
2145 case RTE_TABLE_ACTION_ENCAP:
2146 case RTE_TABLE_ACTION_NAT:
2147 case RTE_TABLE_ACTION_TTL:
2148 case RTE_TABLE_ACTION_STATS:
2149 case RTE_TABLE_ACTION_TIME:
2150 case RTE_TABLE_ACTION_SYM_CRYPTO:
2151 case RTE_TABLE_ACTION_TAG:
2152 case RTE_TABLE_ACTION_DECAP:
2160 #define RTE_TABLE_ACTION_MAX 64
2163 uint64_t action_mask;
2164 struct rte_table_action_common_config common;
2165 struct rte_table_action_lb_config lb;
2166 struct rte_table_action_mtr_config mtr;
2167 struct rte_table_action_tm_config tm;
2168 struct rte_table_action_encap_config encap;
2169 struct rte_table_action_nat_config nat;
2170 struct rte_table_action_ttl_config ttl;
2171 struct rte_table_action_stats_config stats;
2172 struct rte_table_action_sym_crypto_config sym_crypto;
2176 action_cfg_size(enum rte_table_action_type action)
2179 case RTE_TABLE_ACTION_LB:
2180 return sizeof(struct rte_table_action_lb_config);
2181 case RTE_TABLE_ACTION_MTR:
2182 return sizeof(struct rte_table_action_mtr_config);
2183 case RTE_TABLE_ACTION_TM:
2184 return sizeof(struct rte_table_action_tm_config);
2185 case RTE_TABLE_ACTION_ENCAP:
2186 return sizeof(struct rte_table_action_encap_config);
2187 case RTE_TABLE_ACTION_NAT:
2188 return sizeof(struct rte_table_action_nat_config);
2189 case RTE_TABLE_ACTION_TTL:
2190 return sizeof(struct rte_table_action_ttl_config);
2191 case RTE_TABLE_ACTION_STATS:
2192 return sizeof(struct rte_table_action_stats_config);
2193 case RTE_TABLE_ACTION_SYM_CRYPTO:
2194 return sizeof(struct rte_table_action_sym_crypto_config);
2201 action_cfg_get(struct ap_config *ap_config,
2202 enum rte_table_action_type type)
2205 case RTE_TABLE_ACTION_LB:
2206 return &ap_config->lb;
2208 case RTE_TABLE_ACTION_MTR:
2209 return &ap_config->mtr;
2211 case RTE_TABLE_ACTION_TM:
2212 return &ap_config->tm;
2214 case RTE_TABLE_ACTION_ENCAP:
2215 return &ap_config->encap;
2217 case RTE_TABLE_ACTION_NAT:
2218 return &ap_config->nat;
2220 case RTE_TABLE_ACTION_TTL:
2221 return &ap_config->ttl;
2223 case RTE_TABLE_ACTION_STATS:
2224 return &ap_config->stats;
2226 case RTE_TABLE_ACTION_SYM_CRYPTO:
2227 return &ap_config->sym_crypto;
2234 action_cfg_set(struct ap_config *ap_config,
2235 enum rte_table_action_type type,
2238 void *dst = action_cfg_get(ap_config, type);
2241 memcpy(dst, action_cfg, action_cfg_size(type));
2243 ap_config->action_mask |= 1LLU << type;
2247 size_t offset[RTE_TABLE_ACTION_MAX];
2252 action_data_size(enum rte_table_action_type action,
2253 struct ap_config *ap_config)
2256 case RTE_TABLE_ACTION_FWD:
2257 return sizeof(struct fwd_data);
2259 case RTE_TABLE_ACTION_LB:
2260 return sizeof(struct lb_data);
2262 case RTE_TABLE_ACTION_MTR:
2263 return mtr_data_size(&ap_config->mtr);
2265 case RTE_TABLE_ACTION_TM:
2266 return sizeof(struct tm_data);
2268 case RTE_TABLE_ACTION_ENCAP:
2269 return encap_data_size(&ap_config->encap);
2271 case RTE_TABLE_ACTION_NAT:
2272 return nat_data_size(&ap_config->nat,
2273 &ap_config->common);
2275 case RTE_TABLE_ACTION_TTL:
2276 return sizeof(struct ttl_data);
2278 case RTE_TABLE_ACTION_STATS:
2279 return sizeof(struct stats_data);
2281 case RTE_TABLE_ACTION_TIME:
2282 return sizeof(struct time_data);
2284 case RTE_TABLE_ACTION_SYM_CRYPTO:
2285 return (sizeof(struct sym_crypto_data));
2287 case RTE_TABLE_ACTION_TAG:
2288 return sizeof(struct tag_data);
2290 case RTE_TABLE_ACTION_DECAP:
2291 return sizeof(struct decap_data);
2300 action_data_offset_set(struct ap_data *ap_data,
2301 struct ap_config *ap_config)
2303 uint64_t action_mask = ap_config->action_mask;
2307 memset(ap_data->offset, 0, sizeof(ap_data->offset));
2310 for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
2311 if (action_mask & (1LLU << action)) {
2312 ap_data->offset[action] = offset;
2313 offset += action_data_size((enum rte_table_action_type)action,
2317 ap_data->total_size = offset;
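/*
 * action_data_offset_set() above lays out the per-entry action data: the
 * data blocks of all enabled actions are packed back to back in ascending
 * action-type order, the starting offset of each block is recorded in
 * ap_data->offset[] and the grand total in ap_data->total_size.
 */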
2320 struct rte_table_action_profile {
2321 struct ap_config cfg;
2322 struct ap_data data;
2326 struct rte_table_action_profile *
2327 rte_table_action_profile_create(struct rte_table_action_common_config *common)
2329 struct rte_table_action_profile *ap;
2331 /* Check input arguments */
2335 /* Memory allocation */
2336 ap = calloc(1, sizeof(struct rte_table_action_profile));
2340 /* Initialization */
2341 memcpy(&ap->cfg.common, common, sizeof(*common));
2348 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
2349 enum rte_table_action_type type,
2350 void *action_config)
2354 /* Check input arguments */
2355 if ((profile == NULL) ||
2357 (action_valid(type) == 0) ||
2358 (profile->cfg.action_mask & (1LLU << type)) ||
2359 ((action_cfg_size(type) == 0) && action_config) ||
2360 (action_cfg_size(type) && (action_config == NULL)))
2364 case RTE_TABLE_ACTION_LB:
2365 status = lb_cfg_check(action_config);
2368 case RTE_TABLE_ACTION_MTR:
2369 status = mtr_cfg_check(action_config);
2372 case RTE_TABLE_ACTION_TM:
2373 status = tm_cfg_check(action_config);
2376 case RTE_TABLE_ACTION_ENCAP:
2377 status = encap_cfg_check(action_config);
2380 case RTE_TABLE_ACTION_NAT:
2381 status = nat_cfg_check(action_config);
2384 case RTE_TABLE_ACTION_TTL:
2385 status = ttl_cfg_check(action_config);
2388 case RTE_TABLE_ACTION_STATS:
2389 status = stats_cfg_check(action_config);
2392 case RTE_TABLE_ACTION_SYM_CRYPTO:
2393 status = sym_crypto_cfg_check(action_config);
2405 action_cfg_set(&profile->cfg, type, action_config);
2411 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
2413 if (profile->frozen)
2416 profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
2417 action_data_offset_set(&profile->data, &profile->cfg);
2418 profile->frozen = 1;
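/*
 * Typical call sequence (illustrative sketch only; the surrounding table
 * setup and names such as common_cfg, socket_id and entry_data are
 * placeholders, not part of this file):
 *
 *	struct rte_table_action_profile *profile;
 *	struct rte_table_action *a;
 *	struct rte_table_action_fwd_params fwd = {
 *		.action = RTE_PIPELINE_ACTION_PORT,
 *		.id = 0,
 *	};
 *
 *	profile = rte_table_action_profile_create(&common_cfg);
 *	rte_table_action_profile_action_register(profile,
 *		RTE_TABLE_ACTION_FWD, NULL);
 *	rte_table_action_profile_freeze(profile);
 *	a = rte_table_action_create(profile, socket_id);
 *	rte_table_action_apply(a, entry_data, RTE_TABLE_ACTION_FWD, &fwd);
 */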
2424 rte_table_action_profile_free(struct rte_table_action_profile *profile)
2426 if (profile == NULL)
2436 #define METER_PROFILES_MAX 32
2438 struct rte_table_action {
2439 struct ap_config cfg;
2440 struct ap_data data;
2441 struct dscp_table_data dscp_table;
2442 struct meter_profile_data mp[METER_PROFILES_MAX];
2445 struct rte_table_action *
2446 rte_table_action_create(struct rte_table_action_profile *profile,
2449 struct rte_table_action *action;
2451 /* Check input arguments */
2452 if ((profile == NULL) ||
2453 (profile->frozen == 0))
2456 /* Memory allocation */
2457 action = rte_zmalloc_socket(NULL,
2458 sizeof(struct rte_table_action),
2459 RTE_CACHE_LINE_SIZE,
2464 /* Initialization */
2465 memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
2466 memcpy(&action->data, &profile->data, sizeof(profile->data));
2471 static __rte_always_inline void *
2472 action_data_get(void *data,
2473 struct rte_table_action *action,
2474 enum rte_table_action_type type)
2476 size_t offset = action->data.offset[type];
2477 uint8_t *data_bytes = data;
2479 return &data_bytes[offset];
2483 rte_table_action_apply(struct rte_table_action *action,
2485 enum rte_table_action_type type,
2486 void *action_params)
2490 /* Check input arguments */
2491 if ((action == NULL) ||
2493 (action_valid(type) == 0) ||
2494 ((action->cfg.action_mask & (1LLU << type)) == 0) ||
2495 (action_params == NULL))
2499 action_data = action_data_get(data, action, type);
2502 case RTE_TABLE_ACTION_FWD:
2503 return fwd_apply(action_data,
2506 case RTE_TABLE_ACTION_LB:
2507 return lb_apply(action_data,
2510 case RTE_TABLE_ACTION_MTR:
2511 return mtr_apply(action_data,
2515 RTE_DIM(action->mp));
2517 case RTE_TABLE_ACTION_TM:
2518 return tm_apply(action_data,
2522 case RTE_TABLE_ACTION_ENCAP:
2523 return encap_apply(action_data,
2526 &action->cfg.common);
2528 case RTE_TABLE_ACTION_NAT:
2529 return nat_apply(action_data,
2531 &action->cfg.common);
2533 case RTE_TABLE_ACTION_TTL:
2534 return ttl_apply(action_data,
2537 case RTE_TABLE_ACTION_STATS:
2538 return stats_apply(action_data,
2541 case RTE_TABLE_ACTION_TIME:
2542 return time_apply(action_data,
2545 case RTE_TABLE_ACTION_SYM_CRYPTO:
2546 return sym_crypto_apply(action_data,
2547 &action->cfg.sym_crypto,
2550 case RTE_TABLE_ACTION_TAG:
2551 return tag_apply(action_data,
2554 case RTE_TABLE_ACTION_DECAP:
2555 return decap_apply(action_data,
2564 rte_table_action_dscp_table_update(struct rte_table_action *action,
2566 struct rte_table_action_dscp_table *table)
2570 /* Check input arguments */
2571 if ((action == NULL) ||
2572 ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2573 (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
2578 for (i = 0; i < RTE_DIM(table->entry); i++) {
2579 struct dscp_table_entry_data *data =
2580 &action->dscp_table.entry[i];
2581 struct rte_table_action_dscp_table_entry *entry =
2583 uint16_t queue_tc_color =
2584 MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
2588 if ((dscp_mask & (1LLU << i)) == 0)
2591 data->color = entry->color;
2592 data->tc = entry->tc_id;
2593 data->queue_tc_color = queue_tc_color;
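/* Each selected DSCP value (bit set in dscp_mask) is mapped to a traffic
 * class, queue and color; queue/tc/color are pre-packed into a 16-bit sched
 * word (see MBUF_SCHED_QUEUE_TC_COLOR) consumed by the MTR/TM packet work.
 */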
2600 rte_table_action_meter_profile_add(struct rte_table_action *action,
2601 uint32_t meter_profile_id,
2602 struct rte_table_action_meter_profile *profile)
2604 struct meter_profile_data *mp_data;
2607 /* Check input arguments */
2608 if ((action == NULL) ||
2609 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2613 if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
2616 mp_data = meter_profile_data_find(action->mp,
2617 RTE_DIM(action->mp),
2622 mp_data = meter_profile_data_find_unused(action->mp,
2623 RTE_DIM(action->mp));
2627 /* Install new profile */
2628 status = rte_meter_trtcm_profile_config(&mp_data->profile,
2633 mp_data->profile_id = meter_profile_id;
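/*
 * Hedged usage sketch for installing a trTCM meter profile; the rate and
 * burst numbers are made-up example values:
 *
 *	struct rte_table_action_meter_profile mp = {
 *		.alg = RTE_TABLE_ACTION_METER_TRTCM,
 *		.trtcm = {
 *			.cir = 1250000,  // committed rate [bytes/s]
 *			.pir = 2500000,  // peak rate [bytes/s]
 *			.cbs = 2048,     // committed burst size [bytes]
 *			.pbs = 4096,     // peak burst size [bytes]
 *		},
 *	};
 *
 *	rte_table_action_meter_profile_add(action, 0, &mp);
 */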
2640 rte_table_action_meter_profile_delete(struct rte_table_action *action,
2641 uint32_t meter_profile_id)
2643 struct meter_profile_data *mp_data;
2645 /* Check input arguments */
2646 if ((action == NULL) ||
2647 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
2650 mp_data = meter_profile_data_find(action->mp,
2651 RTE_DIM(action->mp),
2656 /* Uninstall profile */
2663 rte_table_action_meter_read(struct rte_table_action *action,
2666 struct rte_table_action_mtr_counters *stats,
2669 struct mtr_trtcm_data *mtr_data;
2672 /* Check input arguments */
2673 if ((action == NULL) ||
2674 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2676 (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
2679 mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
2683 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2684 struct rte_table_action_mtr_counters_tc *dst =
2686 struct mtr_trtcm_data *src = &mtr_data[i];
2688 if ((tc_mask & (1 << i)) == 0)
2691 dst->n_packets[e_RTE_METER_GREEN] =
2692 mtr_trtcm_data_stats_get(src, e_RTE_METER_GREEN);
2694 dst->n_packets[e_RTE_METER_YELLOW] =
2695 mtr_trtcm_data_stats_get(src, e_RTE_METER_YELLOW);
2697 dst->n_packets[e_RTE_METER_RED] =
2698 mtr_trtcm_data_stats_get(src, e_RTE_METER_RED);
2700 dst->n_packets_valid = 1;
2701 dst->n_bytes_valid = 0;
2704 stats->tc_mask = tc_mask;
2709 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2710 struct mtr_trtcm_data *src = &mtr_data[i];
2712 if ((tc_mask & (1 << i)) == 0)
2715 mtr_trtcm_data_stats_reset(src, e_RTE_METER_GREEN);
2716 mtr_trtcm_data_stats_reset(src, e_RTE_METER_YELLOW);
2717 mtr_trtcm_data_stats_reset(src, e_RTE_METER_RED);
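/* The per-color packet counters live in the upper bits of the per-color
 * policer words (see mtr_trtcm_data_stats_get/reset), so clearing the stats
 * preserves the policer actions and the meter profile id.
 */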
2725 rte_table_action_ttl_read(struct rte_table_action *action,
2727 struct rte_table_action_ttl_counters *stats,
2730 struct ttl_data *ttl_data;
2732 /* Check input arguments */
2733 if ((action == NULL) ||
2734 ((action->cfg.action_mask &
2735 (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
2739 ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
2743 stats->n_packets = TTL_STATS_READ(ttl_data);
2747 TTL_STATS_RESET(ttl_data);
2753 rte_table_action_stats_read(struct rte_table_action *action,
2755 struct rte_table_action_stats_counters *stats,
2758 struct stats_data *stats_data;
2760 /* Check input arguments */
2761 if ((action == NULL) ||
2762 ((action->cfg.action_mask &
2763 (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
2767 stats_data = action_data_get(data, action,
2768 RTE_TABLE_ACTION_STATS);
2772 stats->n_packets = stats_data->n_packets;
2773 stats->n_bytes = stats_data->n_bytes;
2774 stats->n_packets_valid = 1;
2775 stats->n_bytes_valid = 1;
2780 stats_data->n_packets = 0;
2781 stats_data->n_bytes = 0;
2788 rte_table_action_time_read(struct rte_table_action *action,
2790 uint64_t *timestamp)
2792 struct time_data *time_data;
2794 /* Check input arguments */
2795 if ((action == NULL) ||
2796 ((action->cfg.action_mask &
2797 (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
2799 (timestamp == NULL))
2802 time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
2805 *timestamp = time_data->time;
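/* The TIME action stores the timestamp of the last packet that hit the entry
 * (taken in the action handler, typically via rte_rdtsc()); this call only
 * reads it back.
 */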
2810 struct rte_cryptodev_sym_session *
2811 rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
2814 struct sym_crypto_data *sym_crypto_data;
2816 /* Check input arguments */
2817 if ((action == NULL) ||
2818 ((action->cfg.action_mask &
2819 (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
2823 sym_crypto_data = action_data_get(data, action,
2824 RTE_TABLE_ACTION_SYM_CRYPTO);
2826 return sym_crypto_data->session;
2829 static __rte_always_inline uint64_t
2830 pkt_work(struct rte_mbuf *mbuf,
2831 struct rte_pipeline_table_entry *table_entry,
2833 struct rte_table_action *action,
2834 struct ap_config *cfg)
2836 uint64_t drop_mask = 0;
2838 uint32_t ip_offset = action->cfg.common.ip_offset;
2839 void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
2842 uint16_t total_length;
2844 if (cfg->common.ip_version) {
2845 struct ipv4_hdr *hdr = ip;
2847 dscp = hdr->type_of_service >> 2;
2848 total_length = rte_ntohs(hdr->total_length);
2850 struct ipv6_hdr *hdr = ip;
2852 dscp = (rte_ntohl(hdr->vtc_flow) & 0x0FC00000) >> 22;
2854 rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
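/* DSCP is the top 6 bits of the IPv4 ToS byte (hence the >> 2) and, for IPv6,
 * the top 6 bits of the Traffic Class inside vtc_flow (mask 0x0FC00000,
 * shift 22). For IPv6, total_length adds the fixed 40-byte header to
 * payload_len.
 */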
2857 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2859 action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
2865 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2867 action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
2869 drop_mask |= pkt_work_mtr(mbuf,
2871 &action->dscp_table,
2878 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2880 action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
2884 &action->dscp_table,
2888 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2889 void *data = action_data_get(table_entry,
2891 RTE_TABLE_ACTION_DECAP);
2893 pkt_work_decap(mbuf, data);
2896 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2898 action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
2900 pkt_work_encap(mbuf,
2908 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2910 action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
2912 if (cfg->common.ip_version)
2913 pkt_ipv4_work_nat(ip, data, &cfg->nat);
2915 pkt_ipv6_work_nat(ip, data, &cfg->nat);
2918 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2920 action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
2922 if (cfg->common.ip_version)
2923 drop_mask |= pkt_ipv4_work_ttl(ip, data);
2925 drop_mask |= pkt_ipv6_work_ttl(ip, data);
2928 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2930 action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
2932 pkt_work_stats(data, total_length);
2935 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2937 action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
2939 pkt_work_time(data, time);
2942 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2943 void *data = action_data_get(table_entry, action,
2944 RTE_TABLE_ACTION_SYM_CRYPTO);
2946 drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
2950 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
2951 void *data = action_data_get(table_entry,
2953 RTE_TABLE_ACTION_TAG);
2955 pkt_work_tag(mbuf, data);
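/* pkt_work() applies the enabled actions in a fixed order (LB, MTR, TM,
 * DECAP, ENCAP, NAT, TTL, STATS, TIME, SYM_CRYPTO, TAG) and accumulates the
 * drop decision in drop_mask, which the caller turns into a packet drop.
 */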
2961 static __rte_always_inline uint64_t
2962 pkt4_work(struct rte_mbuf **mbufs,
2963 struct rte_pipeline_table_entry **table_entries,
2965 struct rte_table_action *action,
2966 struct ap_config *cfg)
2968 uint64_t drop_mask0 = 0;
2969 uint64_t drop_mask1 = 0;
2970 uint64_t drop_mask2 = 0;
2971 uint64_t drop_mask3 = 0;
2973 struct rte_mbuf *mbuf0 = mbufs[0];
2974 struct rte_mbuf *mbuf1 = mbufs[1];
2975 struct rte_mbuf *mbuf2 = mbufs[2];
2976 struct rte_mbuf *mbuf3 = mbufs[3];
2978 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
2979 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
2980 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
2981 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
2983 uint32_t ip_offset = action->cfg.common.ip_offset;
2984 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
2985 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
2986 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
2987 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
2989 uint32_t dscp0, dscp1, dscp2, dscp3;
2990 uint16_t total_length0, total_length1, total_length2, total_length3;
2992 if (cfg->common.ip_version) {
2993 struct ipv4_hdr *hdr0 = ip0;
2994 struct ipv4_hdr *hdr1 = ip1;
2995 struct ipv4_hdr *hdr2 = ip2;
2996 struct ipv4_hdr *hdr3 = ip3;
2998 dscp0 = hdr0->type_of_service >> 2;
2999 dscp1 = hdr1->type_of_service >> 2;
3000 dscp2 = hdr2->type_of_service >> 2;
3001 dscp3 = hdr3->type_of_service >> 2;
3003 total_length0 = rte_ntohs(hdr0->total_length);
3004 total_length1 = rte_ntohs(hdr1->total_length);
3005 total_length2 = rte_ntohs(hdr2->total_length);
3006 total_length3 = rte_ntohs(hdr3->total_length);
3008 struct ipv6_hdr *hdr0 = ip0;
3009 struct ipv6_hdr *hdr1 = ip1;
3010 struct ipv6_hdr *hdr2 = ip2;
3011 struct ipv6_hdr *hdr3 = ip3;
3013 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0FC00000) >> 22;
3014 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0FC00000) >> 22;
3015 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0FC00000) >> 22;
3016 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0FC00000) >> 22;
3019 rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
3021 rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
3023 rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
3025 rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
3028 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
3030 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
3032 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
3034 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
3036 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
3055 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
3057 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
3059 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
3061 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
3063 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
3065 drop_mask0 |= pkt_work_mtr(mbuf0,
3067 &action->dscp_table,
3073 drop_mask1 |= pkt_work_mtr(mbuf1,
3075 &action->dscp_table,
3081 drop_mask2 |= pkt_work_mtr(mbuf2,
3083 &action->dscp_table,
3089 drop_mask3 |= pkt_work_mtr(mbuf3,
3091 &action->dscp_table,
3098 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
3100 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
3102 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
3104 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
3106 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
3110 &action->dscp_table,
3115 &action->dscp_table,
3120 &action->dscp_table,
3125 &action->dscp_table,
3129 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
3130 void *data0 = action_data_get(table_entry0,
3132 RTE_TABLE_ACTION_DECAP);
3133 void *data1 = action_data_get(table_entry1,
3135 RTE_TABLE_ACTION_DECAP);
3136 void *data2 = action_data_get(table_entry2,
3138 RTE_TABLE_ACTION_DECAP);
3139 void *data3 = action_data_get(table_entry3,
3141 RTE_TABLE_ACTION_DECAP);
3143 pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
3144 data0, data1, data2, data3);
3147 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
3149 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
3151 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
3153 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
3155 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
3157 pkt_work_encap(mbuf0,
3164 pkt_work_encap(mbuf1,
3171 pkt_work_encap(mbuf2,
3178 pkt_work_encap(mbuf3,
3186 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
3188 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
3190 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
3192 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
3194 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
3196 if (cfg->common.ip_version) {
3197 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
3198 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
3199 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
3200 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
3202 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
3203 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
3204 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
3205 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
3209 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
3211 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
3213 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
3215 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
3217 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
3219 if (cfg->common.ip_version) {
3220 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
3221 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
3222 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
3223 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
3225 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
3226 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
3227 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
3228 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
3232 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
3234 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
3236 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
3238 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
3240 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
3242 pkt_work_stats(data0, total_length0);
3243 pkt_work_stats(data1, total_length1);
3244 pkt_work_stats(data2, total_length2);
3245 pkt_work_stats(data3, total_length3);
3248 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
3250 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
3252 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
3254 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
3256 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
3258 pkt_work_time(data0, time);
3259 pkt_work_time(data1, time);
3260 pkt_work_time(data2, time);
3261 pkt_work_time(data3, time);
3264 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
3265 void *data0 = action_data_get(table_entry0, action,
3266 RTE_TABLE_ACTION_SYM_CRYPTO);
3267 void *data1 = action_data_get(table_entry1, action,
3268 RTE_TABLE_ACTION_SYM_CRYPTO);
3269 void *data2 = action_data_get(table_entry2, action,
3270 RTE_TABLE_ACTION_SYM_CRYPTO);
3271 void *data3 = action_data_get(table_entry3, action,
3272 RTE_TABLE_ACTION_SYM_CRYPTO);
3274 drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
3276 drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
3278 drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
3280 drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
3284 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3285 void *data0 = action_data_get(table_entry0,
3287 RTE_TABLE_ACTION_TAG);
3288 void *data1 = action_data_get(table_entry1,
3290 RTE_TABLE_ACTION_TAG);
3291 void *data2 = action_data_get(table_entry2,
3293 RTE_TABLE_ACTION_TAG);
3294 void *data3 = action_data_get(table_entry3,
3296 RTE_TABLE_ACTION_TAG);
3298 pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
3299 data0, data1, data2, data3);
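/* pkt4_work() is the 4-packet unrolled variant of pkt_work(): the same
 * actions in the same order, applied to four packets side by side.
 */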
3308 static __rte_always_inline int
3309 ah(struct rte_pipeline *p,
3310 struct rte_mbuf **pkts,
3312 struct rte_pipeline_table_entry **entries,
3313 struct rte_table_action *action,
3314 struct ap_config *cfg)
3316 uint64_t pkts_drop_mask = 0;
3319 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
3320 (1LLU << RTE_TABLE_ACTION_TIME)))
3323 if ((pkts_mask & (pkts_mask + 1)) == 0) {
3324 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
3327 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3330 drop_mask = pkt4_work(&pkts[i],
3336 pkts_drop_mask |= drop_mask << i;
3339 for ( ; i < n_pkts; i++) {
3342 drop_mask = pkt_work(pkts[i],
3348 pkts_drop_mask |= drop_mask << i;
3351 for ( ; pkts_mask; ) {
3352 uint32_t pos = __builtin_ctzll(pkts_mask);
3353 uint64_t pkt_mask = 1LLU << pos;
3356 drop_mask = pkt_work(pkts[pos],
3362 pkts_mask &= ~pkt_mask;
3363 pkts_drop_mask |= drop_mask << pos;
3366 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
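/* The handler takes the unrolled path only when pkts_mask is a contiguous run
 * of ones starting at bit 0 (i.e. (mask & (mask + 1)) == 0); otherwise it
 * visits each set bit with __builtin_ctzll(). Packets whose drop bit is set
 * are handed back to the pipeline via rte_pipeline_ah_packet_drop().
 */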
3372 ah_default(struct rte_pipeline *p,
3373 struct rte_mbuf **pkts,
3375 struct rte_pipeline_table_entry **entries,
3378 struct rte_table_action *action = arg;
3388 static rte_pipeline_table_action_handler_hit
3389 ah_selector(struct rte_table_action *action)
3391 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
3398 rte_table_action_table_params_get(struct rte_table_action *action,
3399 struct rte_pipeline_table_params *params)
3401 rte_pipeline_table_action_handler_hit f_action_hit;
3402 uint32_t total_size;
3404 /* Check input arguments */
3405 if ((action == NULL) ||
3409 f_action_hit = ah_selector(action);
3410 total_size = rte_align32pow2(action->data.total_size);
3412 /* Fill in params */
3413 params->f_action_hit = f_action_hit;
3414 params->f_action_miss = NULL;
3415 params->arg_ah = (f_action_hit) ? action : NULL;
3416 params->action_data_size = total_size -
3417 sizeof(struct rte_pipeline_table_entry);
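/*
 * Hedged usage sketch for wiring an action object into a pipeline table; the
 * table ops, creation argument and pipeline handle are placeholders:
 *
 *	struct rte_pipeline_table_params tp = {
 *		.ops = &rte_table_lpm_ops,   // any rte_table ops
 *		.arg_create = &lpm_params,
 *	};
 *	uint32_t table_id;
 *
 *	rte_table_action_table_params_get(action, &tp); // fills handler + action data size
 *	rte_pipeline_table_create(p, &tp, &table_id);
 */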
3423 rte_table_action_free(struct rte_table_action *action)