1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
17 #include <rte_cryptodev.h>
18 #include <rte_cryptodev_pmd.h>
20 #include "rte_table_action.h"
/* BSD-style byte-order aliases for the DPDK CPU <-> big-endian converters. */
22 #define rte_htons rte_cpu_to_be_16
23 #define rte_htonl rte_cpu_to_be_32
25 #define rte_ntohs rte_be_to_cpu_16
26 #define rte_ntohl rte_be_to_cpu_32
29 * RTE_TABLE_ACTION_FWD
/* The FWD action stores its data directly in the pipeline table entry. */
31 #define fwd_data rte_pipeline_table_entry

/*
 * Copy the forwarding action into the table entry; p->id is interpreted
 * as a port id or a table id depending on p->action.
 */
34 fwd_apply(struct fwd_data *data,
35 struct rte_table_action_fwd_params *p)
37 data->action = p->action;
39 if (p->action == RTE_PIPELINE_ACTION_PORT)
40 data->port_id = p->id;
42 if (p->action == RTE_PIPELINE_ACTION_TABLE)
43 data->table_id = p->id;
/*
 * Validate a load-balance action configuration: key size must be within
 * [MIN, MAX] and a power of 2, and a hash callback must be supplied.
 */
52 lb_cfg_check(struct rte_table_action_lb_config *cfg)
55 (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
56 (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
57 (!rte_is_power_of_2(cfg->key_size)) ||
58 (cfg->f_hash == NULL))
/* Per-entry LB data: output value table indexed by hash of the packet key. */
65 uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
66 } __attribute__((__packed__));

/* Copy the user-provided LB output table into the action entry. */
69 lb_apply(struct lb_data *data,
70 struct rte_table_action_lb_params *p)
72 memcpy(data->out, p->out, sizeof(data->out));
/*
 * Per-packet LB work: hash the key read from packet metadata (key_offset),
 * mask the digest into the LB table, and write the selected value to the
 * metadata location given by out_offset.
 */
77 static __rte_always_inline void
78 pkt_work_lb(struct rte_mbuf *mbuf,
80 struct rte_table_action_lb_config *cfg)
82 uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
83 uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
87 digest = cfg->f_hash(pkt_key,
/* Table size is a power of 2, so the mask selects a valid slot. */
91 pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
92 out_val = data->out[pos];
98 * RTE_TABLE_ACTION_MTR
/*
 * Validate meter configuration: srTCM is rejected (only trTCM supported),
 * n_tc must be 1 or 4, and per-TC byte statistics are not supported.
 */
101 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
103 if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
104 ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
105 (mtr->n_bytes_enabled != 0))
/*
 * Per-TC meter run-time data. The low 8 bits of each stats[] word are
 * reused as control fields (meter profile id, per-color policer actions);
 * the packet counters live in the upper 56 bits. See the GET/SET/INC
 * helpers below for the exact bit layout.
 */
110 struct mtr_trtcm_data {
111 struct rte_meter_trtcm trtcm;
112 uint64_t stats[e_RTE_METER_COLORS];
113 } __attribute__((__packed__));
/* Meter profile id: 5 bits stored at bits [3..7] of stats[GREEN]. */
115 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
116 (((data)->stats[e_RTE_METER_GREEN] & 0xF8LLU) >> 3)

/* Store the (5-bit) meter profile id; profile_id is truncated mod 32. */
119 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
122 data->stats[e_RTE_METER_GREEN] &= ~0xF8LLU;
123 data->stats[e_RTE_METER_GREEN] |= (profile_id % 32) << 3;
/* Policer action encoding in stats[color] bits [0..2]:
 * bit 2 = drop flag, bits [0..1] = output (recolor) value.
 */
126 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
127 (((data)->stats[(color)] & 4LLU) >> 2)
129 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
130 ((enum rte_meter_color)((data)->stats[(color)] & 3LLU))

/*
 * Record the policer action for a given meter output color.
 * NOTE(review): in the non-drop branch the stored recolor value is
 * `color` (the meter output color itself), while `action` is only
 * consulted for the DROP test — so a "recolor to X" action appears to
 * be discarded. Presumably `action & 3LLU` was intended; confirm
 * against the rte_table_action_policer enum semantics.
 */
133 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
134 enum rte_meter_color color,
135 enum rte_table_action_policer action)
137 if (action == RTE_TABLE_ACTION_POLICER_DROP) {
138 data->stats[color] |= 4LLU;
140 data->stats[color] &= ~7LLU;
141 data->stats[color] |= color & 3LLU;
/* Packet counter for a given color: stored in stats[color] bits [8..63]. */
146 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
147 enum rte_meter_color color)
149 return data->stats[color] >> 8;

/* Reset the counter while preserving the control bits [0..7]. */
153 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
154 enum rte_meter_color color)
156 data->stats[color] &= 0xFFLU;

/* Increment the packet counter (counter field starts at bit 8). */
159 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
160 ((data)->stats[(color)] += (1LLU << 8))
/* Size of the per-entry meter data: one mtr_trtcm_data per traffic class. */
163 mtr_data_size(struct rte_table_action_mtr_config *mtr)
165 return mtr->n_tc * sizeof(struct mtr_trtcm_data);
/* Per-DSCP entry: input color (and TC mapping) used by meter/TM actions. */
168 struct dscp_table_entry_data {
169 enum rte_meter_color color;

/* DSCP translation table: one entry per 6-bit DSCP value (64 entries). */
174 struct dscp_table_data {
175 struct dscp_table_entry_data entry[64];

/* Meter profile table slot: converted trTCM profile plus bookkeeping. */
178 struct meter_profile_data {
179 struct rte_meter_trtcm_profile profile;
/* Linear search for a valid slot holding the given meter profile id. */
184 static struct meter_profile_data *
185 meter_profile_data_find(struct meter_profile_data *mp,
191 for (i = 0; i < mp_size; i++) {
192 struct meter_profile_data *mp_data = &mp[i];
194 if (mp_data->valid && (mp_data->profile_id == profile_id))

/* Linear search for the first unused slot in the profile table. */
201 static struct meter_profile_data *
202 meter_profile_data_find_unused(struct meter_profile_data *mp,
207 for (i = 0; i < mp_size; i++) {
208 struct meter_profile_data *mp_data = &mp[i];
/*
 * Validate mtr_apply() parameters: tc_mask must fit the configured number
 * of TCs and every selected TC must reference a registered meter profile.
 */
218 mtr_apply_check(struct rte_table_action_mtr_params *p,
219 struct rte_table_action_mtr_config *cfg,
220 struct meter_profile_data *mp,
225 if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
228 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
229 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
230 struct meter_profile_data *mp_data;
/* Skip traffic classes not selected by the mask. */
232 if ((p->tc_mask & (1LLU << i)) == 0)
235 mp_data = meter_profile_data_find(mp,
237 p_tc->meter_profile_id);
/*
 * Populate the per-entry meter data: for each TC selected by tc_mask,
 * configure the trTCM run-time context from the referenced profile and
 * record the profile id and the per-color policer actions.
 */
246 mtr_apply(struct mtr_trtcm_data *data,
247 struct rte_table_action_mtr_params *p,
248 struct rte_table_action_mtr_config *cfg,
249 struct meter_profile_data *mp,
255 /* Check input arguments */
256 status = mtr_apply_check(p, cfg, mp, mp_size);
261 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
262 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
263 struct mtr_trtcm_data *data_tc = &data[i];
264 struct meter_profile_data *mp_data;
266 if ((p->tc_mask & (1LLU << i)) == 0)
/* Profile existence was already verified by mtr_apply_check(). */
270 mp_data = meter_profile_data_find(mp,
272 p_tc->meter_profile_id);
/* Meter object */
276 memset(data_tc, 0, sizeof(*data_tc));
279 status = rte_meter_trtcm_config(&data_tc->trtcm,
/* Meter profile id */
285 mtr_trtcm_data_meter_profile_id_set(data_tc,
288 /* Policer actions */
289 mtr_trtcm_data_policer_action_set(data_tc,
291 p_tc->policer[e_RTE_METER_GREEN]);
293 mtr_trtcm_data_policer_action_set(data_tc,
295 p_tc->policer[e_RTE_METER_YELLOW]);
297 mtr_trtcm_data_policer_action_set(data_tc,
299 p_tc->policer[e_RTE_METER_RED]);
/*
 * Per-packet metering: run the color-aware trTCM check seeded with the
 * DSCP-derived input color, update the per-color packet counter, then
 * apply the policer (drop mask and recolor) for the meter output color.
 */
305 static __rte_always_inline uint64_t
306 pkt_work_mtr(struct rte_mbuf *mbuf,
307 struct mtr_trtcm_data *data,
308 struct dscp_table_data *dscp_table,
309 struct meter_profile_data *mp,
312 uint16_t total_length)
315 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
316 enum rte_meter_color color_in, color_meter, color_policer;
320 color_in = dscp_entry->color;
322 mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
/* Meter */
325 color_meter = rte_meter_trtcm_color_aware_check(
/* Stats */
333 MTR_TRTCM_DATA_STATS_INC(data, color_meter);
/* Police */
336 drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
338 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
339 rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);
345 * RTE_TABLE_ACTION_TM
/*
 * Validate TM configuration: subport/pipe counts must be non-zero powers
 * of 2, with at most UINT16_MAX subports per port.
 */
348 tm_cfg_check(struct rte_table_action_tm_config *tm)
350 if ((tm->n_subports_per_port == 0) ||
351 (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
352 (tm->n_subports_per_port > UINT16_MAX) ||
353 (tm->n_pipes_per_subport == 0) ||
354 (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))

363 } __attribute__((__packed__));
/* Validate tm_apply() parameters against the configured TM dimensions. */
366 tm_apply_check(struct rte_table_action_tm_params *p,
367 struct rte_table_action_tm_config *cfg)
369 if ((p->subport_id >= cfg->n_subports_per_port) ||
370 (p->pipe_id >= cfg->n_pipes_per_subport))
/*
 * Precompute the scheduler queue id base: subport and pipe ids packed
 * above the 4 low bits (TC + TC queue) that pkt_work_tm() fills in per
 * packet from the DSCP table.
 */
377 tm_apply(struct tm_data *data,
378 struct rte_table_action_tm_params *p,
379 struct rte_table_action_tm_config *cfg)
383 /* Check input arguments */
384 status = tm_apply_check(p, cfg);
389 data->queue_id = p->subport_id <<
390 (__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
/*
 * Per-packet TM work: complete the precomputed queue id with the
 * DSCP-mapped TC (bits [2..3]) and TC queue (bits [0..1]), then stamp
 * queue/TC/color into the mbuf scheduler field.
 */
396 static __rte_always_inline void
397 pkt_work_tm(struct rte_mbuf *mbuf,
398 struct tm_data *data,
399 struct dscp_table_data *dscp_table,
402 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
403 uint32_t queue_id = data->queue_id |
404 (dscp_entry->tc << 2) |
405 dscp_entry->tc_queue;
406 rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
407 (uint8_t)dscp_entry->color);
411 * RTE_TABLE_ACTION_ENCAP
/* Return whether the encap type is one of the supported variants. */
414 encap_valid(enum rte_table_action_encap_type encap)
417 case RTE_TABLE_ACTION_ENCAP_ETHER:
418 case RTE_TABLE_ACTION_ENCAP_VLAN:
419 case RTE_TABLE_ACTION_ENCAP_QINQ:
420 case RTE_TABLE_ACTION_ENCAP_MPLS:
421 case RTE_TABLE_ACTION_ENCAP_PPPOE:
422 case RTE_TABLE_ACTION_ENCAP_VXLAN:
423 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
/* Exactly one encap type must be selected in the configuration mask. */
431 encap_cfg_check(struct rte_table_action_encap_config *encap)
433 if ((encap->encap_mask == 0) ||
434 (__builtin_popcountll(encap->encap_mask) != 1))
/* Pre-built header templates, one packed struct per encap variant. */
440 struct encap_ether_data {
441 struct ether_hdr ether;
442 } __attribute__((__packed__));

/* Build a VLAN TCI from pcp/dei/vid fields. */
444 #define VLAN(pcp, dei, vid) \
445 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
446 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
447 (((uint64_t)(vid)) & 0xFFFLLU)) \

449 struct encap_vlan_data {
450 struct ether_hdr ether;
451 struct vlan_hdr vlan;
452 } __attribute__((__packed__));

454 struct encap_qinq_data {
455 struct ether_hdr ether;
456 struct vlan_hdr svlan;
457 struct vlan_hdr cvlan;
458 } __attribute__((__packed__));

460 #define ETHER_TYPE_MPLS_UNICAST 0x8847
462 #define ETHER_TYPE_MPLS_MULTICAST 0x8848

/* Build an MPLS label stack entry from label/tc/bottom-of-stack/ttl. */
464 #define MPLS(label, tc, s, ttl) \
465 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
466 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
467 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
468 (((uint64_t)(ttl)) & 0xFFLLU)))

470 struct encap_mpls_data {
471 struct ether_hdr ether;
472 uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
474 } __attribute__((__packed__));

476 #define ETHER_TYPE_PPPOE_SESSION 0x8864
478 #define PPP_PROTOCOL_IP 0x0021

/* Combined PPPoE session + PPP header. */
480 struct pppoe_ppp_hdr {
481 uint16_t ver_type_code;
485 } __attribute__((__packed__));

487 struct encap_pppoe_data {
488 struct ether_hdr ether;
489 struct pppoe_ppp_hdr pppoe_ppp;
490 } __attribute__((__packed__));

492 #define IP_PROTO_UDP 17

/* VXLAN-over-UDP templates, in IPv4/IPv6 x plain/VLAN combinations. */
494 struct encap_vxlan_ipv4_data {
495 struct ether_hdr ether;
496 struct ipv4_hdr ipv4;
498 struct vxlan_hdr vxlan;
499 } __attribute__((__packed__));

501 struct encap_vxlan_ipv4_vlan_data {
502 struct ether_hdr ether;
503 struct vlan_hdr vlan;
504 struct ipv4_hdr ipv4;
506 struct vxlan_hdr vxlan;
507 } __attribute__((__packed__));

509 struct encap_vxlan_ipv6_data {
510 struct ether_hdr ether;
511 struct ipv6_hdr ipv6;
513 struct vxlan_hdr vxlan;
514 } __attribute__((__packed__));

516 struct encap_vxlan_ipv6_vlan_data {
517 struct ether_hdr ether;
518 struct vlan_hdr vlan;
519 struct ipv6_hdr ipv6;
521 struct vxlan_hdr vxlan;
522 } __attribute__((__packed__));

524 struct encap_qinq_pppoe_data {
525 struct ether_hdr ether;
526 struct vlan_hdr svlan;
527 struct vlan_hdr cvlan;
528 struct pppoe_ppp_hdr pppoe_ppp;
529 } __attribute__((__packed__));
/*
 * Return the header template size for the configured encap type; for
 * VXLAN the size depends on the IPv4/IPv6 and VLAN sub-options.
 */
532 encap_data_size(struct rte_table_action_encap_config *encap)
534 switch (encap->encap_mask) {
535 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
536 return sizeof(struct encap_ether_data);
538 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
539 return sizeof(struct encap_vlan_data);
541 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
542 return sizeof(struct encap_qinq_data);
544 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
545 return sizeof(struct encap_mpls_data);
547 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
548 return sizeof(struct encap_pppoe_data);
550 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
/* ip_version != 0 selects IPv4, otherwise IPv6. */
551 if (encap->vxlan.ip_version)
552 if (encap->vxlan.vlan)
553 return sizeof(struct encap_vxlan_ipv4_vlan_data);
555 return sizeof(struct encap_vxlan_ipv4_data);
557 if (encap->vxlan.vlan)
558 return sizeof(struct encap_vxlan_ipv6_vlan_data);
560 return sizeof(struct encap_vxlan_ipv6_data);
562 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
563 return sizeof(struct encap_qinq_pppoe_data);
/*
 * Validate encap_apply() parameters: the requested type must be valid,
 * enabled in the configuration mask, and (for MPLS) carry a label count
 * in [1, RTE_TABLE_ACTION_MPLS_LABELS_MAX].
 */
571 encap_apply_check(struct rte_table_action_encap_params *p,
572 struct rte_table_action_encap_config *cfg)
574 if ((encap_valid(p->type) == 0) ||
575 ((cfg->encap_mask & (1LLU << p->type)) == 0))
579 case RTE_TABLE_ACTION_ENCAP_ETHER:
582 case RTE_TABLE_ACTION_ENCAP_VLAN:
585 case RTE_TABLE_ACTION_ENCAP_QINQ:
588 case RTE_TABLE_ACTION_ENCAP_MPLS:
589 if ((p->mpls.mpls_count == 0) ||
590 (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
595 case RTE_TABLE_ACTION_ENCAP_PPPOE:
598 case RTE_TABLE_ACTION_ENCAP_VXLAN:
601 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
/*
 * Build a plain Ethernet header template; the ethertype is IPv4 or IPv6
 * depending on the common configuration.
 */
610 encap_ether_apply(void *data,
611 struct rte_table_action_encap_params *p,
612 struct rte_table_action_common_config *common_cfg)
614 struct encap_ether_data *d = data;
615 uint16_t ethertype = (common_cfg->ip_version) ?
/* Ethernet */
620 ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
621 ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
622 d->ether.ether_type = rte_htons(ethertype);
/*
 * Build an Ethernet + single-VLAN header template; the inner ethertype
 * is IPv4 or IPv6 depending on the common configuration.
 */
628 encap_vlan_apply(void *data,
629 struct rte_table_action_encap_params *p,
630 struct rte_table_action_common_config *common_cfg)
632 struct encap_vlan_data *d = data;
633 uint16_t ethertype = (common_cfg->ip_version) ?
/* Ethernet */
638 ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
639 ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
640 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
/* VLAN */
643 d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
646 d->vlan.eth_proto = rte_htons(ethertype);
/*
 * Build an Ethernet + QinQ (service + customer VLAN) header template;
 * the innermost ethertype is IPv4 or IPv6 per the common configuration.
 */
652 encap_qinq_apply(void *data,
653 struct rte_table_action_encap_params *p,
654 struct rte_table_action_common_config *common_cfg)
656 struct encap_qinq_data *d = data;
657 uint16_t ethertype = (common_cfg->ip_version) ?
/* Ethernet */
662 ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
663 ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
664 d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
/* SVLAN */
667 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
670 d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
/* CVLAN */
673 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
676 d->cvlan.eth_proto = rte_htons(ethertype);
/*
 * Build an Ethernet + QinQ + PPPoE/PPP session header template.
 * NOTE(review): the Ethernet and S/C-VLAN fields are read from p->qinq
 * while the PPPoE session id is read from p->qinq_pppoe; these are
 * distinct members of the params union and only alias by layout.
 * Presumably all fields should come from p->qinq_pppoe — confirm
 * against the rte_table_action_encap_params API.
 */
682 encap_qinq_pppoe_apply(void *data,
683 struct rte_table_action_encap_params *p)
685 struct encap_qinq_pppoe_data *d = data;
/* Ethernet */
688 ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
689 ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
690 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
/* SVLAN */
693 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
696 d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
/* CVLAN */
699 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
702 d->cvlan.eth_proto = rte_htons(ETHER_TYPE_PPPOE_SESSION);
/* PPPoE version 1, type 1, code 0x00 (session data). */
705 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
706 d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id);
707 d->pppoe_ppp.length = 0; /* not pre-computed */
708 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/*
 * Build an Ethernet + MPLS label-stack header template; the ethertype
 * distinguishes unicast from multicast MPLS.
 */
714 encap_mpls_apply(void *data,
715 struct rte_table_action_encap_params *p)
717 struct encap_mpls_data *d = data;
718 uint16_t ethertype = (p->mpls.unicast) ?
719 ETHER_TYPE_MPLS_UNICAST :
720 ETHER_TYPE_MPLS_MULTICAST;
/* Ethernet */
724 ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
725 ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
726 d->ether.ether_type = rte_htons(ethertype);
/* All labels but the last, then the last (bottom-of-stack) entry. */
729 for (i = 0; i < p->mpls.mpls_count - 1; i++)
730 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
733 p->mpls.mpls[i].ttl));
735 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
738 p->mpls.mpls[i].ttl));
740 d->mpls_count = p->mpls.mpls_count;
/* Build an Ethernet + PPPoE/PPP session header template. */
745 encap_pppoe_apply(void *data,
746 struct rte_table_action_encap_params *p)
748 struct encap_pppoe_data *d = data;
/* Ethernet */
751 ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
752 ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
753 d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
/* PPPoE version 1, type 1, code 0x00 (session data). */
756 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
757 d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
758 d->pppoe_ppp.length = 0; /* not pre-computed */
759 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/*
 * Build a VXLAN-over-UDP header template in one of four layouts
 * (IPv4/IPv6 x plain/VLAN). Length and checksum fields that depend on
 * the payload are left zero and filled per packet; the IPv4 header
 * checksum is pre-computed for total_length == 0 and updated
 * incrementally at run time.
 */
765 encap_vxlan_apply(void *data,
766 struct rte_table_action_encap_params *p,
767 struct rte_table_action_encap_config *cfg)
/* Range-check VNI, DSCP, flow label and VLAN id fields. */
769 if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
770 (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
771 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
772 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
773 (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))

776 if (cfg->vxlan.ip_version)
777 if (cfg->vxlan.vlan) {
778 struct encap_vxlan_ipv4_vlan_data *d = data;
/* Ethernet */
781 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
782 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
783 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
/* VLAN */
786 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
789 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);
/* IPv4: version 4, IHL 5, checksum computed over total_length == 0. */
792 d->ipv4.version_ihl = 0x45;
793 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
794 d->ipv4.total_length = 0; /* not pre-computed */
795 d->ipv4.packet_id = 0;
796 d->ipv4.fragment_offset = 0;
797 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
798 d->ipv4.next_proto_id = IP_PROTO_UDP;
799 d->ipv4.hdr_checksum = 0;
800 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
801 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
803 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
/* UDP */
806 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
807 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
808 d->udp.dgram_len = 0; /* not pre-computed */
809 d->udp.dgram_cksum = 0;
/* VXLAN: I flag set, 24-bit VNI in the upper bytes. */
812 d->vxlan.vx_flags = rte_htonl(0x08000000);
813 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

817 struct encap_vxlan_ipv4_data *d = data;
/* Ethernet */
820 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
821 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
822 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);
/* IPv4 */
825 d->ipv4.version_ihl = 0x45;
826 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
827 d->ipv4.total_length = 0; /* not pre-computed */
828 d->ipv4.packet_id = 0;
829 d->ipv4.fragment_offset = 0;
830 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
831 d->ipv4.next_proto_id = IP_PROTO_UDP;
832 d->ipv4.hdr_checksum = 0;
833 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
834 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
836 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
/* UDP */
839 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
840 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
841 d->udp.dgram_len = 0; /* not pre-computed */
842 d->udp.dgram_cksum = 0;
/* VXLAN */
845 d->vxlan.vx_flags = rte_htonl(0x08000000);
846 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

851 if (cfg->vxlan.vlan) {
852 struct encap_vxlan_ipv6_vlan_data *d = data;
/* Ethernet */
855 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
856 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
857 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
/* VLAN */
860 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
863 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);
/* IPv6: version 6, DSCP in traffic class, 20-bit flow label. */
866 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
867 (p->vxlan.ipv6.dscp << 22) |
868 p->vxlan.ipv6.flow_label);
869 d->ipv6.payload_len = 0; /* not pre-computed */
870 d->ipv6.proto = IP_PROTO_UDP;
871 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
872 memcpy(d->ipv6.src_addr,
874 sizeof(p->vxlan.ipv6.sa));
875 memcpy(d->ipv6.dst_addr,
877 sizeof(p->vxlan.ipv6.da));
/* UDP */
880 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
881 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
882 d->udp.dgram_len = 0; /* not pre-computed */
883 d->udp.dgram_cksum = 0;
/* VXLAN */
886 d->vxlan.vx_flags = rte_htonl(0x08000000);
887 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

891 struct encap_vxlan_ipv6_data *d = data;
/* Ethernet */
894 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
895 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
896 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);
/* IPv6 */
899 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
900 (p->vxlan.ipv6.dscp << 22) |
901 p->vxlan.ipv6.flow_label);
902 d->ipv6.payload_len = 0; /* not pre-computed */
903 d->ipv6.proto = IP_PROTO_UDP;
904 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
905 memcpy(d->ipv6.src_addr,
907 sizeof(p->vxlan.ipv6.sa));
908 memcpy(d->ipv6.dst_addr,
910 sizeof(p->vxlan.ipv6.da));
/* UDP */
913 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
914 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
915 d->udp.dgram_len = 0; /* not pre-computed */
916 d->udp.dgram_cksum = 0;
/* VXLAN */
919 d->vxlan.vx_flags = rte_htonl(0x08000000);
920 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/*
 * Validate the encap parameters and dispatch to the per-type template
 * builder for the requested encap type.
 */
927 encap_apply(void *data,
928 struct rte_table_action_encap_params *p,
929 struct rte_table_action_encap_config *cfg,
930 struct rte_table_action_common_config *common_cfg)
934 /* Check input arguments */
935 status = encap_apply_check(p, cfg);
940 case RTE_TABLE_ACTION_ENCAP_ETHER:
941 return encap_ether_apply(data, p, common_cfg);
943 case RTE_TABLE_ACTION_ENCAP_VLAN:
944 return encap_vlan_apply(data, p, common_cfg);
946 case RTE_TABLE_ACTION_ENCAP_QINQ:
947 return encap_qinq_apply(data, p, common_cfg);
949 case RTE_TABLE_ACTION_ENCAP_MPLS:
950 return encap_mpls_apply(data, p);
952 case RTE_TABLE_ACTION_ENCAP_PPPOE:
953 return encap_pppoe_apply(data, p);
955 case RTE_TABLE_ACTION_ENCAP_VXLAN:
956 return encap_vxlan_apply(data, p, cfg);
958 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
959 return encap_qinq_pppoe_apply(data, p);
/*
 * Incrementally update a pre-computed IPv4 header checksum (computed with
 * total_length == 0) for the actual total_length, using one's-complement
 * arithmetic in the style of RFC 1624.
 */
966 static __rte_always_inline uint16_t
967 encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
968 uint16_t total_length)
973 cksum1 = ~cksum1 & 0xFFFF;
975 /* Add total length (one's complement logic) */
976 cksum1 += total_length;
/* Fold carries twice so the result fits in 16 bits. */
977 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
978 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
980 return (uint16_t)(~cksum1);
/* Prepend n bytes of header: move dst back by n bytes and copy src there. */
983 static __rte_always_inline void *
984 encap(void *dst, const void *src, size_t n)
986 dst = ((uint8_t *) dst) - n;
987 return rte_memcpy(dst, src, n);
/*
 * Prepend the VXLAN/UDP/IPv4/Ethernet template in front of the packet,
 * then patch the length-dependent fields (IPv4 total length + checksum,
 * UDP datagram length) and adjust the mbuf offsets/lengths.
 */
990 static __rte_always_inline void
991 pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
992 struct encap_vxlan_ipv4_data *vxlan_tbl,
993 struct rte_table_action_encap_config *cfg)
995 uint32_t ether_offset = cfg->vxlan.data_offset;
996 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
997 struct encap_vxlan_ipv4_data *vxlan_pkt;
998 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
1000 ether_length = (uint16_t)mbuf->pkt_len;
1001 ipv4_total_length = ether_length +
1002 (sizeof(struct vxlan_hdr) +
1003 sizeof(struct udp_hdr) +
1004 sizeof(struct ipv4_hdr));
1005 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
1006 rte_htons(ipv4_total_length));
1007 udp_length = ether_length +
1008 (sizeof(struct vxlan_hdr) +
1009 sizeof(struct udp_hdr));
/* encap() copies the template immediately before the current frame. */
1011 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1012 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1013 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1014 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1016 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1017 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * Same as pkt_work_encap_vxlan_ipv4() but for the VLAN-tagged template:
 * prepend VXLAN/UDP/IPv4/VLAN/Ethernet, patch the length-dependent
 * fields, adjust the mbuf offsets/lengths.
 */
1020 static __rte_always_inline void
1021 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
1022 struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
1023 struct rte_table_action_encap_config *cfg)
1025 uint32_t ether_offset = cfg->vxlan.data_offset;
1026 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1027 struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
1028 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
1030 ether_length = (uint16_t)mbuf->pkt_len;
1031 ipv4_total_length = ether_length +
1032 (sizeof(struct vxlan_hdr) +
1033 sizeof(struct udp_hdr) +
1034 sizeof(struct ipv4_hdr));
1035 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
1036 rte_htons(ipv4_total_length));
1037 udp_length = ether_length +
1038 (sizeof(struct vxlan_hdr) +
1039 sizeof(struct udp_hdr));
1041 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1042 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1043 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1044 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1046 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1047 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * IPv6 variant: prepend VXLAN/UDP/IPv6/Ethernet and patch the IPv6
 * payload length and UDP datagram length (no IP header checksum in v6).
 */
1050 static __rte_always_inline void
1051 pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
1052 struct encap_vxlan_ipv6_data *vxlan_tbl,
1053 struct rte_table_action_encap_config *cfg)
1055 uint32_t ether_offset = cfg->vxlan.data_offset;
1056 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1057 struct encap_vxlan_ipv6_data *vxlan_pkt;
1058 uint16_t ether_length, ipv6_payload_length, udp_length;
1060 ether_length = (uint16_t)mbuf->pkt_len;
1061 ipv6_payload_length = ether_length +
1062 (sizeof(struct vxlan_hdr) +
1063 sizeof(struct udp_hdr));
1064 udp_length = ether_length +
1065 (sizeof(struct vxlan_hdr) +
1066 sizeof(struct udp_hdr));
1068 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1069 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1070 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1072 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1073 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * IPv6 + VLAN variant: prepend VXLAN/UDP/IPv6/VLAN/Ethernet and patch
 * the IPv6 payload length and UDP datagram length.
 */
1076 static __rte_always_inline void
1077 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
1078 struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
1079 struct rte_table_action_encap_config *cfg)
1081 uint32_t ether_offset = cfg->vxlan.data_offset;
1082 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1083 struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
1084 uint16_t ether_length, ipv6_payload_length, udp_length;
1086 ether_length = (uint16_t)mbuf->pkt_len;
1087 ipv6_payload_length = ether_length +
1088 (sizeof(struct vxlan_hdr) +
1089 sizeof(struct udp_hdr));
1090 udp_length = ether_length +
1091 (sizeof(struct vxlan_hdr) +
1092 sizeof(struct udp_hdr));
1094 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1095 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1096 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1098 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1099 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * Per-packet encap dispatch: prepend the pre-built header for the
 * configured encap type in front of the IP payload and adjust the mbuf
 * data offset and lengths accordingly. PPPoE variants additionally fill
 * in the PPP payload length (+2 for the PPP protocol field); VXLAN
 * variants delegate to the dedicated helpers above.
 */
1102 static __rte_always_inline void
1103 pkt_work_encap(struct rte_mbuf *mbuf,
1105 struct rte_table_action_encap_config *cfg,
1107 uint16_t total_length,
1110 switch (cfg->encap_mask) {
1111 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
1112 encap(ip, data, sizeof(struct encap_ether_data));
1113 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1114 sizeof(struct encap_ether_data));
1115 mbuf->pkt_len = mbuf->data_len = total_length +
1116 sizeof(struct encap_ether_data);
1119 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
1120 encap(ip, data, sizeof(struct encap_vlan_data));
1121 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1122 sizeof(struct encap_vlan_data));
1123 mbuf->pkt_len = mbuf->data_len = total_length +
1124 sizeof(struct encap_vlan_data);
1127 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
1128 encap(ip, data, sizeof(struct encap_qinq_data));
1129 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1130 sizeof(struct encap_qinq_data));
1131 mbuf->pkt_len = mbuf->data_len = total_length +
1132 sizeof(struct encap_qinq_data);
1135 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
1137 struct encap_mpls_data *mpls = data;
/* Header size depends on the number of MPLS labels (4 bytes each). */
1138 size_t size = sizeof(struct ether_hdr) +
1139 mpls->mpls_count * 4;
1141 encap(ip, data, size);
1142 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
1143 mbuf->pkt_len = mbuf->data_len = total_length + size;
1147 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
1149 struct encap_pppoe_data *pppoe =
1150 encap(ip, data, sizeof(struct encap_pppoe_data));
/* PPPoE length = IP payload + 2-byte PPP protocol field. */
1151 pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1152 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1153 sizeof(struct encap_pppoe_data));
1154 mbuf->pkt_len = mbuf->data_len = total_length +
1155 sizeof(struct encap_pppoe_data);
1159 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
1161 struct encap_qinq_pppoe_data *qinq_pppoe =
1162 encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
1163 qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1164 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1165 sizeof(struct encap_qinq_pppoe_data));
1166 mbuf->pkt_len = mbuf->data_len = total_length +
1167 sizeof(struct encap_qinq_pppoe_data);
1171 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
1173 if (cfg->vxlan.ip_version)
1174 if (cfg->vxlan.vlan)
1175 pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
1177 pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
1179 if (cfg->vxlan.vlan)
1180 pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
1182 pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
1191 * RTE_TABLE_ACTION_NAT
/* NAT supports only TCP (0x06) and UDP (0x11) as the layer-4 protocol. */
1194 nat_cfg_check(struct rte_table_action_nat_config *nat)
1196 if ((nat->proto != 0x06) &&
1197 (nat->proto != 0x11))
/* Per-entry NAT data: translated address and L4 port, kept in network
 * byte order so they can be written directly into packet headers.
 */
1203 struct nat_ipv4_data {
1206 } __attribute__((__packed__));

1208 struct nat_ipv6_data {
1211 } __attribute__((__packed__));
/* Per-entry NAT data size, selected by the common IP version setting. */
1214 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
1215 struct rte_table_action_common_config *common)
1217 int ip_version = common->ip_version;
1219 return (ip_version) ?
1220 sizeof(struct nat_ipv4_data) :
1221 sizeof(struct nat_ipv6_data);
/* The IP version of the NAT parameters must match the common config. */
1225 nat_apply_check(struct rte_table_action_nat_params *p,
1226 struct rte_table_action_common_config *cfg)
1228 if ((p->ip_version && (cfg->ip_version == 0)) ||
1229 ((p->ip_version == 0) && cfg->ip_version))
/*
 * Store the translated address and port in the entry, converted to
 * network byte order (IPv6 addresses are copied as-is).
 */
1236 nat_apply(void *data,
1237 struct rte_table_action_nat_params *p,
1238 struct rte_table_action_common_config *cfg)
1242 /* Check input arguments */
1243 status = nat_apply_check(p, cfg);
1248 if (p->ip_version) {
1249 struct nat_ipv4_data *d = data;
1251 d->addr = rte_htonl(p->addr.ipv4);
1252 d->port = rte_htons(p->port);
1254 struct nat_ipv6_data *d = data;
1256 memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
1257 d->port = rte_htons(p->port);
/*
 * Incrementally update an IPv4 header checksum when one 32-bit address
 * changes from ip0 to ip1 (RFC 1624 style one's-complement update).
 */
1263 static __rte_always_inline uint16_t
1264 nat_ipv4_checksum_update(uint16_t cksum0,
1271 cksum1 = ~cksum1 & 0xFFFF;
1273 /* Subtract ip0 (one's complement logic) */
1274 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
1275 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1276 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1278 /* Add ip1 (one's complement logic) */
1279 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
1280 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1281 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1283 return (uint16_t)(~cksum1);
/*
 * Incrementally update a TCP/UDP checksum when the IPv4 address changes
 * from ip0 to ip1 and the port from port0 to port1 (both contribute to
 * the pseudo-header / header sums).
 */
1286 static __rte_always_inline uint16_t
1287 nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
1296 cksum1 = ~cksum1 & 0xFFFF;
1298 /* Subtract ip0 and port 0 (one's complement logic) */
1299 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
1300 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1301 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1303 /* Add ip1 and port1 (one's complement logic) */
1304 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
1305 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1306 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1308 return (uint16_t)(~cksum1);
/*
 * Incrementally update a TCP/UDP checksum when a 128-bit IPv6 address
 * (as eight 16-bit words) changes from ip0 to ip1 and the port from
 * port0 to port1.
 */
1311 static __rte_always_inline uint16_t
1312 nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
1321 cksum1 = ~cksum1 & 0xFFFF;
1323 /* Subtract ip0 and port 0 (one's complement logic) */
1324 cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
1325 ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
1326 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1327 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1329 /* Add ip1 and port1 (one's complement logic) */
1330 cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
1331 ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
1332 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1333 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1335 return (uint16_t)(~cksum1);
/*
 * Per-packet IPv4 NAT: rewrite the source (SNAT) or destination (DNAT)
 * address and L4 port, updating the IP header checksum and the TCP/UDP
 * checksum incrementally. The L4 header is assumed to start right after
 * the (option-less) IPv4 header (&ip[1]). The UDP checksum is only
 * rewritten when non-zero (a zero checksum means "no checksum" in IPv4).
 */
1338 static __rte_always_inline void
1339 pkt_ipv4_work_nat(struct ipv4_hdr *ip,
1340 struct nat_ipv4_data *data,
1341 struct rte_table_action_nat_config *cfg)
1343 if (cfg->source_nat) {
1344 if (cfg->proto == 0x6) {
1345 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1346 uint16_t ip_cksum, tcp_cksum;
1348 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1352 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1358 ip->src_addr = data->addr;
1359 ip->hdr_checksum = ip_cksum;
1360 tcp->src_port = data->port;
1361 tcp->cksum = tcp_cksum;
1363 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1364 uint16_t ip_cksum, udp_cksum;
1366 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1370 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1376 ip->src_addr = data->addr;
1377 ip->hdr_checksum = ip_cksum;
1378 udp->src_port = data->port;
1379 if (udp->dgram_cksum)
1380 udp->dgram_cksum = udp_cksum;
1383 if (cfg->proto == 0x6) {
1384 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1385 uint16_t ip_cksum, tcp_cksum;
1387 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1391 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1397 ip->dst_addr = data->addr;
1398 ip->hdr_checksum = ip_cksum;
1399 tcp->dst_port = data->port;
1400 tcp->cksum = tcp_cksum;
1402 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1403 uint16_t ip_cksum, udp_cksum;
1405 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1409 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1415 ip->dst_addr = data->addr;
1416 ip->hdr_checksum = ip_cksum;
1417 udp->dst_port = data->port;
1418 if (udp->dgram_cksum)
1419 udp->dgram_cksum = udp_cksum;
/*
 * Apply IPv6 NAT to one packet: copy the 16-byte replacement address over
 * src_addr (SNAT) or dst_addr (DNAT), rewrite the L4 port, and patch the
 * TCP/UDP checksum incrementally. IPv6 has no header checksum, so only the
 * L4 checksum is updated. Assumes no extension headers between the IPv6
 * header and L4 (&ip[1]) — TODO confirm against callers.
 * NOTE(review): partial extract — cksum variable declarations and trailing
 * argument lines are missing from this view.
 */
1424 static __rte_always_inline void
1425 pkt_ipv6_work_nat(struct ipv6_hdr *ip,
1426 	struct nat_ipv6_data *data,
1427 	struct rte_table_action_nat_config *cfg)
1429 	if (cfg->source_nat) {
1430 		if (cfg->proto == 0x6) {
1431 			struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
/* Checksum computed from old vs. new address before the rewrite below. */
1434 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1435 				(uint16_t *)ip->src_addr,
1436 				(uint16_t *)data->addr,
1440 			rte_memcpy(ip->src_addr, data->addr, 16);
1441 			tcp->src_port = data->port;
1442 			tcp->cksum = tcp_cksum;
1444 			struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1447 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1448 				(uint16_t *)ip->src_addr,
1449 				(uint16_t *)data->addr,
1453 			rte_memcpy(ip->src_addr, data->addr, 16);
1454 			udp->src_port = data->port;
/* Unlike IPv4, UDP checksum is mandatory over IPv6, so always written. */
1455 			udp->dgram_cksum = udp_cksum;
/* DNAT: mirror of the above on dst_addr/dst_port. */
1458 		if (cfg->proto == 0x6) {
1459 			struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1462 			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1463 				(uint16_t *)ip->dst_addr,
1464 				(uint16_t *)data->addr,
1468 			rte_memcpy(ip->dst_addr, data->addr, 16);
1469 			tcp->dst_port = data->port;
1470 			tcp->cksum = tcp_cksum;
1472 			struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1475 			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1476 				(uint16_t *)ip->dst_addr,
1477 				(uint16_t *)data->addr,
1481 			rte_memcpy(ip->dst_addr, data->addr, 16);
1482 			udp->dst_port = data->port;
1483 			udp->dgram_cksum = udp_cksum;
1489 * RTE_TABLE_ACTION_TTL
1492 ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
1502 } __attribute__((__packed__));
/*
 * ttl_data packs two things into the single n_packets counter:
 *   bit 0      - the per-entry "decrement TTL" flag,
 *   bits 63..1 - the count of packets dropped due to TTL reaching zero.
 * The macros below encode/decode that layout.
 */
1504 #define TTL_INIT(data, decrement) \
1505 	((data)->n_packets = (decrement) ? 1 : 0)
/* Extract the decrement flag (0 or 1). */
1507 #define TTL_DEC_GET(data) \
1508 	((uint8_t)((data)->n_packets & 1))
/* Clear the stats while preserving the decrement flag in bit 0. */
1510 #define TTL_STATS_RESET(data) \
1511 	((data)->n_packets = ((data)->n_packets & 1))
1513 #define TTL_STATS_READ(data) \
1514 	((data)->n_packets >> 1)
/* Add 'value' to the shifted counter, keeping the flag bit intact. */
1516 #define TTL_STATS_ADD(data, value) \
1517 	((data)->n_packets = \
1518 		(((((data)->n_packets >> 1) + (value)) << 1) | \
1519 		((data)->n_packets & 1)))
/* Store the per-entry decrement flag into the packed ttl_data. */
1522 ttl_apply(void *data,
1523 	struct rte_table_action_ttl_params *p)
1525 	struct ttl_data *d = data;
1527 	TTL_INIT(d, p->decrement);
/*
 * Decrement IPv4 TTL (when enabled) and return a drop indication when the
 * TTL reaches zero; also counts the drop in the packed stats.
 * NOTE(review): partial extract — the incremental hdr_checksum adjustment
 * lines and the drop declaration are missing from this view.
 */
1532 static __rte_always_inline uint64_t
1533 pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
1534 	struct ttl_data *data)
1537 	uint16_t cksum = ip->hdr_checksum;
1538 	uint8_t ttl = ip->time_to_live;
1539 	uint8_t ttl_diff = TTL_DEC_GET(data);
1544 	ip->hdr_checksum = cksum;
1545 	ip->time_to_live = ttl;
1547 	drop = (ttl == 0) ? 1 : 0;
1548 	TTL_STATS_ADD(data, drop);
/* IPv6 variant: hop_limits only, no header checksum to patch. */
1553 static __rte_always_inline uint64_t
1554 pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
1555 	struct ttl_data *data)
1558 	uint8_t ttl = ip->hop_limits;
1559 	uint8_t ttl_diff = TTL_DEC_GET(data);
1563 	ip->hop_limits = ttl;
1565 	drop = (ttl == 0) ? 1 : 0;
1566 	TTL_STATS_ADD(data, drop);
1572 * RTE_TABLE_ACTION_STATS
/* Reject a stats config that enables neither packet nor byte counters. */
1575 stats_cfg_check(struct rte_table_action_stats_config *stats)
1577 	if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
1586 } __attribute__((__packed__));
/* Seed the per-entry counters from the user-supplied initial values. */
1589 stats_apply(struct stats_data *data,
1590 	struct rte_table_action_stats_params *p)
1592 	data->n_packets = p->n_packets;
1593 	data->n_bytes = p->n_bytes;
/*
 * Per-packet stats update. NOTE(review): partial extract — the n_packets
 * increment line is missing from this view; only the byte accumulation
 * is visible.
 */
1598 static __rte_always_inline void
1599 pkt_work_stats(struct stats_data *data,
1600 	uint16_t total_length)
1603 	data->n_bytes += total_length;
1607 * RTE_TABLE_ACTION_TIME
1611 } __attribute__((__packed__));
/* Store the user-supplied timestamp into the per-entry time_data. */
1614 time_apply(struct time_data *data,
1615 	struct rte_table_action_time_params *p)
1617 	data->time = p->time;
/* Per-packet hook: records the current time into the entry (body elided
 * in this extract). */
1621 static __rte_always_inline void
1622 pkt_work_time(struct time_data *data,
1633 #define CRYPTO_OP_MASK_CIPHER 0x1
1634 #define CRYPTO_OP_MASK_AUTH 0x2
1635 #define CRYPTO_OP_MASK_AEAD 0x4
1637 struct crypto_op_sym_iv_aad {
1638 struct rte_crypto_op op;
1639 struct rte_crypto_sym_op sym_op;
1643 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1645 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1649 uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1650 uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
1656 struct sym_crypto_data {
1661 /** Length of cipher iv. */
1662 uint16_t cipher_iv_len;
1664 /** Offset from start of IP header to the cipher iv. */
1665 uint16_t cipher_iv_data_offset;
1667 /** Length of cipher iv to be updated in the mbuf. */
1668 uint16_t cipher_iv_update_len;
1670 /** Offset from start of IP header to the auth iv. */
1671 uint16_t auth_iv_data_offset;
1673 /** Length of auth iv in the mbuf. */
1674 uint16_t auth_iv_len;
1676 /** Length of auth iv to be updated in the mbuf. */
1677 uint16_t auth_iv_update_len;
1682 /** Length of iv. */
1685 /** Offset from start of IP header to the aead iv. */
1686 uint16_t iv_data_offset;
1688 /** Length of iv to be updated in the mbuf. */
1689 uint16_t iv_update_len;
1691 /** Length of aad */
1694 /** Offset from start of IP header to the aad. */
1695 uint16_t aad_data_offset;
1697 /** Length of aad to updated in the mbuf. */
1698 uint16_t aad_update_len;
1703 /** Offset from start of IP header to the data. */
1704 uint16_t data_offset;
1706 /** Digest length. */
1707 uint16_t digest_len;
1710 uint16_t block_size;
1712 /** Mask of crypto operation */
1715 /** Session pointer. */
1716 struct rte_cryptodev_sym_session *session;
1718 /** Direction of crypto, encrypt or decrypt */
1721 /** Private data size to store cipher iv / aad. */
1722 uint8_t iv_aad_data[32];
1724 } __attribute__((__packed__));
/* Validate the crypto action config: device must exist and both mempool
 * pointers (session create / session private data) must be provided. */
1727 sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
1729 	if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
1731 	if (cfg->mp_create == NULL || cfg->mp_init == NULL)
/*
 * Query the cryptodev capability list for the block size of the cipher or
 * AEAD algorithm in 'xform'. Iterates until the UNDEFINED sentinel entry.
 * NOTE(review): partial extract — the 'continue', return statements for the
 * not-found case, and the loop increment are missing from this view.
 */
1738 get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
1740 	struct rte_cryptodev_info dev_info;
1741 	const struct rte_cryptodev_capabilities *cap;
1744 	rte_cryptodev_info_get(cdev_id, &dev_info);
1746 	for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1748 		cap = &dev_info.capabilities[i];
1750 		if (cap->sym.xform_type != xform->type)
1753 		if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
1754 				(cap->sym.cipher.algo == xform->cipher.algo))
1755 			return cap->sym.cipher.block_size;
1757 		if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
1758 				(cap->sym.aead.algo == xform->aead.algo))
1759 			return cap->sym.aead.block_size;
1761 		if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
/*
 * Build the per-entry sym_crypto_data from the user parameters: walk the
 * xform chain (cipher / auth / aead), validate IV/AAD sizes and offsets,
 * record lengths and mbuf-update offsets, stash the template IV/AAD bytes
 * into iv_aad_data, then create and initialize the cryptodev session.
 * NOTE(review): partial extract — the xform-walk loop header, several
 * 'return -EINVAL/-ENOMEM' lines and closing braces are missing from view.
 */
1769 sym_crypto_apply(struct sym_crypto_data *data,
1770 	struct rte_table_action_sym_crypto_config *cfg,
1771 	struct rte_table_action_sym_crypto_params *p)
1773 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
1774 	const struct rte_crypto_auth_xform *auth_xform = NULL;
1775 	const struct rte_crypto_aead_xform *aead_xform = NULL;
1776 	struct rte_crypto_sym_xform *xform = p->xform;
1777 	struct rte_cryptodev_sym_session *session;
1780 	memset(data, 0, sizeof(*data));
1783 		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1784 			cipher_xform = &xform->cipher;
/* IV must fit the scratch buffer and sit at the fixed op offset. */
1786 			if (cipher_xform->iv.length >
1787 				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
1789 			if (cipher_xform->iv.offset !=
1790 					RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
1793 			ret = get_block_size(xform, cfg->cryptodev_id);
1796 			data->block_size = (uint16_t)ret;
1797 			data->op_mask |= CRYPTO_OP_MASK_CIPHER;
1799 			data->cipher_auth.cipher_iv_len =
1800 					cipher_xform->iv.length;
1801 			data->cipher_auth.cipher_iv_data_offset = (uint16_t)
1802 					p->cipher_auth.cipher_iv_update.offset;
1803 			data->cipher_auth.cipher_iv_update_len = (uint16_t)
1804 					p->cipher_auth.cipher_iv_update.length;
/* Template cipher IV stored at the start of iv_aad_data. */
1806 			rte_memcpy(data->iv_aad_data,
1807 					p->cipher_auth.cipher_iv.val,
1808 					p->cipher_auth.cipher_iv.length);
1810 			data->direction = cipher_xform->op;
1812 		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1813 			auth_xform = &xform->auth;
1814 			if (auth_xform->iv.length >
1815 				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
1817 			data->op_mask |= CRYPTO_OP_MASK_AUTH;
1819 			data->cipher_auth.auth_iv_len = auth_xform->iv.length;
1820 			data->cipher_auth.auth_iv_data_offset = (uint16_t)
1821 					p->cipher_auth.auth_iv_update.offset;
1822 			data->cipher_auth.auth_iv_update_len = (uint16_t)
1823 					p->cipher_auth.auth_iv_update.length;
1824 			data->digest_len = auth_xform->digest_length;
/* Map auth GENERATE/VERIFY onto the single encrypt/decrypt direction. */
1826 			data->direction = (auth_xform->op ==
1827 					RTE_CRYPTO_AUTH_OP_GENERATE) ?
1828 					RTE_CRYPTO_CIPHER_OP_ENCRYPT :
1829 					RTE_CRYPTO_CIPHER_OP_DECRYPT;
1831 		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1832 			aead_xform = &xform->aead;
1834 			if ((aead_xform->iv.length >
1835 				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
1836 				aead_xform->aad_length >
1837 				RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
1839 			if (aead_xform->iv.offset !=
1840 					RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
1843 			ret = get_block_size(xform, cfg->cryptodev_id);
1846 			data->block_size = (uint16_t)ret;
1847 			data->op_mask |= CRYPTO_OP_MASK_AEAD;
1849 			data->digest_len = aead_xform->digest_length;
1850 			data->aead.iv_len = aead_xform->iv.length;
1851 			data->aead.aad_len = aead_xform->aad_length;
1853 			data->aead.iv_data_offset = (uint16_t)
1854 					p->aead.iv_update.offset;
1855 			data->aead.iv_update_len = (uint16_t)
1856 					p->aead.iv_update.length;
1857 			data->aead.aad_data_offset = (uint16_t)
1858 					p->aead.aad_update.offset;
1859 			data->aead.aad_update_len = (uint16_t)
1860 					p->aead.aad_update.length;
/* AEAD layout in iv_aad_data: IV first, AAD immediately after. */
1862 			rte_memcpy(data->iv_aad_data,
1866 			rte_memcpy(data->iv_aad_data + p->aead.iv.length,
1868 					p->aead.aad.length);
1870 			data->direction = (aead_xform->op ==
1871 					RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1872 					RTE_CRYPTO_CIPHER_OP_ENCRYPT :
1873 					RTE_CRYPTO_CIPHER_OP_DECRYPT;
1877 		xform = xform->next;
/* Auth IV (when present) must follow the cipher IV contiguously in the
 * op scratch area; it is stored after the cipher IV in iv_aad_data. */
1880 	if (auth_xform && auth_xform->iv.length) {
1882 			if (auth_xform->iv.offset !=
1883 					RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
1884 					cipher_xform->iv.length)
1887 			rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
1888 					p->cipher_auth.auth_iv.val,
1889 					p->cipher_auth.auth_iv.length);
1891 			rte_memcpy(data->iv_aad_data,
1892 					p->cipher_auth.auth_iv.val,
1893 					p->cipher_auth.auth_iv.length);
1897 	session = rte_cryptodev_sym_session_create(cfg->mp_create);
1901 	ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
1902 			p->xform, cfg->mp_init);
/* On init failure, release the half-created session before bailing out. */
1904 		rte_cryptodev_sym_session_free(session);
1908 	data->data_offset = (uint16_t)p->data_offset;
1909 	data->session = session;
/*
 * Prepare the per-packet rte_crypto_op living in the mbuf metadata at
 * cfg->op_offset: fill in session/type/status, pad the payload to the
 * cipher block size for encryption, then populate the cipher, auth and/or
 * AEAD fields (data ranges, digest location, IV/AAD bytes) according to
 * data->op_mask. Returns a non-zero drop mask on failure (pad append
 * failing), 0 otherwise — the visible return lines are elided in this
 * extract.
 *
 * FIX(review): in the AEAD AAD-update branch below, both rte_memcpy calls
 * copied data->aead.iv_update_len bytes although the branch is guarded by
 * (and semantically about) data->aead.aad_update_len. Copy length changed
 * to aad_update_len to match the guard and the aad offsets being used.
 */
1914 static __rte_always_inline uint64_t
1915 pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
1916 	struct rte_table_action_sym_crypto_config *cfg,
1919 	struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
1920 			RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
1921 	struct rte_crypto_op *op = &crypto_op->op;
1922 	struct rte_crypto_sym_op *sym = op->sym;
1923 	uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
1924 	uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
1926 	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1927 	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1928 	op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
1929 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1932 	sym->session = data->session;
1934 	/** pad the packet */
1935 	if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1936 		uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
1937 				data->block_size) - payload_len;
/* Reserve room for both the block padding and the trailing digest. */
1939 		if (unlikely(rte_pktmbuf_append(mbuf, append_len +
1940 				data->digest_len) == NULL))
1943 		payload_len += append_len;
/* Decrypt path: the digest at the tail is input, not payload. */
1945 		payload_len -= data->digest_len;
1947 	if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
1948 		/** prepare cipher op */
1949 		uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
1951 		sym->cipher.data.length = payload_len;
1952 		sym->cipher.data.offset = data->data_offset - pkt_offset;
1954 		if (data->cipher_auth.cipher_iv_update_len) {
1955 			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1956 					data->cipher_auth.cipher_iv_data_offset
1959 			/** For encryption, update the pkt iv field, otherwise
1960 			 *  update the iv_aad_field
1962 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1963 				rte_memcpy(pkt_iv, data->iv_aad_data,
1964 					data->cipher_auth.cipher_iv_update_len);
1966 				rte_memcpy(data->iv_aad_data, pkt_iv,
1967 					data->cipher_auth.cipher_iv_update_len);
/* Copy the (possibly refreshed) template IV into the op scratch area. */
1971 		rte_memcpy(iv, data->iv_aad_data,
1972 				data->cipher_auth.cipher_iv_len);
1975 	if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
1976 		/** authentication always start from IP header. */
1977 		sym->auth.data.offset = ip_offset - pkt_offset;
1978 		sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
1980 		sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
1981 				uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
1983 		sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1984 				rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
1986 		if (data->cipher_auth.auth_iv_update_len) {
1987 			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1988 					data->cipher_auth.auth_iv_data_offset
1990 			uint8_t *data_iv = data->iv_aad_data +
1991 					data->cipher_auth.cipher_iv_len;
1993 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1994 				rte_memcpy(pkt_iv, data_iv,
1995 					data->cipher_auth.auth_iv_update_len);
1997 				rte_memcpy(data_iv, pkt_iv,
1998 					data->cipher_auth.auth_iv_update_len);
2001 		if (data->cipher_auth.auth_iv_len) {
2002 			/** prepare cipher op */
2003 			uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
2005 			rte_memcpy(iv, data->iv_aad_data +
2006 					data->cipher_auth.cipher_iv_len,
2007 					data->cipher_auth.auth_iv_len);
2011 	if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
2012 		uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
2013 		uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
2015 		sym->aead.aad.data = aad;
2016 		sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
2017 				aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
2018 		sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
2019 				uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
2021 		sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
2022 				rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
2023 		sym->aead.data.offset = data->data_offset - pkt_offset;
2024 		sym->aead.data.length = payload_len;
2026 		if (data->aead.iv_update_len) {
2027 			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
2028 					data->aead.iv_data_offset + ip_offset);
2029 			uint8_t *data_iv = data->iv_aad_data;
2031 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2032 				rte_memcpy(pkt_iv, data_iv,
2033 					data->aead.iv_update_len);
2035 				rte_memcpy(data_iv, pkt_iv,
2036 					data->aead.iv_update_len);
2039 		rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
2041 		if (data->aead.aad_update_len) {
2042 			uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
2043 					data->aead.aad_data_offset + ip_offset);
2044 			uint8_t *data_aad = data->iv_aad_data +
/* Bug fix: copy AAD-update length, not the IV-update length. */
2047 			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2048 				rte_memcpy(pkt_aad, data_aad,
2049 					data->aead.aad_update_len);
2051 				rte_memcpy(data_aad, pkt_aad,
2052 					data->aead.aad_update_len);
2055 		rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
2056 				data->aead.aad_len);
2063 * RTE_TABLE_ACTION_TAG
2067 } __attribute__((__packed__));
/* Store the user tag into the per-entry tag_data (assignment line elided
 * in this extract). */
2070 tag_apply(struct tag_data *data,
2071 	struct rte_table_action_tag_params *p)
/*
 * Tag one packet: write the tag into the mbuf flow-director hash field and
 * mark it valid via the FDIR ol_flags, so the tag is visible to consumers
 * that read mbuf->hash.fdir.hi.
 */
2077 static __rte_always_inline void
2078 pkt_work_tag(struct rte_mbuf *mbuf,
2079 	struct tag_data *data)
2081 	mbuf->hash.fdir.hi = data->tag;
2082 	mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
/* 4-packet unrolled variant of pkt_work_tag for the burst path. */
2085 static __rte_always_inline void
2086 pkt4_work_tag(struct rte_mbuf *mbuf0,
2087 	struct rte_mbuf *mbuf1,
2088 	struct rte_mbuf *mbuf2,
2089 	struct rte_mbuf *mbuf3,
2090 	struct tag_data *data0,
2091 	struct tag_data *data1,
2092 	struct tag_data *data2,
2093 	struct tag_data *data3)
2095 	mbuf0->hash.fdir.hi = data0->tag;
2096 	mbuf1->hash.fdir.hi = data1->tag;
2097 	mbuf2->hash.fdir.hi = data2->tag;
2098 	mbuf3->hash.fdir.hi = data3->tag;
2100 	mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2101 	mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2102 	mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2103 	mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2107 * RTE_TABLE_ACTION_DECAP
2111 } __attribute__((__packed__));
/* Record the number of bytes to strip (assignment line elided in this
 * extract). */
2114 decap_apply(struct decap_data *data,
2115 	struct rte_table_action_decap_params *p)
/*
 * Decapsulate one packet by advancing data_off and shrinking data_len /
 * pkt_len by data->n bytes. No bounds check: presumably callers guarantee
 * n <= data_len — TODO confirm.
 */
2121 static __rte_always_inline void
2122 pkt_work_decap(struct rte_mbuf *mbuf,
2123 	struct decap_data *data)
2125 	uint16_t data_off = mbuf->data_off;
2126 	uint16_t data_len = mbuf->data_len;
2127 	uint32_t pkt_len = mbuf->pkt_len;
2128 	uint16_t n = data->n;
2130 	mbuf->data_off = data_off + n;
2131 	mbuf->data_len = data_len - n;
2132 	mbuf->pkt_len = pkt_len - n;
/* 4-packet unrolled variant of pkt_work_decap for the burst path. */
2135 static __rte_always_inline void
2136 pkt4_work_decap(struct rte_mbuf *mbuf0,
2137 	struct rte_mbuf *mbuf1,
2138 	struct rte_mbuf *mbuf2,
2139 	struct rte_mbuf *mbuf3,
2140 	struct decap_data *data0,
2141 	struct decap_data *data1,
2142 	struct decap_data *data2,
2143 	struct decap_data *data3)
2145 	uint16_t data_off0 = mbuf0->data_off;
2146 	uint16_t data_len0 = mbuf0->data_len;
2147 	uint32_t pkt_len0 = mbuf0->pkt_len;
2149 	uint16_t data_off1 = mbuf1->data_off;
2150 	uint16_t data_len1 = mbuf1->data_len;
2151 	uint32_t pkt_len1 = mbuf1->pkt_len;
2153 	uint16_t data_off2 = mbuf2->data_off;
2154 	uint16_t data_len2 = mbuf2->data_len;
2155 	uint32_t pkt_len2 = mbuf2->pkt_len;
2157 	uint16_t data_off3 = mbuf3->data_off;
2158 	uint16_t data_len3 = mbuf3->data_len;
2159 	uint32_t pkt_len3 = mbuf3->pkt_len;
2161 	uint16_t n0 = data0->n;
2162 	uint16_t n1 = data1->n;
2163 	uint16_t n2 = data2->n;
2164 	uint16_t n3 = data3->n;
2166 	mbuf0->data_off = data_off0 + n0;
2167 	mbuf0->data_len = data_len0 - n0;
2168 	mbuf0->pkt_len = pkt_len0 - n0;
2170 	mbuf1->data_off = data_off1 + n1;
2171 	mbuf1->data_len = data_len1 - n1;
2172 	mbuf1->pkt_len = pkt_len1 - n1;
2174 	mbuf2->data_off = data_off2 + n2;
2175 	mbuf2->data_len = data_len2 - n2;
2176 	mbuf2->pkt_len = pkt_len2 - n2;
2178 	mbuf3->data_off = data_off3 + n3;
2179 	mbuf3->data_len = data_len3 - n3;
2180 	mbuf3->pkt_len = pkt_len3 - n3;
/*
 * Return whether 'action' is one of the supported table action types.
 * NOTE(review): partial extract — the switch keyword, the 'return 1'
 * for the listed cases and the default 'return 0' are elided from view.
 */
2187 action_valid(enum rte_table_action_type action)
2190 	case RTE_TABLE_ACTION_FWD:
2191 	case RTE_TABLE_ACTION_LB:
2192 	case RTE_TABLE_ACTION_MTR:
2193 	case RTE_TABLE_ACTION_TM:
2194 	case RTE_TABLE_ACTION_ENCAP:
2195 	case RTE_TABLE_ACTION_NAT:
2196 	case RTE_TABLE_ACTION_TTL:
2197 	case RTE_TABLE_ACTION_STATS:
2198 	case RTE_TABLE_ACTION_TIME:
2199 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2200 	case RTE_TABLE_ACTION_TAG:
2201 	case RTE_TABLE_ACTION_DECAP:
/* Upper bound on distinct action types trackable in a 64-bit mask. */
2209 #define RTE_TABLE_ACTION_MAX                      64
/*
 * Aggregated per-profile configuration: action_mask records which actions
 * are registered (bit per rte_table_action_type); the per-action config
 * structs below hold the registered configuration for each.
 */
2212 	uint64_t action_mask;
2213 	struct rte_table_action_common_config common;
2214 	struct rte_table_action_lb_config lb;
2215 	struct rte_table_action_mtr_config mtr;
2216 	struct rte_table_action_tm_config tm;
2217 	struct rte_table_action_encap_config encap;
2218 	struct rte_table_action_nat_config nat;
2219 	struct rte_table_action_ttl_config ttl;
2220 	struct rte_table_action_stats_config stats;
2221 	struct rte_table_action_sym_crypto_config sym_crypto;
/*
 * Size of the per-action configuration struct for 'action'; actions with
 * no configuration (FWD, TIME, TAG, DECAP) fall through to the elided
 * default returning 0.
 */
2225 action_cfg_size(enum rte_table_action_type action)
2228 	case RTE_TABLE_ACTION_LB:
2229 		return sizeof(struct rte_table_action_lb_config);
2230 	case RTE_TABLE_ACTION_MTR:
2231 		return sizeof(struct rte_table_action_mtr_config);
2232 	case RTE_TABLE_ACTION_TM:
2233 		return sizeof(struct rte_table_action_tm_config);
2234 	case RTE_TABLE_ACTION_ENCAP:
2235 		return sizeof(struct rte_table_action_encap_config);
2236 	case RTE_TABLE_ACTION_NAT:
2237 		return sizeof(struct rte_table_action_nat_config);
2238 	case RTE_TABLE_ACTION_TTL:
2239 		return sizeof(struct rte_table_action_ttl_config);
2240 	case RTE_TABLE_ACTION_STATS:
2241 		return sizeof(struct rte_table_action_stats_config);
2242 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2243 		return sizeof(struct rte_table_action_sym_crypto_config);
/*
 * Return the address of the per-action config slot inside ap_config for
 * the given type (NULL for types without config — default case elided).
 */
2250 action_cfg_get(struct ap_config *ap_config,
2251 	enum rte_table_action_type type)
2254 	case RTE_TABLE_ACTION_LB:
2255 		return &ap_config->lb;
2257 	case RTE_TABLE_ACTION_MTR:
2258 		return &ap_config->mtr;
2260 	case RTE_TABLE_ACTION_TM:
2261 		return &ap_config->tm;
2263 	case RTE_TABLE_ACTION_ENCAP:
2264 		return &ap_config->encap;
2266 	case RTE_TABLE_ACTION_NAT:
2267 		return &ap_config->nat;
2269 	case RTE_TABLE_ACTION_TTL:
2270 		return &ap_config->ttl;
2272 	case RTE_TABLE_ACTION_STATS:
2273 		return &ap_config->stats;
2275 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2276 		return &ap_config->sym_crypto;
/*
 * Copy 'action_cfg' into its slot in ap_config and mark the action bit
 * in action_mask. The size comes from action_cfg_size(type).
 */
2283 action_cfg_set(struct ap_config *ap_config,
2284 	enum rte_table_action_type type,
2287 	void *dst = action_cfg_get(ap_config, type);
2290 		memcpy(dst, action_cfg, action_cfg_size(type));
2292 	ap_config->action_mask |= 1LLU << type;
/* Per-action byte offsets into each table entry's action data area. */
2296 	size_t offset[RTE_TABLE_ACTION_MAX];
/*
 * Per-entry data size required by 'action'. Some sizes depend on the
 * registered configuration (MTR, ENCAP, NAT); the rest are fixed structs.
 * Default case (0 for unknown types) is elided from this extract.
 */
2301 action_data_size(enum rte_table_action_type action,
2302 	struct ap_config *ap_config)
2305 	case RTE_TABLE_ACTION_FWD:
2306 		return sizeof(struct fwd_data);
2308 	case RTE_TABLE_ACTION_LB:
2309 		return sizeof(struct lb_data);
2311 	case RTE_TABLE_ACTION_MTR:
2312 		return mtr_data_size(&ap_config->mtr);
2314 	case RTE_TABLE_ACTION_TM:
2315 		return sizeof(struct tm_data);
2317 	case RTE_TABLE_ACTION_ENCAP:
2318 		return encap_data_size(&ap_config->encap);
2320 	case RTE_TABLE_ACTION_NAT:
2321 		return nat_data_size(&ap_config->nat,
2322 			&ap_config->common);
2324 	case RTE_TABLE_ACTION_TTL:
2325 		return sizeof(struct ttl_data);
2327 	case RTE_TABLE_ACTION_STATS:
2328 		return sizeof(struct stats_data);
2330 	case RTE_TABLE_ACTION_TIME:
2331 		return sizeof(struct time_data);
2333 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2334 		return (sizeof(struct sym_crypto_data));
2336 	case RTE_TABLE_ACTION_TAG:
2337 		return sizeof(struct tag_data);
2339 	case RTE_TABLE_ACTION_DECAP:
2340 		return sizeof(struct decap_data);
/*
 * Lay out the per-entry action data area: walk the registered actions in
 * ascending type order, assigning each a sequential offset and summing
 * the total size.
 */
2349 action_data_offset_set(struct ap_data *ap_data,
2350 	struct ap_config *ap_config)
2352 	uint64_t action_mask = ap_config->action_mask;
2356 	memset(ap_data->offset, 0, sizeof(ap_data->offset));
2359 	for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
2360 		if (action_mask & (1LLU << action)) {
2361 			ap_data->offset[action] = offset;
2362 			offset += action_data_size((enum rte_table_action_type)action,
2366 	ap_data->total_size = offset;
/* Profile = registered configs + frozen data layout. */
2369 struct rte_table_action_profile {
2370 	struct ap_config cfg;
2371 	struct ap_data data;
/*
 * Allocate and initialize a table action profile with the given common
 * configuration (argument check and NULL returns elided in this extract).
 */
2375 struct rte_table_action_profile *
2376 rte_table_action_profile_create(struct rte_table_action_common_config *common)
2378 	struct rte_table_action_profile *ap;
2380 	/* Check input arguments */
2384 	/* Memory allocation */
2385 	ap = calloc(1, sizeof(struct rte_table_action_profile));
2389 	/* Initialization */
2390 	memcpy(&ap->cfg.common, common, sizeof(*common));
/*
 * Register an action type (plus its config, when the type requires one)
 * into a not-yet-frozen profile. Rejects double registration and a
 * config/configless mismatch, then dispatches to the per-action
 * cfg_check before storing the config via action_cfg_set.
 */
2397 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
2398 	enum rte_table_action_type type,
2399 	void *action_config)
2403 	/* Check input arguments */
2404 	if ((profile == NULL) ||
2406 		(action_valid(type) == 0) ||
2407 		(profile->cfg.action_mask & (1LLU << type)) ||
2408 		((action_cfg_size(type) == 0) && action_config) ||
2409 		(action_cfg_size(type) && (action_config == NULL)))
2413 	case RTE_TABLE_ACTION_LB:
2414 		status = lb_cfg_check(action_config);
2417 	case RTE_TABLE_ACTION_MTR:
2418 		status = mtr_cfg_check(action_config);
2421 	case RTE_TABLE_ACTION_TM:
2422 		status = tm_cfg_check(action_config);
2425 	case RTE_TABLE_ACTION_ENCAP:
2426 		status = encap_cfg_check(action_config);
2429 	case RTE_TABLE_ACTION_NAT:
2430 		status = nat_cfg_check(action_config);
2433 	case RTE_TABLE_ACTION_TTL:
2434 		status = ttl_cfg_check(action_config);
2437 	case RTE_TABLE_ACTION_STATS:
2438 		status = stats_cfg_check(action_config);
2441 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2442 		status = sym_crypto_cfg_check(action_config);
2454 	action_cfg_set(&profile->cfg, type, action_config);
/*
 * Freeze a profile: FWD is always implicitly enabled, the per-entry data
 * layout is computed once, and further registration is disallowed.
 * Idempotence guard: an already-frozen profile is rejected (return elided).
 */
2460 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
2462 	if (profile->frozen)
2465 	profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
2466 	action_data_offset_set(&profile->data, &profile->cfg);
2467 	profile->frozen = 1;
/* Free a profile; NULL is tolerated (body elided in this extract). */
2473 rte_table_action_profile_free(struct rte_table_action_profile *profile)
2475 	if (profile == NULL)
/* Capacity of the per-action-object meter profile table. */
2485 #define METER_PROFILES_MAX                                 32
/*
 * Runtime action object: a frozen snapshot of the profile's config and
 * data layout, plus mutable runtime state (DSCP table, meter profiles).
 */
2487 struct rte_table_action {
2488 	struct ap_config cfg;
2489 	struct ap_data data;
2490 	struct dscp_table_data dscp_table;
2491 	struct meter_profile_data mp[METER_PROFILES_MAX];
/*
 * Instantiate an action object from a frozen profile: allocate on the
 * requested socket and copy the profile's config and layout by value, so
 * the profile can be freed independently afterwards.
 */
2494 struct rte_table_action *
2495 rte_table_action_create(struct rte_table_action_profile *profile,
2498 	struct rte_table_action *action;
2500 	/* Check input arguments */
2501 	if ((profile == NULL) ||
2502 		(profile->frozen == 0))
2505 	/* Memory allocation */
2506 	action = rte_zmalloc_socket(NULL,
2507 		sizeof(struct rte_table_action),
2508 		RTE_CACHE_LINE_SIZE,
2513 	/* Initialization */
2514 	memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
2515 	memcpy(&action->data, &profile->data, sizeof(profile->data));
/*
 * Locate the per-action slice inside a table entry's action data area,
 * using the offsets frozen into action->data at profile-freeze time.
 * No validity check: callers must ensure 'type' is in the action mask.
 */
2520 static __rte_always_inline void *
2521 action_data_get(void *data,
2522 	struct rte_table_action *action,
2523 	enum rte_table_action_type type)
2525 	size_t offset = action->data.offset[type];
2526 	uint8_t *data_bytes = data;
2528 	return &data_bytes[offset];
/*
 * Write one action's parameters into a table entry: validate that the
 * action object was built with 'type' enabled, locate the entry's slice
 * via action_data_get, then dispatch to the per-action *_apply helper.
 * NOTE(review): partial extract — parameter-cast argument lines of most
 * dispatch calls and the default case are elided from view.
 */
2532 rte_table_action_apply(struct rte_table_action *action,
2534 	enum rte_table_action_type type,
2535 	void *action_params)
2539 	/* Check input arguments */
2540 	if ((action == NULL) ||
2542 		(action_valid(type) == 0) ||
2543 		((action->cfg.action_mask & (1LLU << type)) == 0) ||
2544 		(action_params == NULL))
2548 	action_data = action_data_get(data, action, type);
2551 	case RTE_TABLE_ACTION_FWD:
2552 		return fwd_apply(action_data,
2555 	case RTE_TABLE_ACTION_LB:
2556 		return lb_apply(action_data,
2559 	case RTE_TABLE_ACTION_MTR:
2560 		return mtr_apply(action_data,
2564 			RTE_DIM(action->mp));
2566 	case RTE_TABLE_ACTION_TM:
2567 		return tm_apply(action_data,
2571 	case RTE_TABLE_ACTION_ENCAP:
2572 		return encap_apply(action_data,
2575 			&action->cfg.common);
2577 	case RTE_TABLE_ACTION_NAT:
2578 		return nat_apply(action_data,
2580 			&action->cfg.common);
2582 	case RTE_TABLE_ACTION_TTL:
2583 		return ttl_apply(action_data,
2586 	case RTE_TABLE_ACTION_STATS:
2587 		return stats_apply(action_data,
2590 	case RTE_TABLE_ACTION_TIME:
2591 		return time_apply(action_data,
2594 	case RTE_TABLE_ACTION_SYM_CRYPTO:
2595 		return sym_crypto_apply(action_data,
2596 				&action->cfg.sym_crypto,
2599 	case RTE_TABLE_ACTION_TAG:
2600 		return tag_apply(action_data,
2603 	case RTE_TABLE_ACTION_DECAP:
2604 		return decap_apply(action_data,
/*
 * Update the shared DSCP translation table for entries selected by
 * dscp_mask: each selected index copies color / traffic class / TC queue
 * from the caller's table. Requires MTR and/or TM to be enabled, since
 * only those actions consume the DSCP table.
 */
2613 rte_table_action_dscp_table_update(struct rte_table_action *action,
2615 	struct rte_table_action_dscp_table *table)
2619 	/* Check input arguments */
2620 	if ((action == NULL) ||
2621 		((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2622 		(1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
2627 	for (i = 0; i < RTE_DIM(table->entry); i++) {
2628 		struct dscp_table_entry_data *data =
2629 			&action->dscp_table.entry[i];
2630 		struct rte_table_action_dscp_table_entry *entry =
/* Skip DSCP values not selected by the caller's mask. */
2633 		if ((dscp_mask & (1LLU << i)) == 0)
2636 		data->color = entry->color;
2637 		data->tc = entry->tc_id;
2638 		data->tc_queue = entry->tc_queue_id;
/*
 * Add a trTCM meter profile under 'meter_profile_id'. Only the TRTCM
 * algorithm is supported; an existing id is rejected (elided), and a free
 * slot must be available in action->mp. On success the slot is configured
 * via rte_meter_trtcm_profile_config and tagged with the id.
 */
2645 rte_table_action_meter_profile_add(struct rte_table_action *action,
2646 	uint32_t meter_profile_id,
2647 	struct rte_table_action_meter_profile *profile)
2649 	struct meter_profile_data *mp_data;
2652 	/* Check input arguments */
2653 	if ((action == NULL) ||
2654 		((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2658 	if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
2661 	mp_data = meter_profile_data_find(action->mp,
2662 		RTE_DIM(action->mp),
2667 	mp_data = meter_profile_data_find_unused(action->mp,
2668 		RTE_DIM(action->mp));
2672 	/* Install new profile */
2673 	status = rte_meter_trtcm_profile_config(&mp_data->profile,
2678 	mp_data->profile_id = meter_profile_id;
/*
 * Delete a previously added meter profile by id (lookup failure and the
 * slot-invalidation line are elided in this extract).
 */
2685 rte_table_action_meter_profile_delete(struct rte_table_action *action,
2686 	uint32_t meter_profile_id)
2688 	struct meter_profile_data *mp_data;
2690 	/* Check input arguments */
2691 	if ((action == NULL) ||
2692 		((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
2695 	mp_data = meter_profile_data_find(action->mp,
2696 		RTE_DIM(action->mp),
2701 	/* Uninstall profile */
/*
 * Read (and optionally clear) per-TC meter counters from a table entry.
 * For each TC selected by tc_mask, the packet counts per meter color are
 * copied into 'stats' (when non-NULL); a separate pass resets the source
 * counters when clearing is requested (the clear-flag condition line is
 * elided in this extract).
 */
2708 rte_table_action_meter_read(struct rte_table_action *action,
2711 	struct rte_table_action_mtr_counters *stats,
2714 	struct mtr_trtcm_data *mtr_data;
2717 	/* Check input arguments */
2718 	if ((action == NULL) ||
2719 		((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2721 		(tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
2724 	mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
2728 		for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2729 			struct rte_table_action_mtr_counters_tc *dst =
2731 			struct mtr_trtcm_data *src = &mtr_data[i];
2733 			if ((tc_mask & (1 << i)) == 0)
2736 			dst->n_packets[e_RTE_METER_GREEN] =
2737 				mtr_trtcm_data_stats_get(src, e_RTE_METER_GREEN);
2739 			dst->n_packets[e_RTE_METER_YELLOW] =
2740 				mtr_trtcm_data_stats_get(src, e_RTE_METER_YELLOW);
2742 			dst->n_packets[e_RTE_METER_RED] =
2743 				mtr_trtcm_data_stats_get(src, e_RTE_METER_RED);
/* Only packet counters are maintained (n_bytes_enabled must be 0). */
2745 			dst->n_packets_valid = 1;
2746 			dst->n_bytes_valid = 0;
2749 		stats->tc_mask = tc_mask;
/* Clear pass: reset the per-color counters for the selected TCs. */
2754 		for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2755 			struct mtr_trtcm_data *src = &mtr_data[i];
2757 			if ((tc_mask & (1 << i)) == 0)
2760 			mtr_trtcm_data_stats_reset(src, e_RTE_METER_GREEN);
2761 			mtr_trtcm_data_stats_reset(src, e_RTE_METER_YELLOW);
2762 			mtr_trtcm_data_stats_reset(src, e_RTE_METER_RED);
/*
 * Read (and optionally clear) the TTL-drop counter of one table entry,
 * using the packed TTL_STATS_* accessors (the stats!=NULL / clear-flag
 * condition lines are elided in this extract).
 */
2770 rte_table_action_ttl_read(struct rte_table_action *action,
2772 	struct rte_table_action_ttl_counters *stats,
2775 	struct ttl_data *ttl_data;
2777 	/* Check input arguments */
2778 	if ((action == NULL) ||
2779 		((action->cfg.action_mask &
2780 		(1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
2784 	ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
2788 		stats->n_packets = TTL_STATS_READ(ttl_data);
2792 		TTL_STATS_RESET(ttl_data);
/*
 * Read (and optionally clear) the packet/byte counters of one table
 * entry's stats action.
 */
2798 rte_table_action_stats_read(struct rte_table_action *action,
2800 	struct rte_table_action_stats_counters *stats,
2803 	struct stats_data *stats_data;
2805 	/* Check input arguments */
2806 	if ((action == NULL) ||
2807 		((action->cfg.action_mask &
2808 		(1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
2812 	stats_data = action_data_get(data, action,
2813 		RTE_TABLE_ACTION_STATS);
2817 		stats->n_packets = stats_data->n_packets;
2818 		stats->n_bytes = stats_data->n_bytes;
2819 		stats->n_packets_valid = 1;
2820 		stats->n_bytes_valid = 1;
2825 		stats_data->n_packets = 0;
2826 		stats_data->n_bytes = 0;
/*
 * Read the last-hit timestamp recorded in one table entry's time action
 * into *timestamp.
 */
2833 rte_table_action_time_read(struct rte_table_action *action,
2835 	uint64_t *timestamp)
2837 	struct time_data *time_data;
2839 	/* Check input arguments */
2840 	if ((action == NULL) ||
2841 		((action->cfg.action_mask &
2842 		(1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
2844 		(timestamp == NULL))
2847 	time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
2850 	*timestamp = time_data->time;
/*
 * Return the cryptodev session stored in one table entry's sym-crypto
 * action data (NULL-return paths elided in this extract).
 */
2855 struct rte_cryptodev_sym_session *
2856 rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
2859 	struct sym_crypto_data *sym_crypto_data;
2861 	/* Check input arguments */
2862 	if ((action == NULL) ||
2863 		((action->cfg.action_mask &
2864 		(1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
2868 	sym_crypto_data = action_data_get(data, action,
2869 		RTE_TABLE_ACTION_SYM_CRYPTO);
2871 	return sym_crypto_data->session;
/*
 * Per-packet fast path: parse DSCP and total length from the IP header
 * (IPv4 or IPv6 per the common config), then run every enabled action in
 * fixed order — LB, MTR, TM, DECAP, ENCAP, NAT, TTL, STATS, TIME,
 * SYM_CRYPTO, TAG — accumulating a drop mask from the actions that can
 * drop (MTR, TTL, SYM_CRYPTO). NOTE(review): partial extract — several
 * helper-call argument lines and the final return are elided from view.
 */
2874 static __rte_always_inline uint64_t
2875 pkt_work(struct rte_mbuf *mbuf,
2876 	struct rte_pipeline_table_entry *table_entry,
2878 	struct rte_table_action *action,
2879 	struct ap_config *cfg)
2881 	uint64_t drop_mask = 0;
2883 	uint32_t ip_offset = action->cfg.common.ip_offset;
2884 	void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
2887 	uint16_t total_length;
2889 	if (cfg->common.ip_version) {
2890 		struct ipv4_hdr *hdr = ip;
/* DSCP = top 6 bits of the IPv4 TOS byte. */
2892 		dscp = hdr->type_of_service >> 2;
2893 		total_length = rte_ntohs(hdr->total_length);
2895 		struct ipv6_hdr *hdr = ip;
/* DSCP extracted from the IPv6 vtc_flow traffic-class field. */
2897 		dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
2899 			rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
2902 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2904 			action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
2910 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2912 			action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
2914 		drop_mask |= pkt_work_mtr(mbuf,
2916 			&action->dscp_table,
2923 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2925 			action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
2929 			&action->dscp_table,
2933 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2934 		void *data = action_data_get(table_entry,
2936 			RTE_TABLE_ACTION_DECAP);
2938 		pkt_work_decap(mbuf, data);
2941 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2943 			action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
2945 		pkt_work_encap(mbuf,
2953 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2955 			action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
/* ip_version selects the IPv4 vs IPv6 NAT worker. */
2957 		if (cfg->common.ip_version)
2958 			pkt_ipv4_work_nat(ip, data, &cfg->nat);
2960 			pkt_ipv6_work_nat(ip, data, &cfg->nat);
2963 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2965 			action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
2967 		if (cfg->common.ip_version)
2968 			drop_mask |= pkt_ipv4_work_ttl(ip, data);
2970 			drop_mask |= pkt_ipv6_work_ttl(ip, data);
2973 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2975 			action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
2977 		pkt_work_stats(data, total_length);
2980 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2982 			action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
2984 		pkt_work_time(data, time);
2987 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2988 		void *data = action_data_get(table_entry, action,
2989 			RTE_TABLE_ACTION_SYM_CRYPTO);
2991 		drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
2995 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
2996 		void *data = action_data_get(table_entry,
2998 			RTE_TABLE_ACTION_TAG);
3000 		pkt_work_tag(mbuf, data);
/*
 * Four-packet unrolled variant of pkt_work(): processes mbufs[0..3] and
 * table_entries[0..3] in lockstep, applying the same enabled-action
 * sequence (LB, MTR, TM, DECAP, ENCAP, NAT, TTL, STATS, TIME,
 * SYM_CRYPTO, TAG) and accumulating four independent drop masks.
 * Manual unrolling keeps the action-mask branches evaluated once per
 * group of four packets.
 *
 * NOTE(review): elided extraction — else-branches, call argument lists,
 * local "dataN" declarations, the combined return expression and the
 * closing braces are missing between the numbered fragments.
 */
3006 static __rte_always_inline uint64_t
3007 pkt4_work(struct rte_mbuf **mbufs,
3008 struct rte_pipeline_table_entry **table_entries,
3010 struct rte_table_action *action,
3011 struct ap_config *cfg)
3013 uint64_t drop_mask0 = 0;
3014 uint64_t drop_mask1 = 0;
3015 uint64_t drop_mask2 = 0;
3016 uint64_t drop_mask3 = 0;
3018 struct rte_mbuf *mbuf0 = mbufs[0];
3019 struct rte_mbuf *mbuf1 = mbufs[1];
3020 struct rte_mbuf *mbuf2 = mbufs[2];
3021 struct rte_mbuf *mbuf3 = mbufs[3];
3023 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
3024 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
3025 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
3026 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
/* One IP-header pointer per packet, all at the same metadata offset. */
3028 uint32_t ip_offset = action->cfg.common.ip_offset;
3029 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
3030 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
3031 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
3032 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
3034 uint32_t dscp0, dscp1, dscp2, dscp3;
3035 uint16_t total_length0, total_length1, total_length2, total_length3;
/* Up-front header parse, IPv4 branch (IPv6 else-branch follows). */
3037 if (cfg->common.ip_version) {
3038 struct ipv4_hdr *hdr0 = ip0;
3039 struct ipv4_hdr *hdr1 = ip1;
3040 struct ipv4_hdr *hdr2 = ip2;
3041 struct ipv4_hdr *hdr3 = ip3;
3043 dscp0 = hdr0->type_of_service >> 2;
3044 dscp1 = hdr1->type_of_service >> 2;
3045 dscp2 = hdr2->type_of_service >> 2;
3046 dscp3 = hdr3->type_of_service >> 2;
3048 total_length0 = rte_ntohs(hdr0->total_length);
3049 total_length1 = rte_ntohs(hdr1->total_length);
3050 total_length2 = rte_ntohs(hdr2->total_length);
3051 total_length3 = rte_ntohs(hdr3->total_length);
3053 struct ipv6_hdr *hdr0 = ip0;
3054 struct ipv6_hdr *hdr1 = ip1;
3055 struct ipv6_hdr *hdr2 = ip2;
3056 struct ipv6_hdr *hdr3 = ip3;
/* NOTE(review): same suspect IPv6 DSCP mask as pkt_work() —
 * standard DSCP extraction would be (vtc_flow & 0x0FC00000) >> 22;
 * verify against the upstream DPDK fix. */
3058 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
3059 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
3060 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
3061 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;
3064 rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
3066 rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
3068 rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
3070 rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
/* Load balancing: per-packet action data lookups. */
3073 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
3075 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
3077 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
3079 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
3081 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
/* Metering: each call may set the packet's bit in its drop mask. */
3100 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
3102 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
3104 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
3106 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
3108 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
3110 drop_mask0 |= pkt_work_mtr(mbuf0,
3112 &action->dscp_table,
3118 drop_mask1 |= pkt_work_mtr(mbuf1,
3120 &action->dscp_table,
3126 drop_mask2 |= pkt_work_mtr(mbuf2,
3128 &action->dscp_table,
3134 drop_mask3 |= pkt_work_mtr(mbuf3,
3136 &action->dscp_table,
/* Traffic management queue selection via the shared DSCP table. */
3143 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
3145 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
3147 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
3149 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
3151 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
3155 &action->dscp_table,
3160 &action->dscp_table,
3165 &action->dscp_table,
3170 &action->dscp_table,
3174 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
3175 void *data0 = action_data_get(table_entry0,
3177 RTE_TABLE_ACTION_DECAP);
3178 void *data1 = action_data_get(table_entry1,
3180 RTE_TABLE_ACTION_DECAP);
3181 void *data2 = action_data_get(table_entry2,
3183 RTE_TABLE_ACTION_DECAP);
3184 void *data3 = action_data_get(table_entry3,
3186 RTE_TABLE_ACTION_DECAP);
/* Decap has a true 4-wide worker; most other actions call the
 * scalar worker four times. */
3188 pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
3189 data0, data1, data2, data3);
3192 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
3194 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
3196 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
3198 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
3200 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
3202 pkt_work_encap(mbuf0,
3209 pkt_work_encap(mbuf1,
3216 pkt_work_encap(mbuf2,
3223 pkt_work_encap(mbuf3,
/* NAT rewrites headers in place; variant chosen once by IP version. */
3231 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
3233 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
3235 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
3237 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
3239 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
3241 if (cfg->common.ip_version) {
3242 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
3243 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
3244 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
3245 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
3247 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
3248 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
3249 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
3250 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
/* TTL/hop-limit: expired packets set their drop-mask bits. */
3254 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
3256 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
3258 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
3260 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
3262 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
3264 if (cfg->common.ip_version) {
3265 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
3266 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
3267 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
3268 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
3270 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
3271 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
3272 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
3273 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
3277 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
3279 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
3281 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
3283 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
3285 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
3287 pkt_work_stats(data0, total_length0);
3288 pkt_work_stats(data1, total_length1);
3289 pkt_work_stats(data2, total_length2);
3290 pkt_work_stats(data3, total_length3);
3293 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
3295 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
3297 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
3299 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
3301 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
3303 pkt_work_time(data0, time);
3304 pkt_work_time(data1, time);
3305 pkt_work_time(data2, time);
3306 pkt_work_time(data3, time);
3309 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
3310 void *data0 = action_data_get(table_entry0, action,
3311 RTE_TABLE_ACTION_SYM_CRYPTO);
3312 void *data1 = action_data_get(table_entry1, action,
3313 RTE_TABLE_ACTION_SYM_CRYPTO);
3314 void *data2 = action_data_get(table_entry2, action,
3315 RTE_TABLE_ACTION_SYM_CRYPTO);
3316 void *data3 = action_data_get(table_entry3, action,
3317 RTE_TABLE_ACTION_SYM_CRYPTO);
3319 drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
3321 drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
3323 drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
3325 drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
3329 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3330 void *data0 = action_data_get(table_entry0,
3332 RTE_TABLE_ACTION_TAG);
3333 void *data1 = action_data_get(table_entry1,
3335 RTE_TABLE_ACTION_TAG);
3336 void *data2 = action_data_get(table_entry2,
3338 RTE_TABLE_ACTION_TAG);
3339 void *data3 = action_data_get(table_entry3,
3341 RTE_TABLE_ACTION_TAG);
3343 pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
3344 data0, data1, data2, data3);
/*
 * Generic pipeline action handler: dispatches a burst described by a
 * 64-bit packet mask to pkt4_work()/pkt_work(), then hands the
 * accumulated drop mask to rte_pipeline_ah_packet_drop().
 *
 * Fast path: when pkts_mask is dense (of the form 2^k - 1, detected by
 * (pkts_mask & (pkts_mask + 1)) == 0), packets occupy slots 0..k-1 and
 * can be walked linearly, four at a time with a scalar tail loop.
 * Slow path: a sparse mask is walked bit by bit with ctzll.
 *
 * NOTE(review): elided extraction — the pkts_mask parameter, the "time"
 * sampling guarded by the MTR|TIME mask test, several call argument
 * lists, the else keyword and the final return are missing between the
 * numbered fragments.
 */
3353 static __rte_always_inline int
3354 ah(struct rte_pipeline *p,
3355 struct rte_mbuf **pkts,
3357 struct rte_pipeline_table_entry **entries,
3358 struct rte_table_action *action,
3359 struct ap_config *cfg)
3361 uint64_t pkts_drop_mask = 0;
/* Sample the timestamp only when an action that consumes it (MTR or
 * TIME) is enabled — avoids a TSC read on other profiles. */
3364 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
3365 (1LLU << RTE_TABLE_ACTION_TIME)))
/* Dense-mask fast path. */
3368 if ((pkts_mask & (pkts_mask + 1)) == 0) {
3369 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
/* Groups of four... */
3372 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3375 drop_mask = pkt4_work(&pkts[i],
3381 pkts_drop_mask |= drop_mask << i;
/* ...then the remaining 0-3 packets one at a time. */
3384 for ( ; i < n_pkts; i++) {
3387 drop_mask = pkt_work(pkts[i],
3393 pkts_drop_mask |= drop_mask << i;
/* Sparse-mask path: peel off the lowest set bit each iteration. */
3396 for ( ; pkts_mask; ) {
3397 uint32_t pos = __builtin_ctzll(pkts_mask);
3398 uint64_t pkt_mask = 1LLU << pos;
3401 drop_mask = pkt_work(pkts[pos],
3407 pkts_mask &= ~pkt_mask;
3408 pkts_drop_mask |= drop_mask << pos;
/* Tell the pipeline which packets to drop in one call. */
3411 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
/*
 * Default table action handler: unpacks the opaque arg into the
 * rte_table_action handle and (in elided lines) forwards to ah() with
 * the action's configuration.
 *
 * NOTE(review): elided extraction — return type, remaining parameters,
 * the ah() call and the closing brace are missing from this fragment.
 */
3417 ah_default(struct rte_pipeline *p,
3418 struct rte_mbuf **pkts,
3420 struct rte_pipeline_table_entry **entries,
3423 struct rte_table_action *action = arg;
/*
 * Pick the table-entry hit handler for a profile: a profile whose only
 * action is FWD needs no handler (the pipeline's built-in forwarding
 * suffices); other profiles (elided branch) get ah_default.
 *
 * NOTE(review): elided extraction — both return statements and the
 * closing brace are missing from this fragment.
 */
3433 static rte_pipeline_table_action_handler_hit
3434 ah_selector(struct rte_table_action *action)
3436 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
/*
 * Fill in the rte_pipeline table parameters for a table that will use
 * this action profile: select the hit handler, and size the per-entry
 * action data as the next power of two of the profile's total data
 * size, minus the pipeline's own entry header.
 *
 * NOTE(review): elided extraction — the return type line, the params
 * NULL-check, the error return, the final "return 0" and the closing
 * brace are missing between the numbered fragments.
 */
3443 rte_table_action_table_params_get(struct rte_table_action *action,
3444 struct rte_pipeline_table_params *params)
3446 rte_pipeline_table_action_handler_hit f_action_hit;
3447 uint32_t total_size;
3449 /* Check input arguments */
3450 if ((action == NULL) ||
/* FWD-only profiles get no handler (see ah_selector). */
3454 f_action_hit = ah_selector(action);
/* Round up so entry data stays power-of-two aligned for the pipeline. */
3455 total_size = rte_align32pow2(action->data.total_size);
3457 /* Fill in params */
3458 params->f_action_hit = f_action_hit;
3459 params->f_action_miss = NULL;
/* arg_ah is only meaningful when a handler is installed. */
3460 params->arg_ah = (f_action_hit) ? action : NULL;
/* Pipeline wants the size of the action data beyond its entry header. */
3461 params->action_data_size = total_size -
3462 sizeof(struct rte_pipeline_table_entry);
3468 rte_table_action_free(struct rte_table_action *action)