1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
8 #include <rte_common.h>
9 #include <rte_byteorder.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memcpy.h>
13 #include <rte_ether.h>
19 #include "rte_table_action.h"
21 #define rte_htons rte_cpu_to_be_16
22 #define rte_htonl rte_cpu_to_be_32
24 #define rte_ntohs rte_be_to_cpu_16
25 #define rte_ntohl rte_be_to_cpu_32
28 * RTE_TABLE_ACTION_FWD
/* The fwd action data is the pipeline table entry itself (aliased below). */
30 #define fwd_data rte_pipeline_table_entry
/*
 * Copy the forward action into the table entry: the action code, plus the
 * port id or table id depending on which action was selected.
 * NOTE(review): elided listing — braces/return lines are not visible here.
 */
33 fwd_apply(struct fwd_data *data,
34 struct rte_table_action_fwd_params *p)
36 data->action = p->action;
38 if (p->action == RTE_PIPELINE_ACTION_PORT)
39 data->port_id = p->id;
41 if (p->action == RTE_PIPELINE_ACTION_TABLE)
42 data->table_id = p->id;
/*
 * Load-balance (LB) action. Config is valid only when the key size is a
 * power of 2 within [KEY_SIZE_MIN, KEY_SIZE_MAX] and a hash callback is set.
 * NOTE(review): elided listing — interior lines are not visible here.
 */
51 lb_cfg_check(struct rte_table_action_lb_config *cfg)
54 (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
55 (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
56 (!rte_is_power_of_2(cfg->key_size)) ||
57 (cfg->f_hash == NULL))
/* Per-entry LB table: one output value per hash bucket. */
64 uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
65 } __attribute__((__packed__));
/* Apply: copy the caller-supplied output table into the entry data. */
68 lb_apply(struct lb_data *data,
69 struct rte_table_action_lb_params *p)
71 memcpy(data->out, p->out, sizeof(data->out));
/*
 * Per-packet LB work: hash the key read from packet metadata at
 * cfg->key_offset, mask the digest to a bucket index (table size is a power
 * of 2), and write the selected output value to cfg->out_offset.
 */
76 static __rte_always_inline void
77 pkt_work_lb(struct rte_mbuf *mbuf,
79 struct rte_table_action_lb_config *cfg)
81 uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
82 uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
86 digest = cfg->f_hash(pkt_key,
90 pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
91 out_val = data->out[pos];
97 * RTE_TABLE_ACTION_MTR
/*
 * Meter config check: only trTCM is supported (srTCM rejected), the number
 * of traffic classes must be 1 or 4, and byte-count stats must be disabled.
 */
100 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
102 if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
103 ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
104 (mtr->n_bytes_enabled != 0))
/*
 * Pack scheduler fields into 16 bits: queue in bits [1:0], traffic class in
 * bits [3:2], color in bits [5:4].
 */
109 #define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
110 ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
111 ((((uint64_t)(tc)) & 0x3) << 2) | \
112 ((((uint64_t)(color)) & 0x3) << 4)))
/* Replace only the 2-bit color field (bits [5:4]) of a sched word. */
114 #define MBUF_SCHED_COLOR(sched, color) \
115 (((sched) & (~0x30LLU)) | ((color) << 4))
/*
 * Per-TC trTCM meter state. The low 8 bits of each stats[] counter are
 * re-used as control bits (policer action, profile id); the packet count
 * itself lives in bits [63:8] — see the accessors below.
 */
117 struct mtr_trtcm_data {
118 struct rte_meter_trtcm trtcm;
119 uint64_t stats[e_RTE_METER_COLORS];
120 } __attribute__((__packed__));
/* Meter profile id is stashed in bits [7:3] of the GREEN stats word. */
122 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
123 (((data)->stats[e_RTE_METER_GREEN] & 0xF8LLU) >> 3)
/* Store profile_id (5 bits, hence % 32) into bits [7:3] of GREEN stats. */
126 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
129 data->stats[e_RTE_METER_GREEN] &= ~0xF8LLU;
130 data->stats[e_RTE_METER_GREEN] |= (profile_id % 32) << 3;
/* Policer action per color: bit 2 = drop flag, bits [1:0] = output color. */
133 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
134 (((data)->stats[(color)] & 4LLU) >> 2)
136 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
137 ((enum rte_meter_color)((data)->stats[(color)] & 3LLU))
/*
 * Set the policer action for one meter color: either mark the drop bit, or
 * clear the action bits and record the (recolor) output color.
 * NOTE(review): elided — the branch for non-DROP actions appears to encode
 * `color` itself; the lines mapping action->color are not all visible.
 */
140 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
141 enum rte_meter_color color,
142 enum rte_table_action_policer action)
144 if (action == RTE_TABLE_ACTION_POLICER_DROP) {
145 data->stats[color] |= 4LLU;
147 data->stats[color] &= ~7LLU;
148 data->stats[color] |= color & 3LLU;
/* Packet counter occupies bits [63:8]; shift out the control byte. */
153 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
154 enum rte_meter_color color)
156 return data->stats[color] >> 8;
/* Reset the counter but keep the low control byte intact. */
160 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
161 enum rte_meter_color color)
163 data->stats[color] &= 0xFFLU;
/* Increment the packet counter (stored above the control byte). */
166 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
167 ((data)->stats[(color)] += (1LLU << 8))
/* Action data size: one trTCM state block per configured traffic class. */
170 mtr_data_size(struct rte_table_action_mtr_config *mtr)
172 return mtr->n_tc * sizeof(struct mtr_trtcm_data);
/* DSCP translation entry: input color plus packed queue/tc/color word. */
175 struct dscp_table_entry_data {
176 enum rte_meter_color color;
178 uint16_t queue_tc_color;
/* 64 entries — one per 6-bit DSCP value. */
181 struct dscp_table_data {
182 struct dscp_table_entry_data entry[64];
/* Cached trTCM profile, looked up by user-assigned profile_id. */
185 struct meter_profile_data {
186 struct rte_meter_trtcm_profile profile;
/*
 * Linear search for a valid profile entry matching profile_id; the table is
 * expected to be small. Returns the entry, or (per the elided tail) NULL.
 */
191 static struct meter_profile_data *
192 meter_profile_data_find(struct meter_profile_data *mp,
198 for (i = 0; i < mp_size; i++) {
199 struct meter_profile_data *mp_data = &mp[i];
201 if (mp_data->valid && (mp_data->profile_id == profile_id))
/* Linear search for a free (invalid) slot in the profile table. */
208 static struct meter_profile_data *
209 meter_profile_data_find_unused(struct meter_profile_data *mp,
214 for (i = 0; i < mp_size; i++) {
215 struct meter_profile_data *mp_data = &mp[i];
/*
 * Validate meter apply params: tc_mask must fit within the configured
 * number of TCs, and every enabled TC must reference an existing meter
 * profile. NOTE(review): elided listing — error returns are not visible.
 */
225 mtr_apply_check(struct rte_table_action_mtr_params *p,
226 struct rte_table_action_mtr_config *cfg,
227 struct meter_profile_data *mp,
232 if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
235 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
236 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
237 struct meter_profile_data *mp_data;
/* Skip TCs not selected by the mask. */
239 if ((p->tc_mask & (1LLU << i)) == 0)
242 mp_data = meter_profile_data_find(mp,
244 p_tc->meter_profile_id);
/*
 * Apply the meter action: for each TC enabled in tc_mask, reset its state,
 * configure the trTCM run-time context from the referenced profile, record
 * the profile id, and program the per-color policer actions.
 */
253 mtr_apply(struct mtr_trtcm_data *data,
254 struct rte_table_action_mtr_params *p,
255 struct rte_table_action_mtr_config *cfg,
256 struct meter_profile_data *mp,
262 /* Check input arguments */
263 status = mtr_apply_check(p, cfg, mp, mp_size);
268 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
269 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
270 struct mtr_trtcm_data *data_tc = &data[i];
271 struct meter_profile_data *mp_data;
273 if ((p->tc_mask & (1LLU << i)) == 0)
277 mp_data = meter_profile_data_find(mp,
279 p_tc->meter_profile_id);
/* Start from clean per-TC state before (re)configuring. */
283 memset(data_tc, 0, sizeof(*data_tc));
286 status = rte_meter_trtcm_config(&data_tc->trtcm,
292 mtr_trtcm_data_meter_profile_id_set(data_tc,
295 /* Policer actions */
296 mtr_trtcm_data_policer_action_set(data_tc,
298 p_tc->policer[e_RTE_METER_GREEN]);
300 mtr_trtcm_data_policer_action_set(data_tc,
302 p_tc->policer[e_RTE_METER_YELLOW]);
304 mtr_trtcm_data_policer_action_set(data_tc,
306 p_tc->policer[e_RTE_METER_RED]);
/*
 * Per-packet meter work: look up the DSCP entry to get the input color, run
 * the color-aware trTCM check, bump the per-color stats counter, then apply
 * the policer action — return a drop decision (via drop_mask) and rewrite
 * the mbuf sched word's color field with the policer output color.
 * NOTE(review): elided listing — the meter-check argument list and the
 * return statement are not visible; behavior of hidden lines is inferred
 * only from the visible calls and should be confirmed against the full file.
 */
312 static __rte_always_inline uint64_t
313 pkt_work_mtr(struct rte_mbuf *mbuf,
314 struct mtr_trtcm_data *data,
315 struct dscp_table_data *dscp_table,
316 struct meter_profile_data *mp,
319 uint16_t total_length)
321 uint64_t drop_mask, sched;
322 uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
323 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
324 enum rte_meter_color color_in, color_meter, color_policer;
328 color_in = dscp_entry->color;
330 mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
334 color_meter = rte_meter_trtcm_color_aware_check(
/* Per-color packet counter update. */
342 MTR_TRTCM_DATA_STATS_INC(data, color_meter);
345 drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
347 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
348 *sched_ptr = MBUF_SCHED_COLOR(sched, color_policer);
354 * RTE_TABLE_ACTION_TM
/*
 * Traffic-management config check: subports per port and pipes per subport
 * must be non-zero powers of 2, and the subport count must fit in 16 bits.
 */
357 tm_cfg_check(struct rte_table_action_tm_config *tm)
359 if ((tm->n_subports_per_port == 0) ||
360 (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
361 (tm->n_subports_per_port > UINT16_MAX) ||
362 (tm->n_pipes_per_subport == 0) ||
363 (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
/* Packed TM sched data; layout mirrors mbuf->hash.sched (see pkt_work_tm). */
370 uint16_t queue_tc_color;
373 } __attribute__((__packed__));
/* Apply params valid only when subport/pipe ids are within config bounds. */
376 tm_apply_check(struct rte_table_action_tm_params *p,
377 struct rte_table_action_tm_config *cfg)
379 if ((p->subport_id >= cfg->n_subports_per_port) ||
380 (p->pipe_id >= cfg->n_pipes_per_subport))
/*
 * Apply the TM action: validate, then record subport and pipe; the
 * queue/tc/color field is left 0 and filled per packet from the DSCP table.
 */
387 tm_apply(struct tm_data *data,
388 struct rte_table_action_tm_params *p,
389 struct rte_table_action_tm_config *cfg)
393 /* Check input arguments */
394 status = tm_apply_check(p, cfg);
399 data->queue_tc_color = 0;
400 data->subport = (uint16_t) p->subport_id;
401 data->pipe = p->pipe_id;
/*
 * Per-packet TM work: build the sched word from the entry's tm_data and the
 * DSCP-derived queue/tc/color, then store it into mbuf->hash.sched.
 * NOTE(review): elided — the final store to *sched_ptr is not visible.
 */
406 static __rte_always_inline void
407 pkt_work_tm(struct rte_mbuf *mbuf,
408 struct tm_data *data,
409 struct dscp_table_data *dscp_table,
412 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
413 struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
414 struct tm_data sched;
417 sched.queue_tc_color = dscp_entry->queue_tc_color;
422 * RTE_TABLE_ACTION_ENCAP
/* Recognized encapsulation types; elided default presumably returns 0. */
425 encap_valid(enum rte_table_action_encap_type encap)
428 case RTE_TABLE_ACTION_ENCAP_ETHER:
429 case RTE_TABLE_ACTION_ENCAP_VLAN:
430 case RTE_TABLE_ACTION_ENCAP_QINQ:
431 case RTE_TABLE_ACTION_ENCAP_MPLS:
432 case RTE_TABLE_ACTION_ENCAP_PPPOE:
433 case RTE_TABLE_ACTION_ENCAP_VXLAN:
/* Config must enable exactly one encap type (popcount of mask == 1). */
441 encap_cfg_check(struct rte_table_action_encap_config *encap)
443 if ((encap->encap_mask == 0) ||
444 (__builtin_popcountll(encap->encap_mask) != 1))
/* Pre-built header templates for each encapsulation type. All are packed so
 * they can be prepended to the packet verbatim. */
450 struct encap_ether_data {
451 struct ether_hdr ether;
452 } __attribute__((__packed__));
/* Build an 802.1Q TCI: pcp[15:13] | dei[12] | vid[11:0]. */
454 #define VLAN(pcp, dei, vid) \
455 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
456 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
457 (((uint64_t)(vid)) & 0xFFFLLU)) \
459 struct encap_vlan_data {
460 struct ether_hdr ether;
461 struct vlan_hdr vlan;
462 } __attribute__((__packed__));
464 struct encap_qinq_data {
465 struct ether_hdr ether;
466 struct vlan_hdr svlan;
467 struct vlan_hdr cvlan;
468 } __attribute__((__packed__));
470 #define ETHER_TYPE_MPLS_UNICAST 0x8847
472 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
/* Build an MPLS label stack entry: label[31:12] | tc[11:9] | s[8] | ttl[7:0]. */
474 #define MPLS(label, tc, s, ttl) \
475 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
476 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
477 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
478 (((uint64_t)(ttl)) & 0xFFLLU)))
480 struct encap_mpls_data {
481 struct ether_hdr ether;
482 uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
484 } __attribute__((__packed__));
486 #define ETHER_TYPE_PPPOE_SESSION 0x8864
488 #define PPP_PROTOCOL_IP 0x0021
/* Combined PPPoE session + PPP header. */
490 struct pppoe_ppp_hdr {
491 uint16_t ver_type_code;
495 } __attribute__((__packed__));
497 struct encap_pppoe_data {
498 struct ether_hdr ether;
499 struct pppoe_ppp_hdr pppoe_ppp;
500 } __attribute__((__packed__));
502 #define IP_PROTO_UDP 17
/* VXLAN templates: ether [+ vlan] + ipv4/ipv6 + udp (elided) + vxlan. */
504 struct encap_vxlan_ipv4_data {
505 struct ether_hdr ether;
506 struct ipv4_hdr ipv4;
508 struct vxlan_hdr vxlan;
509 } __attribute__((__packed__));
511 struct encap_vxlan_ipv4_vlan_data {
512 struct ether_hdr ether;
513 struct vlan_hdr vlan;
514 struct ipv4_hdr ipv4;
516 struct vxlan_hdr vxlan;
517 } __attribute__((__packed__));
519 struct encap_vxlan_ipv6_data {
520 struct ether_hdr ether;
521 struct ipv6_hdr ipv6;
523 struct vxlan_hdr vxlan;
524 } __attribute__((__packed__));
526 struct encap_vxlan_ipv6_vlan_data {
527 struct ether_hdr ether;
528 struct vlan_hdr vlan;
529 struct ipv6_hdr ipv6;
531 struct vxlan_hdr vxlan;
532 } __attribute__((__packed__));
/*
 * Return the per-entry data size for the single enabled encap type.
 * For VXLAN the size depends on IP version (ip_version non-zero => IPv4)
 * and on whether a VLAN tag is inserted.
 */
535 encap_data_size(struct rte_table_action_encap_config *encap)
537 switch (encap->encap_mask) {
538 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
539 return sizeof(struct encap_ether_data);
541 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
542 return sizeof(struct encap_vlan_data);
544 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
545 return sizeof(struct encap_qinq_data);
547 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
548 return sizeof(struct encap_mpls_data);
550 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
551 return sizeof(struct encap_pppoe_data);
553 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
554 if (encap->vxlan.ip_version)
555 if (encap->vxlan.vlan)
556 return sizeof(struct encap_vxlan_ipv4_vlan_data);
558 return sizeof(struct encap_vxlan_ipv4_data);
560 if (encap->vxlan.vlan)
561 return sizeof(struct encap_vxlan_ipv6_vlan_data);
563 return sizeof(struct encap_vxlan_ipv6_data);
/*
 * Validate encap apply params: the requested type must be known and enabled
 * in the config mask; MPLS additionally requires 1..LABELS_MAX labels.
 * NOTE(review): elided listing — return statements are not visible.
 */
571 encap_apply_check(struct rte_table_action_encap_params *p,
572 struct rte_table_action_encap_config *cfg)
574 if ((encap_valid(p->type) == 0) ||
575 ((cfg->encap_mask & (1LLU << p->type)) == 0))
579 case RTE_TABLE_ACTION_ENCAP_ETHER:
582 case RTE_TABLE_ACTION_ENCAP_VLAN:
585 case RTE_TABLE_ACTION_ENCAP_QINQ:
588 case RTE_TABLE_ACTION_ENCAP_MPLS:
589 if ((p->mpls.mpls_count == 0) ||
590 (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
595 case RTE_TABLE_ACTION_ENCAP_PPPOE:
598 case RTE_TABLE_ACTION_ENCAP_VXLAN:
/*
 * Build the plain Ethernet encap template: copy DA/SA and set ethertype to
 * IPv4 or IPv6 according to the common config (elided ternary arms).
 */
607 encap_ether_apply(void *data,
608 struct rte_table_action_encap_params *p,
609 struct rte_table_action_common_config *common_cfg)
611 struct encap_ether_data *d = data;
612 uint16_t ethertype = (common_cfg->ip_version) ?
617 ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
618 ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
619 d->ether.ether_type = rte_htons(ethertype);
/*
 * Build the single-tagged VLAN encap template: Ethernet header carries
 * ETHER_TYPE_VLAN; the VLAN header carries the TCI and the IP ethertype.
 */
625 encap_vlan_apply(void *data,
626 struct rte_table_action_encap_params *p,
627 struct rte_table_action_common_config *common_cfg)
629 struct encap_vlan_data *d = data;
630 uint16_t ethertype = (common_cfg->ip_version) ?
635 ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
636 ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
637 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
640 d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
643 d->vlan.eth_proto = rte_htons(ethertype);
/*
 * Build the QinQ encap template: outer S-VLAN (ethertype QINQ), inner
 * C-VLAN (ethertype VLAN), inner payload ethertype from the IP version.
 */
649 encap_qinq_apply(void *data,
650 struct rte_table_action_encap_params *p,
651 struct rte_table_action_common_config *common_cfg)
653 struct encap_qinq_data *d = data;
654 uint16_t ethertype = (common_cfg->ip_version) ?
659 ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
660 ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
661 d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
664 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
667 d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
670 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
673 d->cvlan.eth_proto = rte_htons(ethertype);
/*
 * Build the MPLS encap template: ethertype selects unicast vs multicast
 * MPLS; labels are written in order, with the last one (elided lines)
 * presumably carrying the bottom-of-stack bit. The label count is stored so
 * the per-packet path knows the template length.
 */
679 encap_mpls_apply(void *data,
680 struct rte_table_action_encap_params *p)
682 struct encap_mpls_data *d = data;
683 uint16_t ethertype = (p->mpls.unicast) ?
684 ETHER_TYPE_MPLS_UNICAST :
685 ETHER_TYPE_MPLS_MULTICAST;
689 ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
690 ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
691 d->ether.ether_type = rte_htons(ethertype);
/* All labels except the last. */
694 for (i = 0; i < p->mpls.mpls_count - 1; i++)
695 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
698 p->mpls.mpls[i].ttl));
/* Last label (loop leaves i == mpls_count - 1). */
700 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
703 p->mpls.mpls[i].ttl));
705 d->mpls_count = p->mpls.mpls_count;
/*
 * Build the PPPoE session encap template. 0x1100 packs PPPoE ver=1, type=1,
 * code=0 (session data). The length field is filled per packet, and the PPP
 * protocol is fixed to IPv4.
 */
710 encap_pppoe_apply(void *data,
711 struct rte_table_action_encap_params *p)
713 struct encap_pppoe_data *d = data;
716 ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
717 ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
718 d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
721 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
722 d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
723 d->pppoe_ppp.length = 0; /* not pre-computed */
724 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/*
 * Build the VXLAN encap template — one of four layouts chosen by config:
 * {IPv4, IPv6} x {VLAN, no VLAN}. Range-checks the VNI (24-bit), DSCP
 * (6-bit), IPv6 flow label (20-bit) and VLAN VID (12-bit) first. Length and
 * checksum fields marked "not pre-computed" are patched per packet by the
 * pkt_work_encap_vxlan_* helpers below.
 * NOTE(review): elided listing — UDP header fields are visible but the
 * struct members they belong to were partially elided; confirm against the
 * full file.
 */
734 if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
735 (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
736 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
737 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
738 (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
741 if (cfg->vxlan.ip_version)
742 if (cfg->vxlan.vlan) {
/* IPv4 + VLAN layout. */
743 struct encap_vxlan_ipv4_vlan_data *d = data;
746 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
747 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
748 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
751 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
754 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);
/* 0x45: IPv4, 5-word (20-byte) header, no options. */
757 d->ipv4.version_ihl = 0x45;
758 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
759 d->ipv4.total_length = 0; /* not pre-computed */
760 d->ipv4.packet_id = 0;
761 d->ipv4.fragment_offset = 0;
762 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
763 d->ipv4.next_proto_id = IP_PROTO_UDP;
764 d->ipv4.hdr_checksum = 0;
765 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
766 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
/* Checksum of the template (total_length == 0); updated per packet. */
768 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
771 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
772 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
773 d->udp.dgram_len = 0; /* not pre-computed */
774 d->udp.dgram_cksum = 0;
/* VXLAN: I flag set; VNI in the upper 24 bits of vx_vni. */
777 d->vxlan.vx_flags = rte_htonl(0x08000000);
778 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv4, no VLAN. */
782 struct encap_vxlan_ipv4_data *d = data;
785 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
786 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
787 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);
790 d->ipv4.version_ihl = 0x45;
791 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
792 d->ipv4.total_length = 0; /* not pre-computed */
793 d->ipv4.packet_id = 0;
794 d->ipv4.fragment_offset = 0;
795 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
796 d->ipv4.next_proto_id = IP_PROTO_UDP;
797 d->ipv4.hdr_checksum = 0;
798 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
799 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
801 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
804 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
805 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
806 d->udp.dgram_len = 0; /* not pre-computed */
807 d->udp.dgram_cksum = 0;
810 d->vxlan.vx_flags = rte_htonl(0x08000000);
811 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv6 + VLAN. */
816 if (cfg->vxlan.vlan) {
817 struct encap_vxlan_ipv6_vlan_data *d = data;
820 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
821 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
822 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
825 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
828 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);
/* vtc_flow: version=6 [31:28], DSCP [27:22], flow label [19:0]. */
831 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
832 (p->vxlan.ipv6.dscp << 22) |
833 p->vxlan.ipv6.flow_label);
834 d->ipv6.payload_len = 0; /* not pre-computed */
835 d->ipv6.proto = IP_PROTO_UDP;
836 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
837 memcpy(d->ipv6.src_addr,
839 sizeof(p->vxlan.ipv6.sa));
840 memcpy(d->ipv6.dst_addr,
842 sizeof(p->vxlan.ipv6.da));
845 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
846 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
847 d->udp.dgram_len = 0; /* not pre-computed */
848 d->udp.dgram_cksum = 0;
851 d->vxlan.vx_flags = rte_htonl(0x08000000);
852 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv6, no VLAN. */
856 struct encap_vxlan_ipv6_data *d = data;
859 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
860 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
861 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);
864 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
865 (p->vxlan.ipv6.dscp << 22) |
866 p->vxlan.ipv6.flow_label);
867 d->ipv6.payload_len = 0; /* not pre-computed */
868 d->ipv6.proto = IP_PROTO_UDP;
869 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
870 memcpy(d->ipv6.src_addr,
872 sizeof(p->vxlan.ipv6.sa));
873 memcpy(d->ipv6.dst_addr,
875 sizeof(p->vxlan.ipv6.da));
878 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
879 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
880 d->udp.dgram_len = 0; /* not pre-computed */
881 d->udp.dgram_cksum = 0;
884 d->vxlan.vx_flags = rte_htonl(0x08000000);
885 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/*
 * Apply dispatcher for the encap action: validate params, then delegate to
 * the per-type template builder. Propagates the builder's return value.
 */
892 encap_apply(void *data,
893 struct rte_table_action_encap_params *p,
894 struct rte_table_action_encap_config *cfg,
895 struct rte_table_action_common_config *common_cfg)
899 /* Check input arguments */
900 status = encap_apply_check(p, cfg);
905 case RTE_TABLE_ACTION_ENCAP_ETHER:
906 return encap_ether_apply(data, p, common_cfg);
908 case RTE_TABLE_ACTION_ENCAP_VLAN:
909 return encap_vlan_apply(data, p, common_cfg);
911 case RTE_TABLE_ACTION_ENCAP_QINQ:
912 return encap_qinq_apply(data, p, common_cfg);
914 case RTE_TABLE_ACTION_ENCAP_MPLS:
915 return encap_mpls_apply(data, p);
917 case RTE_TABLE_ACTION_ENCAP_PPPOE:
918 return encap_pppoe_apply(data, p);
920 case RTE_TABLE_ACTION_ENCAP_VXLAN:
921 return encap_vxlan_apply(data, p, cfg);
/*
 * Incrementally update the template IPv4 header checksum (computed with
 * total_length == 0) for the actual total length, using one's-complement
 * arithmetic with double end-around-carry folding.
 */
928 static __rte_always_inline uint16_t
929 encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
930 uint16_t total_length)
935 cksum1 = ~cksum1 & 0xFFFF;
937 /* Add total length (one's complement logic) */
938 cksum1 += total_length;
939 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
940 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
942 return (uint16_t)(~cksum1);
/*
 * Prepend n bytes: move dst back by n, then copy src there. Returns the new
 * start of the prepended region.
 */
945 static __rte_always_inline void *
946 encap(void *dst, const void *src, size_t n)
948 dst = ((uint8_t *) dst) - n;
949 return rte_memcpy(dst, src, n);
/*
 * Per-packet VXLAN-over-IPv4 encap: prepend the template in front of the
 * inner Ethernet frame, then patch the length-dependent fields (IPv4 total
 * length + checksum, UDP datagram length) and fix up the mbuf geometry.
 */
952 static __rte_always_inline void
953 pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
954 struct encap_vxlan_ipv4_data *vxlan_tbl,
955 struct rte_table_action_encap_config *cfg)
957 uint32_t ether_offset = cfg->vxlan.data_offset;
958 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
959 struct encap_vxlan_ipv4_data *vxlan_pkt;
960 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
962 ether_length = (uint16_t)mbuf->pkt_len;
/* Inner frame + vxlan + udp + ipv4 headers. */
963 ipv4_total_length = ether_length +
964 (sizeof(struct vxlan_hdr) +
965 sizeof(struct udp_hdr) +
966 sizeof(struct ipv4_hdr));
967 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
968 rte_htons(ipv4_total_length));
969 udp_length = ether_length +
970 (sizeof(struct vxlan_hdr) +
971 sizeof(struct udp_hdr));
973 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
974 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
975 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
976 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
/* Shift the mbuf data window back to cover the prepended headers. */
978 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
979 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * Same as pkt_work_encap_vxlan_ipv4 but for the VLAN-tagged IPv4 template
 * (the VLAN header adds no length-dependent fields to patch).
 */
982 static __rte_always_inline void
983 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
984 struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
985 struct rte_table_action_encap_config *cfg)
987 uint32_t ether_offset = cfg->vxlan.data_offset;
988 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
989 struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
990 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
992 ether_length = (uint16_t)mbuf->pkt_len;
993 ipv4_total_length = ether_length +
994 (sizeof(struct vxlan_hdr) +
995 sizeof(struct udp_hdr) +
996 sizeof(struct ipv4_hdr));
997 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
998 rte_htons(ipv4_total_length));
999 udp_length = ether_length +
1000 (sizeof(struct vxlan_hdr) +
1001 sizeof(struct udp_hdr));
1003 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1004 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1005 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1006 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1008 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1009 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * Per-packet VXLAN-over-IPv6 encap: prepend the template and patch the IPv6
 * payload length and UDP datagram length (no IP checksum in IPv6).
 */
1012 static __rte_always_inline void
1013 pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
1014 struct encap_vxlan_ipv6_data *vxlan_tbl,
1015 struct rte_table_action_encap_config *cfg)
1017 uint32_t ether_offset = cfg->vxlan.data_offset;
1018 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1019 struct encap_vxlan_ipv6_data *vxlan_pkt;
1020 uint16_t ether_length, ipv6_payload_length, udp_length;
1022 ether_length = (uint16_t)mbuf->pkt_len;
1023 ipv6_payload_length = ether_length +
1024 (sizeof(struct vxlan_hdr) +
1025 sizeof(struct udp_hdr));
1026 udp_length = ether_length +
1027 (sizeof(struct vxlan_hdr) +
1028 sizeof(struct udp_hdr));
1030 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1031 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1032 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1034 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1035 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * Same as pkt_work_encap_vxlan_ipv6 but for the VLAN-tagged IPv6 template.
 */
1038 static __rte_always_inline void
1039 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
1040 struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
1041 struct rte_table_action_encap_config *cfg)
1043 uint32_t ether_offset = cfg->vxlan.data_offset;
1044 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1045 struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
1046 uint16_t ether_length, ipv6_payload_length, udp_length;
1048 ether_length = (uint16_t)mbuf->pkt_len;
1049 ipv6_payload_length = ether_length +
1050 (sizeof(struct vxlan_hdr) +
1051 sizeof(struct udp_hdr));
1052 udp_length = ether_length +
1053 (sizeof(struct vxlan_hdr) +
1054 sizeof(struct udp_hdr));
1056 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1057 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1058 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1060 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1061 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * Per-packet encap dispatcher: prepend the appropriate header template in
 * front of the IP payload and adjust mbuf data_off/pkt_len/data_len.
 * PPPoE additionally patches the PPPoE length field (+2 for the PPP
 * protocol field); VXLAN defers to the specialized helpers above.
 * NOTE(review): elided listing — the `ip`, `ip_offset` and `data` parameter
 * declarations are among the hidden lines.
 */
1064 static __rte_always_inline void
1065 pkt_work_encap(struct rte_mbuf *mbuf,
1067 struct rte_table_action_encap_config *cfg,
1069 uint16_t total_length,
1072 switch (cfg->encap_mask) {
1073 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
1074 encap(ip, data, sizeof(struct encap_ether_data));
1075 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1076 sizeof(struct encap_ether_data));
1077 mbuf->pkt_len = mbuf->data_len = total_length +
1078 sizeof(struct encap_ether_data);
1081 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
1082 encap(ip, data, sizeof(struct encap_vlan_data));
1083 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1084 sizeof(struct encap_vlan_data));
1085 mbuf->pkt_len = mbuf->data_len = total_length +
1086 sizeof(struct encap_vlan_data);
1089 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
1090 encap(ip, data, sizeof(struct encap_qinq_data));
1091 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1092 sizeof(struct encap_qinq_data));
1093 mbuf->pkt_len = mbuf->data_len = total_length +
1094 sizeof(struct encap_qinq_data);
1097 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
/* MPLS template length varies with the label count (4 bytes each). */
1099 struct encap_mpls_data *mpls = data;
1100 size_t size = sizeof(struct ether_hdr) +
1101 mpls->mpls_count * 4;
1103 encap(ip, data, size);
1104 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
1105 mbuf->pkt_len = mbuf->data_len = total_length + size;
1109 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
1111 struct encap_pppoe_data *pppoe =
1112 encap(ip, data, sizeof(struct encap_pppoe_data));
/* PPPoE payload length = IP payload + 2-byte PPP protocol field. */
1113 pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1114 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1115 sizeof(struct encap_pppoe_data));
1116 mbuf->pkt_len = mbuf->data_len = total_length +
1117 sizeof(struct encap_pppoe_data);
1121 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
1123 if (cfg->vxlan.ip_version)
1124 if (cfg->vxlan.vlan)
1125 pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
1127 pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
1129 if (cfg->vxlan.vlan)
1130 pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
1132 pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
1141 * RTE_TABLE_ACTION_NAT
/* NAT config valid only for TCP (0x06) or UDP (0x11). */
1144 nat_cfg_check(struct rte_table_action_nat_config *nat)
1146 if ((nat->proto != 0x06) &&
1147 (nat->proto != 0x11))
/* Translated address/port per entry; IPv4 and IPv6 variants. */
1153 struct nat_ipv4_data {
1156 } __attribute__((__packed__));
1158 struct nat_ipv6_data {
1161 } __attribute__((__packed__));
/* Entry data size follows the common config's IP version. */
1164 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
1165 struct rte_table_action_common_config *common)
1167 int ip_version = common->ip_version;
1169 return (ip_version) ?
1170 sizeof(struct nat_ipv4_data) :
1171 sizeof(struct nat_ipv6_data);
/* Apply params must agree with the configured IP version (both or neither). */
1175 nat_apply_check(struct rte_table_action_nat_params *p,
1176 struct rte_table_action_common_config *cfg)
1178 if ((p->ip_version && (cfg->ip_version == 0)) ||
1179 ((p->ip_version == 0) && cfg->ip_version))
/*
 * Apply the NAT action: validate, then store the translated address and
 * port in network byte order (IPv4 address via htonl, IPv6 via memcpy).
 */
1186 nat_apply(void *data,
1187 struct rte_table_action_nat_params *p,
1188 struct rte_table_action_common_config *cfg)
1192 /* Check input arguments */
1193 status = nat_apply_check(p, cfg)
1198 if (p->ip_version) {
1199 struct nat_ipv4_data *d = data;
1201 d->addr = rte_htonl(p->addr.ipv4);
1202 d->port = rte_htons(p->port);
1204 struct nat_ipv6_data *d = data;
1206 memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
1207 d->port = rte_htons(p->port);
/*
 * Incremental IPv4 header checksum update for an address rewrite: subtract
 * the old address and add the new one in one's-complement arithmetic, with
 * double end-around-carry folds after each step.
 */
1213 static __rte_always_inline uint16_t
1214 nat_ipv4_checksum_update(uint16_t cksum0,
1221 cksum1 = ~cksum1 & 0xFFFF;
1223 /* Subtract ip0 (one's complement logic) */
1224 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
1225 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1226 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1228 /* Add ip1 (one's complement logic) */
1229 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
1230 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1231 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1233 return (uint16_t)(~cksum1);
/*
 * Incremental TCP/UDP checksum update for an IPv4 address + port rewrite
 * (pseudo-header address and the port are both covered by the checksum).
 */
1236 static __rte_always_inline uint16_t
1237 nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
1246 cksum1 = ~cksum1 & 0xFFFF;
1248 /* Subtract ip0 and port 0 (one's complement logic) */
1249 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
1250 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1251 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1253 /* Add ip1 and port1 (one's complement logic) */
1254 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
1255 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1256 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1258 return (uint16_t)(~cksum1);
/*
 * Incremental TCP/UDP checksum update for an IPv6 address + port rewrite:
 * the 128-bit address is folded in as eight 16-bit words.
 */
1261 static __rte_always_inline uint16_t
1262 nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
1271 cksum1 = ~cksum1 & 0xFFFF;
1273 /* Subtract ip0 and port 0 (one's complement logic) */
1274 cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
1275 ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
1276 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1277 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1279 /* Add ip1 and port1 (one's complement logic) */
1280 cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
1281 ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
1282 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1283 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1285 return (uint16_t)(~cksum1);
/*
 * Per-packet IPv4 NAT: rewrite source (SNAT) or destination (DNAT) address
 * and port for TCP or UDP, with incremental IP and L4 checksum updates.
 * Assumes the L4 header immediately follows a 20-byte IPv4 header (&ip[1]).
 * Per RFC 768, a UDP checksum of 0 means "no checksum" — it is left at 0.
 * NOTE(review): elided listing — checksum-update argument lists and the
 * else/branch punctuation are among the hidden lines.
 */
1288 static __rte_always_inline void
1289 pkt_ipv4_work_nat(struct ipv4_hdr *ip,
1290 struct nat_ipv4_data *data,
1291 struct rte_table_action_nat_config *cfg)
1293 if (cfg->source_nat) {
1294 if (cfg->proto == 0x6) {
1295 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1296 uint16_t ip_cksum, tcp_cksum;
1298 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1302 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1308 ip->src_addr = data->addr;
1309 ip->hdr_checksum = ip_cksum;
1310 tcp->src_port = data->port;
1311 tcp->cksum = tcp_cksum;
1313 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1314 uint16_t ip_cksum, udp_cksum;
1316 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1320 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1326 ip->src_addr = data->addr;
1327 ip->hdr_checksum = ip_cksum;
1328 udp->src_port = data->port;
1329 if (udp->dgram_cksum)
1330 udp->dgram_cksum = udp_cksum;
1333 if (cfg->proto == 0x6) {
1334 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1335 uint16_t ip_cksum, tcp_cksum;
1337 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1341 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1347 ip->dst_addr = data->addr;
1348 ip->hdr_checksum = ip_cksum;
1349 tcp->dst_port = data->port;
1350 tcp->cksum = tcp_cksum;
1352 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1353 uint16_t ip_cksum, udp_cksum;
1355 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1359 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1365 ip->dst_addr = data->addr;
1366 ip->hdr_checksum = ip_cksum;
1367 udp->dst_port = data->port;
1368 if (udp->dgram_cksum)
1369 udp->dgram_cksum = udp_cksum;
/*
 * Per-packet IPv6 NAT: rewrite source (SNAT) or destination (DNAT) address
 * and port for TCP or UDP with incremental L4 checksum updates (no IP
 * checksum in IPv6). Assumes the L4 header directly follows the fixed
 * 40-byte IPv6 header (&ip[1], no extension headers).
 * NOTE(review): unlike the IPv4 path, the UDP checksum is written
 * unconditionally here — confirm against the full file whether the
 * zero-checksum case is handled elsewhere.
 */
1374 static __rte_always_inline void
1375 pkt_ipv6_work_nat(struct ipv6_hdr *ip,
1376 struct nat_ipv6_data *data,
1377 struct rte_table_action_nat_config *cfg)
1379 if (cfg->source_nat) {
1380 if (cfg->proto == 0x6) {
1381 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1384 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1385 (uint16_t *)ip->src_addr,
1386 (uint16_t *)data->addr,
1390 rte_memcpy(ip->src_addr, data->addr, 16);
1391 tcp->src_port = data->port;
1392 tcp->cksum = tcp_cksum;
1394 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1397 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1398 (uint16_t *)ip->src_addr,
1399 (uint16_t *)data->addr,
1403 rte_memcpy(ip->src_addr, data->addr, 16);
1404 udp->src_port = data->port;
1405 udp->dgram_cksum = udp_cksum;
1408 if (cfg->proto == 0x6) {
1409 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1412 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1413 (uint16_t *)ip->dst_addr,
1414 (uint16_t *)data->addr,
1418 rte_memcpy(ip->dst_addr, data->addr, 16);
1419 tcp->dst_port = data->port;
1420 tcp->cksum = tcp_cksum;
1422 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1425 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1426 (uint16_t *)ip->dst_addr,
1427 (uint16_t *)data->addr,
1431 rte_memcpy(ip->dst_addr, data->addr, 16);
1432 udp->dst_port = data->port;
1433 udp->dgram_cksum = udp_cksum;
1439  * RTE_TABLE_ACTION_TTL
/*
 * ttl_cfg_check() - validate the TTL action configuration.
 * NOTE(review): body elided in this excerpt.
 */
1442 ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
/* Tail of struct ttl_data (fields elided in this excerpt). */
1452 } __attribute__((__packed__));
/*
 * Per-entry TTL state is packed into the single n_packets word:
 *   bit 0      - decrement-enable flag (1 = decrement TTL/hop limit),
 *   bits 63..1 - count of packets dropped due to TTL expiry.
 * The macros below encode/decode that layout.
 */
/* Initialize: clear the counter, store the decrement flag in bit 0. */
1454 #define TTL_INIT(data, decrement) \
1455 ((data)->n_packets = (decrement) ? 1 : 0)
/* Amount to subtract from TTL: 0 or 1 (flag bit). */
1457 #define TTL_DEC_GET(data) \
1458 ((uint8_t)((data)->n_packets & 1))
/* Clear the drop counter while preserving the flag bit. */
1460 #define TTL_STATS_RESET(data) \
1461 ((data)->n_packets = ((data)->n_packets & 1))
/* Read the drop counter (shift the flag bit out). */
1463 #define TTL_STATS_READ(data) \
1464 ((data)->n_packets >> 1)
/* Add 'value' to the drop counter, keeping the flag bit intact. */
1466 #define TTL_STATS_ADD(data, value) \
1467 ((data)->n_packets = \
1468 (((((data)->n_packets >> 1) + (value)) << 1) | \
1469 ((data)->n_packets & 1)))
/*
 * ttl_apply() - install TTL action parameters into the table-entry
 * data area. NOTE(review): opening brace/return elided in excerpt.
 */
1472 ttl_apply(void *data,
1473 struct rte_table_action_ttl_params *p)
1475 struct ttl_data *d = data;
1477 TTL_INIT(d, p->decrement);
/*
 * pkt_ipv4_work_ttl() - optionally decrement the IPv4 TTL field.
 * Returns a nonzero drop indication when the resulting TTL is 0;
 * expired packets are also counted via TTL_STATS_ADD().
 * NOTE(review): the incremental hdr_checksum adjustment, the TTL
 * decrement itself and the return statement are elided in this
 * excerpt.
 */
1482 static __rte_always_inline uint64_t
1483 pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
1484 struct ttl_data *data)
1487 uint16_t cksum = ip->hdr_checksum;
1488 uint8_t ttl = ip->time_to_live;
1489 uint8_t ttl_diff = TTL_DEC_GET(data);
1494 ip->hdr_checksum = cksum;
1495 ip->time_to_live = ttl;
1497 drop = (ttl == 0) ? 1 : 0;
1498 TTL_STATS_ADD(data, drop);
/*
 * pkt_ipv6_work_ttl() - IPv6 variant: decrements hop_limits.
 * No checksum work is needed (IPv6 has no header checksum).
 * NOTE(review): decrement and return elided in this excerpt.
 */
1503 static __rte_always_inline uint64_t
1504 pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
1505 struct ttl_data *data)
1508 uint8_t ttl = ip->hop_limits;
1509 uint8_t ttl_diff = TTL_DEC_GET(data);
1513 ip->hop_limits = ttl;
1515 drop = (ttl == 0) ? 1 : 0;
1516 TTL_STATS_ADD(data, drop);
1522  * RTE_TABLE_ACTION_STATS
/* stats_cfg_check() - at least one of packet/byte counting must be
 * enabled for the STATS action to make sense. */
1525 stats_cfg_check(struct rte_table_action_stats_config *stats)
1527 if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
/* Tail of struct stats_data (counter fields elided in excerpt). */
1536 } __attribute__((__packed__));
/* stats_apply() - seed the per-entry counters from user parameters. */
1539 stats_apply(struct stats_data *data,
1540 struct rte_table_action_stats_params *p)
1542 data->n_packets = p->n_packets;
1543 data->n_bytes = p->n_bytes;
/*
 * pkt_work_stats() - per-packet accounting; the byte counter uses the
 * IP total length computed by the caller.
 * NOTE(review): the n_packets increment is elided in this excerpt.
 */
1548 static __rte_always_inline void
1549 pkt_work_stats(struct stats_data *data,
1550 uint16_t total_length)
1553 data->n_bytes += total_length;
1557  * RTE_TABLE_ACTION_TIME
/* Tail of struct time_data (time field elided in excerpt). */
1561 } __attribute__((__packed__));
/* time_apply() - store the initial timestamp for the entry. */
1564 time_apply(struct time_data *data,
1565 struct rte_table_action_time_params *p)
1567 data->time = p->time;
/* pkt_work_time() - refresh the entry timestamp (body elided). */
1571 static __rte_always_inline void
1572 pkt_work_time(struct time_data *data,
/*
 * action_valid() - whitelist of supported action types.
 * NOTE(review): the switch statement and return lines are elided in
 * this excerpt; presumably listed cases return 1, default returns 0.
 */
1582 action_valid(enum rte_table_action_type action)
1585 case RTE_TABLE_ACTION_FWD:
1586 case RTE_TABLE_ACTION_LB:
1587 case RTE_TABLE_ACTION_MTR:
1588 case RTE_TABLE_ACTION_TM:
1589 case RTE_TABLE_ACTION_ENCAP:
1590 case RTE_TABLE_ACTION_NAT:
1591 case RTE_TABLE_ACTION_TTL:
1592 case RTE_TABLE_ACTION_STATS:
1593 case RTE_TABLE_ACTION_TIME:
/* Upper bound on action type ids; sized so a uint64_t bitmask can
 * represent the full set of enabled actions. */
1601 #define RTE_TABLE_ACTION_MAX 64
/*
 * ap_config - aggregated per-profile configuration: one member per
 * configurable action; action_mask has bit 'type' set when that
 * action has been registered.
 * NOTE(review): struct opening line elided in this excerpt.
 */
1604 uint64_t action_mask;
1605 struct rte_table_action_common_config common;
1606 struct rte_table_action_lb_config lb;
1607 struct rte_table_action_mtr_config mtr;
1608 struct rte_table_action_tm_config tm;
1609 struct rte_table_action_encap_config encap;
1610 struct rte_table_action_nat_config nat;
1611 struct rte_table_action_ttl_config ttl;
1612 struct rte_table_action_stats_config stats;
/*
 * action_cfg_size() - size of the per-action configuration struct.
 * Actions without configuration (e.g. FWD, TIME) fall through to the
 * elided default -- NOTE(review): default case (presumably return 0)
 * is not visible in this excerpt.
 */
1616 action_cfg_size(enum rte_table_action_type action)
1619 case RTE_TABLE_ACTION_LB:
1620 return sizeof(struct rte_table_action_lb_config);
1621 case RTE_TABLE_ACTION_MTR:
1622 return sizeof(struct rte_table_action_mtr_config);
1623 case RTE_TABLE_ACTION_TM:
1624 return sizeof(struct rte_table_action_tm_config);
1625 case RTE_TABLE_ACTION_ENCAP:
1626 return sizeof(struct rte_table_action_encap_config);
1627 case RTE_TABLE_ACTION_NAT:
1628 return sizeof(struct rte_table_action_nat_config);
1629 case RTE_TABLE_ACTION_TTL:
1630 return sizeof(struct rte_table_action_ttl_config);
1631 case RTE_TABLE_ACTION_STATS:
1632 return sizeof(struct rte_table_action_stats_config);
/*
 * action_cfg_get() - pointer to the configuration slot for 'type'
 * inside ap_config; mirrors action_cfg_size().
 */
1639 action_cfg_get(struct ap_config *ap_config,
1640 enum rte_table_action_type type)
1643 case RTE_TABLE_ACTION_LB:
1644 return &ap_config->lb;
1646 case RTE_TABLE_ACTION_MTR:
1647 return &ap_config->mtr;
1649 case RTE_TABLE_ACTION_TM:
1650 return &ap_config->tm;
1652 case RTE_TABLE_ACTION_ENCAP:
1653 return &ap_config->encap;
1655 case RTE_TABLE_ACTION_NAT:
1656 return &ap_config->nat;
1658 case RTE_TABLE_ACTION_TTL:
1659 return &ap_config->ttl;
1661 case RTE_TABLE_ACTION_STATS:
1662 return &ap_config->stats;
/*
 * action_cfg_set() - copy the user configuration into the profile and
 * mark the action as registered in action_mask.
 */
1670 action_cfg_set(struct ap_config *ap_config,
1671 enum rte_table_action_type type,
1674 void *dst = action_cfg_get(ap_config, type);
1677 memcpy(dst, action_cfg, action_cfg_size(type));
1679 ap_config->action_mask |= 1LLU << type;
/* ap_data - per-action byte offsets into each table entry's action
 * data area (filled in by action_data_offset_set()). */
1683 size_t offset[RTE_TABLE_ACTION_MAX];
/*
 * action_data_size() - bytes of per-table-entry data an action needs;
 * MTR/ENCAP/NAT footprints depend on their configuration, the rest
 * are fixed-size structs.
 */
1688 action_data_size(enum rte_table_action_type action,
1689 struct ap_config *ap_config)
1692 case RTE_TABLE_ACTION_FWD:
1693 return sizeof(struct fwd_data);
1695 case RTE_TABLE_ACTION_LB:
1696 return sizeof(struct lb_data);
1698 case RTE_TABLE_ACTION_MTR:
1699 return mtr_data_size(&ap_config->mtr);
1701 case RTE_TABLE_ACTION_TM:
1702 return sizeof(struct tm_data);
1704 case RTE_TABLE_ACTION_ENCAP:
1705 return encap_data_size(&ap_config->encap);
1707 case RTE_TABLE_ACTION_NAT:
1708 return nat_data_size(&ap_config->nat,
1709 &ap_config->common);
1711 case RTE_TABLE_ACTION_TTL:
1712 return sizeof(struct ttl_data);
1714 case RTE_TABLE_ACTION_STATS:
1715 return sizeof(struct stats_data);
1717 case RTE_TABLE_ACTION_TIME:
1718 return sizeof(struct time_data);
/*
 * action_data_offset_set() - lay out the entry data area: walk the
 * enabled actions in ascending type order, assigning each a running
 * byte offset; total_size ends up as the overall per-entry footprint.
 * NOTE(review): the initial 'offset' value and some lines are elided
 * in this excerpt.
 */
1727 action_data_offset_set(struct ap_data *ap_data,
1728 struct ap_config *ap_config)
1730 uint64_t action_mask = ap_config->action_mask;
1734 memset(ap_data->offset, 0, sizeof(ap_data->offset));
1737 for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
1738 if (action_mask & (1LLU << action)) {
1739 ap_data->offset[action] = offset;
1740 offset += action_data_size((enum rte_table_action_type)action,
1744 ap_data->total_size = offset;
/* Action profile: registered configuration plus the data layout
 * computed at freeze time. */
1747 struct rte_table_action_profile {
1748 struct ap_config cfg;
1749 struct ap_data data;
/*
 * rte_table_action_profile_create() - allocate and initialize an
 * action profile from the mandatory common configuration.
 * NOTE(review): argument/allocation NULL checks and the return are
 * elided in this excerpt.
 */
1753 struct rte_table_action_profile *
1754 rte_table_action_profile_create(struct rte_table_action_common_config *common)
1756 struct rte_table_action_profile *ap;
1758 /* Check input arguments */
1762 /* Memory allocation */
1763 ap = calloc(1, sizeof(struct rte_table_action_profile));
1767 /* Initialization */
1768 memcpy(&ap->cfg.common, common, sizeof(*common));
/*
 * rte_table_action_profile_action_register() - validate and record
 * one action type + its configuration: rejects unknown types,
 * duplicate registration, and config/size mismatches, then runs the
 * per-action cfg_check before committing via action_cfg_set().
 */
1775 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
1776 enum rte_table_action_type type,
1777 void *action_config)
1781 /* Check input arguments */
1782 if ((profile == NULL) ||
1784 (action_valid(type) == 0) ||
1785 (profile->cfg.action_mask & (1LLU << type)) ||
1786 ((action_cfg_size(type) == 0) && action_config) ||
1787 (action_cfg_size(type) && (action_config == NULL)))
1791 case RTE_TABLE_ACTION_LB:
1792 status = lb_cfg_check(action_config);
1795 case RTE_TABLE_ACTION_MTR:
1796 status = mtr_cfg_check(action_config);
1799 case RTE_TABLE_ACTION_TM:
1800 status = tm_cfg_check(action_config);
1803 case RTE_TABLE_ACTION_ENCAP:
1804 status = encap_cfg_check(action_config);
1807 case RTE_TABLE_ACTION_NAT:
1808 status = nat_cfg_check(action_config);
1811 case RTE_TABLE_ACTION_TTL:
1812 status = ttl_cfg_check(action_config);
1815 case RTE_TABLE_ACTION_STATS:
1816 status = stats_cfg_check(action_config);
1828 action_cfg_set(&profile->cfg, type, action_config);
/*
 * rte_table_action_profile_freeze() - finalize the profile: FWD is
 * force-enabled, the per-entry data layout is computed once, and
 * further registration is blocked. Early-returns if already frozen.
 */
1834 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
1836 if (profile->frozen)
1839 profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
1840 action_data_offset_set(&profile->data, &profile->cfg);
1841 profile->frozen = 1;
/* rte_table_action_profile_free() - release the profile (NULL-safe). */
1847 rte_table_action_profile_free(struct rte_table_action_profile *profile)
1849 if (profile == NULL)
/* Max number of meter profiles stored per action object. */
1859 #define METER_PROFILES_MAX 32
/*
 * Runtime action object: a snapshot of the frozen profile's config
 * and data layout, plus the DSCP translation table and the meter
 * profile registry.
 */
1861 struct rte_table_action {
1862 struct ap_config cfg;
1863 struct ap_data data;
1864 struct dscp_table_data dscp_table;
1865 struct meter_profile_data mp[METER_PROFILES_MAX];
/*
 * rte_table_action_create() - instantiate an action object from a
 * frozen profile (on a NUMA socket -- socket_id parameter elided in
 * this excerpt). The profile's config and layout are copied, so the
 * profile may be freed afterwards.
 */
1868 struct rte_table_action *
1869 rte_table_action_create(struct rte_table_action_profile *profile,
1872 struct rte_table_action *action;
1874 /* Check input arguments */
1875 if ((profile == NULL) ||
1876 (profile->frozen == 0))
1879 /* Memory allocation */
1880 action = rte_zmalloc_socket(NULL,
1881 sizeof(struct rte_table_action),
1882 RTE_CACHE_LINE_SIZE,
1887 /* Initialization */
1888 memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
1889 memcpy(&action->data, &profile->data, sizeof(profile->data));
/*
 * action_data_get() - pointer to the per-action slice of a table
 * entry's data area, using the offsets computed at freeze time.
 */
1894 static __rte_always_inline void *
1895 action_data_get(void *data,
1896 struct rte_table_action *action,
1897 enum rte_table_action_type type)
1899 size_t offset = action->data.offset[type];
1900 uint8_t *data_bytes = data;
1902 return &data_bytes[offset];
/*
 * rte_table_action_apply() - write the parameters of one action into
 * a table entry's data area, dispatching to the matching *_apply()
 * helper. The action must be enabled in the profile's action_mask.
 * NOTE(review): the 'data' parameter line, switch statement and some
 * call arguments are elided in this excerpt.
 */
1906 rte_table_action_apply(struct rte_table_action *action,
1908 enum rte_table_action_type type,
1909 void *action_params)
1913 /* Check input arguments */
1914 if ((action == NULL) ||
1916 (action_valid(type) == 0) ||
1917 ((action->cfg.action_mask & (1LLU << type)) == 0) ||
1918 (action_params == NULL))
1922 action_data = action_data_get(data, action, type);
1925 case RTE_TABLE_ACTION_FWD:
1926 return fwd_apply(action_data,
1929 case RTE_TABLE_ACTION_LB:
1930 return lb_apply(action_data,
1933 case RTE_TABLE_ACTION_MTR:
1934 return mtr_apply(action_data,
1938 RTE_DIM(action->mp));
1940 case RTE_TABLE_ACTION_TM:
1941 return tm_apply(action_data,
1945 case RTE_TABLE_ACTION_ENCAP:
1946 return encap_apply(action_data,
1949 &action->cfg.common);
1951 case RTE_TABLE_ACTION_NAT:
1952 return nat_apply(action_data,
1954 &action->cfg.common);
1956 case RTE_TABLE_ACTION_TTL:
1957 return ttl_apply(action_data,
1960 case RTE_TABLE_ACTION_STATS:
1961 return stats_apply(action_data,
1964 case RTE_TABLE_ACTION_TIME:
1965 return time_apply(action_data,
/*
 * rte_table_action_dscp_table_update() - refresh the (tc, queue,
 * color) mapping for the DSCP values selected by the caller's mask;
 * requires the MTR or TM action to be enabled on this action object.
 * NOTE(review): the dscp_mask parameter line is elided in excerpt.
 */
1974 rte_table_action_dscp_table_update(struct rte_table_action *action,
1976 struct rte_table_action_dscp_table *table)
1980 /* Check input arguments */
1981 if ((action == NULL) ||
1982 ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
1983 (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
1988 for (i = 0; i < RTE_DIM(table->entry); i++) {
1989 struct dscp_table_entry_data *data =
1990 &action->dscp_table.entry[i];
1991 struct rte_table_action_dscp_table_entry *entry =
/* Pre-encode queue/tc/color into the packed sched format
 * (computed before the mask check; only stored if selected). */
1993 uint16_t queue_tc_color =
1994 MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
/* Skip DSCP values not selected by the caller's mask. */
1998 if ((dscp_mask & (1LLU << i)) == 0)
2001 data->color = entry->color;
2002 data->tc = entry->tc_id;
2003 data->queue_tc_color = queue_tc_color;
/*
 * rte_table_action_meter_profile_add() - register a trTCM meter
 * profile under meter_profile_id; rejects duplicate ids and fails
 * when the profile table is full. Only RTE_TABLE_ACTION_METER_TRTCM
 * is supported.
 */
2010 rte_table_action_meter_profile_add(struct rte_table_action *action,
2011 uint32_t meter_profile_id,
2012 struct rte_table_action_meter_profile *profile)
2014 struct meter_profile_data *mp_data;
2017 /* Check input arguments */
2018 if ((action == NULL) ||
2019 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2023 if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
/* Reject a duplicate id, then grab an unused slot. */
2026 mp_data = meter_profile_data_find(action->mp,
2027 RTE_DIM(action->mp),
2032 mp_data = meter_profile_data_find_unused(action->mp,
2033 RTE_DIM(action->mp));
2037 /* Install new profile */
2038 status = rte_meter_trtcm_profile_config(&mp_data->profile,
2043 mp_data->profile_id = meter_profile_id;
/*
 * rte_table_action_meter_profile_delete() - unregister a meter
 * profile by id. NOTE(review): lookup-failure handling and the slot
 * invalidation are elided in this excerpt.
 */
2050 rte_table_action_meter_profile_delete(struct rte_table_action *action,
2051 uint32_t meter_profile_id)
2053 struct meter_profile_data *mp_data;
2055 /* Check input arguments */
2056 if ((action == NULL) ||
2057 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
2060 mp_data = meter_profile_data_find(action->mp,
2061 RTE_DIM(action->mp),
2066 /* Uninstall profile */
/*
 * rte_table_action_meter_read() - read (and, per an elided flag,
 * clear) the per-traffic-class meter counters of one table entry.
 * tc_mask selects the classes and is bounded by the configured n_tc.
 * Only packet counters are maintained (n_bytes_valid is set to 0).
 */
2073 rte_table_action_meter_read(struct rte_table_action *action,
2076 struct rte_table_action_mtr_counters *stats,
2079 struct mtr_trtcm_data *mtr_data;
2082 /* Check input arguments */
2083 if ((action == NULL) ||
2084 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2086 (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
2089 mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
/* Read pass: copy per-color packet counters for selected TCs. */
2093 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2094 struct rte_table_action_mtr_counters_tc *dst =
2096 struct mtr_trtcm_data *src = &mtr_data[i];
2098 if ((tc_mask & (1 << i)) == 0)
2101 dst->n_packets[e_RTE_METER_GREEN] =
2102 mtr_trtcm_data_stats_get(src, e_RTE_METER_GREEN);
2104 dst->n_packets[e_RTE_METER_YELLOW] =
2105 mtr_trtcm_data_stats_get(src, e_RTE_METER_YELLOW);
2107 dst->n_packets[e_RTE_METER_RED] =
2108 mtr_trtcm_data_stats_get(src, e_RTE_METER_RED);
2110 dst->n_packets_valid = 1;
2111 dst->n_bytes_valid = 0;
2114 stats->tc_mask = tc_mask;
/* Clear pass (guard condition elided in this excerpt). */
2119 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2120 struct mtr_trtcm_data *src = &mtr_data[i];
2122 if ((tc_mask & (1 << i)) == 0)
2125 mtr_trtcm_data_stats_reset(src, e_RTE_METER_GREEN);
2126 mtr_trtcm_data_stats_reset(src, e_RTE_METER_YELLOW);
2127 mtr_trtcm_data_stats_reset(src, e_RTE_METER_RED);
/*
 * rte_table_action_ttl_read() - read (and, per an elided clear flag)
 * reset the TTL drop counter of one table entry.
 */
2135 rte_table_action_ttl_read(struct rte_table_action *action,
2137 struct rte_table_action_ttl_counters *stats,
2140 struct ttl_data *ttl_data;
2142 /* Check input arguments */
2143 if ((action == NULL) ||
2144 ((action->cfg.action_mask &
2145 (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
2149 ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
2153 stats->n_packets = TTL_STATS_READ(ttl_data);
2157 TTL_STATS_RESET(ttl_data);
/*
 * rte_table_action_stats_read() - read (and optionally clear) the
 * packet/byte counters of one table entry; both counters are always
 * reported as valid.
 */
2163 rte_table_action_stats_read(struct rte_table_action *action,
2165 struct rte_table_action_stats_counters *stats,
2168 struct stats_data *stats_data;
2170 /* Check input arguments */
2171 if ((action == NULL) ||
2172 ((action->cfg.action_mask &
2173 (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
2177 stats_data = action_data_get(data, action,
2178 RTE_TABLE_ACTION_STATS);
2182 stats->n_packets = stats_data->n_packets;
2183 stats->n_bytes = stats_data->n_bytes;
2184 stats->n_packets_valid = 1;
2185 stats->n_bytes_valid = 1;
2190 stats_data->n_packets = 0;
2191 stats_data->n_bytes = 0;
/*
 * rte_table_action_time_read() - fetch the timestamp stored for one
 * table entry by the TIME action.
 */
2198 rte_table_action_time_read(struct rte_table_action *action,
2200 uint64_t *timestamp)
2202 struct time_data *time_data;
2204 /* Check input arguments */
2205 if ((action == NULL) ||
2206 ((action->cfg.action_mask &
2207 (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
2209 (timestamp == NULL))
2212 time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
2215 *timestamp = time_data->time;
/*
 * pkt_work() - single-packet action handler: extracts DSCP and total
 * length from the IP header at cfg.common.ip_offset, then applies
 * every enabled action (LB, MTR, TM, ENCAP, NAT, TTL, STATS, TIME)
 * in fixed order and returns the accumulated drop mask.
 * NOTE(review): this excerpt is line-sampled; argument lines, braces
 * and several statements are elided.
 */
2220 static __rte_always_inline uint64_t
2221 pkt_work(struct rte_mbuf *mbuf,
2222 struct rte_pipeline_table_entry *table_entry,
2224 struct rte_table_action *action,
2225 struct ap_config *cfg)
2227 uint64_t drop_mask = 0;
2229 uint32_t ip_offset = action->cfg.common.ip_offset;
2230 void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
2233 uint16_t total_length;
/* Nonzero ip_version means IPv4 on this code path. */
2235 if (cfg->common.ip_version) {
2236 struct ipv4_hdr *hdr = ip;
/* DSCP = top 6 bits of the TOS byte. */
2238 dscp = hdr->type_of_service >> 2;
2239 total_length = rte_ntohs(hdr->total_length);
2241 struct ipv6_hdr *hdr = ip;
/*
 * NOTE(review): per RFC 2474, DSCP is bits 27..22 of the
 * host-order vtc_flow word, i.e. (x & 0x0FC00000) >> 22; the
 * mask/shift used here (0x0F600000 >> 18) looks incorrect --
 * verify against upstream fixes before relying on IPv6 DSCP.
 */
2243 dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
2245 rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
2248 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2250 action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
2256 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2258 action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
2260 drop_mask |= pkt_work_mtr(mbuf,
2262 &action->dscp_table,
2269 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2271 action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
2275 &action->dscp_table,
2279 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2281 action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
2283 pkt_work_encap(mbuf,
/* NAT/TTL dispatch on IP version; 'ip' points at the raw header. */
2291 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2293 action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
2295 if (cfg->common.ip_version)
2296 pkt_ipv4_work_nat(ip, data, &cfg->nat);
2298 pkt_ipv6_work_nat(ip, data, &cfg->nat);
2301 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2303 action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
2305 if (cfg->common.ip_version)
2306 drop_mask |= pkt_ipv4_work_ttl(ip, data);
2308 drop_mask |= pkt_ipv6_work_ttl(ip, data);
2311 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2313 action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
2315 pkt_work_stats(data, total_length);
2318 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2320 action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
2322 pkt_work_time(data, time);
/*
 * pkt4_work() - four-packet unrolled variant of pkt_work(): the same
 * per-action pipeline (LB, MTR, TM, ENCAP, NAT, TTL, STATS, TIME)
 * applied to mbufs[0..3], returning a 4-bit combined drop mask.
 * NOTE(review): this excerpt is line-sampled; argument lists, braces
 * and some statements are elided.
 */
2328 static __rte_always_inline uint64_t
2329 pkt4_work(struct rte_mbuf **mbufs,
2330 struct rte_pipeline_table_entry **table_entries,
2332 struct rte_table_action *action,
2333 struct ap_config *cfg)
2335 uint64_t drop_mask0 = 0;
2336 uint64_t drop_mask1 = 0;
2337 uint64_t drop_mask2 = 0;
2338 uint64_t drop_mask3 = 0;
2340 struct rte_mbuf *mbuf0 = mbufs[0];
2341 struct rte_mbuf *mbuf1 = mbufs[1];
2342 struct rte_mbuf *mbuf2 = mbufs[2];
2343 struct rte_mbuf *mbuf3 = mbufs[3];
2345 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
2346 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
2347 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
2348 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
2350 uint32_t ip_offset = action->cfg.common.ip_offset;
2351 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
2352 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
2353 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
2354 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
2356 uint32_t dscp0, dscp1, dscp2, dscp3;
2357 uint16_t total_length0, total_length1, total_length2, total_length3;
2359 if (cfg->common.ip_version) {
2360 struct ipv4_hdr *hdr0 = ip0;
2361 struct ipv4_hdr *hdr1 = ip1;
2362 struct ipv4_hdr *hdr2 = ip2;
2363 struct ipv4_hdr *hdr3 = ip3;
/* DSCP = top 6 bits of the TOS byte. */
2365 dscp0 = hdr0->type_of_service >> 2;
2366 dscp1 = hdr1->type_of_service >> 2;
2367 dscp2 = hdr2->type_of_service >> 2;
2368 dscp3 = hdr3->type_of_service >> 2;
2370 total_length0 = rte_ntohs(hdr0->total_length);
2371 total_length1 = rte_ntohs(hdr1->total_length);
2372 total_length2 = rte_ntohs(hdr2->total_length);
2373 total_length3 = rte_ntohs(hdr3->total_length);
2375 struct ipv6_hdr *hdr0 = ip0;
2376 struct ipv6_hdr *hdr1 = ip1;
2377 struct ipv6_hdr *hdr2 = ip2;
2378 struct ipv6_hdr *hdr3 = ip3;
/*
 * NOTE(review): same concern as in pkt_work() -- DSCP extraction
 * from vtc_flow would normally be (x & 0x0FC00000) >> 22; the
 * 0x0F600000 >> 18 form here looks incorrect -- verify upstream.
 */
2380 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
2381 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
2382 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
2383 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;
2386 rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
2388 rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
2390 rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
2392 rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
2395 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2397 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
2399 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
2401 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
2403 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
2422 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2424 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
2426 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
2428 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
2430 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
2432 drop_mask0 |= pkt_work_mtr(mbuf0,
2434 &action->dscp_table,
2440 drop_mask1 |= pkt_work_mtr(mbuf1,
2442 &action->dscp_table,
2448 drop_mask2 |= pkt_work_mtr(mbuf2,
2450 &action->dscp_table,
2456 drop_mask3 |= pkt_work_mtr(mbuf3,
2458 &action->dscp_table,
2465 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2467 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
2469 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
2471 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
2473 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
2477 &action->dscp_table,
2482 &action->dscp_table,
2487 &action->dscp_table,
2492 &action->dscp_table,
2496 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2498 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
2500 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
2502 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
2504 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
2506 pkt_work_encap(mbuf0,
2513 pkt_work_encap(mbuf1,
2520 pkt_work_encap(mbuf2,
2527 pkt_work_encap(mbuf3,
/* NAT/TTL dispatch on IP version, like pkt_work(). */
2535 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2537 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
2539 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
2541 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
2543 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
2545 if (cfg->common.ip_version) {
2546 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
2547 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
2548 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
2549 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
2551 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
2552 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
2553 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
2554 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
2558 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2560 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
2562 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
2564 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
2566 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
2568 if (cfg->common.ip_version) {
2569 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
2570 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
2571 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
2572 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
2574 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
2575 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
2576 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
2577 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
2581 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2583 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
2585 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
2587 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
2589 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
2591 pkt_work_stats(data0, total_length0);
2592 pkt_work_stats(data1, total_length1);
2593 pkt_work_stats(data2, total_length2);
2594 pkt_work_stats(data3, total_length3);
2597 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2599 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
2601 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
2603 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
2605 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
2607 pkt_work_time(data0, time);
2608 pkt_work_time(data1, time);
2609 pkt_work_time(data2, time);
2610 pkt_work_time(data3, time);
/*
 * ah() - generic pipeline action handler. When the packet mask is
 * dense (contiguous low bits), packets are processed four at a time
 * with pkt4_work() and the remainder with pkt_work(); otherwise the
 * sparse mask is walked bit by bit. Dropped packets are reported to
 * the pipeline in a single rte_pipeline_ah_packet_drop() call.
 * NOTE(review): the pkts_mask parameter, the timestamp read and some
 * call arguments are elided in this excerpt.
 */
2619 static __rte_always_inline int
2620 ah(struct rte_pipeline *p,
2621 struct rte_mbuf **pkts,
2623 struct rte_pipeline_table_entry **entries,
2624 struct rte_table_action *action,
2625 struct ap_config *cfg)
2627 uint64_t pkts_drop_mask = 0;
/* The timestamp is only needed by the MTR/TIME actions. */
2630 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2631 (1LLU << RTE_TABLE_ACTION_TIME)))
/* Dense mask test: (m & (m+1)) == 0 iff m has the form 0..01..1. */
2634 if ((pkts_mask & (pkts_mask + 1)) == 0) {
2635 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
/* Main loop: groups of four packets. */
2638 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
2641 drop_mask = pkt4_work(&pkts[i],
2647 pkts_drop_mask |= drop_mask << i;
/* Leftover 0-3 packets, one at a time. */
2650 for ( ; i < n_pkts; i++) {
2653 drop_mask = pkt_work(pkts[i],
2659 pkts_drop_mask |= drop_mask << i;
/* Sparse mask: visit each set bit individually. */
2662 for ( ; pkts_mask; ) {
2663 uint32_t pos = __builtin_ctzll(pkts_mask);
2664 uint64_t pkt_mask = 1LLU << pos;
2667 drop_mask = pkt_work(pkts[pos],
2673 pkts_mask &= ~pkt_mask;
2674 pkts_drop_mask |= drop_mask << pos;
2677 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
/*
 * ah_default() - table hit handler used when actions beyond FWD are
 * enabled; forwards to ah() with the action's cached configuration.
 */
2683 ah_default(struct rte_pipeline *p,
2684 struct rte_mbuf **pkts,
2686 struct rte_pipeline_table_entry **entries,
2689 struct rte_table_action *action = arg;
/*
 * ah_selector() - choose the hit handler: no custom handler when only
 * FWD is enabled (the pipeline's built-in behavior suffices),
 * ah_default otherwise. NOTE(review): returns elided in excerpt.
 */
2699 static rte_pipeline_table_action_handler_hit
2700 ah_selector(struct rte_table_action *action)
2702 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
/*
 * rte_table_action_table_params_get() - fill in the pipeline table
 * parameters for this action object: select the hit handler via
 * ah_selector(), round the per-entry data footprint up to a power of
 * two, and report the action data size excluding the generic
 * rte_pipeline_table_entry header that the pipeline adds itself.
 */
2709 rte_table_action_table_params_get(struct rte_table_action *action,
2710 struct rte_pipeline_table_params *params)
2712 rte_pipeline_table_action_handler_hit f_action_hit;
2713 uint32_t total_size;
2715 /* Check input arguments */
2716 if ((action == NULL) ||
2720 f_action_hit = ah_selector(action);
2721 total_size = rte_align32pow2(action->data.total_size);
2723 /* Fill in params */
2724 params->f_action_hit = f_action_hit;
2725 params->f_action_miss = NULL;
/* arg_ah only matters when a custom handler is installed. */
2726 params->arg_ah = (f_action_hit) ? action : NULL;
2727 params->action_data_size = total_size -
2728 sizeof(struct rte_pipeline_table_entry);
2734 rte_table_action_free(struct rte_table_action *action)