1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
8 #include <rte_common.h>
9 #include <rte_byteorder.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memcpy.h>
13 #include <rte_ether.h>
19 #include "rte_table_action.h"
21 #define rte_htons rte_cpu_to_be_16
22 #define rte_htonl rte_cpu_to_be_32
24 #define rte_ntohs rte_be_to_cpu_16
25 #define rte_ntohl rte_be_to_cpu_32
28 * RTE_TABLE_ACTION_FWD
30 #define fwd_data rte_pipeline_table_entry
33 fwd_apply(struct fwd_data *data,
34 struct rte_table_action_fwd_params *p)
36 data->action = p->action;
38 if (p->action == RTE_PIPELINE_ACTION_PORT)
39 data->port_id = p->id;
41 if (p->action == RTE_PIPELINE_ACTION_TABLE)
42 data->table_id = p->id;
51 lb_cfg_check(struct rte_table_action_lb_config *cfg)
54 (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
55 (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
56 (!rte_is_power_of_2(cfg->key_size)) ||
57 (cfg->f_hash == NULL))
/* Load balancing action data: table of output values, indexed by the packet
 * key hash. (Fragmentary listing: the struct header line is not visible.)
 */
64 uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
65 } __attribute__((__packed__));
68 lb_apply(struct lb_data *data,
69 struct rte_table_action_lb_params *p)
71 memcpy(data->out, p->out, sizeof(data->out));
/* Per-packet load balancing: hash the packet key found at cfg->key_offset in
 * the mbuf metadata, mask the digest to the LB table size (power of 2), and
 * select one of the configured output values.
 * NOTE(review): fragmentary listing -- several statements between the visible
 * lines (hash seed/size arguments, output store) are missing here.
 */
76 static __rte_always_inline void
77 pkt_work_lb(struct rte_mbuf *mbuf,
79 struct rte_table_action_lb_config *cfg)
81 uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
82 uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
86 digest = cfg->f_hash(pkt_key,
/* Table size is a power of 2, so masking selects a valid slot. */
90 pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
91 out_val = data->out[pos];
97 * RTE_TABLE_ACTION_MTR
100 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
102 if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
103 ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
104 (mtr->n_bytes_enabled != 0))
/* Pack queue (2 bits), traffic class (2 bits) and color (2 bits) into the
 * 16-bit mbuf sched field layout used by the hierarchical scheduler.
 */
109 #define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
110 ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
111 ((((uint64_t)(tc)) & 0x3) << 2) | \
112 ((((uint64_t)(color)) & 0x3) << 4)))
/* Replace only the color bits (4..5) of an existing sched word. */
114 #define MBUF_SCHED_COLOR(sched, color) \
115 (((sched) & (~0x30LLU)) | ((color) << 4))
/* Per-TC meter context: one trTCM run-time state plus one stats word per
 * color. The low byte of each stats word is reserved for encoded metadata
 * (meter profile ID, policer action); the upper 56 bits count packets.
 */
117 struct mtr_trtcm_data {
118 struct rte_meter_trtcm trtcm;
119 uint64_t stats[e_RTE_METER_COLORS];
120 } __attribute__((__packed__));
/* Meter profile table index: bits 3..7 of the GREEN stats word. */
122 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
123 (((data)->stats[e_RTE_METER_GREEN] & 0xF8LLU) >> 3)
126 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
129 data->stats[e_RTE_METER_GREEN] &= ~0xF8LLU;
130 data->stats[e_RTE_METER_GREEN] |= (profile_id % 32) << 3;
/* Policer action decode: bit 2 of the per-color stats word is the drop flag,
 * bits 0..1 hold the output color written back to the packet.
 */
133 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
134 (((data)->stats[(color)] & 4LLU) >> 2)
136 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
137 ((enum rte_meter_color)((data)->stats[(color)] & 3LLU))
140 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
141 enum rte_meter_color color,
142 enum rte_table_action_policer action)
144 if (action == RTE_TABLE_ACTION_POLICER_DROP) {
145 data->stats[color] |= 4LLU;
147 data->stats[color] &= ~7LLU;
148 data->stats[color] |= color & 3LLU;
153 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
154 enum rte_meter_color color)
156 return data->stats[color] >> 8;
160 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
161 enum rte_meter_color color)
163 data->stats[color] &= 0xFFLU;
/* Bump the packet counter stored in the upper 56 bits of the stats word. */
166 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
167 ((data)->stats[(color)] += (1LLU << 8))
170 mtr_data_size(struct rte_table_action_mtr_config *mtr)
172 return mtr->n_tc * sizeof(struct mtr_trtcm_data);
/* Per-DSCP translation entry: input color plus packed queue/TC/color for the
 * scheduler. One 64-entry table covers the 6-bit DSCP space.
 * NOTE(review): fragmentary listing -- some struct fields and the functions'
 * prologues/returns are missing between the visible lines.
 */
175 struct dscp_table_entry_data {
176 enum rte_meter_color color;
178 uint16_t queue_tc_color;
181 struct dscp_table_data {
182 struct dscp_table_entry_data entry[64];
/* Meter profile registry slot: trTCM profile plus its user-visible ID. */
185 struct meter_profile_data {
186 struct rte_meter_trtcm_profile profile;
/* Linear search for a valid slot with the given profile ID. */
191 static struct meter_profile_data *
192 meter_profile_data_find(struct meter_profile_data *mp,
198 for (i = 0; i < mp_size; i++) {
199 struct meter_profile_data *mp_data = &mp[i];
201 if (mp_data->valid && (mp_data->profile_id == profile_id))
/* Linear search for the first free (invalid) slot. */
208 static struct meter_profile_data *
209 meter_profile_data_find_unused(struct meter_profile_data *mp,
214 for (i = 0; i < mp_size; i++) {
215 struct meter_profile_data *mp_data = &mp[i];
/* mtr_apply_check(): validate the per-entry meter parameters -- the TC mask
 * must fit the configured TC count and every selected TC must reference a
 * registered meter profile.
 * NOTE(review): fragmentary listing -- prologues, braces and several
 * statements are missing between the visible lines in this region.
 */
225 mtr_apply_check(struct rte_table_action_mtr_params *p,
226 struct rte_table_action_mtr_config *cfg,
227 struct meter_profile_data *mp,
232 if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
235 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
236 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
237 struct meter_profile_data *mp_data;
239 if ((p->tc_mask & (1LLU << i)) == 0)
242 mp_data = meter_profile_data_find(mp,
244 p_tc->meter_profile_id);
/* mtr_apply(): for each TC selected by tc_mask, reset the per-TC data,
 * configure the trTCM run-time context from the registered profile, record
 * the profile ID, and encode the three per-color policer actions.
 */
253 mtr_apply(struct mtr_trtcm_data *data,
254 struct rte_table_action_mtr_params *p,
255 struct rte_table_action_mtr_config *cfg,
256 struct meter_profile_data *mp,
262 /* Check input arguments */
263 status = mtr_apply_check(p, cfg, mp, mp_size);
268 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
269 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
270 struct mtr_trtcm_data *data_tc = &data[i];
271 struct meter_profile_data *mp_data;
273 if ((p->tc_mask & (1LLU << i)) == 0)
277 mp_data = meter_profile_data_find(mp,
279 p_tc->meter_profile_id);
283 memset(data_tc, 0, sizeof(*data_tc));
286 status = rte_meter_trtcm_config(&data_tc->trtcm,
292 mtr_trtcm_data_meter_profile_id_set(data_tc,
295 /* Policer actions */
296 mtr_trtcm_data_policer_action_set(data_tc,
298 p_tc->policer[e_RTE_METER_GREEN]);
300 mtr_trtcm_data_policer_action_set(data_tc,
302 p_tc->policer[e_RTE_METER_YELLOW]);
304 mtr_trtcm_data_policer_action_set(data_tc,
306 p_tc->policer[e_RTE_METER_RED]);
/* pkt_work_mtr(): per-packet metering -- read the input color from the DSCP
 * table, run the color-aware trTCM check, bump the per-color counter, then
 * apply the policer action: build a drop mask from the drop bit and rewrite
 * the packet color in the mbuf sched word.
 */
312 static __rte_always_inline uint64_t
313 pkt_work_mtr(struct rte_mbuf *mbuf,
314 struct mtr_trtcm_data *data,
315 struct dscp_table_data *dscp_table,
316 struct meter_profile_data *mp,
319 uint16_t total_length)
321 uint64_t drop_mask, sched;
322 uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
323 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
324 enum rte_meter_color color_in, color_meter, color_policer;
328 color_in = dscp_entry->color;
330 mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
334 color_meter = rte_meter_trtcm_color_aware_check(
342 MTR_TRTCM_DATA_STATS_INC(data, color_meter);
345 drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
347 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
348 *sched_ptr = MBUF_SCHED_COLOR(sched, color_policer);
354 * RTE_TABLE_ACTION_TM
357 tm_cfg_check(struct rte_table_action_tm_config *tm)
359 if ((tm->n_subports_per_port == 0) ||
360 (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
361 (tm->n_subports_per_port > UINT16_MAX) ||
362 (tm->n_pipes_per_subport == 0) ||
363 (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
/* TM action data (mirrors the mbuf sched word layout): packed queue/TC/color
 * plus scheduler target fields. (Fragmentary listing: the struct header and
 * remaining fields are not visible here.)
 */
370 uint16_t queue_tc_color;
373 } __attribute__((__packed__));
376 tm_apply_check(struct rte_table_action_tm_params *p,
377 struct rte_table_action_tm_config *cfg)
379 if ((p->subport_id >= cfg->n_subports_per_port) ||
380 (p->pipe_id >= cfg->n_pipes_per_subport))
387 tm_apply(struct tm_data *data,
388 struct rte_table_action_tm_params *p,
389 struct rte_table_action_tm_config *cfg)
393 /* Check input arguments */
394 status = tm_apply_check(p, cfg);
399 data->queue_tc_color = 0;
400 data->subport = (uint16_t) p->subport_id;
401 data->pipe = p->pipe_id;
/* Per-packet TM: build the mbuf sched word from the entry's subport/pipe
 * target and the per-DSCP queue/TC/color, then store it into the mbuf.
 * NOTE(review): fragmentary listing -- the remaining field copies and the
 * final store are missing between the visible lines.
 */
406 static __rte_always_inline void
407 pkt_work_tm(struct rte_mbuf *mbuf,
408 struct tm_data *data,
409 struct dscp_table_data *dscp_table,
412 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
413 struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
414 struct tm_data sched;
417 sched.queue_tc_color = dscp_entry->queue_tc_color;
422 * RTE_TABLE_ACTION_ENCAP
425 encap_valid(enum rte_table_action_encap_type encap)
428 case RTE_TABLE_ACTION_ENCAP_ETHER:
429 case RTE_TABLE_ACTION_ENCAP_VLAN:
430 case RTE_TABLE_ACTION_ENCAP_QINQ:
431 case RTE_TABLE_ACTION_ENCAP_MPLS:
432 case RTE_TABLE_ACTION_ENCAP_PPPOE:
440 encap_cfg_check(struct rte_table_action_encap_config *encap)
442 if ((encap->encap_mask == 0) ||
443 (__builtin_popcountll(encap->encap_mask) != 1))
/* Pre-built encapsulation headers stored in the table entry action data and
 * prepended to the packet at run time. All are packed to match on-wire
 * layout.
 */
449 struct encap_ether_data {
450 struct ether_hdr ether;
451 } __attribute__((__packed__));
/* 802.1Q TCI: PCP (3 bits) | DEI (1 bit) | VID (12 bits). */
453 #define VLAN(pcp, dei, vid) \
454 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
455 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
456 (((uint64_t)(vid)) & 0xFFFLLU)) \
458 struct encap_vlan_data {
459 struct ether_hdr ether;
460 struct vlan_hdr vlan;
461 } __attribute__((__packed__));
463 struct encap_qinq_data {
464 struct ether_hdr ether;
465 struct vlan_hdr svlan;
466 struct vlan_hdr cvlan;
467 } __attribute__((__packed__));
469 #define ETHER_TYPE_MPLS_UNICAST 0x8847
471 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
/* MPLS label stack entry: label (20) | TC (3) | S (1) | TTL (8). */
473 #define MPLS(label, tc, s, ttl) \
474 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
475 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
476 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
477 (((uint64_t)(ttl)) & 0xFFLLU)))
479 struct encap_mpls_data {
480 struct ether_hdr ether;
481 uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
483 } __attribute__((__packed__));
485 #define ETHER_TYPE_PPPOE_SESSION 0x8864
487 #define PPP_PROTOCOL_IP 0x0021
/* Combined PPPoE session + PPP header. */
489 struct pppoe_ppp_hdr {
490 uint16_t ver_type_code;
494 } __attribute__((__packed__));
496 struct encap_pppoe_data {
497 struct ether_hdr ether;
498 struct pppoe_ppp_hdr pppoe_ppp;
499 } __attribute__((__packed__));
502 encap_data_size(struct rte_table_action_encap_config *encap)
504 switch (encap->encap_mask) {
505 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
506 return sizeof(struct encap_ether_data);
508 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
509 return sizeof(struct encap_vlan_data);
511 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
512 return sizeof(struct encap_qinq_data);
514 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
515 return sizeof(struct encap_mpls_data);
517 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
518 return sizeof(struct encap_pppoe_data);
/* Validate the per-entry encap parameters: the requested type must be valid
 * and enabled in the profile; MPLS additionally bounds the label count.
 * NOTE(review): fragmentary listing -- return statements and some cases are
 * missing between the visible lines.
 */
526 encap_apply_check(struct rte_table_action_encap_params *p,
527 struct rte_table_action_encap_config *cfg)
529 if ((encap_valid(p->type) == 0) ||
530 ((cfg->encap_mask & (1LLU << p->type)) == 0))
534 case RTE_TABLE_ACTION_ENCAP_ETHER:
537 case RTE_TABLE_ACTION_ENCAP_VLAN:
540 case RTE_TABLE_ACTION_ENCAP_QINQ:
543 case RTE_TABLE_ACTION_ENCAP_MPLS:
544 if ((p->mpls.mpls_count == 0) ||
545 (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
550 case RTE_TABLE_ACTION_ENCAP_PPPOE:
/* encap_*_apply() helpers: pre-build each header variant into the table
 * entry action data, with all multi-byte fields converted to network byte
 * order. The inner ethertype is chosen from the common config's IP version.
 * NOTE(review): fragmentary listing -- prologues, braces and several
 * argument lines are missing between the visible lines in this region.
 */
559 encap_ether_apply(void *data,
560 struct rte_table_action_encap_params *p,
561 struct rte_table_action_common_config *common_cfg)
563 struct encap_ether_data *d = data;
564 uint16_t ethertype = (common_cfg->ip_version) ?
569 ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
570 ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
571 d->ether.ether_type = rte_htons(ethertype);
/* Ethernet + single 802.1Q tag. */
577 encap_vlan_apply(void *data,
578 struct rte_table_action_encap_params *p,
579 struct rte_table_action_common_config *common_cfg)
581 struct encap_vlan_data *d = data;
582 uint16_t ethertype = (common_cfg->ip_version) ?
587 ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
588 ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
589 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
592 d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
595 d->vlan.eth_proto = rte_htons(ethertype);
/* Ethernet + QinQ (outer S-VLAN, inner C-VLAN). */
601 encap_qinq_apply(void *data,
602 struct rte_table_action_encap_params *p,
603 struct rte_table_action_common_config *common_cfg)
605 struct encap_qinq_data *d = data;
606 uint16_t ethertype = (common_cfg->ip_version) ?
611 ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
612 ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
613 d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
616 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
619 d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
622 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
625 d->cvlan.eth_proto = rte_htons(ethertype);
/* Ethernet + MPLS label stack; last label gets special handling (visible at
 * lines 652-655), presumably to set the bottom-of-stack S bit.
 */
631 encap_mpls_apply(void *data,
632 struct rte_table_action_encap_params *p)
634 struct encap_mpls_data *d = data;
635 uint16_t ethertype = (p->mpls.unicast) ?
636 ETHER_TYPE_MPLS_UNICAST :
637 ETHER_TYPE_MPLS_MULTICAST;
641 ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
642 ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
643 d->ether.ether_type = rte_htons(ethertype);
646 for (i = 0; i < p->mpls.mpls_count - 1; i++)
647 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
650 p->mpls.mpls[i].ttl));
652 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
655 p->mpls.mpls[i].ttl));
657 d->mpls_count = p->mpls.mpls_count;
/* Ethernet + PPPoE session + PPP(IP). The PPPoE length field is left 0 here
 * and filled per packet (it depends on the payload length).
 */
662 encap_pppoe_apply(void *data,
663 struct rte_table_action_encap_params *p)
665 struct encap_pppoe_data *d = data;
668 ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
669 ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
670 d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
673 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
674 d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
675 d->pppoe_ppp.length = 0; /* not pre-computed */
676 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/* encap_apply(): validate then dispatch to the per-type builder. */
682 encap_apply(void *data,
683 struct rte_table_action_encap_params *p,
684 struct rte_table_action_encap_config *cfg,
685 struct rte_table_action_common_config *common_cfg)
689 /* Check input arguments */
690 status = encap_apply_check(p, cfg);
695 case RTE_TABLE_ACTION_ENCAP_ETHER:
696 return encap_ether_apply(data, p, common_cfg);
698 case RTE_TABLE_ACTION_ENCAP_VLAN:
699 return encap_vlan_apply(data, p, common_cfg);
701 case RTE_TABLE_ACTION_ENCAP_QINQ:
702 return encap_qinq_apply(data, p, common_cfg);
704 case RTE_TABLE_ACTION_ENCAP_MPLS:
705 return encap_mpls_apply(data, p);
707 case RTE_TABLE_ACTION_ENCAP_PPPOE:
708 return encap_pppoe_apply(data, p);
715 static __rte_always_inline void *
716 encap(void *dst, const void *src, size_t n)
718 dst = ((uint8_t *) dst) - n;
719 return rte_memcpy(dst, src, n);
/* Per-packet encapsulation: prepend the pre-built header in front of the IP
 * header and adjust mbuf data_off/pkt_len/data_len accordingly. MPLS uses a
 * variable header size (Ethernet + 4 bytes per label); PPPoE additionally
 * patches the PPPoE length field (payload + 2-byte PPP protocol).
 * NOTE(review): fragmentary listing -- break statements, braces and some
 * parameter lines are missing between the visible lines.
 */
722 static __rte_always_inline void
723 pkt_work_encap(struct rte_mbuf *mbuf,
725 struct rte_table_action_encap_config *cfg,
727 uint16_t total_length,
730 switch (cfg->encap_mask) {
731 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
732 encap(ip, data, sizeof(struct encap_ether_data));
733 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
734 sizeof(struct encap_ether_data));
735 mbuf->pkt_len = mbuf->data_len = total_length +
736 sizeof(struct encap_ether_data);
739 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
740 encap(ip, data, sizeof(struct encap_vlan_data));
741 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
742 sizeof(struct encap_vlan_data));
743 mbuf->pkt_len = mbuf->data_len = total_length +
744 sizeof(struct encap_vlan_data);
747 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
748 encap(ip, data, sizeof(struct encap_qinq_data));
749 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
750 sizeof(struct encap_qinq_data));
751 mbuf->pkt_len = mbuf->data_len = total_length +
752 sizeof(struct encap_qinq_data);
755 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
757 struct encap_mpls_data *mpls = data;
758 size_t size = sizeof(struct ether_hdr) +
759 mpls->mpls_count * 4;
761 encap(ip, data, size);
762 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
763 mbuf->pkt_len = mbuf->data_len = total_length + size;
767 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
769 struct encap_pppoe_data *pppoe =
770 encap(ip, data, sizeof(struct encap_pppoe_data));
771 pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
772 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
773 sizeof(struct encap_pppoe_data));
774 mbuf->pkt_len = mbuf->data_len = total_length +
775 sizeof(struct encap_pppoe_data);
785 * RTE_TABLE_ACTION_NAT
788 nat_cfg_check(struct rte_table_action_nat_config *nat)
790 if ((nat->proto != 0x06) &&
791 (nat->proto != 0x11))
/* NAT action data: translated address + L4 port, stored in network byte
 * order (see nat_apply). (Fragmentary listing: field lines not visible.)
 */
797 struct nat_ipv4_data {
800 } __attribute__((__packed__));
802 struct nat_ipv6_data {
805 } __attribute__((__packed__));
808 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
809 struct rte_table_action_common_config *common)
811 int ip_version = common->ip_version;
813 return (ip_version) ?
814 sizeof(struct nat_ipv4_data) :
815 sizeof(struct nat_ipv6_data);
819 nat_apply_check(struct rte_table_action_nat_params *p,
820 struct rte_table_action_common_config *cfg)
822 if ((p->ip_version && (cfg->ip_version == 0)) ||
823 ((p->ip_version == 0) && cfg->ip_version))
/* Store the translated address and port into the table entry action data,
 * converted to network byte order so the per-packet path can write them
 * directly into the headers.
 * NOTE(review): fragmentary listing -- the IPv4/IPv6 branch structure and
 * return statements are missing between the visible lines.
 */
830 nat_apply(void *data,
831 struct rte_table_action_nat_params *p,
832 struct rte_table_action_common_config *cfg)
836 /* Check input arguments */
837 status = nat_apply_check(p, cfg);
843 struct nat_ipv4_data *d = data;
845 d->addr = rte_htonl(p->addr.ipv4);
846 d->port = rte_htons(p->port);
848 struct nat_ipv6_data *d = data;
850 memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
851 d->port = rte_htons(p->port);
/* Incremental checksum updates (RFC 1624 style): subtract the old field
 * values and add the new ones in one's-complement arithmetic, folding the
 * carries twice so the result fits 16 bits.
 * NOTE(review): fragmentary listing -- parameter lists and the initial
 * cksum1 assignment are missing between the visible lines.
 */
857 static __rte_always_inline uint16_t
858 nat_ipv4_checksum_update(uint16_t cksum0,
865 cksum1 = ~cksum1 & 0xFFFF;
867 /* Subtract ip0 (one's complement logic) */
868 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
869 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
870 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
872 /* Add ip1 (one's complement logic) */
873 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
874 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
875 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
877 return (uint16_t)(~cksum1);
/* Same, for the TCP/UDP checksum: address and port change together. */
880 static __rte_always_inline uint16_t
881 nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
890 cksum1 = ~cksum1 & 0xFFFF;
892 /* Subtract ip0 and port 0 (one's complement logic) */
893 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
894 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
895 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
897 /* Add ip1 and port1 (one's complement logic) */
898 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
899 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
900 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
902 return (uint16_t)(~cksum1);
/* IPv6 variant: the address contributes eight 16-bit words. */
905 static __rte_always_inline uint16_t
906 nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
915 cksum1 = ~cksum1 & 0xFFFF;
917 /* Subtract ip0 and port 0 (one's complement logic) */
918 cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
919 ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
920 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
921 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
923 /* Add ip1 and port1 (one's complement logic) */
924 cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
925 ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
926 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
927 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
929 return (uint16_t)(~cksum1);
/* Per-packet NAT. Source NAT rewrites src address/port, destination NAT
 * rewrites dst address/port; in both cases the relevant checksums are
 * updated incrementally before the fields are overwritten. For IPv4/UDP a
 * zero checksum means "no checksum" and is left untouched. IPv6 has no IP
 * header checksum, so only the TCP/UDP checksum is updated there.
 * NOTE(review): fragmentary listing -- else/brace lines and checksum-update
 * argument lines are missing between the visible lines.
 */
932 static __rte_always_inline void
933 pkt_ipv4_work_nat(struct ipv4_hdr *ip,
934 struct nat_ipv4_data *data,
935 struct rte_table_action_nat_config *cfg)
937 if (cfg->source_nat) {
938 if (cfg->proto == 0x6) {
939 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
940 uint16_t ip_cksum, tcp_cksum;
942 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
946 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
952 ip->src_addr = data->addr;
953 ip->hdr_checksum = ip_cksum;
954 tcp->src_port = data->port;
955 tcp->cksum = tcp_cksum;
957 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
958 uint16_t ip_cksum, udp_cksum;
960 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
964 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
970 ip->src_addr = data->addr;
971 ip->hdr_checksum = ip_cksum;
972 udp->src_port = data->port;
/* Zero UDP checksum means "not used" (IPv4) -- do not rewrite it. */
973 if (udp->dgram_cksum)
974 udp->dgram_cksum = udp_cksum;
977 if (cfg->proto == 0x6) {
978 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
979 uint16_t ip_cksum, tcp_cksum;
981 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
985 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
991 ip->dst_addr = data->addr;
992 ip->hdr_checksum = ip_cksum;
993 tcp->dst_port = data->port;
994 tcp->cksum = tcp_cksum;
996 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
997 uint16_t ip_cksum, udp_cksum;
999 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1003 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1009 ip->dst_addr = data->addr;
1010 ip->hdr_checksum = ip_cksum;
1011 udp->dst_port = data->port;
1012 if (udp->dgram_cksum)
1013 udp->dgram_cksum = udp_cksum;
/* IPv6 variant: 128-bit addresses copied with rte_memcpy; no IP header
 * checksum exists, and the UDP checksum is mandatory in IPv6, hence no
 * zero-checksum special case here.
 */
1018 static __rte_always_inline void
1019 pkt_ipv6_work_nat(struct ipv6_hdr *ip,
1020 struct nat_ipv6_data *data,
1021 struct rte_table_action_nat_config *cfg)
1023 if (cfg->source_nat) {
1024 if (cfg->proto == 0x6) {
1025 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1028 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1029 (uint16_t *)ip->src_addr,
1030 (uint16_t *)data->addr,
1034 rte_memcpy(ip->src_addr, data->addr, 16);
1035 tcp->src_port = data->port;
1036 tcp->cksum = tcp_cksum;
1038 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1041 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1042 (uint16_t *)ip->src_addr,
1043 (uint16_t *)data->addr,
1047 rte_memcpy(ip->src_addr, data->addr, 16);
1048 udp->src_port = data->port;
1049 udp->dgram_cksum = udp_cksum;
1052 if (cfg->proto == 0x6) {
1053 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1056 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1057 (uint16_t *)ip->dst_addr,
1058 (uint16_t *)data->addr,
1062 rte_memcpy(ip->dst_addr, data->addr, 16);
1063 tcp->dst_port = data->port;
1064 tcp->cksum = tcp_cksum;
1066 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1069 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1070 (uint16_t *)ip->dst_addr,
1071 (uint16_t *)data->addr,
1075 rte_memcpy(ip->dst_addr, data->addr, 16);
1076 udp->dst_port = data->port;
1077 udp->dgram_cksum = udp_cksum;
1083 * RTE_TABLE_ACTION_TTL
/* TTL action: the per-entry 64-bit n_packets word packs the decrement-enable
 * flag in bit 0 and the dropped-packet counter in the remaining bits.
 * NOTE(review): fragmentary listing -- the ttl_cfg_check body and the
 * ttl_data struct fields are missing between the visible lines.
 */
1086 ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
1096 } __attribute__((__packed__));
/* Set the decrement flag (bit 0); counter starts at 0. */
1098 #define TTL_INIT(data, decrement) \
1099 ((data)->n_packets = (decrement) ? 1 : 0)
1101 #define TTL_DEC_GET(data) \
1102 ((uint8_t)((data)->n_packets & 1))
/* Reset the counter, keep the decrement flag. */
1104 #define TTL_STATS_RESET(data) \
1105 ((data)->n_packets = ((data)->n_packets & 1))
1107 #define TTL_STATS_READ(data) \
1108 ((data)->n_packets >> 1)
/* Add to the counter (bits 1..63) without disturbing the flag (bit 0). */
1110 #define TTL_STATS_ADD(data, value) \
1111 ((data)->n_packets = \
1112 (((((data)->n_packets >> 1) + (value)) << 1) | \
1113 ((data)->n_packets & 1)))
1116 ttl_apply(void *data,
1117 struct rte_table_action_ttl_params *p)
1119 struct ttl_data *d = data;
1121 TTL_INIT(d, p->decrement);
/* Per-packet TTL handling. The decrement amount (0 or 1) comes from the
 * entry's flag bit; a packet whose resulting TTL/hop limit is 0 is marked
 * for drop and counted. IPv4 additionally updates the header checksum;
 * the returned value feeds the caller's drop mask.
 * NOTE(review): fragmentary listing -- the decrement and checksum-adjust
 * statements and returns are missing between the visible lines.
 */
1126 static __rte_always_inline uint64_t
1127 pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
1128 struct ttl_data *data)
1131 uint16_t cksum = ip->hdr_checksum;
1132 uint8_t ttl = ip->time_to_live;
1133 uint8_t ttl_diff = TTL_DEC_GET(data);
1138 ip->hdr_checksum = cksum;
1139 ip->time_to_live = ttl;
1141 drop = (ttl == 0) ? 1 : 0;
1142 TTL_STATS_ADD(data, drop);
/* IPv6: no header checksum, only the hop limit is touched. */
1147 static __rte_always_inline uint64_t
1148 pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
1149 struct ttl_data *data)
1152 uint8_t ttl = ip->hop_limits;
1153 uint8_t ttl_diff = TTL_DEC_GET(data);
1157 ip->hop_limits = ttl;
1159 drop = (ttl == 0) ? 1 : 0;
1160 TTL_STATS_ADD(data, drop);
/* Stats action: at least one of the packet/byte counters must be enabled.
 * (Fragmentary listing: the function prologue/return and the stats_data
 * struct fields are missing between the visible lines.)
 */
1166 * RTE_TABLE_ACTION_STATS
1169 stats_cfg_check(struct rte_table_action_stats_config *stats)
1171 if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
1180 } __attribute__((__packed__));
1183 stats_apply(struct stats_data *data,
1184 struct rte_table_action_stats_params *p)
1186 data->n_packets = p->n_packets;
1187 data->n_bytes = p->n_bytes;
/* Per-packet stats: accumulate the packet's total length into the byte
 * counter (the packet-count increment is on a line not visible in this
 * fragmentary listing).
 */
1192 static __rte_always_inline void
1193 pkt_work_stats(struct stats_data *data,
1194 uint16_t total_length)
1197 data->n_bytes += total_length;
/* Time action data (struct header lines not visible here). */
1201 * RTE_TABLE_ACTION_TIME
1205 } __attribute__((__packed__));
1208 time_apply(struct time_data *data,
1209 struct rte_table_action_time_params *p)
1211 data->time = p->time;
/* Per-packet time stamping helper (body not visible in this fragmentary
 * listing; presumably refreshes data->time -- TODO confirm against full
 * source).
 */
1215 static __rte_always_inline void
1216 pkt_work_time(struct time_data *data,
1226 action_valid(enum rte_table_action_type action)
1229 case RTE_TABLE_ACTION_FWD:
1230 case RTE_TABLE_ACTION_LB:
1231 case RTE_TABLE_ACTION_MTR:
1232 case RTE_TABLE_ACTION_TM:
1233 case RTE_TABLE_ACTION_ENCAP:
1234 case RTE_TABLE_ACTION_NAT:
1235 case RTE_TABLE_ACTION_TTL:
1236 case RTE_TABLE_ACTION_STATS:
1237 case RTE_TABLE_ACTION_TIME:
/* Upper bound on action type IDs (sized to the 64-bit action mask). */
1245 #define RTE_TABLE_ACTION_MAX 64
/* Aggregated profile configuration: bit i of action_mask enables action i;
 * one embedded config per action type that has one (FWD and TIME have none).
 * (Fragmentary listing: the struct header line is not visible.)
 */
1248 uint64_t action_mask;
1249 struct rte_table_action_common_config common;
1250 struct rte_table_action_lb_config lb;
1251 struct rte_table_action_mtr_config mtr;
1252 struct rte_table_action_tm_config tm;
1253 struct rte_table_action_encap_config encap;
1254 struct rte_table_action_nat_config nat;
1255 struct rte_table_action_ttl_config ttl;
1256 struct rte_table_action_stats_config stats;
1260 action_cfg_size(enum rte_table_action_type action)
1263 case RTE_TABLE_ACTION_LB:
1264 return sizeof(struct rte_table_action_lb_config);
1265 case RTE_TABLE_ACTION_MTR:
1266 return sizeof(struct rte_table_action_mtr_config);
1267 case RTE_TABLE_ACTION_TM:
1268 return sizeof(struct rte_table_action_tm_config);
1269 case RTE_TABLE_ACTION_ENCAP:
1270 return sizeof(struct rte_table_action_encap_config);
1271 case RTE_TABLE_ACTION_NAT:
1272 return sizeof(struct rte_table_action_nat_config);
1273 case RTE_TABLE_ACTION_TTL:
1274 return sizeof(struct rte_table_action_ttl_config);
1275 case RTE_TABLE_ACTION_STATS:
1276 return sizeof(struct rte_table_action_stats_config);
1283 action_cfg_get(struct ap_config *ap_config,
1284 enum rte_table_action_type type)
1287 case RTE_TABLE_ACTION_LB:
1288 return &ap_config->lb;
1290 case RTE_TABLE_ACTION_MTR:
1291 return &ap_config->mtr;
1293 case RTE_TABLE_ACTION_TM:
1294 return &ap_config->tm;
1296 case RTE_TABLE_ACTION_ENCAP:
1297 return &ap_config->encap;
1299 case RTE_TABLE_ACTION_NAT:
1300 return &ap_config->nat;
1302 case RTE_TABLE_ACTION_TTL:
1303 return &ap_config->ttl;
1305 case RTE_TABLE_ACTION_STATS:
1306 return &ap_config->stats;
1314 action_cfg_set(struct ap_config *ap_config,
1315 enum rte_table_action_type type,
1318 void *dst = action_cfg_get(ap_config, type);
1321 memcpy(dst, action_cfg, action_cfg_size(type));
1323 ap_config->action_mask |= 1LLU << type;
/* Per-action byte offset of each action's data within a table entry's
 * action area (see action_data_offset_set). (Fragmentary listing: the
 * struct ap_data header and total_size field are not visible.)
 */
1327 size_t offset[RTE_TABLE_ACTION_MAX];
1332 action_data_size(enum rte_table_action_type action,
1333 struct ap_config *ap_config)
1336 case RTE_TABLE_ACTION_FWD:
1337 return sizeof(struct fwd_data);
1339 case RTE_TABLE_ACTION_LB:
1340 return sizeof(struct lb_data);
1342 case RTE_TABLE_ACTION_MTR:
1343 return mtr_data_size(&ap_config->mtr);
1345 case RTE_TABLE_ACTION_TM:
1346 return sizeof(struct tm_data);
1348 case RTE_TABLE_ACTION_ENCAP:
1349 return encap_data_size(&ap_config->encap);
1351 case RTE_TABLE_ACTION_NAT:
1352 return nat_data_size(&ap_config->nat,
1353 &ap_config->common);
1355 case RTE_TABLE_ACTION_TTL:
1356 return sizeof(struct ttl_data);
1358 case RTE_TABLE_ACTION_STATS:
1359 return sizeof(struct stats_data);
1361 case RTE_TABLE_ACTION_TIME:
1362 return sizeof(struct time_data);
1371 action_data_offset_set(struct ap_data *ap_data,
1372 struct ap_config *ap_config)
1374 uint64_t action_mask = ap_config->action_mask;
1378 memset(ap_data->offset, 0, sizeof(ap_data->offset));
1381 for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
1382 if (action_mask & (1LLU << action)) {
1383 ap_data->offset[action] = offset;
1384 offset += action_data_size((enum rte_table_action_type)action,
1388 ap_data->total_size = offset;
/* Action profile: accumulated configuration plus the frozen data layout.
 * NOTE(review): fragmentary listing -- NULL checks, returns and several
 * switch branches are missing between the visible lines in this region.
 */
1391 struct rte_table_action_profile {
1392 struct ap_config cfg;
1393 struct ap_data data;
/* Allocate a profile and seed it with the common configuration. */
1397 struct rte_table_action_profile *
1398 rte_table_action_profile_create(struct rte_table_action_common_config *common)
1400 struct rte_table_action_profile *ap;
1402 /* Check input arguments */
1406 /* Memory allocation */
1407 ap = calloc(1, sizeof(struct rte_table_action_profile));
1411 /* Initialization */
1412 memcpy(&ap->cfg.common, common, sizeof(*common));
/* Register an action with the (unfrozen) profile: the type must be valid,
 * not already registered, and action_config must be present exactly when
 * the action type takes a configuration. The per-type *_cfg_check is then
 * run before the configuration is stored via action_cfg_set.
 */
1419 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
1420 enum rte_table_action_type type,
1421 void *action_config)
1425 /* Check input arguments */
1426 if ((profile == NULL) ||
1428 (action_valid(type) == 0) ||
1429 (profile->cfg.action_mask & (1LLU << type)) ||
1430 ((action_cfg_size(type) == 0) && action_config) ||
1431 (action_cfg_size(type) && (action_config == NULL)))
1435 case RTE_TABLE_ACTION_LB:
1436 status = lb_cfg_check(action_config);
1439 case RTE_TABLE_ACTION_MTR:
1440 status = mtr_cfg_check(action_config);
1443 case RTE_TABLE_ACTION_TM:
1444 status = tm_cfg_check(action_config);
1447 case RTE_TABLE_ACTION_ENCAP:
1448 status = encap_cfg_check(action_config);
1451 case RTE_TABLE_ACTION_NAT:
1452 status = nat_cfg_check(action_config);
1455 case RTE_TABLE_ACTION_TTL:
1456 status = ttl_cfg_check(action_config);
1459 case RTE_TABLE_ACTION_STATS:
1460 status = stats_cfg_check(action_config);
1472 action_cfg_set(&profile->cfg, type, action_config);
1478 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
1480 if (profile->frozen)
1483 profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
1484 action_data_offset_set(&profile->data, &profile->cfg);
1485 profile->frozen = 1;
/* Release a profile; a NULL profile is tolerated. (Fragmentary listing:
 * the free() call and return are not visible here.)
 */
1491 rte_table_action_profile_free(struct rte_table_action_profile *profile)
1493 if (profile == NULL)
/* Run-time action object: a frozen snapshot of the profile plus the DSCP
 * translation table and the meter profile registry.
 * NOTE(review): fragmentary listing -- NULL checks, the socket_id argument
 * and the final return are missing between the visible lines.
 */
1503 #define METER_PROFILES_MAX 32
1505 struct rte_table_action {
1506 struct ap_config cfg;
1507 struct ap_data data;
1508 struct dscp_table_data dscp_table;
1509 struct meter_profile_data mp[METER_PROFILES_MAX];
/* Instantiate an action object from a frozen profile: zero-allocated on the
 * requested NUMA socket, then seeded with the profile's config and layout.
 */
1512 struct rte_table_action *
1513 rte_table_action_create(struct rte_table_action_profile *profile,
1516 struct rte_table_action *action;
1518 /* Check input arguments */
1519 if ((profile == NULL) ||
1520 (profile->frozen == 0))
1523 /* Memory allocation */
1524 action = rte_zmalloc_socket(NULL,
1525 sizeof(struct rte_table_action),
1526 RTE_CACHE_LINE_SIZE,
1531 /* Initialization */
1532 memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
1533 memcpy(&action->data, &profile->data, sizeof(profile->data));
1538 static __rte_always_inline void *
1539 action_data_get(void *data,
1540 struct rte_table_action *action,
1541 enum rte_table_action_type type)
1543 size_t offset = action->data.offset[type];
1544 uint8_t *data_bytes = data;
1546 return &data_bytes[offset];
/* Public entry point for configuring one action of a table entry: validate
 * that the action type is enabled in this action object, locate the entry's
 * per-action data slice, then dispatch to the matching *_apply helper.
 * NOTE(review): fragmentary listing -- parameter lines, argument lists and
 * returns are missing between the visible lines in this region.
 */
1550 rte_table_action_apply(struct rte_table_action *action,
1552 enum rte_table_action_type type,
1553 void *action_params)
1557 /* Check input arguments */
1558 if ((action == NULL) ||
1560 (action_valid(type) == 0) ||
1561 ((action->cfg.action_mask & (1LLU << type)) == 0) ||
1562 (action_params == NULL))
1566 action_data = action_data_get(data, action, type);
1569 case RTE_TABLE_ACTION_FWD:
1570 return fwd_apply(action_data,
1573 case RTE_TABLE_ACTION_LB:
1574 return lb_apply(action_data,
1577 case RTE_TABLE_ACTION_MTR:
1578 return mtr_apply(action_data,
1582 RTE_DIM(action->mp));
1584 case RTE_TABLE_ACTION_TM:
1585 return tm_apply(action_data,
1589 case RTE_TABLE_ACTION_ENCAP:
1590 return encap_apply(action_data,
1593 &action->cfg.common);
1595 case RTE_TABLE_ACTION_NAT:
1596 return nat_apply(action_data,
1598 &action->cfg.common);
1600 case RTE_TABLE_ACTION_TTL:
1601 return ttl_apply(action_data,
1604 case RTE_TABLE_ACTION_STATS:
1605 return stats_apply(action_data,
1608 case RTE_TABLE_ACTION_TIME:
1609 return time_apply(action_data,
/* Update the per-action DSCP translation table: for each DSCP selected by
 * dscp_mask, store the input color, TC and the pre-packed queue/TC/color
 * word consumed by the TM/MTR per-packet paths. Requires the MTR or TM
 * action to be enabled.
 */
1618 rte_table_action_dscp_table_update(struct rte_table_action *action,
1620 struct rte_table_action_dscp_table *table)
1624 /* Check input arguments */
1625 if ((action == NULL) ||
1626 ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
1627 (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
1632 for (i = 0; i < RTE_DIM(table->entry); i++) {
1633 struct dscp_table_entry_data *data =
1634 &action->dscp_table.entry[i];
1635 struct rte_table_action_dscp_table_entry *entry =
1637 uint16_t queue_tc_color =
1638 MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
1642 if ((dscp_mask & (1LLU << i)) == 0)
1645 data->color = entry->color;
1646 data->tc = entry->tc_id;
1647 data->queue_tc_color = queue_tc_color;
/*
 * Add a trTCM meter profile to the action profile's table under
 * meter_profile_id. Looks for an existing entry with the same id first
 * (presumably to reject/short-circuit duplicates — the lines between the
 * two find calls are elided), then claims an unused slot and configures
 * the rte_meter trTCM profile in place.
 * NOTE(review): error returns and the mp_data->valid update are elided
 * in this extract.
 */
1654 rte_table_action_meter_profile_add(struct rte_table_action *action,
1655 uint32_t meter_profile_id,
1656 struct rte_table_action_meter_profile *profile)
1658 struct meter_profile_data *mp_data;
1661 /* Check input arguments */
1662 if ((action == NULL) ||
1663 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
/* Only the trTCM algorithm is supported (matches mtr_cfg_check). */
1667 if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
1670 mp_data = meter_profile_data_find(action->mp,
1671 RTE_DIM(action->mp),
1676 mp_data = meter_profile_data_find_unused(action->mp,
1677 RTE_DIM(action->mp));
1681 /* Install new profile */
1682 status = rte_meter_trtcm_profile_config(&mp_data->profile,
1687 mp_data->profile_id = meter_profile_id;
/*
 * Delete the meter profile registered under meter_profile_id.
 * NOTE(review): the not-found path and the actual uninstall statement
 * (presumably clearing mp_data->valid) are elided in this extract.
 */
1694 rte_table_action_meter_profile_delete(struct rte_table_action *action,
1695 uint32_t meter_profile_id)
1697 struct meter_profile_data *mp_data;
1699 /* Check input arguments */
1700 if ((action == NULL) ||
1701 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
1704 mp_data = meter_profile_data_find(action->mp,
1705 RTE_DIM(action->mp),
1710 /* Uninstall profile */
/*
 * Read (and optionally clear — the clear flag/guard lines are elided)
 * the per-traffic-class meter counters stored in a table entry.
 * Packet counters for GREEN/YELLOW/RED are reported; byte counters are
 * not maintained (n_bytes_valid is set to 0), consistent with
 * mtr_cfg_check() requiring n_bytes_enabled == 0.
 */
1717 rte_table_action_meter_read(struct rte_table_action *action,
1720 struct rte_table_action_mtr_counters *stats,
1723 struct mtr_trtcm_data *mtr_data;
1726 /* Check input arguments */
1727 if ((action == NULL) ||
1728 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
/* tc_mask may only select TCs configured in the profile. */
1730 (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
1733 mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
/* Copy out the per-TC color counters for each selected TC. */
1737 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
1738 struct rte_table_action_mtr_counters_tc *dst =
1740 struct mtr_trtcm_data *src = &mtr_data[i];
1742 if ((tc_mask & (1 << i)) == 0)
1745 dst->n_packets[e_RTE_METER_GREEN] =
1746 mtr_trtcm_data_stats_get(src, e_RTE_METER_GREEN);
1748 dst->n_packets[e_RTE_METER_YELLOW] =
1749 mtr_trtcm_data_stats_get(src, e_RTE_METER_YELLOW);
1751 dst->n_packets[e_RTE_METER_RED] =
1752 mtr_trtcm_data_stats_get(src, e_RTE_METER_RED);
1754 dst->n_packets_valid = 1;
1755 dst->n_bytes_valid = 0;
1758 stats->tc_mask = tc_mask;
/* Second pass: reset the counters that were just read (the guard
 * controlling whether this runs is elided in this extract). */
1763 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
1764 struct mtr_trtcm_data *src = &mtr_data[i];
1766 if ((tc_mask & (1 << i)) == 0)
1769 mtr_trtcm_data_stats_reset(src, e_RTE_METER_GREEN);
1770 mtr_trtcm_data_stats_reset(src, e_RTE_METER_YELLOW);
1771 mtr_trtcm_data_stats_reset(src, e_RTE_METER_RED);
/*
 * Read (and optionally reset — the clear guard is elided) the TTL action
 * drop counter stored in a table entry.
 */
1779 rte_table_action_ttl_read(struct rte_table_action *action,
1781 struct rte_table_action_ttl_counters *stats,
1784 struct ttl_data *ttl_data;
1786 /* Check input arguments */
1787 if ((action == NULL) ||
1788 ((action->cfg.action_mask &
1789 (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
1793 ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
/* Copy out, then clear, the packet counter. */
1797 stats->n_packets = TTL_STATS_READ(ttl_data);
1801 TTL_STATS_RESET(ttl_data);
/*
 * Read (and optionally reset — the clear guard is elided) the STATS
 * action counters of a table entry. Both packet and byte counters are
 * maintained, so both *_valid flags are set.
 */
1807 rte_table_action_stats_read(struct rte_table_action *action,
1809 struct rte_table_action_stats_counters *stats,
1812 struct stats_data *stats_data;
1814 /* Check input arguments */
1815 if ((action == NULL) ||
1816 ((action->cfg.action_mask &
1817 (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
1821 stats_data = action_data_get(data, action,
1822 RTE_TABLE_ACTION_STATS);
/* Copy out the counters. */
1826 stats->n_packets = stats_data->n_packets;
1827 stats->n_bytes = stats_data->n_bytes;
1828 stats->n_packets_valid = 1;
1829 stats->n_bytes_valid = 1;
/* Reset (guard elided in this extract). */
1834 stats_data->n_packets = 0;
1835 stats_data->n_bytes = 0;
/*
 * Read the last-hit timestamp recorded by the TIME action for a table
 * entry into *timestamp.
 */
1842 rte_table_action_time_read(struct rte_table_action *action,
1844 uint64_t *timestamp)
1846 struct time_data *time_data;
1848 /* Check input arguments */
1849 if ((action == NULL) ||
1850 ((action->cfg.action_mask &
1851 (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
1853 (timestamp == NULL))
1856 time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
1859 *timestamp = time_data->time;
/*
 * Fast-path worker: run every enabled action on a single packet.
 * Parses the IP header at cfg->common.ip_offset to extract DSCP and
 * total length, then applies LB, MTR, TM, ENCAP, NAT, TTL, STATS and
 * TIME in that order, OR-ing per-action drop decisions into the returned
 * drop mask.
 * NOTE(review): many helper-call argument lines are elided in this
 * extract; comments describe only the visible code.
 */
1864 static __rte_always_inline uint64_t
1865 pkt_work(struct rte_mbuf *mbuf,
1866 struct rte_pipeline_table_entry *table_entry,
1868 struct rte_table_action *action,
1869 struct ap_config *cfg)
1871 uint64_t drop_mask = 0;
1873 uint32_t ip_offset = action->cfg.common.ip_offset;
1874 void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
1877 uint16_t total_length;
/* Non-zero ip_version selects IPv4, zero selects IPv6 (else branch). */
1879 if (cfg->common.ip_version) {
1880 struct ipv4_hdr *hdr = ip;
/* DSCP = top 6 bits of the IPv4 TOS byte. */
1882 dscp = hdr->type_of_service >> 2;
1883 total_length = rte_ntohs(hdr->total_length);
1885 struct ipv6_hdr *hdr = ip;
/* NOTE(review): DSCP occupies bits 22-27 of vtc_flow, so the mask
 * is expected to be 0x0FC00000 with shift 22; 0x0F600000 >> 18 does
 * not isolate those bits — looks like a bug, verify upstream. */
1887 dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
/* IPv6 payload_len excludes the fixed header, so add it back. */
1889 rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
1892 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
1894 action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
1900 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
1902 action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
1904 drop_mask |= pkt_work_mtr(mbuf,
1906 &action->dscp_table,
1913 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
1915 action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
1919 &action->dscp_table,
1923 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
1925 action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
1927 pkt_work_encap(mbuf,
/* NAT and TTL pick the IPv4 or IPv6 helper by the same ip_version
 * flag used for header parsing above. */
1935 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
1937 action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
1939 if (cfg->common.ip_version)
1940 pkt_ipv4_work_nat(ip, data, &cfg->nat);
1942 pkt_ipv6_work_nat(ip, data, &cfg->nat);
1945 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
1947 action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
1949 if (cfg->common.ip_version)
1950 drop_mask |= pkt_ipv4_work_ttl(ip, data);
1952 drop_mask |= pkt_ipv6_work_ttl(ip, data);
1955 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
1957 action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
1959 pkt_work_stats(data, total_length);
1962 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
1964 action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
1966 pkt_work_time(data, time);
/*
 * Four-packet unrolled variant of pkt_work(): identical per-packet
 * logic, manually unrolled over mbufs[0..3] / table_entries[0..3] to
 * expose instruction-level parallelism on the fast path. Returns the
 * combined drop decision (the final combination of drop_mask0..3 is
 * elided in this extract).
 * NOTE(review): many helper-call argument lines are elided; comments
 * cover only the visible code. Keep this function in sync with
 * pkt_work() when either changes.
 */
1972 static __rte_always_inline uint64_t
1973 pkt4_work(struct rte_mbuf **mbufs,
1974 struct rte_pipeline_table_entry **table_entries,
1976 struct rte_table_action *action,
1977 struct ap_config *cfg)
1979 uint64_t drop_mask0 = 0;
1980 uint64_t drop_mask1 = 0;
1981 uint64_t drop_mask2 = 0;
1982 uint64_t drop_mask3 = 0;
1984 struct rte_mbuf *mbuf0 = mbufs[0];
1985 struct rte_mbuf *mbuf1 = mbufs[1];
1986 struct rte_mbuf *mbuf2 = mbufs[2];
1987 struct rte_mbuf *mbuf3 = mbufs[3];
1989 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
1990 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
1991 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
1992 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
1994 uint32_t ip_offset = action->cfg.common.ip_offset;
1995 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
1996 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
1997 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
1998 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
2000 uint32_t dscp0, dscp1, dscp2, dscp3;
2001 uint16_t total_length0, total_length1, total_length2, total_length3;
/* Parse all four headers up front: IPv4 branch or IPv6 branch. */
2003 if (cfg->common.ip_version) {
2004 struct ipv4_hdr *hdr0 = ip0;
2005 struct ipv4_hdr *hdr1 = ip1;
2006 struct ipv4_hdr *hdr2 = ip2;
2007 struct ipv4_hdr *hdr3 = ip3;
2009 dscp0 = hdr0->type_of_service >> 2;
2010 dscp1 = hdr1->type_of_service >> 2;
2011 dscp2 = hdr2->type_of_service >> 2;
2012 dscp3 = hdr3->type_of_service >> 2;
2014 total_length0 = rte_ntohs(hdr0->total_length);
2015 total_length1 = rte_ntohs(hdr1->total_length);
2016 total_length2 = rte_ntohs(hdr2->total_length);
2017 total_length3 = rte_ntohs(hdr3->total_length);
2019 struct ipv6_hdr *hdr0 = ip0;
2020 struct ipv6_hdr *hdr1 = ip1;
2021 struct ipv6_hdr *hdr2 = ip2;
2022 struct ipv6_hdr *hdr3 = ip3;
/* NOTE(review): same suspicious DSCP extraction as pkt_work() —
 * expected mask/shift for vtc_flow DSCP is 0x0FC00000 >> 22, not
 * 0x0F600000 >> 18; verify upstream. */
2024 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
2025 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
2026 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
2027 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;
/* IPv6 payload_len excludes the fixed header, so add it back. */
2030 rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
2032 rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
2034 rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
2036 rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
2039 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2041 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
2043 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
2045 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
2047 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
2066 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2068 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
2070 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
2072 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
2074 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
2076 drop_mask0 |= pkt_work_mtr(mbuf0,
2078 &action->dscp_table,
2084 drop_mask1 |= pkt_work_mtr(mbuf1,
2086 &action->dscp_table,
2092 drop_mask2 |= pkt_work_mtr(mbuf2,
2094 &action->dscp_table,
2100 drop_mask3 |= pkt_work_mtr(mbuf3,
2102 &action->dscp_table,
2109 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2111 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
2113 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
2115 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
2117 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
2121 &action->dscp_table,
2126 &action->dscp_table,
2131 &action->dscp_table,
2136 &action->dscp_table,
2140 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2142 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
2144 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
2146 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
2148 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
2150 pkt_work_encap(mbuf0,
2157 pkt_work_encap(mbuf1,
2164 pkt_work_encap(mbuf2,
2171 pkt_work_encap(mbuf3,
2179 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2181 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
2183 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
2185 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
2187 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
2189 if (cfg->common.ip_version) {
2190 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
2191 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
2192 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
2193 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
2195 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
2196 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
2197 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
2198 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
2202 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2204 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
2206 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
2208 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
2210 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
2212 if (cfg->common.ip_version) {
2213 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
2214 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
2215 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
2216 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
2218 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
2219 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
2220 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
2221 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
2225 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2227 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
2229 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
2231 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
2233 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
2235 pkt_work_stats(data0, total_length0);
2236 pkt_work_stats(data1, total_length1);
2237 pkt_work_stats(data2, total_length2);
2238 pkt_work_stats(data3, total_length3);
2241 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2243 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
2245 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
2247 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
2249 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
2251 pkt_work_time(data0, time);
2252 pkt_work_time(data1, time);
2253 pkt_work_time(data2, time);
2254 pkt_work_time(data3, time);
/*
 * Generic pipeline action handler: run the enabled actions over a burst
 * of packets and drop those whose bit ends up set in pkts_drop_mask.
 * When pkts_mask is a dense run of ones starting at bit 0 (the
 * (mask & (mask + 1)) == 0 test), packets are processed four at a time
 * via pkt4_work() with a scalar pkt_work() tail; otherwise the sparse
 * mask is walked bit by bit with ctzll.
 * NOTE(review): the time snapshot taken when MTR/TIME is enabled, and
 * some call-argument lines, are elided in this extract.
 */
2263 static __rte_always_inline int
2264 ah(struct rte_pipeline *p,
2265 struct rte_mbuf **pkts,
2267 struct rte_pipeline_table_entry **entries,
2268 struct rte_table_action *action,
2269 struct ap_config *cfg)
2271 uint64_t pkts_drop_mask = 0;
/* Timestamp is only needed by the MTR and TIME actions. */
2274 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2275 (1LLU << RTE_TABLE_ACTION_TIME)))
/* Dense mask: contiguous ones from bit 0. */
2278 if ((pkts_mask & (pkts_mask + 1)) == 0) {
2279 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
/* Unrolled-by-4 main loop. */
2282 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
2285 drop_mask = pkt4_work(&pkts[i],
2291 pkts_drop_mask |= drop_mask << i;
/* Scalar tail for the remaining 0-3 packets. */
2294 for ( ; i < n_pkts; i++) {
2297 drop_mask = pkt_work(pkts[i],
2303 pkts_drop_mask |= drop_mask << i;
/* Sparse mask: visit each set bit individually. */
2306 for ( ; pkts_mask; ) {
2307 uint32_t pos = __builtin_ctzll(pkts_mask);
2308 uint64_t pkt_mask = 1LLU << pos;
2311 drop_mask = pkt_work(pkts[pos],
2317 pkts_mask &= ~pkt_mask;
2318 pkts_drop_mask |= drop_mask << pos;
/* Hand the accumulated drop mask back to the pipeline. */
2321 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
/*
 * Default action handler registered with the pipeline: recovers the
 * rte_table_action context from the opaque arg and (in lines elided
 * from this extract) presumably forwards to ah() with the full config.
 */
2327 ah_default(struct rte_pipeline *p,
2328 struct rte_mbuf **pkts,
2330 struct rte_pipeline_table_entry **entries,
2333 struct rte_table_action *action = arg;
/*
 * Pick the table "hit" action handler for this profile. When FWD is the
 * only enabled action, no custom handler is needed (the pipeline applies
 * the fwd_data directly); the return statements are elided here.
 */
2343 static rte_pipeline_table_action_handler_hit
2344 ah_selector(struct rte_table_action *action)
2346 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
/*
 * Fill in the rte_pipeline table creation parameters derived from this
 * action profile: select the hit handler via ah_selector(), round the
 * per-entry action data size up to a power of two, and subtract the
 * rte_pipeline_table_entry header to get the extra data size the
 * pipeline must reserve per entry.
 */
2353 rte_table_action_table_params_get(struct rte_table_action *action,
2354 struct rte_pipeline_table_params *params)
2356 rte_pipeline_table_action_handler_hit f_action_hit;
2357 uint32_t total_size;
2359 /* Check input arguments */
2360 if ((action == NULL) ||
2364 f_action_hit = ah_selector(action);
/* Power-of-2 sizing keeps entry addressing cheap in the pipeline. */
2365 total_size = rte_align32pow2(action->data.total_size);
2367 /* Fill in params */
2368 params->f_action_hit = f_action_hit;
2369 params->f_action_miss = NULL;
/* arg_ah is only meaningful when a custom hit handler is installed. */
2370 params->arg_ah = (f_action_hit) ? action : NULL;
2371 params->action_data_size = total_size -
2372 sizeof(struct rte_pipeline_table_entry);
2378 rte_table_action_free(struct rte_table_action *action)