1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
16 #include <rte_vxlan.h>
17 #include <rte_cryptodev.h>
19 #include "rte_table_action.h"
/* BSD-socket-style byte-order aliases over the DPDK CPU<->big-endian
 * conversion helpers, used throughout this file for header field writes.
 */
21 #define rte_htons rte_cpu_to_be_16
22 #define rte_htonl rte_cpu_to_be_32
24 #define rte_ntohs rte_be_to_cpu_16
25 #define rte_ntohl rte_be_to_cpu_32
28 * RTE_TABLE_ACTION_FWD
/* The FWD action stores its result directly in the pipeline table entry. */
30 #define fwd_data rte_pipeline_table_entry
/* Copy the forward decision (action + port/table id) from the user-supplied
 * parameters into the table entry. NOTE(review): port_id and table_id are
 * presumably alternatives selected by `action` (likely a union inside
 * rte_pipeline_table_entry) — confirm against rte_pipeline.h.
 */
33 fwd_apply(struct fwd_data *data,
34 struct rte_table_action_fwd_params *p)
36 data->action = p->action;
38 if (p->action == RTE_PIPELINE_ACTION_PORT)
39 data->port_id = p->id;
41 if (p->action == RTE_PIPELINE_ACTION_TABLE)
42 data->table_id = p->id;
/* Validate the load-balance action configuration: key size must be within
 * [MIN, MAX] and a power of 2, and a hash callback must be provided.
 */
51 lb_cfg_check(struct rte_table_action_lb_config *cfg)
54 (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
55 (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
56 (!rte_is_power_of_2(cfg->key_size)) ||
57 (cfg->f_hash == NULL))
/* Per-entry LB data: table of output values indexed by hash bucket. */
64 uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
/* Copy the user-supplied output table into the entry's LB data. */
68 lb_apply(struct lb_data *data,
69 struct rte_table_action_lb_params *p)
71 memcpy(data->out, p->out, sizeof(data->out))
/* Per-packet LB work: hash the packet key found at cfg->key_offset in the
 * mbuf metadata, mask the digest down to a table slot (table size is a
 * power of 2), and write the selected output value at cfg->out_offset.
 */
76 static __rte_always_inline void
77 pkt_work_lb(struct rte_mbuf *mbuf,
79 struct rte_table_action_lb_config *cfg)
81 uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
82 uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
86 digest = cfg->f_hash(pkt_key,
90 pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
91 out_val = data->out[pos];
97 * RTE_TABLE_ACTION_MTR
/* Only trTCM metering is supported (srTCM rejected), with 1 or 4 traffic
 * classes and packet-based (not byte-based) accounting.
 */
100 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
102 if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
103 ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
104 (mtr->n_bytes_enabled != 0))
/* Per-TC meter run-time data. The low 8 bits of each stats[] counter are
 * (ab)used as control bits, the upper 56 bits are the packet counter:
 *   bits [1:0] = policer output color, bit [2] = policer drop flag,
 *   bits [7:3] (GREEN entry only) = meter profile id (5 bits, 0..31).
 */
109 struct mtr_trtcm_data {
110 struct rte_meter_trtcm trtcm;
111 uint64_t stats[RTE_COLORS];
/* Extract the 5-bit meter profile id from bits [7:3] of the GREEN stats. */
114 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
115 (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)
/* Store the meter profile id into bits [7:3]; only 32 profiles are
 * representable, hence the modulo.
 */
118 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
121 data->stats[RTE_COLOR_GREEN] &= ~0xF8LLU;
122 data->stats[RTE_COLOR_GREEN] |= (profile_id % 32) << 3;
/* Bit [2] of stats[color]: 1 = drop packets metered to this color. */
125 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
126 (((data)->stats[(color)] & 4LLU) >> 2)
/* Bits [1:0] of stats[color]: output color assigned by the policer. */
128 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
129 ((enum rte_color)((data)->stats[(color)] & 3LLU))
/* Encode the policer action for one meter output color into the control
 * bits of stats[color]. NOTE(review): the non-drop branch stores `color`
 * (the meter color) rather than a color derived from `action` — verify
 * against the policer enum encoding; the recolor case looks suspicious
 * given the elided lines in this rendering.
 */
132 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
133 enum rte_color color,
134 enum rte_table_action_policer action)
136 if (action == RTE_TABLE_ACTION_POLICER_DROP) {
137 data->stats[color] |= 4LLU;
139 data->stats[color] &= ~7LLU;
140 data->stats[color] |= color & 3LLU;
/* Packet counter lives in the upper 56 bits; shift off the control byte. */
145 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
146 enum rte_color color)
148 return data->stats[color] >> 8;
/* Clear the counter while preserving the low control byte. */
152 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
153 enum rte_color color)
155 data->stats[color] &= 0xFFLU;
/* Increment the packet counter (control byte stays intact). */
158 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
159 ((data)->stats[(color)] += (1LLU << 8))
/* Action data size: one mtr_trtcm_data per configured traffic class. */
162 mtr_data_size(struct rte_table_action_mtr_config *mtr)
164 return mtr->n_tc * sizeof(struct mtr_trtcm_data);
/* Per-DSCP translation entry (color; other fields elided in this view). */
167 struct dscp_table_entry_data {
168 enum rte_color color;
/* 64 entries — one per 6-bit DSCP value. */
173 struct dscp_table_data {
174 struct dscp_table_entry_data entry[64];
/* Shared meter profile slot (trTCM profile + id/valid bookkeeping). */
177 struct meter_profile_data {
178 struct rte_meter_trtcm_profile profile;
/* Linear search for a valid profile slot matching profile_id; returns the
 * slot or (presumably) NULL when not found — tail elided in this view.
 */
183 static struct meter_profile_data *
184 meter_profile_data_find(struct meter_profile_data *mp,
190 for (i = 0; i < mp_size; i++) {
191 struct meter_profile_data *mp_data = &mp[i];
193 if (mp_data->valid && (mp_data->profile_id == profile_id))
/* Linear search for the first free (invalid) profile slot. */
200 static struct meter_profile_data *
201 meter_profile_data_find_unused(struct meter_profile_data *mp,
206 for (i = 0; i < mp_size; i++) {
207 struct meter_profile_data *mp_data = &mp[i];
/* Validate meter apply parameters: the TC mask must fit within the
 * configured number of TCs, and every selected TC must reference an
 * already-registered meter profile.
 */
217 mtr_apply_check(struct rte_table_action_mtr_params *p,
218 struct rte_table_action_mtr_config *cfg,
219 struct meter_profile_data *mp,
224 if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
227 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
228 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
229 struct meter_profile_data *mp_data;
231 if ((p->tc_mask & (1LLU << i)) == 0)
234 mp_data = meter_profile_data_find(mp,
236 p_tc->meter_profile_id);
/* Apply meter parameters to a table entry: for each TC selected in
 * tc_mask, reset the per-TC data, configure the trTCM run-time context
 * from the referenced profile, record the profile id, and encode the
 * three per-color policer actions.
 */
245 mtr_apply(struct mtr_trtcm_data *data,
246 struct rte_table_action_mtr_params *p,
247 struct rte_table_action_mtr_config *cfg,
248 struct meter_profile_data *mp,
254 /* Check input arguments */
255 status = mtr_apply_check(p, cfg, mp, mp_size);
260 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
261 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
262 struct mtr_trtcm_data *data_tc = &data[i];
263 struct meter_profile_data *mp_data;
265 if ((p->tc_mask & (1LLU << i)) == 0)
269 mp_data = meter_profile_data_find(mp,
271 p_tc->meter_profile_id);
275 memset(data_tc, 0, sizeof(*data_tc));
/* Configure the trTCM state from the shared profile. */
278 status = rte_meter_trtcm_config(&data_tc->trtcm,
284 mtr_trtcm_data_meter_profile_id_set(data_tc,
287 /* Policer actions */
288 mtr_trtcm_data_policer_action_set(data_tc,
290 p_tc->policer[RTE_COLOR_GREEN]);
292 mtr_trtcm_data_policer_action_set(data_tc,
294 p_tc->policer[RTE_COLOR_YELLOW]);
296 mtr_trtcm_data_policer_action_set(data_tc,
298 p_tc->policer[RTE_COLOR_RED]);
/* Per-packet meter work: look up the input color from the DSCP table,
 * run the color-aware trTCM check, bump the per-color stats, then apply
 * the policer: return a drop mask and stamp the policed color into the
 * mbuf sched field. Returns the drop mask (0 = keep, nonzero = drop).
 */
304 static __rte_always_inline uint64_t
305 pkt_work_mtr(struct rte_mbuf *mbuf,
306 struct mtr_trtcm_data *data,
307 struct dscp_table_data *dscp_table,
308 struct meter_profile_data *mp,
311 uint16_t total_length)
314 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
315 enum rte_color color_in, color_meter, color_policer;
319 color_in = dscp_entry->color;
321 mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
324 color_meter = rte_meter_trtcm_color_aware_check(
/* Update stats */
332 MTR_TRTCM_DATA_STATS_INC(data, color_meter);
/* Policer: drop decision and output color from the encoded action bits. */
335 drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
337 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
338 rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);
344 * RTE_TABLE_ACTION_TM
/* Traffic-management config: subport and pipe counts must be nonzero
 * powers of 2, with subports additionally bounded by UINT16_MAX, so that
 * the queue id can be built by shifting/ORing bit fields below.
 */
347 tm_cfg_check(struct rte_table_action_tm_config *tm)
349 if ((tm->n_subports_per_port == 0) ||
350 (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
351 (tm->n_subports_per_port > UINT16_MAX) ||
352 (tm->n_pipes_per_subport == 0) ||
353 (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
/* Subport/pipe ids must be within the configured ranges. */
365 tm_apply_check(struct rte_table_action_tm_params *p,
366 struct rte_table_action_tm_config *cfg)
368 if ((p->subport_id >= cfg->n_subports_per_port) ||
369 (p->pipe_id >= cfg->n_pipes_per_subport))
/* Precompute the scheduler queue id base: subport in the high bits, then
 * pipe, leaving the low 4 bits for the per-DSCP TC/queue (filled in per
 * packet by pkt_work_tm).
 */
376 tm_apply(struct tm_data *data,
377 struct rte_table_action_tm_params *p,
378 struct rte_table_action_tm_config *cfg)
382 /* Check input arguments */
383 status = tm_apply_check(p, cfg);
388 data->queue_id = p->subport_id <<
389 (__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
/* Per-packet TM work: OR the DSCP-derived traffic-class queue into the
 * precomputed base queue id and stamp queue/TC/color into the mbuf.
 */
395 static __rte_always_inline void
396 pkt_work_tm(struct rte_mbuf *mbuf,
397 struct tm_data *data,
398 struct dscp_table_data *dscp_table,
401 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
402 uint32_t queue_id = data->queue_id |
403 dscp_entry->tc_queue;
404 rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
405 (uint8_t)dscp_entry->color);
409 * RTE_TABLE_ACTION_ENCAP
/* Whitelist of supported encapsulation types. */
412 encap_valid(enum rte_table_action_encap_type encap)
415 case RTE_TABLE_ACTION_ENCAP_ETHER:
416 case RTE_TABLE_ACTION_ENCAP_VLAN:
417 case RTE_TABLE_ACTION_ENCAP_QINQ:
418 case RTE_TABLE_ACTION_ENCAP_MPLS:
419 case RTE_TABLE_ACTION_ENCAP_PPPOE:
420 case RTE_TABLE_ACTION_ENCAP_VXLAN:
421 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
/* Exactly one encapsulation type must be selected in the mask. */
429 encap_cfg_check(struct rte_table_action_encap_config *encap)
431 if ((encap->encap_mask == 0) ||
432 (__builtin_popcountll(encap->encap_mask) != 1))
/* Pre-built header templates, one struct per encap type. These are copied
 * in front of the packet at run time, so they are packed and 2-byte
 * aligned to match on-the-wire layout.
 */
438 struct encap_ether_data {
439 struct rte_ether_hdr ether;
/* Build a VLAN TCI from PCP/DEI/VID (host byte order). */
442 #define VLAN(pcp, dei, vid) \
443 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
444 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
445 (((uint64_t)(vid)) & 0xFFFLLU)) \
447 struct encap_vlan_data {
448 struct rte_ether_hdr ether;
449 struct rte_vlan_hdr vlan;
452 struct encap_qinq_data {
453 struct rte_ether_hdr ether;
454 struct rte_vlan_hdr svlan;
455 struct rte_vlan_hdr cvlan;
458 #define ETHER_TYPE_MPLS_UNICAST 0x8847
460 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
/* Build a 32-bit MPLS label stack entry from label/TC/S-bit/TTL. */
462 #define MPLS(label, tc, s, ttl) \
463 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
464 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
465 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
466 (((uint64_t)(ttl)) & 0xFFLLU)))
468 struct encap_mpls_data {
469 struct rte_ether_hdr ether;
470 uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
472 } __rte_packed __rte_aligned(2);
/* PPP protocol id for IPv4 payload. */
474 #define PPP_PROTOCOL_IP 0x0021
/* Combined PPPoE + PPP header (version/type/code, session, length, proto). */
476 struct pppoe_ppp_hdr {
477 uint16_t ver_type_code;
483 struct encap_pppoe_data {
484 struct rte_ether_hdr ether;
485 struct pppoe_ppp_hdr pppoe_ppp;
488 #define IP_PROTO_UDP 17
490 struct encap_vxlan_ipv4_data {
491 struct rte_ether_hdr ether;
492 struct rte_ipv4_hdr ipv4;
493 struct rte_udp_hdr udp;
494 struct rte_vxlan_hdr vxlan;
495 } __rte_packed __rte_aligned(2);
497 struct encap_vxlan_ipv4_vlan_data {
498 struct rte_ether_hdr ether;
499 struct rte_vlan_hdr vlan;
500 struct rte_ipv4_hdr ipv4;
501 struct rte_udp_hdr udp;
502 struct rte_vxlan_hdr vxlan;
503 } __rte_packed __rte_aligned(2);
505 struct encap_vxlan_ipv6_data {
506 struct rte_ether_hdr ether;
507 struct rte_ipv6_hdr ipv6;
508 struct rte_udp_hdr udp;
509 struct rte_vxlan_hdr vxlan;
510 } __rte_packed __rte_aligned(2);
512 struct encap_vxlan_ipv6_vlan_data {
513 struct rte_ether_hdr ether;
514 struct rte_vlan_hdr vlan;
515 struct rte_ipv6_hdr ipv6;
516 struct rte_udp_hdr udp;
517 struct rte_vxlan_hdr vxlan;
518 } __rte_packed __rte_aligned(2);
520 struct encap_qinq_pppoe_data {
521 struct rte_ether_hdr ether;
522 struct rte_vlan_hdr svlan;
523 struct rte_vlan_hdr cvlan;
524 struct pppoe_ppp_hdr pppoe_ppp;
525 } __rte_packed __rte_aligned(2);
/* Size of the per-entry header template, selected by the single bit set
 * in encap_mask; VXLAN additionally depends on IP version and VLAN tag
 * (ip_version nonzero means IPv4, zero means IPv6).
 */
528 encap_data_size(struct rte_table_action_encap_config *encap)
530 switch (encap->encap_mask) {
531 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
532 return sizeof(struct encap_ether_data);
534 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
535 return sizeof(struct encap_vlan_data);
537 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
538 return sizeof(struct encap_qinq_data);
540 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
541 return sizeof(struct encap_mpls_data);
543 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
544 return sizeof(struct encap_pppoe_data);
546 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
547 if (encap->vxlan.ip_version)
548 if (encap->vxlan.vlan)
549 return sizeof(struct encap_vxlan_ipv4_vlan_data);
551 return sizeof(struct encap_vxlan_ipv4_data);
553 if (encap->vxlan.vlan)
554 return sizeof(struct encap_vxlan_ipv6_vlan_data);
556 return sizeof(struct encap_vxlan_ipv6_data);
558 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
559 return sizeof(struct encap_qinq_pppoe_data);
/* Validate apply parameters: the requested type must be supported and
 * enabled in the configuration; MPLS additionally requires a label count
 * in [1, RTE_TABLE_ACTION_MPLS_LABELS_MAX].
 */
567 encap_apply_check(struct rte_table_action_encap_params *p,
568 struct rte_table_action_encap_config *cfg)
570 if ((encap_valid(p->type) == 0) ||
571 ((cfg->encap_mask & (1LLU << p->type)) == 0))
575 case RTE_TABLE_ACTION_ENCAP_ETHER:
578 case RTE_TABLE_ACTION_ENCAP_VLAN:
581 case RTE_TABLE_ACTION_ENCAP_QINQ:
584 case RTE_TABLE_ACTION_ENCAP_MPLS:
585 if ((p->mpls.mpls_count == 0) ||
586 (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
591 case RTE_TABLE_ACTION_ENCAP_PPPOE:
594 case RTE_TABLE_ACTION_ENCAP_VXLAN:
597 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
/* Fill the plain Ethernet header template. The EtherType follows the
 * table's IP version (nonzero -> IPv4, zero -> IPv6).
 */
606 encap_ether_apply(void *data,
607 struct rte_table_action_encap_params *p,
608 struct rte_table_action_common_config *common_cfg)
610 struct encap_ether_data *d = data;
611 uint16_t ethertype = (common_cfg->ip_version) ?
612 RTE_ETHER_TYPE_IPV4 :
616 rte_ether_addr_copy(&p->ether.ether.da, &d->ether.dst_addr);
617 rte_ether_addr_copy(&p->ether.ether.sa, &d->ether.src_addr);
618 d->ether.ether_type = rte_htons(ethertype);
/* Fill the Ethernet + single VLAN tag template: outer EtherType is
 * 802.1Q, the VLAN header carries the payload EtherType.
 */
624 encap_vlan_apply(void *data,
625 struct rte_table_action_encap_params *p,
626 struct rte_table_action_common_config *common_cfg)
628 struct encap_vlan_data *d = data;
629 uint16_t ethertype = (common_cfg->ip_version) ?
630 RTE_ETHER_TYPE_IPV4 :
634 rte_ether_addr_copy(&p->vlan.ether.da, &d->ether.dst_addr);
635 rte_ether_addr_copy(&p->vlan.ether.sa, &d->ether.src_addr);
636 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
639 d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
642 d->vlan.eth_proto = rte_htons(ethertype);
/* Fill the QinQ (802.1ad) template: S-VLAN with QINQ EtherType, C-VLAN
 * with 802.1Q, inner proto is the payload EtherType.
 */
648 encap_qinq_apply(void *data,
649 struct rte_table_action_encap_params *p,
650 struct rte_table_action_common_config *common_cfg)
652 struct encap_qinq_data *d = data;
653 uint16_t ethertype = (common_cfg->ip_version) ?
654 RTE_ETHER_TYPE_IPV4 :
658 rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.dst_addr);
659 rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.src_addr);
660 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_QINQ);
663 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
666 d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
669 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
672 d->cvlan.eth_proto = rte_htons(ethertype);
/* Fill the QinQ + PPPoE session template. Ethernet/VLAN fields come from
 * the qinq params, the PPPoE session id from the qinq_pppoe params; the
 * PPPoE length field is left 0 and computed per packet.
 */
678 encap_qinq_pppoe_apply(void *data,
679 struct rte_table_action_encap_params *p)
681 struct encap_qinq_pppoe_data *d = data;
684 rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.dst_addr);
685 rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.src_addr);
686 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
689 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
692 d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
695 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
698 d->cvlan.eth_proto = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);
/* 0x1100 = PPPoE version 1, type 1, code 0 (session data). */
701 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
702 d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id);
703 d->pppoe_ppp.length = 0; /* not pre-computed */
704 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/* Fill the MPLS template: EtherType by unicast/multicast, then the label
 * stack; the last label (written after the loop) carries the S bit.
 */
710 encap_mpls_apply(void *data,
711 struct rte_table_action_encap_params *p)
713 struct encap_mpls_data *d = data;
714 uint16_t ethertype = (p->mpls.unicast) ?
715 ETHER_TYPE_MPLS_UNICAST :
716 ETHER_TYPE_MPLS_MULTICAST;
720 rte_ether_addr_copy(&p->mpls.ether.da, &d->ether.dst_addr);
721 rte_ether_addr_copy(&p->mpls.ether.sa, &d->ether.src_addr);
722 d->ether.ether_type = rte_htons(ethertype);
725 for (i = 0; i < p->mpls.mpls_count - 1; i++)
726 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
729 p->mpls.mpls[i].ttl));
731 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
734 p->mpls.mpls[i].ttl));
736 d->mpls_count = p->mpls.mpls_count;
/* Fill the PPPoE session template; length is computed per packet. */
741 encap_pppoe_apply(void *data,
742 struct rte_table_action_encap_params *p)
744 struct encap_pppoe_data *d = data;
747 rte_ether_addr_copy(&p->pppoe.ether.da, &d->ether.dst_addr);
748 rte_ether_addr_copy(&p->pppoe.ether.sa, &d->ether.src_addr);
749 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);
752 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
753 d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
754 d->pppoe_ppp.length = 0; /* not pre-computed */
755 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/* Fill the VXLAN tunnel header template (one of four layouts selected by
 * cfg->vxlan.ip_version / cfg->vxlan.vlan). Range-checks VNI (24 bits),
 * DSCP (6 bits), IPv6 flow label (20 bits) and VLAN VID (12 bits) first.
 * Length and checksum fields that depend on the payload are left 0 and
 * finalized per packet; the IPv4 header checksum is pre-computed here and
 * incrementally patched at run time.
 */
761 encap_vxlan_apply(void *data,
762 struct rte_table_action_encap_params *p,
763 struct rte_table_action_encap_config *cfg)
765 if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
766 (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
767 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
768 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
769 (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
772 if (cfg->vxlan.ip_version)
773 if (cfg->vxlan.vlan) {
/* IPv4 + VLAN layout. */
774 struct encap_vxlan_ipv4_vlan_data *d = data;
777 rte_ether_addr_copy(&p->vxlan.ether.da,
779 rte_ether_addr_copy(&p->vxlan.ether.sa,
781 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
784 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
787 d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV4);
/* 0x45 = IPv4, 5-word (20-byte) header, no options. */
790 d->ipv4.version_ihl = 0x45;
791 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
792 d->ipv4.total_length = 0; /* not pre-computed */
793 d->ipv4.packet_id = 0;
794 d->ipv4.fragment_offset = 0;
795 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
796 d->ipv4.next_proto_id = IP_PROTO_UDP;
797 d->ipv4.hdr_checksum = 0;
798 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
799 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
/* Checksum over the template (total_length still 0); patched later. */
801 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
804 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
805 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
806 d->udp.dgram_len = 0; /* not pre-computed */
807 d->udp.dgram_cksum = 0;
/* VXLAN flags: I bit set (valid VNI); VNI in the upper 24 bits. */
810 d->vxlan.vx_flags = rte_htonl(0x08000000);
811 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv4, no VLAN. */
815 struct encap_vxlan_ipv4_data *d = data;
818 rte_ether_addr_copy(&p->vxlan.ether.da,
820 rte_ether_addr_copy(&p->vxlan.ether.sa,
822 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV4);
825 d->ipv4.version_ihl = 0x45;
826 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
827 d->ipv4.total_length = 0; /* not pre-computed */
828 d->ipv4.packet_id = 0;
829 d->ipv4.fragment_offset = 0;
830 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
831 d->ipv4.next_proto_id = IP_PROTO_UDP;
832 d->ipv4.hdr_checksum = 0;
833 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
834 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
836 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
839 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
840 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
841 d->udp.dgram_len = 0; /* not pre-computed */
842 d->udp.dgram_cksum = 0;
845 d->vxlan.vx_flags = rte_htonl(0x08000000);
846 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv6 + VLAN. */
851 if (cfg->vxlan.vlan) {
852 struct encap_vxlan_ipv6_vlan_data *d = data;
855 rte_ether_addr_copy(&p->vxlan.ether.da,
857 rte_ether_addr_copy(&p->vxlan.ether.sa,
859 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
862 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
865 d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV6);
/* vtc_flow = version 6 | traffic class (DSCP<<2 at bit 22) | flow label. */
868 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
869 (p->vxlan.ipv6.dscp << 22) |
870 p->vxlan.ipv6.flow_label);
871 d->ipv6.payload_len = 0; /* not pre-computed */
872 d->ipv6.proto = IP_PROTO_UDP;
873 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
874 memcpy(d->ipv6.src_addr,
876 sizeof(p->vxlan.ipv6.sa));
877 memcpy(d->ipv6.dst_addr,
879 sizeof(p->vxlan.ipv6.da));
882 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
883 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
884 d->udp.dgram_len = 0; /* not pre-computed */
885 d->udp.dgram_cksum = 0;
888 d->vxlan.vx_flags = rte_htonl(0x08000000);
889 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv6, no VLAN. */
893 struct encap_vxlan_ipv6_data *d = data;
896 rte_ether_addr_copy(&p->vxlan.ether.da,
898 rte_ether_addr_copy(&p->vxlan.ether.sa,
900 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV6);
903 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
904 (p->vxlan.ipv6.dscp << 22) |
905 p->vxlan.ipv6.flow_label);
906 d->ipv6.payload_len = 0; /* not pre-computed */
907 d->ipv6.proto = IP_PROTO_UDP;
908 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
909 memcpy(d->ipv6.src_addr,
911 sizeof(p->vxlan.ipv6.sa));
912 memcpy(d->ipv6.dst_addr,
914 sizeof(p->vxlan.ipv6.da));
917 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
918 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
919 d->udp.dgram_len = 0; /* not pre-computed */
920 d->udp.dgram_cksum = 0;
923 d->vxlan.vx_flags = rte_htonl(0x08000000);
924 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* Dispatch: validate the apply parameters, then build the header template
 * for the configured encapsulation type into the entry's action data.
 */
931 encap_apply(void *data,
932 struct rte_table_action_encap_params *p,
933 struct rte_table_action_encap_config *cfg,
934 struct rte_table_action_common_config *common_cfg)
938 /* Check input arguments */
939 status = encap_apply_check(p, cfg);
944 case RTE_TABLE_ACTION_ENCAP_ETHER:
945 return encap_ether_apply(data, p, common_cfg);
947 case RTE_TABLE_ACTION_ENCAP_VLAN:
948 return encap_vlan_apply(data, p, common_cfg);
950 case RTE_TABLE_ACTION_ENCAP_QINQ:
951 return encap_qinq_apply(data, p, common_cfg);
953 case RTE_TABLE_ACTION_ENCAP_MPLS:
954 return encap_mpls_apply(data, p);
956 case RTE_TABLE_ACTION_ENCAP_PPPOE:
957 return encap_pppoe_apply(data, p);
959 case RTE_TABLE_ACTION_ENCAP_VXLAN:
960 return encap_vxlan_apply(data, p, cfg);
962 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
963 return encap_qinq_pppoe_apply(data, p);
/* Incrementally patch the pre-computed IPv4 header checksum (cksum0, which
 * was computed with total_length == 0) for the actual total_length, using
 * one's-complement arithmetic (RFC 1624 style: add the new field, fold
 * carries twice, re-invert).
 */
970 static __rte_always_inline uint16_t
971 encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
972 uint16_t total_length)
977 cksum1 = ~cksum1 & 0xFFFF;
979 /* Add total length (one's complement logic) */
980 cksum1 += total_length;
981 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
982 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
984 return (uint16_t)(~cksum1);
/* Prepend n bytes from src in front of dst (dst moves backwards); returns
 * the new start of the buffer. Caller guarantees headroom.
 */
987 static __rte_always_inline void *
988 encap(void *dst, const void *src, size_t n)
990 dst = ((uint8_t *) dst) - n;
991 return rte_memcpy(dst, src, n);
/* Per-packet VXLAN-over-IPv4 encap: compute the IPv4 total length, UDP
 * datagram length and patched IPv4 checksum from the inner frame length,
 * prepend the template in front of the packet, fix up the length/checksum
 * fields in the copy, then grow the mbuf to cover the new outer headers.
 */
994 static __rte_always_inline void
995 pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
996 struct encap_vxlan_ipv4_data *vxlan_tbl,
997 struct rte_table_action_encap_config *cfg)
999 uint32_t ether_offset = cfg->vxlan.data_offset;
1000 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1001 struct encap_vxlan_ipv4_data *vxlan_pkt;
1002 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
1004 ether_length = (uint16_t)mbuf->pkt_len;
1005 ipv4_total_length = ether_length +
1006 (sizeof(struct rte_vxlan_hdr) +
1007 sizeof(struct rte_udp_hdr) +
1008 sizeof(struct rte_ipv4_hdr));
1009 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
1010 rte_htons(ipv4_total_length));
1011 udp_length = ether_length +
1012 (sizeof(struct rte_vxlan_hdr) +
1013 sizeof(struct rte_udp_hdr));
1015 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1016 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1017 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1018 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
/* data_off is relative to the mbuf struct itself in this metadata scheme. */
1020 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1021 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Same as pkt_work_encap_vxlan_ipv4, but the prepended template also
 * contains a VLAN tag (encap_vxlan_ipv4_vlan_data layout).
 */
1024 static __rte_always_inline void
1025 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
1026 struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
1027 struct rte_table_action_encap_config *cfg)
1029 uint32_t ether_offset = cfg->vxlan.data_offset;
1030 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1031 struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
1032 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
1034 ether_length = (uint16_t)mbuf->pkt_len;
1035 ipv4_total_length = ether_length +
1036 (sizeof(struct rte_vxlan_hdr) +
1037 sizeof(struct rte_udp_hdr) +
1038 sizeof(struct rte_ipv4_hdr));
1039 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
1040 rte_htons(ipv4_total_length));
1041 udp_length = ether_length +
1042 (sizeof(struct rte_vxlan_hdr) +
1043 sizeof(struct rte_udp_hdr));
1045 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1046 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1047 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1048 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1050 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1051 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Per-packet VXLAN-over-IPv6 encap: IPv6 has no header checksum, so only
 * the payload length and UDP datagram length are fixed up after the
 * template is prepended.
 */
1054 static __rte_always_inline void
1055 pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
1056 struct encap_vxlan_ipv6_data *vxlan_tbl,
1057 struct rte_table_action_encap_config *cfg)
1059 uint32_t ether_offset = cfg->vxlan.data_offset;
1060 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1061 struct encap_vxlan_ipv6_data *vxlan_pkt;
1062 uint16_t ether_length, ipv6_payload_length, udp_length;
1064 ether_length = (uint16_t)mbuf->pkt_len;
1065 ipv6_payload_length = ether_length +
1066 (sizeof(struct rte_vxlan_hdr) +
1067 sizeof(struct rte_udp_hdr));
1068 udp_length = ether_length +
1069 (sizeof(struct rte_vxlan_hdr) +
1070 sizeof(struct rte_udp_hdr));
1072 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1073 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1074 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1076 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1077 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Same as pkt_work_encap_vxlan_ipv6, but with a VLAN tag in the template. */
1080 static __rte_always_inline void
1081 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
1082 struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
1083 struct rte_table_action_encap_config *cfg)
1085 uint32_t ether_offset = cfg->vxlan.data_offset;
1086 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1087 struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
1088 uint16_t ether_length, ipv6_payload_length, udp_length;
1090 ether_length = (uint16_t)mbuf->pkt_len;
1091 ipv6_payload_length = ether_length +
1092 (sizeof(struct rte_vxlan_hdr) +
1093 sizeof(struct rte_udp_hdr));
1094 udp_length = ether_length +
1095 (sizeof(struct rte_vxlan_hdr) +
1096 sizeof(struct rte_udp_hdr));
1098 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1099 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1100 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1102 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1103 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Per-packet encap dispatcher: prepend the entry's header template in
 * front of the IP payload (at ip_offset) and adjust mbuf data_off /
 * lengths. PPPoE variants additionally compute the PPPoE length field
 * (payload + 2 bytes of PPP protocol id); MPLS prepends only the labels
 * actually used; VXLAN delegates to the dedicated helpers above.
 */
1106 static __rte_always_inline void
1107 pkt_work_encap(struct rte_mbuf *mbuf,
1109 struct rte_table_action_encap_config *cfg,
1111 uint16_t total_length,
1114 switch (cfg->encap_mask) {
1115 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
1116 encap(ip, data, sizeof(struct encap_ether_data));
1117 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1118 sizeof(struct encap_ether_data));
1119 mbuf->pkt_len = mbuf->data_len = total_length +
1120 sizeof(struct encap_ether_data);
1123 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
1124 encap(ip, data, sizeof(struct encap_vlan_data));
1125 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1126 sizeof(struct encap_vlan_data));
1127 mbuf->pkt_len = mbuf->data_len = total_length +
1128 sizeof(struct encap_vlan_data);
1131 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
1132 encap(ip, data, sizeof(struct encap_qinq_data));
1133 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1134 sizeof(struct encap_qinq_data));
1135 mbuf->pkt_len = mbuf->data_len = total_length +
1136 sizeof(struct encap_qinq_data);
1139 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
1141 struct encap_mpls_data *mpls = data;
/* Ethernet header + 4 bytes per label actually present. */
1142 size_t size = sizeof(struct rte_ether_hdr) +
1143 mpls->mpls_count * 4;
1145 encap(ip, data, size);
1146 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
1147 mbuf->pkt_len = mbuf->data_len = total_length + size;
1151 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
1153 struct encap_pppoe_data *pppoe =
1154 encap(ip, data, sizeof(struct encap_pppoe_data));
/* PPPoE length = IP payload + 2-byte PPP protocol field. */
1155 pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1156 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1157 sizeof(struct encap_pppoe_data));
1158 mbuf->pkt_len = mbuf->data_len = total_length +
1159 sizeof(struct encap_pppoe_data);
1163 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
1165 struct encap_qinq_pppoe_data *qinq_pppoe =
1166 encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
1167 qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1168 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1169 sizeof(struct encap_qinq_pppoe_data));
1170 mbuf->pkt_len = mbuf->data_len = total_length +
1171 sizeof(struct encap_qinq_pppoe_data);
1175 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
1177 if (cfg->vxlan.ip_version)
1178 if (cfg->vxlan.vlan)
1179 pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
1181 pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
1183 if (cfg->vxlan.vlan)
1184 pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
1186 pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
1195 * RTE_TABLE_ACTION_NAT
/* NAT only supports TCP (0x06) and UDP (0x11) L4 protocols. */
1198 nat_cfg_check(struct rte_table_action_nat_config *nat)
1200 if ((nat->proto != 0x06) &&
1201 (nat->proto != 0x11))
/* Per-entry NAT data: translated address + L4 port, stored in network
 * byte order (fields elided in this view).
 */
1207 struct nat_ipv4_data {
1212 struct nat_ipv6_data {
/* Action data size depends only on the common IP version (nonzero = v4). */
1218 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
1219 struct rte_table_action_common_config *common)
1221 int ip_version = common->ip_version;
1223 return (ip_version) ?
1224 sizeof(struct nat_ipv4_data) :
1225 sizeof(struct nat_ipv6_data);
/* The entry's IP version must match the table's configured IP version. */
1229 nat_apply_check(struct rte_table_action_nat_params *p,
1230 struct rte_table_action_common_config *cfg)
1232 if ((p->ip_version && (cfg->ip_version == 0)) ||
1233 ((p->ip_version == 0) && cfg->ip_version))
/* Store the translated address/port into the entry, pre-converted to
 * network byte order so the per-packet path can write them directly.
 */
1240 nat_apply(void *data,
1241 struct rte_table_action_nat_params *p,
1242 struct rte_table_action_common_config *cfg)
1246 /* Check input arguments */
1247 status = nat_apply_check(p, cfg);
1252 if (p->ip_version) {
1253 struct nat_ipv4_data *d = data;
1255 d->addr = rte_htonl(p->addr.ipv4);
1256 d->port = rte_htons(p->port);
1258 struct nat_ipv6_data *d = data;
1260 memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
1261 d->port = rte_htons(p->port);
/* Incrementally update the IPv4 header checksum when the address changes
 * from ip0 to ip1 (RFC 1624 one's-complement arithmetic: un-invert,
 * subtract old, add new, fold carries, re-invert).
 */
1268 static __rte_always_inline uint16_t
1269 nat_ipv4_checksum_update(uint16_t cksum0,
1275 cksum1 = ~cksum1 & 0xFFFF;
1277 /* Subtract ip0 (one's complement logic) */
1278 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
1279 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1280 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1282 /* Add ip1 (one's complement logic) */
1283 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
1284 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1285 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1287 return (uint16_t)(~cksum1);
/* Incrementally update a TCP/UDP checksum for an IPv4 address + port
 * rewrite (address is in the pseudo-header, port in the L4 header).
 */
1290 static __rte_always_inline uint16_t
1291 nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
1300 cksum1 = ~cksum1 & 0xFFFF;
1302 /* Subtract ip0 and port 0 (one's complement logic) */
1303 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
1304 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1305 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1307 /* Add ip1 and port1 (one's complement logic) */
1308 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
1309 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1310 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1312 return (uint16_t)(~cksum1);
/* Same as above for IPv6: the 128-bit addresses are summed as eight
 * 16-bit words each (ip0/ip1 point at the raw address bytes).
 */
1315 static __rte_always_inline uint16_t
1316 nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
1325 cksum1 = ~cksum1 & 0xFFFF;
1327 /* Subtract ip0 and port 0 (one's complement logic) */
1328 cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
1329 ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
1330 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1331 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1333 /* Add ip1 and port1 (one's complement logic) */
1334 cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
1335 ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
1336 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1337 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1339 return (uint16_t)(~cksum1);
/* Per-packet IPv4 NAT: rewrite source (SNAT) or destination (DNAT)
 * address + L4 port, patching the IPv4 header checksum and the TCP/UDP
 * checksum incrementally. The L4 header is assumed to start immediately
 * after the 20-byte IPv4 header (&ip[1] — no IP options). For UDP, a zero
 * datagram checksum means "no checksum" and is left untouched.
 */
1342 static __rte_always_inline void
1343 pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
1344 struct nat_ipv4_data *data,
1345 struct rte_table_action_nat_config *cfg)
1347 if (cfg->source_nat) {
1348 if (cfg->proto == 0x6) {
1349 struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1350 uint16_t ip_cksum, tcp_cksum;
1352 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1356 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1362 ip->src_addr = data->addr;
1363 ip->hdr_checksum = ip_cksum;
1364 tcp->src_port = data->port;
1365 tcp->cksum = tcp_cksum;
1367 struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1368 uint16_t ip_cksum, udp_cksum;
1370 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1374 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1380 ip->src_addr = data->addr;
1381 ip->hdr_checksum = ip_cksum;
1382 udp->src_port = data->port;
/* Zero UDP checksum means "not used" — do not update it. */
1383 if (udp->dgram_cksum)
1384 udp->dgram_cksum = udp_cksum;
1387 if (cfg->proto == 0x6) {
1388 struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1389 uint16_t ip_cksum, tcp_cksum;
1391 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1395 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1401 ip->dst_addr = data->addr;
1402 ip->hdr_checksum = ip_cksum;
1403 tcp->dst_port = data->port;
1404 tcp->cksum = tcp_cksum;
1406 struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1407 uint16_t ip_cksum, udp_cksum;
1409 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1413 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1419 ip->dst_addr = data->addr;
1420 ip->hdr_checksum = ip_cksum;
1421 udp->dst_port = data->port;
1422 if (udp->dgram_cksum)
1423 udp->dgram_cksum = udp_cksum;
/*
 * Apply IPv6 NAT to one packet: rewrite the 16-byte source (SNAT) or
 * destination (DNAT) address and the L4 port, patching the TCP/UDP checksum
 * incrementally (IPv6 has no header checksum of its own). The L4 header is
 * assumed to immediately follow the fixed IPv6 header (&ip[1], i.e. no
 * extension headers). cfg->proto == 0x6 selects TCP, otherwise UDP.
 */
static __rte_always_inline void
pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
struct nat_ipv6_data *data,
struct rte_table_action_nat_config *cfg)
if (cfg->source_nat) {
if (cfg->proto == 0x6) {
/* SNAT + TCP */
struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
(uint16_t *)ip->src_addr,
(uint16_t *)data->addr,
rte_memcpy(ip->src_addr, data->addr, 16);
tcp->src_port = data->port;
tcp->cksum = tcp_cksum;
/* SNAT + UDP: checksum is mandatory for UDP over IPv6,
 * so it is rewritten unconditionally.
 */
struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
(uint16_t *)ip->src_addr,
(uint16_t *)data->addr,
rte_memcpy(ip->src_addr, data->addr, 16);
udp->src_port = data->port;
udp->dgram_cksum = udp_cksum;
if (cfg->proto == 0x6) {
/* DNAT + TCP */
struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
(uint16_t *)ip->dst_addr,
(uint16_t *)data->addr,
rte_memcpy(ip->dst_addr, data->addr, 16);
tcp->dst_port = data->port;
tcp->cksum = tcp_cksum;
/* DNAT + UDP */
struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
(uint16_t *)ip->dst_addr,
(uint16_t *)data->addr,
rte_memcpy(ip->dst_addr, data->addr, 16);
udp->dst_port = data->port;
udp->dgram_cksum = udp_cksum;
* RTE_TABLE_ACTION_TTL
ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
/*
 * TTL action state is packed into a single 64-bit counter:
 * bit 0 holds the "decrement TTL" flag, bits 63..1 hold the number of
 * packets dropped due to TTL expiry. The macros below encode/decode that
 * packing; keeping both in one word makes per-packet update a single store.
 */
/* Initialize: clear the drop counter, store the decrement flag in bit 0 */
#define TTL_INIT(data, decrement) \
((data)->n_packets = (decrement) ? 1 : 0)
/* Amount to subtract from TTL/hop-limit per packet: 0 or 1 */
#define TTL_DEC_GET(data) \
((uint8_t)((data)->n_packets & 1))
/* Zero the drop counter, preserve the decrement flag */
#define TTL_STATS_RESET(data) \
((data)->n_packets = ((data)->n_packets & 1))
/* Read the drop counter (flag bit shifted out) */
#define TTL_STATS_READ(data) \
((data)->n_packets >> 1)
/* Add value to the drop counter without disturbing the flag bit */
#define TTL_STATS_ADD(data, value) \
((data)->n_packets = \
(((((data)->n_packets >> 1) + (value)) << 1) | \
((data)->n_packets & 1)))
/* Install TTL action parameters into per-entry data: record whether the
 * TTL/hop-limit should be decremented (drop counter starts at zero).
 */
ttl_apply(void *data,
struct rte_table_action_ttl_params *p)
struct ttl_data *d = data;
TTL_INIT(d, p->decrement);
/*
 * Decrement the IPv4 TTL (if the action is configured to) and patch the
 * header checksum incrementally. Returns a non-zero drop indication when
 * the resulting TTL is zero; expired packets are also counted into the
 * packed drop counter via TTL_STATS_ADD.
 */
static __rte_always_inline uint64_t
pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
struct ttl_data *data)
uint16_t cksum = ip->hdr_checksum;
uint8_t ttl = ip->time_to_live;
uint8_t ttl_diff = TTL_DEC_GET(data);  /* 0 or 1 */
ip->hdr_checksum = cksum;
ip->time_to_live = ttl;
drop = (ttl == 0) ? 1 : 0;
TTL_STATS_ADD(data, drop);
/*
 * Decrement the IPv6 hop limit (if configured). No checksum fix-up is
 * needed for IPv6. Returns a non-zero drop indication when the hop limit
 * reaches zero and records the drop in the packed counter.
 */
static __rte_always_inline uint64_t
pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
struct ttl_data *data)
uint8_t ttl = ip->hop_limits;
uint8_t ttl_diff = TTL_DEC_GET(data);  /* 0 or 1 */
ip->hop_limits = ttl;
drop = (ttl == 0) ? 1 : 0;
TTL_STATS_ADD(data, drop);
* RTE_TABLE_ACTION_STATS
/* Validate STATS config: at least one of packet/byte counting enabled */
stats_cfg_check(struct rte_table_action_stats_config *stats)
if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
/* Install STATS action parameters: seed the per-entry packet/byte counters */
stats_apply(struct stats_data *data,
struct rte_table_action_stats_params *p)
data->n_packets = p->n_packets;
data->n_bytes = p->n_bytes;
/* Per-packet STATS update: accumulate the IP total length into the byte
 * counter (packet counter update is outside this visible span).
 */
static __rte_always_inline void
pkt_work_stats(struct stats_data *data,
uint16_t total_length)
data->n_bytes += total_length;
* RTE_TABLE_ACTION_TIME
/* Install TIME action parameters: record the initial timestamp */
time_apply(struct time_data *data,
struct rte_table_action_time_params *p)
data->time = p->time;
/* Per-packet TIME update (parameter list truncated in this view;
 * presumably stores the current timestamp into data->time — confirm).
 */
static __rte_always_inline void
pkt_work_time(struct time_data *data,
* RTE_TABLE_ACTION_CRYPTO
/* Bit flags recording which sym crypto transforms the session chain uses */
#define CRYPTO_OP_MASK_CIPHER 0x1
#define CRYPTO_OP_MASK_AUTH 0x2
#define CRYPTO_OP_MASK_AEAD 0x4
/*
 * Crypto op layout built in mbuf private metadata: the rte_crypto_op and
 * rte_crypto_sym_op are immediately followed by per-op IV/AAD storage
 * (a union of cipher+auth IVs vs AEAD IV+AAD; union wrapper lines not
 * visible in this view).
 */
struct crypto_op_sym_iv_aad {
struct rte_crypto_op op;
struct rte_crypto_sym_op sym_op;
RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
/*
 * Per-table-entry state for the SYM_CRYPTO action: IV/AAD geometry for the
 * session's transforms, the session handle, direction, and a small inline
 * buffer holding the template IV/AAD bytes copied into each crypto op.
 */
struct sym_crypto_data {
/** Length of cipher iv. */
uint16_t cipher_iv_len;
/** Offset from start of IP header to the cipher iv. */
uint16_t cipher_iv_data_offset;
/** Length of cipher iv to be updated in the mbuf. */
uint16_t cipher_iv_update_len;
/** Offset from start of IP header to the auth iv. */
uint16_t auth_iv_data_offset;
/** Length of auth iv in the mbuf. */
uint16_t auth_iv_len;
/** Length of auth iv to be updated in the mbuf. */
uint16_t auth_iv_update_len;
/** Length of iv. */
/** Offset from start of IP header to the aead iv. */
uint16_t iv_data_offset;
/** Length of iv to be updated in the mbuf. */
uint16_t iv_update_len;
/** Length of aad */
/** Offset from start of IP header to the aad. */
uint16_t aad_data_offset;
/** Length of aad to updated in the mbuf. */
uint16_t aad_update_len;
/** Offset from start of IP header to the data. */
uint16_t data_offset;
/** Digest length. */
uint16_t digest_len;
/** Cipher block size; payload is padded up to a multiple of this. */
uint16_t block_size;
/** Mask of crypto operation */
/** Session pointer. */
struct rte_cryptodev_sym_session *session;
/** Direction of crypto, encrypt or decrypt */
/** Private data size to store cipher iv / aad. */
uint8_t iv_aad_data[32];
/* Validate SYM_CRYPTO config: cryptodev id must exist and both session
 * mempools (create + init/private data) must be provided.
 */
sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
if (!rte_cryptodev_is_valid_dev(cfg->cryptodev_id))
if (cfg->mp_create == NULL || cfg->mp_init == NULL)
/*
 * Look up the cipher/AEAD block size for the given transform by scanning
 * the crypto device's advertised capability list (terminated by
 * RTE_CRYPTO_OP_TYPE_UNDEFINED). Returns the block size on a match.
 */
get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
struct rte_cryptodev_info dev_info;
const struct rte_cryptodev_capabilities *cap;
rte_cryptodev_info_get(cdev_id, &dev_info);
for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
cap = &dev_info.capabilities[i];
/* Skip capabilities for other transform types */
if (cap->sym.xform_type != xform->type)
if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
(cap->sym.cipher.algo == xform->cipher.algo))
return cap->sym.cipher.block_size;
if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
(cap->sym.aead.algo == xform->aead.algo))
return cap->sym.aead.block_size;
if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
/*
 * Install SYM_CRYPTO action parameters for one table entry: walk the
 * transform chain (cipher / auth / AEAD), validate IV/AAD sizes and
 * offsets against the fixed op layout, snapshot the template IV/AAD bytes
 * into data->iv_aad_data, then create and initialize the cryptodev
 * session. On session init failure the session is freed again.
 */
sym_crypto_apply(struct sym_crypto_data *data,
struct rte_table_action_sym_crypto_config *cfg,
struct rte_table_action_sym_crypto_params *p)
const struct rte_crypto_cipher_xform *cipher_xform = NULL;
const struct rte_crypto_auth_xform *auth_xform = NULL;
const struct rte_crypto_aead_xform *aead_xform = NULL;
struct rte_crypto_sym_xform *xform = p->xform;
struct rte_cryptodev_sym_session *session;
memset(data, 0, sizeof(*data));
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
cipher_xform = &xform->cipher;
/* Cipher IV must fit the op buffer and sit at the fixed offset */
if (cipher_xform->iv.length >
RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
if (cipher_xform->iv.offset !=
RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
ret = get_block_size(xform, cfg->cryptodev_id);
data->block_size = (uint16_t)ret;
data->op_mask |= CRYPTO_OP_MASK_CIPHER;
data->cipher_auth.cipher_iv_len =
cipher_xform->iv.length;
data->cipher_auth.cipher_iv_data_offset = (uint16_t)
p->cipher_auth.cipher_iv_update.offset;
data->cipher_auth.cipher_iv_update_len = (uint16_t)
p->cipher_auth.cipher_iv_update.length;
/* Template cipher IV lives at the start of iv_aad_data */
rte_memcpy(data->iv_aad_data,
p->cipher_auth.cipher_iv.val,
p->cipher_auth.cipher_iv.length);
data->direction = cipher_xform->op;
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = &xform->auth;
if (auth_xform->iv.length >
RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
data->op_mask |= CRYPTO_OP_MASK_AUTH;
data->cipher_auth.auth_iv_len = auth_xform->iv.length;
data->cipher_auth.auth_iv_data_offset = (uint16_t)
p->cipher_auth.auth_iv_update.offset;
data->cipher_auth.auth_iv_update_len = (uint16_t)
p->cipher_auth.auth_iv_update.length;
data->digest_len = auth_xform->digest_length;
/* Map auth GENERATE/VERIFY onto the encrypt/decrypt direction */
data->direction = (auth_xform->op ==
RTE_CRYPTO_AUTH_OP_GENERATE) ?
RTE_CRYPTO_CIPHER_OP_ENCRYPT :
RTE_CRYPTO_CIPHER_OP_DECRYPT;
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
aead_xform = &xform->aead;
if ((aead_xform->iv.length >
RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
aead_xform->aad_length >
RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
if (aead_xform->iv.offset !=
RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
ret = get_block_size(xform, cfg->cryptodev_id);
data->block_size = (uint16_t)ret;
data->op_mask |= CRYPTO_OP_MASK_AEAD;
data->digest_len = aead_xform->digest_length;
data->aead.iv_len = aead_xform->iv.length;
data->aead.aad_len = aead_xform->aad_length;
data->aead.iv_data_offset = (uint16_t)
p->aead.iv_update.offset;
data->aead.iv_update_len = (uint16_t)
p->aead.iv_update.length;
data->aead.aad_data_offset = (uint16_t)
p->aead.aad_update.offset;
data->aead.aad_update_len = (uint16_t)
p->aead.aad_update.length;
/* Template layout for AEAD: IV first, AAD right after it */
rte_memcpy(data->iv_aad_data,
rte_memcpy(data->iv_aad_data + p->aead.iv.length,
p->aead.aad.length);
data->direction = (aead_xform->op ==
RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
RTE_CRYPTO_CIPHER_OP_ENCRYPT :
RTE_CRYPTO_CIPHER_OP_DECRYPT;
xform = xform->next;
if (auth_xform && auth_xform->iv.length) {
/* NOTE(review): cipher_xform is dereferenced here; for an
 * auth-only chain with a non-zero auth IV this would be a NULL
 * dereference — confirm whether such chains are rejected earlier.
 */
if (auth_xform->iv.offset !=
RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
cipher_xform->iv.length)
/* Auth IV is stored after the cipher IV in iv_aad_data */
rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
p->cipher_auth.auth_iv.val,
p->cipher_auth.auth_iv.length);
rte_memcpy(data->iv_aad_data,
p->cipher_auth.auth_iv.val,
p->cipher_auth.auth_iv.length);
session = rte_cryptodev_sym_session_create(cfg->mp_create);
ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
p->xform, cfg->mp_init);
/* Session init failed: release the half-built session */
rte_cryptodev_sym_session_free(session);
data->data_offset = (uint16_t)p->data_offset;
data->session = session;
/*
 * Build a symmetric crypto operation in the mbuf's private metadata area
 * (at cfg->op_offset) for one packet: fill in the op/sym_op headers,
 * pad the payload to the cipher block size and reserve digest space for
 * encrypt, then populate cipher / auth / AEAD fields including per-packet
 * IV/AAD handling. Returns a non-zero drop indication on failure
 * (e.g. when the mbuf tailroom append fails).
 */
static __rte_always_inline uint64_t
pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
struct rte_table_action_sym_crypto_config *cfg,
struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
struct rte_crypto_op *op = &crypto_op->op;
struct rte_crypto_sym_op *sym = op->sym;
/* data_offset is measured from the mbuf struct start, hence this base */
uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
sym->session = data->session;
/** pad the packet */
if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
/* Round payload up to a block-size multiple and add digest room */
uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
data->block_size) - payload_len;
if (unlikely(rte_pktmbuf_append(mbuf, append_len +
data->digest_len) == NULL))
payload_len += append_len;
/* Decrypt: the trailing digest is not part of the payload */
payload_len -= data->digest_len;
if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
/** prepare cipher op */
uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
sym->cipher.data.length = payload_len;
sym->cipher.data.offset = data->data_offset - pkt_offset;
if (data->cipher_auth.cipher_iv_update_len) {
uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
data->cipher_auth.cipher_iv_data_offset
/** For encryption, update the pkt iv field, otherwise
 * update the iv_aad_field
 */
if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
rte_memcpy(pkt_iv, data->iv_aad_data,
data->cipher_auth.cipher_iv_update_len);
rte_memcpy(data->iv_aad_data, pkt_iv,
data->cipher_auth.cipher_iv_update_len);
/* Copy the (possibly refreshed) template IV into the op */
rte_memcpy(iv, data->iv_aad_data,
data->cipher_auth.cipher_iv_len);
if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
/** authentication always start from IP header. */
sym->auth.data.offset = ip_offset - pkt_offset;
sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
if (data->cipher_auth.auth_iv_update_len) {
uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
data->cipher_auth.auth_iv_data_offset
uint8_t *data_iv = data->iv_aad_data +
data->cipher_auth.cipher_iv_len;
if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
rte_memcpy(pkt_iv, data_iv,
data->cipher_auth.auth_iv_update_len);
rte_memcpy(data_iv, pkt_iv,
data->cipher_auth.auth_iv_update_len);
if (data->cipher_auth.auth_iv_len) {
/** prepare auth iv (stored after the cipher iv) */
uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
rte_memcpy(iv, data->iv_aad_data +
data->cipher_auth.cipher_iv_len,
data->cipher_auth.auth_iv_len);
if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
sym->aead.aad.data = aad;
sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
sym->aead.data.offset = data->data_offset - pkt_offset;
sym->aead.data.length = payload_len;
if (data->aead.iv_update_len) {
uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
data->aead.iv_data_offset + ip_offset);
uint8_t *data_iv = data->iv_aad_data;
if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
rte_memcpy(pkt_iv, data_iv,
data->aead.iv_update_len);
rte_memcpy(data_iv, pkt_iv,
data->aead.iv_update_len);
rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
if (data->aead.aad_update_len) {
uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
data->aead.aad_data_offset + ip_offset);
uint8_t *data_aad = data->iv_aad_data +
/* NOTE(review): this branch is guarded by aad_update_len
 * but copies iv_update_len bytes below — looks like it
 * should be aad_update_len; confirm against upstream.
 */
if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
rte_memcpy(pkt_aad, data_aad,
data->aead.iv_update_len);
rte_memcpy(data_aad, pkt_aad,
data->aead.iv_update_len);
rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
data->aead.aad_len);
* RTE_TABLE_ACTION_TAG
/* Install TAG action parameters into per-entry data */
tag_apply(struct tag_data *data,
struct rte_table_action_tag_params *p)
/* Tag one packet: store the tag in the mbuf FDIR hash field and flag it
 * as an FDIR match with a valid ID so consumers read hash.fdir.hi.
 */
static __rte_always_inline void
pkt_work_tag(struct rte_mbuf *mbuf,
struct tag_data *data)
mbuf->hash.fdir.hi = data->tag;
mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
/* 4-packet unrolled variant of pkt_work_tag (same semantics per packet) */
static __rte_always_inline void
pkt4_work_tag(struct rte_mbuf *mbuf0,
struct rte_mbuf *mbuf1,
struct rte_mbuf *mbuf2,
struct rte_mbuf *mbuf3,
struct tag_data *data0,
struct tag_data *data1,
struct tag_data *data2,
struct tag_data *data3)
mbuf0->hash.fdir.hi = data0->tag;
mbuf1->hash.fdir.hi = data1->tag;
mbuf2->hash.fdir.hi = data2->tag;
mbuf3->hash.fdir.hi = data3->tag;
mbuf0->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mbuf1->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mbuf2->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mbuf3->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
* RTE_TABLE_ACTION_DECAP
/* Install DECAP action parameters (number of leading bytes to strip) */
decap_apply(struct decap_data *data,
struct rte_table_action_decap_params *p)
/*
 * Strip data->n leading bytes from the packet by advancing data_off and
 * shrinking data_len/pkt_len. Operates on the first segment only;
 * equivalent to rte_pktmbuf_adj() without the NULL check.
 */
static __rte_always_inline void
pkt_work_decap(struct rte_mbuf *mbuf,
struct decap_data *data)
uint16_t data_off = mbuf->data_off;
uint16_t data_len = mbuf->data_len;
uint32_t pkt_len = mbuf->pkt_len;
uint16_t n = data->n;
mbuf->data_off = data_off + n;
mbuf->data_len = data_len - n;
mbuf->pkt_len = pkt_len - n;
/* 4-packet unrolled variant of pkt_work_decap: loads are grouped before
 * stores to help the compiler schedule independent packet updates.
 */
static __rte_always_inline void
pkt4_work_decap(struct rte_mbuf *mbuf0,
struct rte_mbuf *mbuf1,
struct rte_mbuf *mbuf2,
struct rte_mbuf *mbuf3,
struct decap_data *data0,
struct decap_data *data1,
struct decap_data *data2,
struct decap_data *data3)
uint16_t data_off0 = mbuf0->data_off;
uint16_t data_len0 = mbuf0->data_len;
uint32_t pkt_len0 = mbuf0->pkt_len;
uint16_t data_off1 = mbuf1->data_off;
uint16_t data_len1 = mbuf1->data_len;
uint32_t pkt_len1 = mbuf1->pkt_len;
uint16_t data_off2 = mbuf2->data_off;
uint16_t data_len2 = mbuf2->data_len;
uint32_t pkt_len2 = mbuf2->pkt_len;
uint16_t data_off3 = mbuf3->data_off;
uint16_t data_len3 = mbuf3->data_len;
uint32_t pkt_len3 = mbuf3->pkt_len;
uint16_t n0 = data0->n;
uint16_t n1 = data1->n;
uint16_t n2 = data2->n;
uint16_t n3 = data3->n;
mbuf0->data_off = data_off0 + n0;
mbuf0->data_len = data_len0 - n0;
mbuf0->pkt_len = pkt_len0 - n0;
mbuf1->data_off = data_off1 + n1;
mbuf1->data_len = data_len1 - n1;
mbuf1->pkt_len = pkt_len1 - n1;
mbuf2->data_off = data_off2 + n2;
mbuf2->data_len = data_len2 - n2;
mbuf2->pkt_len = pkt_len2 - n2;
mbuf3->data_off = data_off3 + n3;
mbuf3->data_len = data_len3 - n3;
mbuf3->pkt_len = pkt_len3 - n3;
/* Return whether the given action type is one this library implements */
action_valid(enum rte_table_action_type action)
case RTE_TABLE_ACTION_FWD:
case RTE_TABLE_ACTION_LB:
case RTE_TABLE_ACTION_MTR:
case RTE_TABLE_ACTION_TM:
case RTE_TABLE_ACTION_ENCAP:
case RTE_TABLE_ACTION_NAT:
case RTE_TABLE_ACTION_TTL:
case RTE_TABLE_ACTION_STATS:
case RTE_TABLE_ACTION_TIME:
case RTE_TABLE_ACTION_SYM_CRYPTO:
case RTE_TABLE_ACTION_TAG:
case RTE_TABLE_ACTION_DECAP:
/* Upper bound on action type values; sized to fit the 64-bit action_mask */
#define RTE_TABLE_ACTION_MAX 64
/* Aggregated per-profile configuration: a bitmask of registered actions
 * plus one config struct per configurable action type.
 */
uint64_t action_mask;
struct rte_table_action_common_config common;
struct rte_table_action_lb_config lb;
struct rte_table_action_mtr_config mtr;
struct rte_table_action_tm_config tm;
struct rte_table_action_encap_config encap;
struct rte_table_action_nat_config nat;
struct rte_table_action_ttl_config ttl;
struct rte_table_action_stats_config stats;
struct rte_table_action_sym_crypto_config sym_crypto;
/* Size of the per-action configuration struct; actions without a config
 * (e.g. FWD, TIME, TAG, DECAP) fall through to the (unseen) default.
 */
action_cfg_size(enum rte_table_action_type action)
case RTE_TABLE_ACTION_LB:
return sizeof(struct rte_table_action_lb_config);
case RTE_TABLE_ACTION_MTR:
return sizeof(struct rte_table_action_mtr_config);
case RTE_TABLE_ACTION_TM:
return sizeof(struct rte_table_action_tm_config);
case RTE_TABLE_ACTION_ENCAP:
return sizeof(struct rte_table_action_encap_config);
case RTE_TABLE_ACTION_NAT:
return sizeof(struct rte_table_action_nat_config);
case RTE_TABLE_ACTION_TTL:
return sizeof(struct rte_table_action_ttl_config);
case RTE_TABLE_ACTION_STATS:
return sizeof(struct rte_table_action_stats_config);
case RTE_TABLE_ACTION_SYM_CRYPTO:
return sizeof(struct rte_table_action_sym_crypto_config);
/* Return a pointer to the config slot for the given action type within
 * the aggregated ap_config (NULL-equivalent default not visible here).
 */
action_cfg_get(struct ap_config *ap_config,
enum rte_table_action_type type)
case RTE_TABLE_ACTION_LB:
return &ap_config->lb;
case RTE_TABLE_ACTION_MTR:
return &ap_config->mtr;
case RTE_TABLE_ACTION_TM:
return &ap_config->tm;
case RTE_TABLE_ACTION_ENCAP:
return &ap_config->encap;
case RTE_TABLE_ACTION_NAT:
return &ap_config->nat;
case RTE_TABLE_ACTION_TTL:
return &ap_config->ttl;
case RTE_TABLE_ACTION_STATS:
return &ap_config->stats;
case RTE_TABLE_ACTION_SYM_CRYPTO:
return &ap_config->sym_crypto;
/* Copy an action's config into its slot and mark the action as registered
 * in the profile's action_mask.
 */
action_cfg_set(struct ap_config *ap_config,
enum rte_table_action_type type,
void *dst = action_cfg_get(ap_config, type);
memcpy(dst, action_cfg, action_cfg_size(type));
ap_config->action_mask |= 1LLU << type;
/* Per-action byte offset of each action's data within a table entry */
size_t offset[RTE_TABLE_ACTION_MAX];
/* Size of the per-table-entry data for one action; MTR/ENCAP/NAT sizes
 * depend on their registered configuration.
 */
action_data_size(enum rte_table_action_type action,
struct ap_config *ap_config)
case RTE_TABLE_ACTION_FWD:
return sizeof(struct fwd_data);
case RTE_TABLE_ACTION_LB:
return sizeof(struct lb_data);
case RTE_TABLE_ACTION_MTR:
return mtr_data_size(&ap_config->mtr);
case RTE_TABLE_ACTION_TM:
return sizeof(struct tm_data);
case RTE_TABLE_ACTION_ENCAP:
return encap_data_size(&ap_config->encap);
case RTE_TABLE_ACTION_NAT:
return nat_data_size(&ap_config->nat,
&ap_config->common);
case RTE_TABLE_ACTION_TTL:
return sizeof(struct ttl_data);
case RTE_TABLE_ACTION_STATS:
return sizeof(struct stats_data);
case RTE_TABLE_ACTION_TIME:
return sizeof(struct time_data);
case RTE_TABLE_ACTION_SYM_CRYPTO:
return (sizeof(struct sym_crypto_data));
case RTE_TABLE_ACTION_TAG:
return sizeof(struct tag_data);
case RTE_TABLE_ACTION_DECAP:
return sizeof(struct decap_data);
/*
 * Lay out the per-entry data area: walk registered actions in ascending
 * type order, assigning each a contiguous offset, and record the total
 * size needed per table entry.
 */
action_data_offset_set(struct ap_data *ap_data,
struct ap_config *ap_config)
uint64_t action_mask = ap_config->action_mask;
memset(ap_data->offset, 0, sizeof(ap_data->offset));
for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
if (action_mask & (1LLU << action)) {
ap_data->offset[action] = offset;
offset += action_data_size((enum rte_table_action_type)action,
ap_data->total_size = offset;
/* Profile = registered action configs + frozen data layout */
struct rte_table_action_profile {
struct ap_config cfg;
struct ap_data data;
/* Allocate a profile and seed it with the mandatory common config.
 * Allocated with plain calloc (not rte_malloc): control-path only.
 */
struct rte_table_action_profile *
rte_table_action_profile_create(struct rte_table_action_common_config *common)
struct rte_table_action_profile *ap;
/* Check input arguments */
/* Memory allocation */
ap = calloc(1, sizeof(struct rte_table_action_profile));
/* Initialization */
memcpy(&ap->cfg.common, common, sizeof(*common));
/*
 * Register one action type with a profile: validate the arguments
 * (action must be supported, not already registered, and a config must be
 * supplied iff the action type has one), run the per-action config check,
 * then copy the config into the profile and set its mask bit.
 */
rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
enum rte_table_action_type type,
void *action_config)
/* Check input arguments */
if ((profile == NULL) ||
(action_valid(type) == 0) ||
(profile->cfg.action_mask & (1LLU << type)) ||
((action_cfg_size(type) == 0) && action_config) ||
(action_cfg_size(type) && (action_config == NULL)))
case RTE_TABLE_ACTION_LB:
status = lb_cfg_check(action_config);
case RTE_TABLE_ACTION_MTR:
status = mtr_cfg_check(action_config);
case RTE_TABLE_ACTION_TM:
status = tm_cfg_check(action_config);
case RTE_TABLE_ACTION_ENCAP:
status = encap_cfg_check(action_config);
case RTE_TABLE_ACTION_NAT:
status = nat_cfg_check(action_config);
case RTE_TABLE_ACTION_TTL:
status = ttl_cfg_check(action_config);
case RTE_TABLE_ACTION_STATS:
status = stats_cfg_check(action_config);
case RTE_TABLE_ACTION_SYM_CRYPTO:
status = sym_crypto_cfg_check(action_config);
action_cfg_set(&profile->cfg, type, action_config);
/* Freeze a profile: FWD is made implicit/mandatory, the per-entry data
 * layout is computed, and no further registrations are accepted.
 */
rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
if (profile->frozen)
profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
action_data_offset_set(&profile->data, &profile->cfg);
profile->frozen = 1;
/* Free a profile; NULL is tolerated */
rte_table_action_profile_free(struct rte_table_action_profile *profile)
if (profile == NULL)
2491 struct rte_table_action {
2492 struct ap_config cfg;
2493 struct ap_data data;
2494 struct dscp_table_data dscp_table;
2495 struct meter_profile_data mp[METER_PROFILES_MAX];
2498 struct rte_table_action *
2499 rte_table_action_create(struct rte_table_action_profile *profile,
2502 struct rte_table_action *action;
2504 /* Check input arguments */
2505 if ((profile == NULL) ||
2506 (profile->frozen == 0))
2509 /* Memory allocation */
2510 action = rte_zmalloc_socket(NULL,
2511 sizeof(struct rte_table_action),
2512 RTE_CACHE_LINE_SIZE,
2517 /* Initialization */
2518 memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
2519 memcpy(&action->data, &profile->data, sizeof(profile->data));
/* Return the address of one action's data within a table entry, using the
 * offsets computed at profile freeze time.
 */
static __rte_always_inline void *
action_data_get(void *data,
struct rte_table_action *action,
enum rte_table_action_type type)
size_t offset = action->data.offset[type];
uint8_t *data_bytes = data;
return &data_bytes[offset];
/*
 * Write one action's parameters into a table entry's data area:
 * validate the arguments (action object, supported + registered type,
 * non-NULL params), locate the action's slot and dispatch to the
 * per-action apply helper.
 */
rte_table_action_apply(struct rte_table_action *action,
enum rte_table_action_type type,
void *action_params)
/* Check input arguments */
if ((action == NULL) ||
(action_valid(type) == 0) ||
((action->cfg.action_mask & (1LLU << type)) == 0) ||
(action_params == NULL))
action_data = action_data_get(data, action, type);
case RTE_TABLE_ACTION_FWD:
return fwd_apply(action_data,
case RTE_TABLE_ACTION_LB:
return lb_apply(action_data,
case RTE_TABLE_ACTION_MTR:
return mtr_apply(action_data,
RTE_DIM(action->mp));
case RTE_TABLE_ACTION_TM:
return tm_apply(action_data,
case RTE_TABLE_ACTION_ENCAP:
return encap_apply(action_data,
&action->cfg.common);
case RTE_TABLE_ACTION_NAT:
return nat_apply(action_data,
&action->cfg.common);
case RTE_TABLE_ACTION_TTL:
return ttl_apply(action_data,
case RTE_TABLE_ACTION_STATS:
return stats_apply(action_data,
case RTE_TABLE_ACTION_TIME:
return time_apply(action_data,
case RTE_TABLE_ACTION_SYM_CRYPTO:
return sym_crypto_apply(action_data,
&action->cfg.sym_crypto,
case RTE_TABLE_ACTION_TAG:
return tag_apply(action_data,
case RTE_TABLE_ACTION_DECAP:
return decap_apply(action_data,
/*
 * Update the shared DSCP translation table (used by MTR and/or TM):
 * for each DSCP value selected by dscp_mask, copy color / traffic class /
 * TC queue from the caller-supplied table.
 */
rte_table_action_dscp_table_update(struct rte_table_action *action,
struct rte_table_action_dscp_table *table)
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
(1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
for (i = 0; i < RTE_DIM(table->entry); i++) {
struct dscp_table_entry_data *data =
&action->dscp_table.entry[i];
struct rte_table_action_dscp_table_entry *entry =
/* Skip DSCP values not selected by the mask */
if ((dscp_mask & (1LLU << i)) == 0)
data->color = entry->color;
data->tc = entry->tc_id;
data->tc_queue = entry->tc_queue_id;
/*
 * Add a trTCM meter profile under the given id: reject duplicates, find a
 * free slot in the shared profile array, configure the rte_meter profile
 * and record the id. Only RTE_TABLE_ACTION_METER_TRTCM is supported.
 */
rte_table_action_meter_profile_add(struct rte_table_action *action,
uint32_t meter_profile_id,
struct rte_table_action_meter_profile *profile)
struct meter_profile_data *mp_data;
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
/* Duplicate id check */
mp_data = meter_profile_data_find(action->mp,
RTE_DIM(action->mp),
mp_data = meter_profile_data_find_unused(action->mp,
RTE_DIM(action->mp));
/* Install new profile */
status = rte_meter_trtcm_profile_config(&mp_data->profile,
mp_data->profile_id = meter_profile_id;
/* Delete a meter profile by id (looked up in the shared profile array;
 * uninstall logic is outside the visible span).
 */
rte_table_action_meter_profile_delete(struct rte_table_action *action,
uint32_t meter_profile_id)
struct meter_profile_data *mp_data;
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
mp_data = meter_profile_data_find(action->mp,
RTE_DIM(action->mp),
/* Uninstall profile */
/*
 * Read (and optionally clear) per-color meter packet counters for the
 * traffic classes selected by tc_mask from one table entry's MTR data.
 * Byte counters are not maintained (n_bytes_valid is reported as 0).
 */
rte_table_action_meter_read(struct rte_table_action *action,
struct rte_table_action_mtr_counters *stats,
struct mtr_trtcm_data *mtr_data;
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
(tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
/* Read: one counter set per selected traffic class */
for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
struct rte_table_action_mtr_counters_tc *dst =
struct mtr_trtcm_data *src = &mtr_data[i];
if ((tc_mask & (1 << i)) == 0)
dst->n_packets[RTE_COLOR_GREEN] =
mtr_trtcm_data_stats_get(src, RTE_COLOR_GREEN);
dst->n_packets[RTE_COLOR_YELLOW] =
mtr_trtcm_data_stats_get(src, RTE_COLOR_YELLOW);
dst->n_packets[RTE_COLOR_RED] =
mtr_trtcm_data_stats_get(src, RTE_COLOR_RED);
dst->n_packets_valid = 1;
dst->n_bytes_valid = 0;
stats->tc_mask = tc_mask;
/* Clear: reset counters of the selected traffic classes */
for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
struct mtr_trtcm_data *src = &mtr_data[i];
if ((tc_mask & (1 << i)) == 0)
mtr_trtcm_data_stats_reset(src, RTE_COLOR_GREEN);
mtr_trtcm_data_stats_reset(src, RTE_COLOR_YELLOW);
mtr_trtcm_data_stats_reset(src, RTE_COLOR_RED);
/* Read (and optionally clear) the TTL-expiry drop counter stored in one
 * table entry's packed TTL data word.
 */
rte_table_action_ttl_read(struct rte_table_action *action,
struct rte_table_action_ttl_counters *stats,
struct ttl_data *ttl_data;
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask &
(1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
stats->n_packets = TTL_STATS_READ(ttl_data);
/* Clear the counter, preserving the decrement flag */
TTL_STATS_RESET(ttl_data);
/* Read (and optionally clear) the per-entry packet and byte counters of
 * the STATS action; both counters are reported as valid.
 */
rte_table_action_stats_read(struct rte_table_action *action,
struct rte_table_action_stats_counters *stats,
struct stats_data *stats_data;
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask &
(1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
stats_data = action_data_get(data, action,
RTE_TABLE_ACTION_STATS);
stats->n_packets = stats_data->n_packets;
stats->n_bytes = stats_data->n_bytes;
stats->n_packets_valid = 1;
stats->n_bytes_valid = 1;
/* Clear */
stats_data->n_packets = 0;
stats_data->n_bytes = 0;
/* Read the last timestamp recorded by the TIME action for one entry */
rte_table_action_time_read(struct rte_table_action *action,
uint64_t *timestamp)
struct time_data *time_data;
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask &
(1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
(timestamp == NULL))
time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
*timestamp = time_data->time;
/* Return the cryptodev session installed in one entry's SYM_CRYPTO data
 * (NULL-equivalent error path not visible in this span).
 */
struct rte_cryptodev_sym_session *
rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
struct sym_crypto_data *sym_crypto_data;
/* Check input arguments */
if ((action == NULL) ||
((action->cfg.action_mask &
(1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
sym_crypto_data = action_data_get(data, action,
RTE_TABLE_ACTION_SYM_CRYPTO);
return sym_crypto_data->session;
/*
 * Scalar per-packet action pipeline: for one mbuf and its matched table
 * entry, run every action enabled in the profile's action_mask in a fixed
 * order (LB, MTR, TM, DECAP, ENCAP, NAT, TTL, STATS, TIME, SYM_CRYPTO,
 * TAG), accumulating a drop mask from the actions that can drop.
 * DSCP and IP total length are pre-extracted from the IPv4/IPv6 header
 * at the configured ip_offset.
 */
static __rte_always_inline uint64_t
pkt_work(struct rte_mbuf *mbuf,
struct rte_pipeline_table_entry *table_entry,
struct rte_table_action *action,
struct ap_config *cfg)
uint64_t drop_mask = 0;
uint32_t ip_offset = action->cfg.common.ip_offset;
void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
uint16_t total_length;
if (cfg->common.ip_version) {
/* IPv4: DSCP is the upper 6 bits of ToS */
struct rte_ipv4_hdr *hdr = ip;
dscp = hdr->type_of_service >> 2;
total_length = rte_ntohs(hdr->total_length);
/* IPv6: DSCP from vtc_flow; total = payload + fixed header */
struct rte_ipv6_hdr *hdr = ip;
dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
total_length = rte_ntohs(hdr->payload_len) +
sizeof(struct rte_ipv6_hdr);
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
drop_mask |= pkt_work_mtr(mbuf,
&action->dscp_table,
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
&action->dscp_table,
/* DECAP runs before ENCAP so new headers wrap the stripped packet */
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
void *data = action_data_get(table_entry,
RTE_TABLE_ACTION_DECAP);
pkt_work_decap(mbuf, data);
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
pkt_work_encap(mbuf,
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
if (cfg->common.ip_version)
pkt_ipv4_work_nat(ip, data, &cfg->nat);
pkt_ipv6_work_nat(ip, data, &cfg->nat);
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
if (cfg->common.ip_version)
drop_mask |= pkt_ipv4_work_ttl(ip, data);
drop_mask |= pkt_ipv6_work_ttl(ip, data);
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
pkt_work_stats(data, total_length);
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
pkt_work_time(data, time);
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
void *data = action_data_get(table_entry, action,
RTE_TABLE_ACTION_SYM_CRYPTO);
drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
void *data = action_data_get(table_entry,
RTE_TABLE_ACTION_TAG);
pkt_work_tag(mbuf, data);
/*
 * 4-way unrolled variant of pkt_work(): applies the enabled actions to
 * four packets at once, in the same fixed action order, to improve
 * instruction-level parallelism on the hot path. Per-packet state is
 * kept in suffixed locals (mbuf0..3, ip0..3, dscp0..3, ...).
 *
 * NOTE(review): elided view — several call-argument lines, the final
 * combined drop-mask computation and closing braces are not visible.
 *
 * @return Combined drop bitmask for the four packets (bits 0..3),
 *         presumably assembled from drop_mask0..3 in the elided tail.
 */
3010 static __rte_always_inline uint64_t
3011 pkt4_work(struct rte_mbuf **mbufs,
3012 struct rte_pipeline_table_entry **table_entries,
3014 struct rte_table_action *action,
3015 struct ap_config *cfg)
3017 uint64_t drop_mask0 = 0;
3018 uint64_t drop_mask1 = 0;
3019 uint64_t drop_mask2 = 0;
3020 uint64_t drop_mask3 = 0;
3022 struct rte_mbuf *mbuf0 = mbufs[0];
3023 struct rte_mbuf *mbuf1 = mbufs[1];
3024 struct rte_mbuf *mbuf2 = mbufs[2];
3025 struct rte_mbuf *mbuf3 = mbufs[3];
3027 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
3028 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
3029 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
3030 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
3032 uint32_t ip_offset = action->cfg.common.ip_offset;
3033 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
3034 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
3035 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
3036 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
3038 uint32_t dscp0, dscp1, dscp2, dscp3;
3039 uint16_t total_length0, total_length1, total_length2, total_length3;
/* Non-zero ip_version selects the IPv4 parse path, otherwise IPv6
 * (same convention as the scalar pkt_work). */
3041 if (cfg->common.ip_version) {
3042 struct rte_ipv4_hdr *hdr0 = ip0;
3043 struct rte_ipv4_hdr *hdr1 = ip1;
3044 struct rte_ipv4_hdr *hdr2 = ip2;
3045 struct rte_ipv4_hdr *hdr3 = ip3;
/* DSCP = top 6 bits of the IPv4 ToS byte. */
3047 dscp0 = hdr0->type_of_service >> 2;
3048 dscp1 = hdr1->type_of_service >> 2;
3049 dscp2 = hdr2->type_of_service >> 2;
3050 dscp3 = hdr3->type_of_service >> 2;
3052 total_length0 = rte_ntohs(hdr0->total_length);
3053 total_length1 = rte_ntohs(hdr1->total_length);
3054 total_length2 = rte_ntohs(hdr2->total_length);
3055 total_length3 = rte_ntohs(hdr3->total_length);
3057 struct rte_ipv6_hdr *hdr0 = ip0;
3058 struct rte_ipv6_hdr *hdr1 = ip1;
3059 struct rte_ipv6_hdr *hdr2 = ip2;
3060 struct rte_ipv6_hdr *hdr3 = ip3;
/* NOTE(review): same suspect IPv6 DSCP mask/shift as in pkt_work —
 * DSCP should be (vtc_flow >> 22) & 0x3F; confirm against upstream. */
3062 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
3063 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
3064 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
3065 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;
/* Add back the fixed IPv6 header so total_length matches IPv4. */
3067 total_length0 = rte_ntohs(hdr0->payload_len) +
3068 sizeof(struct rte_ipv6_hdr);
3069 total_length1 = rte_ntohs(hdr1->payload_len) +
3070 sizeof(struct rte_ipv6_hdr);
3071 total_length2 = rte_ntohs(hdr2->payload_len) +
3072 sizeof(struct rte_ipv6_hdr);
3073 total_length3 = rte_ntohs(hdr3->payload_len) +
3074 sizeof(struct rte_ipv6_hdr);
/* Load balance, 4 packets. */
3077 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
3079 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
3081 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
3083 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
3085 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
/* Metering: each packet accumulates into its own drop mask. */
3104 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
3106 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
3108 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
3110 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
3112 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
3114 drop_mask0 |= pkt_work_mtr(mbuf0,
3116 &action->dscp_table,
3122 drop_mask1 |= pkt_work_mtr(mbuf1,
3124 &action->dscp_table,
3130 drop_mask2 |= pkt_work_mtr(mbuf2,
3132 &action->dscp_table,
3138 drop_mask3 |= pkt_work_mtr(mbuf3,
3140 &action->dscp_table,
/* Traffic management: per-DSCP queue mapping. */
3147 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
3149 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
3151 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
3153 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
3155 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
3159 &action->dscp_table,
3164 &action->dscp_table,
3169 &action->dscp_table,
3174 &action->dscp_table,
/* Decap: single 4-packet helper call. */
3178 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
3179 void *data0 = action_data_get(table_entry0,
3181 RTE_TABLE_ACTION_DECAP);
3182 void *data1 = action_data_get(table_entry1,
3184 RTE_TABLE_ACTION_DECAP);
3185 void *data2 = action_data_get(table_entry2,
3187 RTE_TABLE_ACTION_DECAP);
3188 void *data3 = action_data_get(table_entry3,
3190 RTE_TABLE_ACTION_DECAP);
3192 pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
3193 data0, data1, data2, data3);
/* Encap: no 4-packet helper, scalar call per packet. */
3196 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
3198 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
3200 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
3202 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
3204 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
3206 pkt_work_encap(mbuf0,
3213 pkt_work_encap(mbuf1,
3220 pkt_work_encap(mbuf2,
3227 pkt_work_encap(mbuf3,
/* NAT: IPv4/IPv6 selected by the same ip_version flag. */
3235 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
3237 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
3239 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
3241 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
3243 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
3245 if (cfg->common.ip_version) {
3246 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
3247 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
3248 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
3249 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
3251 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
3252 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
3253 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
3254 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
/* TTL/hop-limit decrement: expiry sets the per-packet drop masks. */
3258 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
3260 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
3262 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
3264 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
3266 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
3268 if (cfg->common.ip_version) {
3269 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
3270 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
3271 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
3272 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
3274 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
3275 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
3276 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
3277 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
/* Stats: account each packet with the L3 length parsed above. */
3281 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
3283 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
3285 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
3287 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
3289 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
3291 pkt_work_stats(data0, total_length0);
3292 pkt_work_stats(data1, total_length1);
3293 pkt_work_stats(data2, total_length2);
3294 pkt_work_stats(data3, total_length3);
/* Timestamp: all four packets get the same caller-supplied time. */
3297 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
3299 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
3301 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
3303 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
3305 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
3307 pkt_work_time(data0, time);
3308 pkt_work_time(data1, time);
3309 pkt_work_time(data2, time);
3310 pkt_work_time(data3, time);
/* Symmetric crypto: failures accumulate into the drop masks. */
3313 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
3314 void *data0 = action_data_get(table_entry0, action,
3315 RTE_TABLE_ACTION_SYM_CRYPTO);
3316 void *data1 = action_data_get(table_entry1, action,
3317 RTE_TABLE_ACTION_SYM_CRYPTO);
3318 void *data2 = action_data_get(table_entry2, action,
3319 RTE_TABLE_ACTION_SYM_CRYPTO);
3320 void *data3 = action_data_get(table_entry3, action,
3321 RTE_TABLE_ACTION_SYM_CRYPTO);
3323 drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
3325 drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
3327 drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
3329 drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
/* Tag: single 4-packet helper call. */
3333 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3334 void *data0 = action_data_get(table_entry0,
3336 RTE_TABLE_ACTION_TAG);
3337 void *data1 = action_data_get(table_entry1,
3339 RTE_TABLE_ACTION_TAG);
3340 void *data2 = action_data_get(table_entry2,
3342 RTE_TABLE_ACTION_TAG);
3343 void *data3 = action_data_get(table_entry3,
3345 RTE_TABLE_ACTION_TAG);
3347 pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
3348 data0, data1, data2, data3);
/*
 * Generic pipeline action handler: applies the profile's actions to a
 * burst of packets and drops those whose work functions flagged them.
 *
 * Fast path: when pkts_mask is a dense run of low bits
 * ((mask & (mask+1)) == 0), packets are processed four at a time via
 * pkt4_work() with a scalar pkt_work() tail loop. Otherwise packets are
 * walked bit-by-bit with __builtin_ctzll().
 *
 * NOTE(review): elided view — some call-argument lines (time snapshot,
 * per-call args) and the return statement are not visible.
 */
3357 static __rte_always_inline int
3358 ah(struct rte_pipeline *p,
3359 struct rte_mbuf **pkts,
3361 struct rte_pipeline_table_entry **entries,
3362 struct rte_table_action *action,
3363 struct ap_config *cfg)
3365 uint64_t pkts_drop_mask = 0;
/* MTR and TIME both need a current-time value; elided lines presumably
 * take the timestamp here only when one of them is enabled. */
3368 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
3369 (1LLU << RTE_TABLE_ACTION_TIME)))
/* Dense mask: bits 0..n_pkts-1 all set, so packets can be indexed
 * sequentially. */
3372 if ((pkts_mask & (pkts_mask + 1)) == 0) {
3373 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
/* 4-way unrolled main loop over full groups of four. */
3376 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3379 drop_mask = pkt4_work(&pkts[i],
3385 pkts_drop_mask |= drop_mask << i;
/* Scalar tail for the remaining 0-3 packets. */
3388 for ( ; i < n_pkts; i++) {
3391 drop_mask = pkt_work(pkts[i],
3397 pkts_drop_mask |= drop_mask << i;
/* Sparse mask: visit each set bit individually. */
3400 for ( ; pkts_mask; ) {
3401 uint32_t pos = __builtin_ctzll(pkts_mask);
3402 uint64_t pkt_mask = 1LLU << pos;
3405 drop_mask = pkt_work(pkts[pos],
3411 pkts_mask &= ~pkt_mask;
3412 pkts_drop_mask |= drop_mask << pos;
/* Hand the accumulated drop mask back to the pipeline. */
3415 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
/*
 * Default table action handler: recovers the rte_table_action from the
 * opaque handler argument. NOTE(review): the body that forwards to ah()
 * with the full configuration is in elided lines — confirm upstream.
 */
3421 ah_default(struct rte_pipeline *p,
3422 struct rte_mbuf **pkts,
3424 struct rte_pipeline_table_entry **entries,
3427 struct rte_table_action *action = arg;
/*
 * Selects the packet action handler for a profile. When FWD is the only
 * enabled action, no per-packet work is needed; the return values for
 * both branches are in elided lines (presumably NULL vs. ah_default).
 */
3437 static rte_pipeline_table_action_handler_hit
3438 ah_selector(struct rte_table_action *action)
3440 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
/*
 * Fills in the rte_pipeline table parameters needed to attach this
 * action profile to a pipeline table: hit handler (chosen by
 * ah_selector), no miss handler, and the per-entry action data size.
 *
 * NOTE(review): elided view — part of the argument validation, the
 * return statement and closing brace are not visible.
 *
 * @param action Table action profile runtime.
 * @param params [out] Pipeline table params to populate.
 */
3447 rte_table_action_table_params_get(struct rte_table_action *action,
3448 struct rte_pipeline_table_params *params)
3450 rte_pipeline_table_action_handler_hit f_action_hit;
3451 uint32_t total_size;
3453 /* Check input arguments */
3454 if ((action == NULL) ||
3458 f_action_hit = ah_selector(action);
/* Entry size rounded up to a power of 2 for table allocation. */
3459 total_size = rte_align32pow2(action->data.total_size);
3461 /* Fill in params */
3462 params->f_action_hit = f_action_hit;
3463 params->f_action_miss = NULL;
/* Only pass the profile as handler arg when a handler is installed. */
3464 params->arg_ah = (f_action_hit) ? action : NULL;
/* action_data_size excludes the fixed rte_pipeline_table_entry header. */
3465 params->action_data_size = total_size -
3466 sizeof(struct rte_pipeline_table_entry);
3472 rte_table_action_free(struct rte_table_action *action)