1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
17 #include <rte_cryptodev.h>
18 #include <rte_cryptodev_pmd.h>
20 #include "rte_table_action.h"
/* BSD-socket-style byte-order names aliased onto the DPDK
 * CPU <-> big-endian conversion macros from rte_byteorder.h. */
22 #define rte_htons rte_cpu_to_be_16
23 #define rte_htonl rte_cpu_to_be_32
25 #define rte_ntohs rte_be_to_cpu_16
26 #define rte_ntohl rte_be_to_cpu_32
29 * RTE_TABLE_ACTION_FWD
31 #define fwd_data rte_pipeline_table_entry
/* fwd_apply(): copy the forward action into the pipeline table entry.
 * p->id is interpreted as a port id or a table id depending on the
 * action. NOTE(review): listing is elided here (return path not shown). */
34 fwd_apply(struct fwd_data *data,
35 struct rte_table_action_fwd_params *p)
37 data->action = p->action;
/* Only one of port_id/table_id is meaningful for a given action. */
39 if (p->action == RTE_PIPELINE_ACTION_PORT)
40 data->port_id = p->id;
42 if (p->action == RTE_PIPELINE_ACTION_TABLE)
43 data->table_id = p->id;
/* lb_cfg_check(): validate load-balance config — key size must lie in
 * [LB_KEY_SIZE_MIN, LB_KEY_SIZE_MAX], be a power of 2, and a hash
 * callback must be provided. */
52 lb_cfg_check(struct rte_table_action_lb_config *cfg)
55 (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
56 (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
57 (!rte_is_power_of_2(cfg->key_size)) ||
58 (cfg->f_hash == NULL))
/* Per-entry LB data: table of output values indexed by hashed key. */
65 uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
66 } __attribute__((__packed__));
/* lb_apply(): copy the user-supplied output table into the entry. */
69 lb_apply(struct lb_data *data,
70 struct rte_table_action_lb_params *p)
72 memcpy(data->out, p->out, sizeof(data->out));
/* pkt_work_lb(): hash the packet key read from mbuf metadata and write
 * the selected output value back into mbuf metadata at out_offset. */
77 static __rte_always_inline void
78 pkt_work_lb(struct rte_mbuf *mbuf,
80 struct rte_table_action_lb_config *cfg)
82 uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
83 uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
87 digest = cfg->f_hash(pkt_key,
/* LB table size is a power of 2, so masking the digest picks a slot. */
91 pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
92 out_val = data->out[pos];
98 * RTE_TABLE_ACTION_MTR
/* mtr_cfg_check(): only trTCM (not srTCM), 1 or 4 traffic classes,
 * and packet-based (n_bytes_enabled == 0) metering are supported. */
101 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
103 if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
104 ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
105 (mtr->n_bytes_enabled != 0))
/* Per-TC meter state. The stats[] words are overloaded: the low 8 bits
 * of each word hold control fields (see the GET/SET macros below), the
 * upper 56 bits hold the per-color packet counter. */
110 struct mtr_trtcm_data {
111 struct rte_meter_trtcm trtcm;
112 uint64_t stats[e_RTE_METER_COLORS];
113 } __attribute__((__packed__));
/* Meter profile id lives in bits [3..7] of the GREEN stats word. */
115 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
116 (((data)->stats[e_RTE_METER_GREEN] & 0xF8LLU) >> 3)
119 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
/* Clear then store the 5-bit profile id field (profile_id mod 32). */
122 data->stats[e_RTE_METER_GREEN] &= ~0xF8LLU;
123 data->stats[e_RTE_METER_GREEN] |= (profile_id % 32) << 3;
/* Policer action per meter color: bit 2 = drop flag,
 * bits [0..1] = output (re)color. */
126 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
127 (((data)->stats[(color)] & 4LLU) >> 2)
129 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
130 ((enum rte_meter_color)((data)->stats[(color)] & 3LLU))
133 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
134 enum rte_meter_color color,
135 enum rte_table_action_policer action)
137 if (action == RTE_TABLE_ACTION_POLICER_DROP) {
138 data->stats[color] |= 4LLU;
/* Non-drop: clear drop flag + recolor field, then store the color. */
140 data->stats[color] &= ~7LLU;
/* NOTE(review): the recolor field is written from 'color' (the meter
 * output color), not from the 'action' target color — a COLOR_YELLOW
 * action on green packets would not be honored. Looks suspicious;
 * confirm against upstream. */
141 data->stats[color] |= color & 3LLU;
/* Packet counter occupies stats bits [8..63]. */
146 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
147 enum rte_meter_color color)
149 return data->stats[color] >> 8;
/* Reset clears only the counter bits, preserving the control byte. */
153 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
154 enum rte_meter_color color)
156 data->stats[color] &= 0xFFLU;
/* Increment the per-color packet counter kept in the upper 56 bits of
 * the stats word; the low 8 control bits are left untouched.
 * (Cleaned of the stray line-number prefixes present in the listing.) */
#define MTR_TRTCM_DATA_STATS_INC(data, color) \
	((data)->stats[(color)] += (1LLU << 8))
/* Action data size for MTR: one trTCM state block per traffic class. */
163 mtr_data_size(struct rte_table_action_mtr_config *mtr)
165 return mtr->n_tc * sizeof(struct mtr_trtcm_data);
/* DSCP translation table: 64 entries, one per 6-bit DSCP value. */
168 struct dscp_table_entry_data {
169 enum rte_meter_color color;
174 struct dscp_table_data {
175 struct dscp_table_entry_data entry[64];
/* Registered meter profile slot. */
178 struct meter_profile_data {
179 struct rte_meter_trtcm_profile profile;
/* Linear search for a valid slot with a matching profile id. */
184 static struct meter_profile_data *
185 meter_profile_data_find(struct meter_profile_data *mp,
191 for (i = 0; i < mp_size; i++) {
192 struct meter_profile_data *mp_data = &mp[i];
194 if (mp_data->valid && (mp_data->profile_id == profile_id))
/* Linear search for a free (invalid) slot. */
201 static struct meter_profile_data *
202 meter_profile_data_find_unused(struct meter_profile_data *mp,
207 for (i = 0; i < mp_size; i++) {
208 struct meter_profile_data *mp_data = &mp[i];
/* Validate MTR params: tc_mask must fit the configured n_tc, and every
 * selected TC must reference an already-registered meter profile. */
218 mtr_apply_check(struct rte_table_action_mtr_params *p,
219 struct rte_table_action_mtr_config *cfg,
220 struct meter_profile_data *mp,
225 if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
228 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
229 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
230 struct meter_profile_data *mp_data;
232 if ((p->tc_mask & (1LLU << i)) == 0)
235 mp_data = meter_profile_data_find(mp,
237 p_tc->meter_profile_id);
/* mtr_apply(): for each selected TC, reset the per-TC state, configure
 * the run-time trTCM from the referenced profile, record the profile id
 * and the three per-color policer actions. */
246 mtr_apply(struct mtr_trtcm_data *data,
247 struct rte_table_action_mtr_params *p,
248 struct rte_table_action_mtr_config *cfg,
249 struct meter_profile_data *mp,
255 /* Check input arguments */
256 status = mtr_apply_check(p, cfg, mp, mp_size);
261 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
262 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
263 struct mtr_trtcm_data *data_tc = &data[i];
264 struct meter_profile_data *mp_data;
266 if ((p->tc_mask & (1LLU << i)) == 0)
270 mp_data = meter_profile_data_find(mp,
272 p_tc->meter_profile_id);
276 memset(data_tc, 0, sizeof(*data_tc));
/* Meter run-time state derived from the shared profile. */
279 status = rte_meter_trtcm_config(&data_tc->trtcm,
285 mtr_trtcm_data_meter_profile_id_set(data_tc,
288 /* Policer actions */
289 mtr_trtcm_data_policer_action_set(data_tc,
291 p_tc->policer[e_RTE_METER_GREEN]);
293 mtr_trtcm_data_policer_action_set(data_tc,
295 p_tc->policer[e_RTE_METER_YELLOW]);
297 mtr_trtcm_data_policer_action_set(data_tc,
299 p_tc->policer[e_RTE_METER_RED]);
/* Per-packet metering: input color comes from the DSCP table, the
 * color-aware trTCM check produces the meter color, the result is
 * counted, and the stored policer action yields a drop mask and the
 * recolor written into the mbuf. */
305 static __rte_always_inline uint64_t
306 pkt_work_mtr(struct rte_mbuf *mbuf,
307 struct mtr_trtcm_data *data,
308 struct dscp_table_data *dscp_table,
309 struct meter_profile_data *mp,
312 uint16_t total_length)
315 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
316 enum rte_meter_color color_in, color_meter, color_policer;
320 color_in = dscp_entry->color;
322 mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
325 color_meter = rte_meter_trtcm_color_aware_check(
333 MTR_TRTCM_DATA_STATS_INC(data, color_meter);
336 drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
338 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
339 rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);
345 * RTE_TABLE_ACTION_TM
/* tm_cfg_check(): subport/pipe counts must be non-zero powers of 2;
 * the subport count must also fit in 16 bits. */
348 tm_cfg_check(struct rte_table_action_tm_config *tm)
350 if ((tm->n_subports_per_port == 0) ||
351 (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
352 (tm->n_subports_per_port > UINT16_MAX) ||
353 (tm->n_pipes_per_subport == 0) ||
354 (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
363 } __attribute__((__packed__));
/* tm_apply_check(): subport/pipe ids must be within configured range. */
366 tm_apply_check(struct rte_table_action_tm_params *p,
367 struct rte_table_action_tm_config *cfg)
369 if ((p->subport_id >= cfg->n_subports_per_port) ||
370 (p->pipe_id >= cfg->n_pipes_per_subport))
/* tm_apply(): precompute the base scheduler queue id from subport id.
 * The "+ 4" shift reserves 4 bits (16 queues: 4 TCs x 4 queues) per
 * pipe. NOTE(review): the pipe_id contribution is on an elided line —
 * confirm against the full source. */
377 tm_apply(struct tm_data *data,
378 struct rte_table_action_tm_params *p,
379 struct rte_table_action_tm_config *cfg)
383 /* Check input arguments */
384 status = tm_apply_check(p, cfg);
389 data->queue_id = p->subport_id <<
390 (__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
/* pkt_work_tm(): fold the DSCP-derived TC and TC queue into the
 * precomputed base queue id, then stamp queue/TC/color on the mbuf. */
396 static __rte_always_inline void
397 pkt_work_tm(struct rte_mbuf *mbuf,
398 struct tm_data *data,
399 struct dscp_table_data *dscp_table,
402 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
403 uint32_t queue_id = data->queue_id |
404 (dscp_entry->tc << 2) |
405 dscp_entry->tc_queue;
406 rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
407 (uint8_t)dscp_entry->color);
411 * RTE_TABLE_ACTION_ENCAP
/* encap_valid(): whitelist of the supported encapsulation types. */
414 encap_valid(enum rte_table_action_encap_type encap)
417 case RTE_TABLE_ACTION_ENCAP_ETHER:
418 case RTE_TABLE_ACTION_ENCAP_VLAN:
419 case RTE_TABLE_ACTION_ENCAP_QINQ:
420 case RTE_TABLE_ACTION_ENCAP_MPLS:
421 case RTE_TABLE_ACTION_ENCAP_PPPOE:
422 case RTE_TABLE_ACTION_ENCAP_VXLAN:
/* encap_cfg_check(): exactly one encap type may be enabled. */
430 encap_cfg_check(struct rte_table_action_encap_config *encap)
432 if ((encap->encap_mask == 0) ||
433 (__builtin_popcountll(encap->encap_mask) != 1))
/* Pre-built header blobs, one packed struct per encap type, so they
 * can be byte-copied in front of the packet at run time. */
439 struct encap_ether_data {
440 struct ether_hdr ether;
441 } __attribute__((__packed__));
/* Build an 802.1Q TCI value from PCP (3 bits), DEI (1 bit) and
 * VID (12 bits). Host byte order; callers apply rte_htons().
 * Fixes: removed the stray trailing line-continuation backslash (which
 * made the macro swallow the following source line) and the listing's
 * stray line-number prefixes. */
#define VLAN(pcp, dei, vid)                                \
	((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) |  \
	((((uint64_t)(dei)) & 0x1LLU) << 12) |             \
	(((uint64_t)(vid)) & 0xFFFLLU))
/* Single-tagged Ethernet header blob. */
448 struct encap_vlan_data {
449 struct ether_hdr ether;
450 struct vlan_hdr vlan;
451 } __attribute__((__packed__));
/* Double-tagged (802.1ad Q-in-Q) header blob: service + customer tag. */
453 struct encap_qinq_data {
454 struct ether_hdr ether;
455 struct vlan_hdr svlan;
456 struct vlan_hdr cvlan;
457 } __attribute__((__packed__));
/* MPLS ethertypes (not provided by rte_ether.h). */
459 #define ETHER_TYPE_MPLS_UNICAST 0x8847
461 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
/* Build an MPLS label-stack entry: Label(20) | TC(3) | S(1) | TTL(8),
 * per RFC 3032. Host byte order; callers apply rte_htonl().
 * (Cleaned of the stray line-number prefixes present in the listing.) */
#define MPLS(label, tc, s, ttl)                                  \
	((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) | \
	((((uint64_t)(tc)) & 0x7LLU) << 9) |                     \
	((((uint64_t)(s)) & 0x1LLU) << 8) |                      \
	(((uint64_t)(ttl)) & 0xFFLLU)))
/* Ethernet + up to LABELS_MAX MPLS label-stack entries. */
469 struct encap_mpls_data {
470 struct ether_hdr ether;
471 uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
473 } __attribute__((__packed__));
/* PPPoE session-stage ethertype and the PPP protocol id for IPv4. */
475 #define ETHER_TYPE_PPPOE_SESSION 0x8864
477 #define PPP_PROTOCOL_IP 0x0021
/* Combined PPPoE + PPP header (version/type/code packed in one u16). */
479 struct pppoe_ppp_hdr {
480 uint16_t ver_type_code;
484 } __attribute__((__packed__));
486 struct encap_pppoe_data {
487 struct ether_hdr ether;
488 struct pppoe_ppp_hdr pppoe_ppp;
489 } __attribute__((__packed__));
491 #define IP_PROTO_UDP 17
/* VXLAN-over-UDP header blobs: four variants for the
 * {IPv4, IPv6} x {untagged, VLAN-tagged} combinations. */
493 struct encap_vxlan_ipv4_data {
494 struct ether_hdr ether;
495 struct ipv4_hdr ipv4;
497 struct vxlan_hdr vxlan;
498 } __attribute__((__packed__));
500 struct encap_vxlan_ipv4_vlan_data {
501 struct ether_hdr ether;
502 struct vlan_hdr vlan;
503 struct ipv4_hdr ipv4;
505 struct vxlan_hdr vxlan;
506 } __attribute__((__packed__));
508 struct encap_vxlan_ipv6_data {
509 struct ether_hdr ether;
510 struct ipv6_hdr ipv6;
512 struct vxlan_hdr vxlan;
513 } __attribute__((__packed__));
515 struct encap_vxlan_ipv6_vlan_data {
516 struct ether_hdr ether;
517 struct vlan_hdr vlan;
518 struct ipv6_hdr ipv6;
520 struct vxlan_hdr vxlan;
521 } __attribute__((__packed__));
/* encap_data_size(): size of the pre-built header blob for the single
 * enabled encap type; VXLAN picks among its four variants. */
524 encap_data_size(struct rte_table_action_encap_config *encap)
526 switch (encap->encap_mask) {
527 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
528 return sizeof(struct encap_ether_data);
530 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
531 return sizeof(struct encap_vlan_data);
533 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
534 return sizeof(struct encap_qinq_data);
536 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
537 return sizeof(struct encap_mpls_data);
539 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
540 return sizeof(struct encap_pppoe_data);
542 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
/* ip_version != 0 means IPv4, 0 means IPv6 (per the checks in
 * encap_vxlan_apply() below). */
543 if (encap->vxlan.ip_version)
544 if (encap->vxlan.vlan)
545 return sizeof(struct encap_vxlan_ipv4_vlan_data);
547 return sizeof(struct encap_vxlan_ipv4_data);
549 if (encap->vxlan.vlan)
550 return sizeof(struct encap_vxlan_ipv6_vlan_data);
552 return sizeof(struct encap_vxlan_ipv6_data);
/* encap_apply_check(): requested type must be valid and be the one
 * enabled in the config; MPLS label count must be in [1, LABELS_MAX]. */
560 encap_apply_check(struct rte_table_action_encap_params *p,
561 struct rte_table_action_encap_config *cfg)
563 if ((encap_valid(p->type) == 0) ||
564 ((cfg->encap_mask & (1LLU << p->type)) == 0))
568 case RTE_TABLE_ACTION_ENCAP_ETHER:
571 case RTE_TABLE_ACTION_ENCAP_VLAN:
574 case RTE_TABLE_ACTION_ENCAP_QINQ:
577 case RTE_TABLE_ACTION_ENCAP_MPLS:
578 if ((p->mpls.mpls_count == 0) ||
579 (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
584 case RTE_TABLE_ACTION_ENCAP_PPPOE:
587 case RTE_TABLE_ACTION_ENCAP_VXLAN:
/* encap_ether_apply(): pre-build a plain Ethernet header; the ethertype
 * is selected by the common config's ip_version flag (the two constants
 * are on elided lines — presumably ETHER_TYPE_IPv4 / IPv6; confirm). */
596 encap_ether_apply(void *data,
597 struct rte_table_action_encap_params *p,
598 struct rte_table_action_common_config *common_cfg)
600 struct encap_ether_data *d = data;
601 uint16_t ethertype = (common_cfg->ip_version) ?
606 ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
607 ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
608 d->ether.ether_type = rte_htons(ethertype);
/* encap_vlan_apply(): Ethernet + single 802.1Q tag; outer ethertype is
 * VLAN, inner ethertype follows ip_version. */
614 encap_vlan_apply(void *data,
615 struct rte_table_action_encap_params *p,
616 struct rte_table_action_common_config *common_cfg)
618 struct encap_vlan_data *d = data;
619 uint16_t ethertype = (common_cfg->ip_version) ?
624 ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
625 ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
626 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
629 d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
632 d->vlan.eth_proto = rte_htons(ethertype);
/* encap_qinq_apply(): Ethernet + service tag (QINQ ethertype) +
 * customer tag (VLAN ethertype), inner ethertype per ip_version. */
638 encap_qinq_apply(void *data,
639 struct rte_table_action_encap_params *p,
640 struct rte_table_action_common_config *common_cfg)
642 struct encap_qinq_data *d = data;
643 uint16_t ethertype = (common_cfg->ip_version) ?
648 ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
649 ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
650 d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
653 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
656 d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
659 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
662 d->cvlan.eth_proto = rte_htons(ethertype);
/* encap_mpls_apply(): Ethernet + label stack; all labels but the last
 * are written in the first loop, the bottom-of-stack label last (its
 * S-bit argument is on an elided line). */
668 encap_mpls_apply(void *data,
669 struct rte_table_action_encap_params *p)
671 struct encap_mpls_data *d = data;
672 uint16_t ethertype = (p->mpls.unicast) ?
673 ETHER_TYPE_MPLS_UNICAST :
674 ETHER_TYPE_MPLS_MULTICAST;
678 ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
679 ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
680 d->ether.ether_type = rte_htons(ethertype);
683 for (i = 0; i < p->mpls.mpls_count - 1; i++)
684 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
687 p->mpls.mpls[i].ttl));
689 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
692 p->mpls.mpls[i].ttl));
694 d->mpls_count = p->mpls.mpls_count;
/* encap_pppoe_apply(): Ethernet (PPPoE session ethertype) + PPPoE/PPP
 * header; the length field is patched per packet at run time. */
699 encap_pppoe_apply(void *data,
700 struct rte_table_action_encap_params *p)
702 struct encap_pppoe_data *d = data;
705 ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
706 ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
707 d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
/* 0x1100 = version 1, type 1, code 0 (session data). */
710 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
711 d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
712 d->pppoe_ppp.length = 0; /* not pre-computed */
713 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/* encap_vxlan_apply(): validate field ranges, then pre-build one of the
 * four VXLAN header variants ({IPv4,IPv6} x {plain,VLAN}). Length
 * fields are left 0 (patched per packet); the IPv4 header checksum is
 * pre-computed here and later updated incrementally. */
719 encap_vxlan_apply(void *data,
720 struct rte_table_action_encap_params *p,
721 struct rte_table_action_encap_config *cfg)
/* Range checks: VNI 24 bits, DSCP 6 bits, IPv6 flow label 20 bits,
 * VID 12 bits. */
723 if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
724 (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
725 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
726 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
727 (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
730 if (cfg->vxlan.ip_version)
/* IPv4 + VLAN variant. */
731 if (cfg->vxlan.vlan) {
732 struct encap_vxlan_ipv4_vlan_data *d = data;
735 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
736 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
737 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
740 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
743 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);
/* 0x45 = IPv4, 5-word (20-byte) header, no options. */
746 d->ipv4.version_ihl = 0x45;
747 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
748 d->ipv4.total_length = 0; /* not pre-computed */
749 d->ipv4.packet_id = 0;
750 d->ipv4.fragment_offset = 0;
751 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
752 d->ipv4.next_proto_id = IP_PROTO_UDP;
753 d->ipv4.hdr_checksum = 0;
754 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
755 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
757 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
760 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
761 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
762 d->udp.dgram_len = 0; /* not pre-computed */
763 d->udp.dgram_cksum = 0;
/* VXLAN flags: I bit set (valid VNI); VNI in bits [8..31]. */
766 d->vxlan.vx_flags = rte_htonl(0x08000000);
767 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv4, untagged variant. */
771 struct encap_vxlan_ipv4_data *d = data;
774 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
775 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
776 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);
779 d->ipv4.version_ihl = 0x45;
780 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
781 d->ipv4.total_length = 0; /* not pre-computed */
782 d->ipv4.packet_id = 0;
783 d->ipv4.fragment_offset = 0;
784 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
785 d->ipv4.next_proto_id = IP_PROTO_UDP;
786 d->ipv4.hdr_checksum = 0;
787 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
788 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
790 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
793 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
794 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
795 d->udp.dgram_len = 0; /* not pre-computed */
796 d->udp.dgram_cksum = 0;
799 d->vxlan.vx_flags = rte_htonl(0x08000000);
800 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv6 + VLAN variant. */
805 if (cfg->vxlan.vlan) {
806 struct encap_vxlan_ipv6_vlan_data *d = data;
809 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
810 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
811 d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
814 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
817 d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);
/* vtc_flow = version(6) | traffic class (DSCP<<2) | flow label. */
820 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
821 (p->vxlan.ipv6.dscp << 22) |
822 p->vxlan.ipv6.flow_label);
823 d->ipv6.payload_len = 0; /* not pre-computed */
824 d->ipv6.proto = IP_PROTO_UDP;
825 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
826 memcpy(d->ipv6.src_addr,
828 sizeof(p->vxlan.ipv6.sa));
829 memcpy(d->ipv6.dst_addr,
831 sizeof(p->vxlan.ipv6.da));
834 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
835 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
836 d->udp.dgram_len = 0; /* not pre-computed */
837 d->udp.dgram_cksum = 0;
840 d->vxlan.vx_flags = rte_htonl(0x08000000);
841 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* IPv6, untagged variant. */
845 struct encap_vxlan_ipv6_data *d = data;
848 ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
849 ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
850 d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);
853 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
854 (p->vxlan.ipv6.dscp << 22) |
855 p->vxlan.ipv6.flow_label);
856 d->ipv6.payload_len = 0; /* not pre-computed */
857 d->ipv6.proto = IP_PROTO_UDP;
858 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
859 memcpy(d->ipv6.src_addr,
861 sizeof(p->vxlan.ipv6.sa));
862 memcpy(d->ipv6.dst_addr,
864 sizeof(p->vxlan.ipv6.da));
867 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
868 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
869 d->udp.dgram_len = 0; /* not pre-computed */
870 d->udp.dgram_cksum = 0;
873 d->vxlan.vx_flags = rte_htonl(0x08000000);
874 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/* encap_apply(): validate, then dispatch to the per-type pre-build
 * helper for the requested encapsulation. */
881 encap_apply(void *data,
882 struct rte_table_action_encap_params *p,
883 struct rte_table_action_encap_config *cfg,
884 struct rte_table_action_common_config *common_cfg)
888 /* Check input arguments */
889 status = encap_apply_check(p, cfg)
894 case RTE_TABLE_ACTION_ENCAP_ETHER:
895 return encap_ether_apply(data, p, common_cfg);
897 case RTE_TABLE_ACTION_ENCAP_VLAN:
898 return encap_vlan_apply(data, p, common_cfg);
900 case RTE_TABLE_ACTION_ENCAP_QINQ:
901 return encap_qinq_apply(data, p, common_cfg);
903 case RTE_TABLE_ACTION_ENCAP_MPLS:
904 return encap_mpls_apply(data, p);
906 case RTE_TABLE_ACTION_ENCAP_PPPOE:
907 return encap_pppoe_apply(data, p);
909 case RTE_TABLE_ACTION_ENCAP_VXLAN:
910 return encap_vxlan_apply(data, p, cfg);
/* Incrementally fold the (big-endian) total_length into a pre-computed
 * IPv4 header checksum using one's-complement arithmetic (RFC 1624
 * style). NOTE(review): lines initializing cksum1 from cksum0 are
 * elided in this listing. */
917 static __rte_always_inline uint16_t
918 encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
919 uint16_t total_length)
924 cksum1 = ~cksum1 & 0xFFFF;
926 /* Add total length (one's complement logic) */
927 cksum1 += total_length;
/* Fold carries twice so the result fits in 16 bits. */
928 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
929 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
931 return (uint16_t)(~cksum1);
/* encap(): prepend n bytes of header in front of dst — move the write
 * pointer back by n, copy, and return the new start of packet. */
934 static __rte_always_inline void *
935 encap(void *dst, const void *src, size_t n)
937 dst = ((uint8_t *) dst) - n;
938 return rte_memcpy(dst, src, n);
/* Per-packet VXLAN/IPv4 encap: compute the outer IPv4/UDP lengths from
 * the current frame length, incrementally patch the pre-computed IPv4
 * checksum, prepend the header blob, and fix up the mbuf data offset
 * and lengths. */
941 static __rte_always_inline void
942 pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
943 struct encap_vxlan_ipv4_data *vxlan_tbl,
944 struct rte_table_action_encap_config *cfg)
946 uint32_t ether_offset = cfg->vxlan.data_offset;
947 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
948 struct encap_vxlan_ipv4_data *vxlan_pkt;
949 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
951 ether_length = (uint16_t)mbuf->pkt_len;
952 ipv4_total_length = ether_length +
953 (sizeof(struct vxlan_hdr) +
954 sizeof(struct udp_hdr) +
955 sizeof(struct ipv4_hdr));
956 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
957 rte_htons(ipv4_total_length));
958 udp_length = ether_length +
959 (sizeof(struct vxlan_hdr) +
960 sizeof(struct udp_hdr));
962 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
963 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
964 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
965 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
967 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
968 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Same as above, VXLAN/IPv4 with a VLAN tag in the outer header. */
971 static __rte_always_inline void
972 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
973 struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
974 struct rte_table_action_encap_config *cfg)
976 uint32_t ether_offset = cfg->vxlan.data_offset;
977 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
978 struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
979 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
981 ether_length = (uint16_t)mbuf->pkt_len;
982 ipv4_total_length = ether_length +
983 (sizeof(struct vxlan_hdr) +
984 sizeof(struct udp_hdr) +
985 sizeof(struct ipv4_hdr));
986 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
987 rte_htons(ipv4_total_length));
988 udp_length = ether_length +
989 (sizeof(struct vxlan_hdr) +
990 sizeof(struct udp_hdr));
992 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
993 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
994 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
995 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
997 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
998 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Per-packet VXLAN/IPv6 encap: IPv6 has no header checksum, only the
 * payload length and UDP length need patching. */
1001 static __rte_always_inline void
1002 pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
1003 struct encap_vxlan_ipv6_data *vxlan_tbl,
1004 struct rte_table_action_encap_config *cfg)
1006 uint32_t ether_offset = cfg->vxlan.data_offset;
1007 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1008 struct encap_vxlan_ipv6_data *vxlan_pkt;
1009 uint16_t ether_length, ipv6_payload_length, udp_length;
1011 ether_length = (uint16_t)mbuf->pkt_len;
1012 ipv6_payload_length = ether_length +
1013 (sizeof(struct vxlan_hdr) +
1014 sizeof(struct udp_hdr));
1015 udp_length = ether_length +
1016 (sizeof(struct vxlan_hdr) +
1017 sizeof(struct udp_hdr));
1019 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1020 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1021 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1023 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1024 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Same as above, VXLAN/IPv6 with a VLAN tag in the outer header. */
1027 static __rte_always_inline void
1028 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
1029 struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
1030 struct rte_table_action_encap_config *cfg)
1032 uint32_t ether_offset = cfg->vxlan.data_offset;
1033 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1034 struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
1035 uint16_t ether_length, ipv6_payload_length, udp_length;
1037 ether_length = (uint16_t)mbuf->pkt_len;
1038 ipv6_payload_length = ether_length +
1039 (sizeof(struct vxlan_hdr) +
1040 sizeof(struct udp_hdr));
1041 udp_length = ether_length +
1042 (sizeof(struct vxlan_hdr) +
1043 sizeof(struct udp_hdr));
1045 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1046 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1047 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1049 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1050 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* pkt_work_encap(): per-packet dispatch on the single enabled encap
 * type; prepends the pre-built header in front of the IP payload and
 * fixes up mbuf offsets/lengths. PPPoE additionally patches its length
 * field; VXLAN delegates to the dedicated helpers above. */
1053 static __rte_always_inline void
1054 pkt_work_encap(struct rte_mbuf *mbuf,
1056 struct rte_table_action_encap_config *cfg,
1058 uint16_t total_length,
1061 switch (cfg->encap_mask) {
1062 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
1063 encap(ip, data, sizeof(struct encap_ether_data));
1064 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1065 sizeof(struct encap_ether_data));
1066 mbuf->pkt_len = mbuf->data_len = total_length +
1067 sizeof(struct encap_ether_data);
1070 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
1071 encap(ip, data, sizeof(struct encap_vlan_data));
1072 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1073 sizeof(struct encap_vlan_data));
1074 mbuf->pkt_len = mbuf->data_len = total_length +
1075 sizeof(struct encap_vlan_data);
1078 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
1079 encap(ip, data, sizeof(struct encap_qinq_data));
1080 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1081 sizeof(struct encap_qinq_data));
1082 mbuf->pkt_len = mbuf->data_len = total_length +
1083 sizeof(struct encap_qinq_data);
1086 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
/* MPLS header size depends on the per-entry label count. */
1088 struct encap_mpls_data *mpls = data;
1089 size_t size = sizeof(struct ether_hdr) +
1090 mpls->mpls_count * 4;
1092 encap(ip, data, size);
1093 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
1094 mbuf->pkt_len = mbuf->data_len = total_length + size;
1098 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
1100 struct encap_pppoe_data *pppoe =
1101 encap(ip, data, sizeof(struct encap_pppoe_data));
/* PPPoE length = IP payload + 2 bytes of PPP protocol field. */
1102 pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1103 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1104 sizeof(struct encap_pppoe_data));
1105 mbuf->pkt_len = mbuf->data_len = total_length +
1106 sizeof(struct encap_pppoe_data);
1110 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
1112 if (cfg->vxlan.ip_version)
1113 if (cfg->vxlan.vlan)
1114 pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
1116 pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
1118 if (cfg->vxlan.vlan)
1119 pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
1121 pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
1130 * RTE_TABLE_ACTION_NAT
/* nat_cfg_check(): only TCP (0x06) and UDP (0x11) are supported. */
1133 nat_cfg_check(struct rte_table_action_nat_config *nat)
1135 if ((nat->proto != 0x06) &&
1136 (nat->proto != 0x11))
/* Per-entry NAT data: translated address + L4 port, stored in network
 * byte order (see nat_apply below). */
1142 struct nat_ipv4_data {
1145 } __attribute__((__packed__));
1147 struct nat_ipv6_data {
1150 } __attribute__((__packed__));
/* Action data size follows the common config's IP version. */
1153 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
1154 struct rte_table_action_common_config *common)
1156 int ip_version = common->ip_version;
1158 return (ip_version) ?
1159 sizeof(struct nat_ipv4_data) :
1160 sizeof(struct nat_ipv6_data);
/* Params' IP version must match the table's configured IP version. */
1164 nat_apply_check(struct rte_table_action_nat_params *p,
1165 struct rte_table_action_common_config *cfg)
1167 if ((p->ip_version && (cfg->ip_version == 0)) ||
1168 ((p->ip_version == 0) && cfg->ip_version))
/* nat_apply(): store the translated address/port pre-converted to
 * network byte order so the data path can write them directly. */
1175 nat_apply(void *data,
1176 struct rte_table_action_nat_params *p,
1177 struct rte_table_action_common_config *cfg)
1181 /* Check input arguments */
1182 status = nat_apply_check(p, cfg);
1187 if (p->ip_version) {
1188 struct nat_ipv4_data *d = data;
1190 d->addr = rte_htonl(p->addr.ipv4);
1191 d->port = rte_htons(p->port);
1193 struct nat_ipv6_data *d = data;
1195 memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
1196 d->port = rte_htons(p->port);
/* Incremental (RFC 1624 style) IPv4 header checksum update: subtract
 * the old address words, add the new ones, folding carries in
 * one's-complement arithmetic. NOTE(review): the lines initializing
 * cksum1 from cksum0 are elided in this listing. */
1202 static __rte_always_inline uint16_t
1203 nat_ipv4_checksum_update(uint16_t cksum0,
1210 cksum1 = ~cksum1 & 0xFFFF;
1212 /* Subtract ip0 (one's complement logic) */
1213 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
1214 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1215 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1217 /* Add ip1 (one's complement logic) */
1218 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
1219 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1220 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1222 return (uint16_t)(~cksum1);
/* Same incremental update for the TCP/UDP checksum over IPv4: the
 * pseudo-header address and the L4 port both change. */
1225 static __rte_always_inline uint16_t
1226 nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
1235 cksum1 = ~cksum1 & 0xFFFF;
1237 /* Subtract ip0 and port 0 (one's complement logic) */
1238 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
1239 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1240 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1242 /* Add ip1 and port1 (one's complement logic) */
1243 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
1244 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1245 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1247 return (uint16_t)(~cksum1);
/* IPv6 variant: the 128-bit address is folded as eight 16-bit words. */
1250 static __rte_always_inline uint16_t
1251 nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
1260 cksum1 = ~cksum1 & 0xFFFF;
1262 /* Subtract ip0 and port 0 (one's complement logic) */
1263 cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
1264 ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
1265 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1266 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1268 /* Add ip1 and port1 (one's complement logic) */
1269 cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
1270 ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
1271 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1272 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1274 return (uint16_t)(~cksum1);
/* Per-packet IPv4 NAT: rewrite source or destination address + L4 port
 * (per cfg->source_nat / cfg->proto) and incrementally fix the IPv4
 * header checksum and the TCP/UDP checksum. The L4 header is assumed
 * to start right after the 20-byte IPv4 header (&ip[1] — no options). */
1277 static __rte_always_inline void
1278 pkt_ipv4_work_nat(struct ipv4_hdr *ip,
1279 struct nat_ipv4_data *data,
1280 struct rte_table_action_nat_config *cfg)
1282 if (cfg->source_nat) {
1283 if (cfg->proto == 0x6) {
1284 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1285 uint16_t ip_cksum, tcp_cksum;
1287 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1291 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1297 ip->src_addr = data->addr;
1298 ip->hdr_checksum = ip_cksum;
1299 tcp->src_port = data->port;
1300 tcp->cksum = tcp_cksum;
1302 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1303 uint16_t ip_cksum, udp_cksum;
1305 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1309 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1315 ip->src_addr = data->addr;
1316 ip->hdr_checksum = ip_cksum;
1317 udp->src_port = data->port;
/* IPv4 UDP checksum 0 means "not computed" — leave it 0. */
1318 if (udp->dgram_cksum)
1319 udp->dgram_cksum = udp_cksum;
/* Destination NAT: same logic on dst_addr/dst_port. */
1322 if (cfg->proto == 0x6) {
1323 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1324 uint16_t ip_cksum, tcp_cksum;
1326 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1330 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1336 ip->dst_addr = data->addr;
1337 ip->hdr_checksum = ip_cksum;
1338 tcp->dst_port = data->port;
1339 tcp->cksum = tcp_cksum;
1341 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1342 uint16_t ip_cksum, udp_cksum;
1344 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1348 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1354 ip->dst_addr = data->addr;
1355 ip->hdr_checksum = ip_cksum;
1356 udp->dst_port = data->port;
1357 if (udp->dgram_cksum)
1358 udp->dgram_cksum = udp_cksum;
/* Per-packet IPv6 NAT: IPv6 has no header checksum; only the TCP/UDP
 * checksum (pseudo-header) is updated. The L4 header is assumed to
 * follow the fixed 40-byte IPv6 header (&ip[1] — no extension hdrs). */
1363 static __rte_always_inline void
1364 pkt_ipv6_work_nat(struct ipv6_hdr *ip,
1365 struct nat_ipv6_data *data,
1366 struct rte_table_action_nat_config *cfg)
1368 if (cfg->source_nat) {
1369 if (cfg->proto == 0x6) {
1370 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1373 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1374 (uint16_t *)ip->src_addr,
1375 (uint16_t *)data->addr,
1379 rte_memcpy(ip->src_addr, data->addr, 16);
1380 tcp->src_port = data->port;
1381 tcp->cksum = tcp_cksum;
1383 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1386 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1387 (uint16_t *)ip->src_addr,
1388 (uint16_t *)data->addr,
/* NOTE(review): unlike the IPv4 path, the UDP checksum is written
 * unconditionally here (mandatory for UDP over IPv6). */
1392 rte_memcpy(ip->src_addr, data->addr, 16);
1393 udp->src_port = data->port;
1394 udp->dgram_cksum = udp_cksum;
1397 if (cfg->proto == 0x6) {
1398 struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
1401 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1402 (uint16_t *)ip->dst_addr,
1403 (uint16_t *)data->addr,
1407 rte_memcpy(ip->dst_addr, data->addr, 16);
1408 tcp->dst_port = data->port;
1409 tcp->cksum = tcp_cksum;
1411 struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
1414 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1415 (uint16_t *)ip->dst_addr,
1416 (uint16_t *)data->addr,
1420 rte_memcpy(ip->dst_addr, data->addr, 16);
1421 udp->dst_port = data->port;
1422 udp->dgram_cksum = udp_cksum;
1428 * RTE_TABLE_ACTION_TTL
1431 ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
1441 } __attribute__((__packed__));
/* Packed TTL counter layout: bit 0 of n_packets stores the "decrement TTL"
 * enable flag, bits 63:1 hold the count of packets dropped for TTL expiry.
 */
#define TTL_INIT(data, decrement) \
	((data)->n_packets = (decrement) ? 1 : 0)

/* Read the decrement-enable flag (0 or 1). */
#define TTL_DEC_GET(data) \
	((uint8_t)((data)->n_packets & 1))

/* Zero the drop counter while preserving the flag bit. */
#define TTL_STATS_RESET(data) \
	((data)->n_packets = ((data)->n_packets & 1))

/* Return the drop counter (flag bit shifted out). */
#define TTL_STATS_READ(data) \
	((data)->n_packets >> 1)

/* Add (value) to the drop counter without disturbing the flag bit. */
#define TTL_STATS_ADD(data, value) \
	((data)->n_packets = \
		(((((data)->n_packets >> 1) + (value)) << 1) | \
		((data)->n_packets & 1)))
1461 ttl_apply(void *data,
1462 struct rte_table_action_ttl_params *p)
1464 struct ttl_data *d = data;
1466 TTL_INIT(d, p->decrement);
1471 static __rte_always_inline uint64_t
1472 pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
1473 struct ttl_data *data)
1476 uint16_t cksum = ip->hdr_checksum;
1477 uint8_t ttl = ip->time_to_live;
1478 uint8_t ttl_diff = TTL_DEC_GET(data);
1483 ip->hdr_checksum = cksum;
1484 ip->time_to_live = ttl;
1486 drop = (ttl == 0) ? 1 : 0;
1487 TTL_STATS_ADD(data, drop);
1492 static __rte_always_inline uint64_t
1493 pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
1494 struct ttl_data *data)
1497 uint8_t ttl = ip->hop_limits;
1498 uint8_t ttl_diff = TTL_DEC_GET(data);
1502 ip->hop_limits = ttl;
1504 drop = (ttl == 0) ? 1 : 0;
1505 TTL_STATS_ADD(data, drop);
1511 * RTE_TABLE_ACTION_STATS
1514 stats_cfg_check(struct rte_table_action_stats_config *stats)
1516 if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
1525 } __attribute__((__packed__));
1528 stats_apply(struct stats_data *data,
1529 struct rte_table_action_stats_params *p)
1531 data->n_packets = p->n_packets;
1532 data->n_bytes = p->n_bytes;
1537 static __rte_always_inline void
1538 pkt_work_stats(struct stats_data *data,
1539 uint16_t total_length)
1542 data->n_bytes += total_length;
1546 * RTE_TABLE_ACTION_TIME
1550 } __attribute__((__packed__));
1553 time_apply(struct time_data *data,
1554 struct rte_table_action_time_params *p)
1556 data->time = p->time;
1560 static __rte_always_inline void
1561 pkt_work_time(struct time_data *data,
1569 * RTE_TABLE_ACTION_CRYPTO
1572 #define CRYPTO_OP_MASK_CIPHER 0x1
1573 #define CRYPTO_OP_MASK_AUTH 0x2
1574 #define CRYPTO_OP_MASK_AEAD 0x4
1576 struct crypto_op_sym_iv_aad {
1577 struct rte_crypto_op op;
1578 struct rte_crypto_sym_op sym_op;
1582 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1584 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1588 uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1589 uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
1595 struct sym_crypto_data {
1600 /** Length of cipher iv. */
1601 uint16_t cipher_iv_len;
1603 /** Offset from start of IP header to the cipher iv. */
1604 uint16_t cipher_iv_data_offset;
1606 /** Length of cipher iv to be updated in the mbuf. */
1607 uint16_t cipher_iv_update_len;
1609 /** Offset from start of IP header to the auth iv. */
1610 uint16_t auth_iv_data_offset;
1612 /** Length of auth iv in the mbuf. */
1613 uint16_t auth_iv_len;
1615 /** Length of auth iv to be updated in the mbuf. */
1616 uint16_t auth_iv_update_len;
1621 /** Length of iv. */
1624 /** Offset from start of IP header to the aead iv. */
1625 uint16_t iv_data_offset;
1627 /** Length of iv to be updated in the mbuf. */
1628 uint16_t iv_update_len;
1630 /** Length of aad */
1633 /** Offset from start of IP header to the aad. */
1634 uint16_t aad_data_offset;
1636 /** Length of aad to updated in the mbuf. */
1637 uint16_t aad_update_len;
1642 /** Offset from start of IP header to the data. */
1643 uint16_t data_offset;
1645 /** Digest length. */
1646 uint16_t digest_len;
1649 uint16_t block_size;
1651 /** Mask of crypto operation */
1654 /** Session pointer. */
1655 struct rte_cryptodev_sym_session *session;
1657 /** Direction of crypto, encrypt or decrypt */
1660 /** Private data size to store cipher iv / aad. */
1661 uint8_t iv_aad_data[32];
1663 } __attribute__((__packed__));
1666 sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
1668 if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
1670 if (cfg->mp_create == NULL || cfg->mp_init == NULL)
1677 get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
1679 struct rte_cryptodev_info dev_info;
1680 const struct rte_cryptodev_capabilities *cap;
1683 rte_cryptodev_info_get(cdev_id, &dev_info);
1685 for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1687 cap = &dev_info.capabilities[i];
1689 if (cap->sym.xform_type != xform->type)
1692 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
1693 (cap->sym.cipher.algo == xform->cipher.algo))
1694 return cap->sym.cipher.block_size;
1696 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
1697 (cap->sym.aead.algo == xform->aead.algo))
1698 return cap->sym.aead.block_size;
1700 if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
/*
 * Parse the symmetric crypto xform chain in @p, fill @data with the IV/AAD
 * templates, offsets, lengths and direction, then create and initialize a
 * cryptodev session for the configured device.
 *
 * NOTE(review): this listing has lines elided by extraction (opening braces,
 * the xform-walk loop header, error returns such as -ENOTSUP/-EINVAL, the
 * session NULL check and the final return). Comments describe only the
 * visible logic; reconcile with the upstream file before editing.
 */
static int
sym_crypto_apply(struct sym_crypto_data *data,
	struct rte_table_action_sym_crypto_config *cfg,
	struct rte_table_action_sym_crypto_params *p)
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	const struct rte_crypto_auth_xform *auth_xform = NULL;
	const struct rte_crypto_aead_xform *aead_xform = NULL;
	struct rte_crypto_sym_xform *xform = p->xform;
	struct rte_cryptodev_sym_session *session;
	/* Start from a clean template */
	memset(data, 0, sizeof(*data));
	/* Cipher transform: IV must fit the template and sit at the fixed
	 * offset inside the crypto op private area.
	 */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (cipher_xform->iv.length >
			RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
		if (cipher_xform->iv.offset !=
			RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
		/* Block size is needed later for encrypt padding */
		ret = get_block_size(xform, cfg->cryptodev_id);
		data->block_size = (uint16_t)ret;
		data->op_mask |= CRYPTO_OP_MASK_CIPHER;
		data->cipher_auth.cipher_iv_len =
			cipher_xform->iv.length;
		data->cipher_auth.cipher_iv_data_offset = (uint16_t)
			p->cipher_auth.cipher_iv_update.offset;
		data->cipher_auth.cipher_iv_update_len = (uint16_t)
			p->cipher_auth.cipher_iv_update.length;
		/* Seed the IV template with the user-supplied cipher IV */
		rte_memcpy(data->iv_aad_data,
			p->cipher_auth.cipher_iv.val,
			p->cipher_auth.cipher_iv.length);
		data->direction = cipher_xform->op;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (auth_xform->iv.length >
			RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
		data->op_mask |= CRYPTO_OP_MASK_AUTH;
		data->cipher_auth.auth_iv_len = auth_xform->iv.length;
		data->cipher_auth.auth_iv_data_offset = (uint16_t)
			p->cipher_auth.auth_iv_update.offset;
		data->cipher_auth.auth_iv_update_len = (uint16_t)
			p->cipher_auth.auth_iv_update.length;
		data->digest_len = auth_xform->digest_length;
		/* Map auth GENERATE/VERIFY onto the cipher direction enum */
		data->direction = (auth_xform->op ==
			RTE_CRYPTO_AUTH_OP_GENERATE) ?
			RTE_CRYPTO_CIPHER_OP_ENCRYPT :
			RTE_CRYPTO_CIPHER_OP_DECRYPT;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &xform->aead;
		/* Both IV and AAD must fit their templates */
		if ((aead_xform->iv.length >
			RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
			aead_xform->aad_length >
			RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
		if (aead_xform->iv.offset !=
			RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
		ret = get_block_size(xform, cfg->cryptodev_id);
		data->block_size = (uint16_t)ret;
		data->op_mask |= CRYPTO_OP_MASK_AEAD;
		data->digest_len = aead_xform->digest_length;
		data->aead.iv_len = aead_xform->iv.length;
		data->aead.aad_len = aead_xform->aad_length;
		data->aead.iv_data_offset = (uint16_t)
			p->aead.iv_update.offset;
		data->aead.iv_update_len = (uint16_t)
			p->aead.iv_update.length;
		data->aead.aad_data_offset = (uint16_t)
			p->aead.aad_update.offset;
		data->aead.aad_update_len = (uint16_t)
			p->aead.aad_update.length;
		/* Template layout: IV first, AAD immediately after */
		rte_memcpy(data->iv_aad_data,
		rte_memcpy(data->iv_aad_data + p->aead.iv.length,
			p->aead.aad.length);
		data->direction = (aead_xform->op ==
			RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			RTE_CRYPTO_CIPHER_OP_ENCRYPT :
			RTE_CRYPTO_CIPHER_OP_DECRYPT;
	/* Walk to the next transform in the chain */
	xform = xform->next;
	/* Cipher+auth chain: the auth IV lives right after the cipher IV in
	 * both the op private area and the template.
	 * NOTE(review): an elided line here presumably guards the
	 * cipher_xform dereference below (auth-only chains) — confirm.
	 */
	if (auth_xform && auth_xform->iv.length) {
		if (auth_xform->iv.offset !=
			RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
			cipher_xform->iv.length)
		rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
			p->cipher_auth.auth_iv.val,
			p->cipher_auth.auth_iv.length);
		rte_memcpy(data->iv_aad_data,
			p->cipher_auth.auth_iv.val,
			p->cipher_auth.auth_iv.length);
	/* Create the session and bind it to the configured device */
	session = rte_cryptodev_sym_session_create(cfg->mp_create);
	ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
		p->xform, cfg->mp_init);
		/* Init failed: do not leak the session */
		rte_cryptodev_sym_session_free(session);
	data->data_offset = (uint16_t)p->data_offset;
	data->session = session;
1853 static __rte_always_inline uint64_t
1854 pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
1855 struct rte_table_action_sym_crypto_config *cfg,
1858 struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
1859 RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
1860 struct rte_crypto_op *op = &crypto_op->op;
1861 struct rte_crypto_sym_op *sym = op->sym;
1862 uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
1863 uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
1865 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1866 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1867 op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
1868 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1871 sym->session = data->session;
1873 /** pad the packet */
1874 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1875 uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
1876 data->block_size) - payload_len;
1878 if (unlikely(rte_pktmbuf_append(mbuf, append_len +
1879 data->digest_len) == NULL))
1882 payload_len += append_len;
1884 payload_len -= data->digest_len;
1886 if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
1887 /** prepare cipher op */
1888 uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
1890 sym->cipher.data.length = payload_len;
1891 sym->cipher.data.offset = data->data_offset - pkt_offset;
1893 if (data->cipher_auth.cipher_iv_update_len) {
1894 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1895 data->cipher_auth.cipher_iv_data_offset
1898 /** For encryption, update the pkt iv field, otherwise
1899 * update the iv_aad_field
1901 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1902 rte_memcpy(pkt_iv, data->iv_aad_data,
1903 data->cipher_auth.cipher_iv_update_len);
1905 rte_memcpy(data->iv_aad_data, pkt_iv,
1906 data->cipher_auth.cipher_iv_update_len);
1910 rte_memcpy(iv, data->iv_aad_data,
1911 data->cipher_auth.cipher_iv_len);
1914 if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
1915 /** authentication always start from IP header. */
1916 sym->auth.data.offset = ip_offset - pkt_offset;
1917 sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
1919 sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
1920 uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
1922 sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1923 rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
1925 if (data->cipher_auth.auth_iv_update_len) {
1926 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1927 data->cipher_auth.auth_iv_data_offset
1929 uint8_t *data_iv = data->iv_aad_data +
1930 data->cipher_auth.cipher_iv_len;
1932 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1933 rte_memcpy(pkt_iv, data_iv,
1934 data->cipher_auth.auth_iv_update_len);
1936 rte_memcpy(data_iv, pkt_iv,
1937 data->cipher_auth.auth_iv_update_len);
1940 if (data->cipher_auth.auth_iv_len) {
1941 /** prepare cipher op */
1942 uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
1944 rte_memcpy(iv, data->iv_aad_data +
1945 data->cipher_auth.cipher_iv_len,
1946 data->cipher_auth.auth_iv_len);
1950 if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
1951 uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
1952 uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
1954 sym->aead.aad.data = aad;
1955 sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1956 aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
1957 sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
1958 uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
1960 sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1961 rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
1962 sym->aead.data.offset = data->data_offset - pkt_offset;
1963 sym->aead.data.length = payload_len;
1965 if (data->aead.iv_update_len) {
1966 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1967 data->aead.iv_data_offset + ip_offset);
1968 uint8_t *data_iv = data->iv_aad_data;
1970 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1971 rte_memcpy(pkt_iv, data_iv,
1972 data->aead.iv_update_len);
1974 rte_memcpy(data_iv, pkt_iv,
1975 data->aead.iv_update_len);
1978 rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
1980 if (data->aead.aad_update_len) {
1981 uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1982 data->aead.aad_data_offset + ip_offset);
1983 uint8_t *data_aad = data->iv_aad_data +
1986 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1987 rte_memcpy(pkt_aad, data_aad,
1988 data->aead.iv_update_len);
1990 rte_memcpy(data_aad, pkt_aad,
1991 data->aead.iv_update_len);
1994 rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
1995 data->aead.aad_len);
2002 * RTE_TABLE_ACTION_TAG
2006 } __attribute__((__packed__));
2009 tag_apply(struct tag_data *data,
2010 struct rte_table_action_tag_params *p)
2016 static __rte_always_inline void
2017 pkt_work_tag(struct rte_mbuf *mbuf,
2018 struct tag_data *data)
2020 mbuf->hash.fdir.hi = data->tag;
2021 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2024 static __rte_always_inline void
2025 pkt4_work_tag(struct rte_mbuf *mbuf0,
2026 struct rte_mbuf *mbuf1,
2027 struct rte_mbuf *mbuf2,
2028 struct rte_mbuf *mbuf3,
2029 struct tag_data *data0,
2030 struct tag_data *data1,
2031 struct tag_data *data2,
2032 struct tag_data *data3)
2034 mbuf0->hash.fdir.hi = data0->tag;
2035 mbuf1->hash.fdir.hi = data1->tag;
2036 mbuf2->hash.fdir.hi = data2->tag;
2037 mbuf3->hash.fdir.hi = data3->tag;
2039 mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2040 mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2041 mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2042 mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2046 * RTE_TABLE_ACTION_DECAP
2050 } __attribute__((__packed__));
2053 decap_apply(struct decap_data *data,
2054 struct rte_table_action_decap_params *p)
2060 static __rte_always_inline void
2061 pkt_work_decap(struct rte_mbuf *mbuf,
2062 struct decap_data *data)
2064 uint16_t data_off = mbuf->data_off;
2065 uint16_t data_len = mbuf->data_len;
2066 uint32_t pkt_len = mbuf->pkt_len;
2067 uint16_t n = data->n;
2069 mbuf->data_off = data_off + n;
2070 mbuf->data_len = data_len - n;
2071 mbuf->pkt_len = pkt_len - n;
2074 static __rte_always_inline void
2075 pkt4_work_decap(struct rte_mbuf *mbuf0,
2076 struct rte_mbuf *mbuf1,
2077 struct rte_mbuf *mbuf2,
2078 struct rte_mbuf *mbuf3,
2079 struct decap_data *data0,
2080 struct decap_data *data1,
2081 struct decap_data *data2,
2082 struct decap_data *data3)
2084 uint16_t data_off0 = mbuf0->data_off;
2085 uint16_t data_len0 = mbuf0->data_len;
2086 uint32_t pkt_len0 = mbuf0->pkt_len;
2088 uint16_t data_off1 = mbuf1->data_off;
2089 uint16_t data_len1 = mbuf1->data_len;
2090 uint32_t pkt_len1 = mbuf1->pkt_len;
2092 uint16_t data_off2 = mbuf2->data_off;
2093 uint16_t data_len2 = mbuf2->data_len;
2094 uint32_t pkt_len2 = mbuf2->pkt_len;
2096 uint16_t data_off3 = mbuf3->data_off;
2097 uint16_t data_len3 = mbuf3->data_len;
2098 uint32_t pkt_len3 = mbuf3->pkt_len;
2100 uint16_t n0 = data0->n;
2101 uint16_t n1 = data1->n;
2102 uint16_t n2 = data2->n;
2103 uint16_t n3 = data3->n;
2105 mbuf0->data_off = data_off0 + n0;
2106 mbuf0->data_len = data_len0 - n0;
2107 mbuf0->pkt_len = pkt_len0 - n0;
2109 mbuf1->data_off = data_off1 + n1;
2110 mbuf1->data_len = data_len1 - n1;
2111 mbuf1->pkt_len = pkt_len1 - n1;
2113 mbuf2->data_off = data_off2 + n2;
2114 mbuf2->data_len = data_len2 - n2;
2115 mbuf2->pkt_len = pkt_len2 - n2;
2117 mbuf3->data_off = data_off3 + n3;
2118 mbuf3->data_len = data_len3 - n3;
2119 mbuf3->pkt_len = pkt_len3 - n3;
2126 action_valid(enum rte_table_action_type action)
2129 case RTE_TABLE_ACTION_FWD:
2130 case RTE_TABLE_ACTION_LB:
2131 case RTE_TABLE_ACTION_MTR:
2132 case RTE_TABLE_ACTION_TM:
2133 case RTE_TABLE_ACTION_ENCAP:
2134 case RTE_TABLE_ACTION_NAT:
2135 case RTE_TABLE_ACTION_TTL:
2136 case RTE_TABLE_ACTION_STATS:
2137 case RTE_TABLE_ACTION_TIME:
2138 case RTE_TABLE_ACTION_SYM_CRYPTO:
2139 case RTE_TABLE_ACTION_TAG:
2140 case RTE_TABLE_ACTION_DECAP:
2148 #define RTE_TABLE_ACTION_MAX 64
2151 uint64_t action_mask;
2152 struct rte_table_action_common_config common;
2153 struct rte_table_action_lb_config lb;
2154 struct rte_table_action_mtr_config mtr;
2155 struct rte_table_action_tm_config tm;
2156 struct rte_table_action_encap_config encap;
2157 struct rte_table_action_nat_config nat;
2158 struct rte_table_action_ttl_config ttl;
2159 struct rte_table_action_stats_config stats;
2160 struct rte_table_action_sym_crypto_config sym_crypto;
2164 action_cfg_size(enum rte_table_action_type action)
2167 case RTE_TABLE_ACTION_LB:
2168 return sizeof(struct rte_table_action_lb_config);
2169 case RTE_TABLE_ACTION_MTR:
2170 return sizeof(struct rte_table_action_mtr_config);
2171 case RTE_TABLE_ACTION_TM:
2172 return sizeof(struct rte_table_action_tm_config);
2173 case RTE_TABLE_ACTION_ENCAP:
2174 return sizeof(struct rte_table_action_encap_config);
2175 case RTE_TABLE_ACTION_NAT:
2176 return sizeof(struct rte_table_action_nat_config);
2177 case RTE_TABLE_ACTION_TTL:
2178 return sizeof(struct rte_table_action_ttl_config);
2179 case RTE_TABLE_ACTION_STATS:
2180 return sizeof(struct rte_table_action_stats_config);
2181 case RTE_TABLE_ACTION_SYM_CRYPTO:
2182 return sizeof(struct rte_table_action_sym_crypto_config);
2189 action_cfg_get(struct ap_config *ap_config,
2190 enum rte_table_action_type type)
2193 case RTE_TABLE_ACTION_LB:
2194 return &ap_config->lb;
2196 case RTE_TABLE_ACTION_MTR:
2197 return &ap_config->mtr;
2199 case RTE_TABLE_ACTION_TM:
2200 return &ap_config->tm;
2202 case RTE_TABLE_ACTION_ENCAP:
2203 return &ap_config->encap;
2205 case RTE_TABLE_ACTION_NAT:
2206 return &ap_config->nat;
2208 case RTE_TABLE_ACTION_TTL:
2209 return &ap_config->ttl;
2211 case RTE_TABLE_ACTION_STATS:
2212 return &ap_config->stats;
2214 case RTE_TABLE_ACTION_SYM_CRYPTO:
2215 return &ap_config->sym_crypto;
2222 action_cfg_set(struct ap_config *ap_config,
2223 enum rte_table_action_type type,
2226 void *dst = action_cfg_get(ap_config, type);
2229 memcpy(dst, action_cfg, action_cfg_size(type));
2231 ap_config->action_mask |= 1LLU << type;
2235 size_t offset[RTE_TABLE_ACTION_MAX];
2240 action_data_size(enum rte_table_action_type action,
2241 struct ap_config *ap_config)
2244 case RTE_TABLE_ACTION_FWD:
2245 return sizeof(struct fwd_data);
2247 case RTE_TABLE_ACTION_LB:
2248 return sizeof(struct lb_data);
2250 case RTE_TABLE_ACTION_MTR:
2251 return mtr_data_size(&ap_config->mtr);
2253 case RTE_TABLE_ACTION_TM:
2254 return sizeof(struct tm_data);
2256 case RTE_TABLE_ACTION_ENCAP:
2257 return encap_data_size(&ap_config->encap);
2259 case RTE_TABLE_ACTION_NAT:
2260 return nat_data_size(&ap_config->nat,
2261 &ap_config->common);
2263 case RTE_TABLE_ACTION_TTL:
2264 return sizeof(struct ttl_data);
2266 case RTE_TABLE_ACTION_STATS:
2267 return sizeof(struct stats_data);
2269 case RTE_TABLE_ACTION_TIME:
2270 return sizeof(struct time_data);
2272 case RTE_TABLE_ACTION_SYM_CRYPTO:
2273 return (sizeof(struct sym_crypto_data));
2275 case RTE_TABLE_ACTION_TAG:
2276 return sizeof(struct tag_data);
2278 case RTE_TABLE_ACTION_DECAP:
2279 return sizeof(struct decap_data);
2288 action_data_offset_set(struct ap_data *ap_data,
2289 struct ap_config *ap_config)
2291 uint64_t action_mask = ap_config->action_mask;
2295 memset(ap_data->offset, 0, sizeof(ap_data->offset));
2298 for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
2299 if (action_mask & (1LLU << action)) {
2300 ap_data->offset[action] = offset;
2301 offset += action_data_size((enum rte_table_action_type)action,
2305 ap_data->total_size = offset;
2308 struct rte_table_action_profile {
2309 struct ap_config cfg;
2310 struct ap_data data;
2314 struct rte_table_action_profile *
2315 rte_table_action_profile_create(struct rte_table_action_common_config *common)
2317 struct rte_table_action_profile *ap;
2319 /* Check input arguments */
2323 /* Memory allocation */
2324 ap = calloc(1, sizeof(struct rte_table_action_profile));
2328 /* Initialization */
2329 memcpy(&ap->cfg.common, common, sizeof(*common));
2336 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
2337 enum rte_table_action_type type,
2338 void *action_config)
2342 /* Check input arguments */
2343 if ((profile == NULL) ||
2345 (action_valid(type) == 0) ||
2346 (profile->cfg.action_mask & (1LLU << type)) ||
2347 ((action_cfg_size(type) == 0) && action_config) ||
2348 (action_cfg_size(type) && (action_config == NULL)))
2352 case RTE_TABLE_ACTION_LB:
2353 status = lb_cfg_check(action_config);
2356 case RTE_TABLE_ACTION_MTR:
2357 status = mtr_cfg_check(action_config);
2360 case RTE_TABLE_ACTION_TM:
2361 status = tm_cfg_check(action_config);
2364 case RTE_TABLE_ACTION_ENCAP:
2365 status = encap_cfg_check(action_config);
2368 case RTE_TABLE_ACTION_NAT:
2369 status = nat_cfg_check(action_config);
2372 case RTE_TABLE_ACTION_TTL:
2373 status = ttl_cfg_check(action_config);
2376 case RTE_TABLE_ACTION_STATS:
2377 status = stats_cfg_check(action_config);
2380 case RTE_TABLE_ACTION_SYM_CRYPTO:
2381 status = sym_crypto_cfg_check(action_config);
2393 action_cfg_set(&profile->cfg, type, action_config);
2399 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
2401 if (profile->frozen)
2404 profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
2405 action_data_offset_set(&profile->data, &profile->cfg);
2406 profile->frozen = 1;
/* Release the profile. NULL is accepted and is a no-op. */
int
rte_table_action_profile_free(struct rte_table_action_profile *profile)
{
	if (profile == NULL)
		return 0;

	free(profile);
	return 0;
}
2424 #define METER_PROFILES_MAX 32
2426 struct rte_table_action {
2427 struct ap_config cfg;
2428 struct ap_data data;
2429 struct dscp_table_data dscp_table;
2430 struct meter_profile_data mp[METER_PROFILES_MAX];
2433 struct rte_table_action *
2434 rte_table_action_create(struct rte_table_action_profile *profile,
2437 struct rte_table_action *action;
2439 /* Check input arguments */
2440 if ((profile == NULL) ||
2441 (profile->frozen == 0))
2444 /* Memory allocation */
2445 action = rte_zmalloc_socket(NULL,
2446 sizeof(struct rte_table_action),
2447 RTE_CACHE_LINE_SIZE,
2452 /* Initialization */
2453 memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
2454 memcpy(&action->data, &profile->data, sizeof(profile->data));
2459 static __rte_always_inline void *
2460 action_data_get(void *data,
2461 struct rte_table_action *action,
2462 enum rte_table_action_type type)
2464 size_t offset = action->data.offset[type];
2465 uint8_t *data_bytes = data;
2467 return &data_bytes[offset];
2471 rte_table_action_apply(struct rte_table_action *action,
2473 enum rte_table_action_type type,
2474 void *action_params)
2478 /* Check input arguments */
2479 if ((action == NULL) ||
2481 (action_valid(type) == 0) ||
2482 ((action->cfg.action_mask & (1LLU << type)) == 0) ||
2483 (action_params == NULL))
2487 action_data = action_data_get(data, action, type);
2490 case RTE_TABLE_ACTION_FWD:
2491 return fwd_apply(action_data,
2494 case RTE_TABLE_ACTION_LB:
2495 return lb_apply(action_data,
2498 case RTE_TABLE_ACTION_MTR:
2499 return mtr_apply(action_data,
2503 RTE_DIM(action->mp));
2505 case RTE_TABLE_ACTION_TM:
2506 return tm_apply(action_data,
2510 case RTE_TABLE_ACTION_ENCAP:
2511 return encap_apply(action_data,
2514 &action->cfg.common);
2516 case RTE_TABLE_ACTION_NAT:
2517 return nat_apply(action_data,
2519 &action->cfg.common);
2521 case RTE_TABLE_ACTION_TTL:
2522 return ttl_apply(action_data,
2525 case RTE_TABLE_ACTION_STATS:
2526 return stats_apply(action_data,
2529 case RTE_TABLE_ACTION_TIME:
2530 return time_apply(action_data,
2533 case RTE_TABLE_ACTION_SYM_CRYPTO:
2534 return sym_crypto_apply(action_data,
2535 &action->cfg.sym_crypto,
2538 case RTE_TABLE_ACTION_TAG:
2539 return tag_apply(action_data,
2542 case RTE_TABLE_ACTION_DECAP:
2543 return decap_apply(action_data,
2552 rte_table_action_dscp_table_update(struct rte_table_action *action,
2554 struct rte_table_action_dscp_table *table)
2558 /* Check input arguments */
2559 if ((action == NULL) ||
2560 ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2561 (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
2566 for (i = 0; i < RTE_DIM(table->entry); i++) {
2567 struct dscp_table_entry_data *data =
2568 &action->dscp_table.entry[i];
2569 struct rte_table_action_dscp_table_entry *entry =
2572 if ((dscp_mask & (1LLU << i)) == 0)
2575 data->color = entry->color;
2576 data->tc = entry->tc_id;
2577 data->tc_queue = entry->tc_queue_id;
2584 rte_table_action_meter_profile_add(struct rte_table_action *action,
2585 uint32_t meter_profile_id,
2586 struct rte_table_action_meter_profile *profile)
2588 struct meter_profile_data *mp_data;
2591 /* Check input arguments */
2592 if ((action == NULL) ||
2593 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2597 if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
2600 mp_data = meter_profile_data_find(action->mp,
2601 RTE_DIM(action->mp),
2606 mp_data = meter_profile_data_find_unused(action->mp,
2607 RTE_DIM(action->mp));
2611 /* Install new profile */
2612 status = rte_meter_trtcm_profile_config(&mp_data->profile,
2617 mp_data->profile_id = meter_profile_id;
2624 rte_table_action_meter_profile_delete(struct rte_table_action *action,
2625 uint32_t meter_profile_id)
2627 struct meter_profile_data *mp_data;
2629 /* Check input arguments */
2630 if ((action == NULL) ||
2631 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
2634 mp_data = meter_profile_data_find(action->mp,
2635 RTE_DIM(action->mp),
2640 /* Uninstall profile */
2647 rte_table_action_meter_read(struct rte_table_action *action,
2650 struct rte_table_action_mtr_counters *stats,
2653 struct mtr_trtcm_data *mtr_data;
2656 /* Check input arguments */
2657 if ((action == NULL) ||
2658 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2660 (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
2663 mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
2667 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2668 struct rte_table_action_mtr_counters_tc *dst =
2670 struct mtr_trtcm_data *src = &mtr_data[i];
2672 if ((tc_mask & (1 << i)) == 0)
2675 dst->n_packets[e_RTE_METER_GREEN] =
2676 mtr_trtcm_data_stats_get(src, e_RTE_METER_GREEN);
2678 dst->n_packets[e_RTE_METER_YELLOW] =
2679 mtr_trtcm_data_stats_get(src, e_RTE_METER_YELLOW);
2681 dst->n_packets[e_RTE_METER_RED] =
2682 mtr_trtcm_data_stats_get(src, e_RTE_METER_RED);
2684 dst->n_packets_valid = 1;
2685 dst->n_bytes_valid = 0;
2688 stats->tc_mask = tc_mask;
2693 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2694 struct mtr_trtcm_data *src = &mtr_data[i];
2696 if ((tc_mask & (1 << i)) == 0)
2699 mtr_trtcm_data_stats_reset(src, e_RTE_METER_GREEN);
2700 mtr_trtcm_data_stats_reset(src, e_RTE_METER_YELLOW);
2701 mtr_trtcm_data_stats_reset(src, e_RTE_METER_RED);
2709 rte_table_action_ttl_read(struct rte_table_action *action,
2711 struct rte_table_action_ttl_counters *stats,
2714 struct ttl_data *ttl_data;
2716 /* Check input arguments */
2717 if ((action == NULL) ||
2718 ((action->cfg.action_mask &
2719 (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
2723 ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
2727 stats->n_packets = TTL_STATS_READ(ttl_data);
2731 TTL_STATS_RESET(ttl_data);
2737 rte_table_action_stats_read(struct rte_table_action *action,
2739 struct rte_table_action_stats_counters *stats,
2742 struct stats_data *stats_data;
2744 /* Check input arguments */
2745 if ((action == NULL) ||
2746 ((action->cfg.action_mask &
2747 (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
2751 stats_data = action_data_get(data, action,
2752 RTE_TABLE_ACTION_STATS);
2756 stats->n_packets = stats_data->n_packets;
2757 stats->n_bytes = stats_data->n_bytes;
2758 stats->n_packets_valid = 1;
2759 stats->n_bytes_valid = 1;
2764 stats_data->n_packets = 0;
2765 stats_data->n_bytes = 0;
2772 rte_table_action_time_read(struct rte_table_action *action,
2774 uint64_t *timestamp)
2776 struct time_data *time_data;
2778 /* Check input arguments */
2779 if ((action == NULL) ||
2780 ((action->cfg.action_mask &
2781 (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
2783 (timestamp == NULL))
2786 time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
2789 *timestamp = time_data->time;
2794 struct rte_cryptodev_sym_session *
2795 rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
2798 struct sym_crypto_data *sym_crypto_data;
2800 /* Check input arguments */
2801 if ((action == NULL) ||
2802 ((action->cfg.action_mask &
2803 (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
2807 sym_crypto_data = action_data_get(data, action,
2808 RTE_TABLE_ACTION_SYM_CRYPTO);
2810 return sym_crypto_data->session;
/*
 * Scalar per-packet worker: runs every enabled action on one packet in a
 * fixed order (LB, MTR, TM, DECAP, ENCAP, NAT, TTL, STATS, TIME,
 * SYM_CRYPTO, TAG) and accumulates a drop mask.
 *
 * NOTE(review): this listing has lines elided by extraction (the `time`
 * parameter, opening braces, `else` keywords, several helper-call argument
 * lists and the final `return drop_mask;`). Comments describe only the
 * visible logic; reconcile with the upstream file before editing.
 */
static __rte_always_inline uint64_t
pkt_work(struct rte_mbuf *mbuf,
	struct rte_pipeline_table_entry *table_entry,
	struct rte_table_action *action,
	struct ap_config *cfg)
	uint64_t drop_mask = 0;
	uint32_t ip_offset = action->cfg.common.ip_offset;
	void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
	uint16_t total_length;
	/* Extract DSCP and total length once, per IP version */
	if (cfg->common.ip_version) {
		struct ipv4_hdr *hdr = ip;
		/* DSCP = top 6 bits of the ToS byte */
		dscp = hdr->type_of_service >> 2;
		total_length = rte_ntohs(hdr->total_length);
		struct ipv6_hdr *hdr = ip;
		/* NOTE(review): mask 0x0F600000 >> 18 does not match the
		 * canonical IPv6 DSCP extraction ((vtc_flow & 0x0FC00000)
		 * >> 22) and can yield values > 63 — verify against the
		 * DSCP table size before relying on it.
		 */
		dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
		/* IPv6 total length = payload length + fixed header */
		rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
	/* Metering may mark the packet for drop */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
		drop_mask |= pkt_work_mtr(mbuf,
			&action->dscp_table,
	/* Traffic management queue selection */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
			&action->dscp_table,
	/* Decap strips bytes from the packet front */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
		void *data = action_data_get(table_entry,
			RTE_TABLE_ACTION_DECAP);
		pkt_work_decap(mbuf, data);
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
		pkt_work_encap(mbuf,
	/* NAT rewrites addresses/ports per IP version */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
		if (cfg->common.ip_version)
			pkt_ipv4_work_nat(ip, data, &cfg->nat);
			pkt_ipv6_work_nat(ip, data, &cfg->nat);
	/* TTL decrement may mark the packet for drop */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
		if (cfg->common.ip_version)
			drop_mask |= pkt_ipv4_work_ttl(ip, data);
			drop_mask |= pkt_ipv6_work_ttl(ip, data);
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
		pkt_work_stats(data, total_length);
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
		action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
		pkt_work_time(data, time);
	/* Crypto op preparation may mark the packet for drop */
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
		void *data = action_data_get(table_entry, action,
			RTE_TABLE_ACTION_SYM_CRYPTO);
		drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
		void *data = action_data_get(table_entry,
			RTE_TABLE_ACTION_TAG);
		pkt_work_tag(mbuf, data);
/*
 * pkt4_work(): apply every enabled table action to a burst of exactly 4
 * packets (the unrolled fast-path twin of the single-packet pkt_work()).
 *
 * NOTE(review): this listing is elided — several statements are missing
 * their left-hand sides / call arguments between the visible lines; the
 * comments below describe only what the visible code establishes.
 *
 * For each action bit set in cfg->action_mask, the per-entry action data
 * is fetched with action_data_get() and the matching pkt_work_*() /
 * pkt4_work_*() helper is invoked on all four packets.
 *
 * Returns a 4-bit drop mask (bit i set => mbufs[i] must be dropped),
 * accumulated from the MTR, TTL and SYM_CRYPTO stages.
 */
2945 static __rte_always_inline uint64_t
2946 pkt4_work(struct rte_mbuf **mbufs,
2947 struct rte_pipeline_table_entry **table_entries,
2949 struct rte_table_action *action,
2950 struct ap_config *cfg)
/* Per-packet drop accumulators; OR-ed into by MTR/TTL/SYM_CRYPTO below. */
2952 uint64_t drop_mask0 = 0;
2953 uint64_t drop_mask1 = 0;
2954 uint64_t drop_mask2 = 0;
2955 uint64_t drop_mask3 = 0;
2957 struct rte_mbuf *mbuf0 = mbufs[0];
2958 struct rte_mbuf *mbuf1 = mbufs[1];
2959 struct rte_mbuf *mbuf2 = mbufs[2];
2960 struct rte_mbuf *mbuf3 = mbufs[3];
2962 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
2963 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
2964 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
2965 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
/* The IP header lives at a fixed offset inside the mbuf metadata area. */
2967 uint32_t ip_offset = action->cfg.common.ip_offset;
2968 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
2969 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
2970 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
2971 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
2973 uint32_t dscp0, dscp1, dscp2, dscp3;
2974 uint16_t total_length0, total_length1, total_length2, total_length3;
/*
 * Parse DSCP and total length per IP version. ip_version is non-zero
 * for IPv4, zero for IPv6 (the else branch below).
 */
2976 if (cfg->common.ip_version) {
2977 struct ipv4_hdr *hdr0 = ip0;
2978 struct ipv4_hdr *hdr1 = ip1;
2979 struct ipv4_hdr *hdr2 = ip2;
2980 struct ipv4_hdr *hdr3 = ip3;
/* IPv4: DSCP is the top 6 bits of the TOS byte. */
2982 dscp0 = hdr0->type_of_service >> 2;
2983 dscp1 = hdr1->type_of_service >> 2;
2984 dscp2 = hdr2->type_of_service >> 2;
2985 dscp3 = hdr3->type_of_service >> 2;
2987 total_length0 = rte_ntohs(hdr0->total_length);
2988 total_length1 = rte_ntohs(hdr1->total_length);
2989 total_length2 = rte_ntohs(hdr2->total_length);
2990 total_length3 = rte_ntohs(hdr3->total_length);
2992 struct ipv6_hdr *hdr0 = ip0;
2993 struct ipv6_hdr *hdr1 = ip1;
2994 struct ipv6_hdr *hdr2 = ip2;
2995 struct ipv6_hdr *hdr3 = ip3;
/*
 * IPv6: extract DSCP from the Traffic Class field of vtc_flow.
 * NOTE(review): mask 0x0F600000 with shift 18 looks inconsistent with
 * the canonical 6-bit DSCP extraction ((vtc_flow & 0x0FC00000) >> 22)
 * — verify against the single-packet path and upstream history.
 */
2997 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
2998 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
2999 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
3000 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;
/* IPv6 payload_len excludes the 40-byte fixed header; add it back. */
3003 rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
3005 rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
3007 rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
3009 rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
/* Load balance: pick an output per packet from the entry's LB table. */
3012 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
3014 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
3016 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
3018 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
3020 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
/* Metering: may mark packets for drop (red color -> drop_mask bit). */
3039 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
3041 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
3043 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
3045 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
3047 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
3049 drop_mask0 |= pkt_work_mtr(mbuf0,
3051 &action->dscp_table,
3057 drop_mask1 |= pkt_work_mtr(mbuf1,
3059 &action->dscp_table,
3065 drop_mask2 |= pkt_work_mtr(mbuf2,
3067 &action->dscp_table,
3073 drop_mask3 |= pkt_work_mtr(mbuf3,
3075 &action->dscp_table,
/* Traffic management: map DSCP through the shared dscp_table. */
3082 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
3084 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
3086 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
3088 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
3090 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
3094 &action->dscp_table,
3099 &action->dscp_table,
3104 &action->dscp_table,
3109 &action->dscp_table,
/* Decapsulation: dedicated 4-packet helper. */
3113 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
3114 void *data0 = action_data_get(table_entry0,
3116 RTE_TABLE_ACTION_DECAP);
3117 void *data1 = action_data_get(table_entry1,
3119 RTE_TABLE_ACTION_DECAP);
3120 void *data2 = action_data_get(table_entry2,
3122 RTE_TABLE_ACTION_DECAP);
3123 void *data3 = action_data_get(table_entry3,
3125 RTE_TABLE_ACTION_DECAP);
3127 pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
3128 data0, data1, data2, data3);
/* Encapsulation: applied per packet (no 4-wide helper here). */
3131 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
3133 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
3135 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
3137 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
3139 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
3141 pkt_work_encap(mbuf0,
3148 pkt_work_encap(mbuf1,
3155 pkt_work_encap(mbuf2,
3162 pkt_work_encap(mbuf3,
/* NAT: rewrite addresses/ports; helper chosen by IP version. */
3170 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
3172 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
3174 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
3176 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
3178 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
3180 if (cfg->common.ip_version) {
3181 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
3182 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
3183 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
3184 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
3186 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
3187 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
3188 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
3189 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
/* TTL/hop-limit decrement; expired packets are added to the drop mask. */
3193 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
3195 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
3197 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
3199 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
3201 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
3203 if (cfg->common.ip_version) {
3204 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
3205 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
3206 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
3207 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
3209 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
3210 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
3211 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
3212 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
/* Per-entry byte/packet counters, fed with the parsed total lengths. */
3216 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
3218 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
3220 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
3222 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
3224 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
3226 pkt_work_stats(data0, total_length0);
3227 pkt_work_stats(data1, total_length1);
3228 pkt_work_stats(data2, total_length2);
3229 pkt_work_stats(data3, total_length3);
/* Timestamp: record the same 'time' value into each entry's data. */
3232 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
3234 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
3236 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
3238 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
3240 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
3242 pkt_work_time(data0, time);
3243 pkt_work_time(data1, time);
3244 pkt_work_time(data2, time);
3245 pkt_work_time(data3, time);
/* Symmetric crypto: failures contribute to the drop mask. */
3248 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
3249 void *data0 = action_data_get(table_entry0, action,
3250 RTE_TABLE_ACTION_SYM_CRYPTO);
3251 void *data1 = action_data_get(table_entry1, action,
3252 RTE_TABLE_ACTION_SYM_CRYPTO);
3253 void *data2 = action_data_get(table_entry2, action,
3254 RTE_TABLE_ACTION_SYM_CRYPTO);
3255 void *data3 = action_data_get(table_entry3, action,
3256 RTE_TABLE_ACTION_SYM_CRYPTO);
3258 drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
3260 drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
3262 drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
3264 drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
/* Tag: write a per-entry tag into mbuf metadata via a 4-wide helper. */
3268 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3269 void *data0 = action_data_get(table_entry0,
3271 RTE_TABLE_ACTION_TAG);
3272 void *data1 = action_data_get(table_entry1,
3274 RTE_TABLE_ACTION_TAG);
3275 void *data2 = action_data_get(table_entry2,
3277 RTE_TABLE_ACTION_TAG);
3278 void *data3 = action_data_get(table_entry3,
3280 RTE_TABLE_ACTION_TAG);
3282 pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
3283 data0, data1, data2, data3);
/*
 * ah(): generic pipeline action handler — applies the configured table
 * actions to every packet selected by pkts_mask, then reports the packets
 * to drop back to the pipeline.
 *
 * NOTE(review): listing is elided; some call arguments and braces are
 * not visible between the lines below.
 */
3292 static __rte_always_inline int
3293 ah(struct rte_pipeline *p,
3294 struct rte_mbuf **pkts,
3296 struct rte_pipeline_table_entry **entries,
3297 struct rte_table_action *action,
3298 struct ap_config *cfg)
3300 uint64_t pkts_drop_mask = 0;
/* MTR and TIME actions need a current timestamp (value set in elided code). */
3303 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
3304 (1LLU << RTE_TABLE_ACTION_TIME)))
/*
 * Fast path: (mask & (mask + 1)) == 0 holds iff the mask is contiguous
 * from bit 0 (form 2^k - 1), i.e. packets occupy slots 0..n_pkts-1.
 */
3307 if ((pkts_mask & (pkts_mask + 1)) == 0) {
3308 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
/* Process packets four at a time (n_pkts rounded down to multiple of 4). */
3311 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3314 drop_mask = pkt4_work(&pkts[i],
/* pkt4_work returns a 4-bit drop mask; shift into absolute position. */
3320 pkts_drop_mask |= drop_mask << i;
/* Handle the 0-3 leftover packets one at a time. */
3323 for ( ; i < n_pkts; i++) {
3326 drop_mask = pkt_work(pkts[i],
3332 pkts_drop_mask |= drop_mask << i;
/* Slow path: sparse mask — walk set bits with count-trailing-zeros. */
3335 for ( ; pkts_mask; ) {
3336 uint32_t pos = __builtin_ctzll(pkts_mask);
3337 uint64_t pkt_mask = 1LLU << pos;
3340 drop_mask = pkt_work(pkts[pos],
3346 pkts_mask &= ~pkt_mask;
3347 pkts_drop_mask |= drop_mask << pos;
/* Tell the pipeline which packets the actions decided to drop. */
3350 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
/*
 * ah_default(): default pipeline action-handler entry point. Recovers the
 * rte_table_action instance from the opaque handler argument, then (in
 * code elided from this view) delegates to the generic ah() worker.
 */
3356 ah_default(struct rte_pipeline *p,
3357 struct rte_mbuf **pkts,
3359 struct rte_pipeline_table_entry **entries,
3362 struct rte_table_action *action = arg;
/*
 * ah_selector(): choose the hit action handler for this action profile.
 * When only the FWD action is enabled no custom handler is needed (the
 * visible branch); other cases are elided from this view — presumably
 * they return ah_default or a specialized handler (TODO confirm).
 */
3372 static rte_pipeline_table_action_handler_hit
3373 ah_selector(struct rte_table_action *action)
3375 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
/*
 * rte_table_action_table_params_get(): public API — fill in the pipeline
 * table parameters (hit handler, handler argument, per-entry action data
 * size) derived from this action profile.
 *
 * Returns 0 on success; the elided argument-check branch presumably
 * returns non-zero on NULL inputs (TODO confirm).
 */
3382 rte_table_action_table_params_get(struct rte_table_action *action,
3383 struct rte_pipeline_table_params *params)
3385 rte_pipeline_table_action_handler_hit f_action_hit;
3386 uint32_t total_size;
3388 /* Check input arguments */
3389 if ((action == NULL) ||
/* Pick the handler and round the entry size up to a power of 2. */
3393 f_action_hit = ah_selector(action);
3394 total_size = rte_align32pow2(action->data.total_size);
3396 /* Fill in params */
3397 params->f_action_hit = f_action_hit;
3398 params->f_action_miss = NULL;
/* Only pass the action as handler arg when a custom handler is installed. */
3399 params->arg_ah = (f_action_hit) ? action : NULL;
/* action_data_size excludes the fixed rte_pipeline_table_entry header. */
3400 params->action_data_size = total_size -
3401 sizeof(struct rte_pipeline_table_entry);
3407 rte_table_action_free(struct rte_table_action *action)