1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_byteorder.h>
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_memcpy.h>
12 #include <rte_ether.h>
17 #include <rte_cryptodev.h>
18 #include <rte_cryptodev_pmd.h>
20 #include "rte_table_action.h"
22 #define rte_htons rte_cpu_to_be_16
23 #define rte_htonl rte_cpu_to_be_32
25 #define rte_ntohs rte_be_to_cpu_16
26 #define rte_ntohl rte_be_to_cpu_32
/*
 * NOTE(review): this dump is a sampled fragment of DPDK rte_table_action.c —
 * interior lines of each definition are missing. Code kept verbatim.
 */
29 * RTE_TABLE_ACTION_FWD
31 #define fwd_data rte_pipeline_table_entry
/* fwd_apply: store the forward action and its port/table id in the entry. */
34 fwd_apply(struct fwd_data *data,
35 struct rte_table_action_fwd_params *p)
37 data->action = p->action;
39 if (p->action == RTE_PIPELINE_ACTION_PORT)
40 data->port_id = p->id;
42 if (p->action == RTE_PIPELINE_ACTION_TABLE)
43 data->table_id = p->id;
/*
 * lb_cfg_check: key size must be within [MIN, MAX], a power of 2, and a
 * hash callback must be supplied (fragment — surrounding if/return missing).
 */
52 lb_cfg_check(struct rte_table_action_lb_config *cfg)
55 (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
56 (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
57 (!rte_is_power_of_2(cfg->key_size)) ||
58 (cfg->f_hash == NULL))
65 uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
66 } __attribute__((__packed__));
/* lb_apply: copy the full LB output table into the per-entry data. */
69 lb_apply(struct lb_data *data,
70 struct rte_table_action_lb_params *p)
72 memcpy(data->out, p->out, sizeof(data->out));
/*
 * pkt_work_lb: hash the packet key from mbuf metadata, mask the digest into
 * the LB table (table size is a power of 2), and write the selected output
 * value back to mbuf metadata at cfg->out_offset.
 */
77 static __rte_always_inline void
78 pkt_work_lb(struct rte_mbuf *mbuf,
80 struct rte_table_action_lb_config *cfg)
82 uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
83 uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
87 digest = cfg->f_hash(pkt_key,
91 pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
92 out_val = data->out[pos];
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * The low 8 bits of stats[RTE_COLOR_GREEN] are overloaded as packed config:
 * bits [3..7] = meter profile id, bit 2 = drop flag, bits [0..1] = output
 * color; bits 8+ hold the per-color packet counters.
 */
43 * RTE_TABLE_ACTION_MTR
101 mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
103 if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
104 ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
105 (mtr->n_bytes_enabled != 0))
110 struct mtr_trtcm_data {
111 struct rte_meter_trtcm trtcm;
112 uint64_t stats[RTE_COLORS];
113 } __attribute__((__packed__));
115 #define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
116 (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)
/* Pack the 5-bit profile id into bits [3..7] of the GREEN stats word. */
119 mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
122 data->stats[RTE_COLOR_GREEN] &= ~0xF8LLU;
123 data->stats[RTE_COLOR_GREEN] |= (profile_id % 32) << 3;
126 #define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
127 (((data)->stats[(color)] & 4LLU) >> 2)
129 #define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
130 ((enum rte_color)((data)->stats[(color)] & 3LLU))
/* Encode the policer action: drop flag in bit 2, recolor value in bits 0-1. */
133 mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
134 enum rte_color color,
135 enum rte_table_action_policer action)
137 if (action == RTE_TABLE_ACTION_POLICER_DROP) {
138 data->stats[color] |= 4LLU;
140 data->stats[color] &= ~7LLU;
141 data->stats[color] |= color & 3LLU;
/* Counter lives in bits 8+; shift out the packed config bits. */
146 mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
147 enum rte_color color)
149 return data->stats[color] >> 8;
/* Reset clears the counter but keeps the packed config (low 8 bits). */
153 mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
154 enum rte_color color)
156 data->stats[color] &= 0xFFLU;
159 #define MTR_TRTCM_DATA_STATS_INC(data, color) \
160 ((data)->stats[(color)] += (1LLU << 8))
163 mtr_data_size(struct rte_table_action_mtr_config *mtr)
165 return mtr->n_tc * sizeof(struct mtr_trtcm_data);
168 struct dscp_table_entry_data {
169 enum rte_color color;
174 struct dscp_table_data {
175 struct dscp_table_entry_data entry[64];
178 struct meter_profile_data {
179 struct rte_meter_trtcm_profile profile;
/* Linear search for a valid profile entry with matching profile_id. */
184 static struct meter_profile_data *
185 meter_profile_data_find(struct meter_profile_data *mp,
191 for (i = 0; i < mp_size; i++) {
192 struct meter_profile_data *mp_data = &mp[i];
194 if (mp_data->valid && (mp_data->profile_id == profile_id))
/* Linear search for a free (invalid) slot in the profile table. */
201 static struct meter_profile_data *
202 meter_profile_data_find_unused(struct meter_profile_data *mp,
207 for (i = 0; i < mp_size; i++) {
208 struct meter_profile_data *mp_data = &mp[i];
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * mtr_apply_check: validate the tc_mask and verify every selected traffic
 * class references an existing meter profile.
 */
218 mtr_apply_check(struct rte_table_action_mtr_params *p,
219 struct rte_table_action_mtr_config *cfg,
220 struct meter_profile_data *mp,
225 if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
228 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
229 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
230 struct meter_profile_data *mp_data;
232 if ((p->tc_mask & (1LLU << i)) == 0)
235 mp_data = meter_profile_data_find(mp,
237 p_tc->meter_profile_id);
/*
 * mtr_apply: for each traffic class in tc_mask, configure the trTCM run-time
 * context from the referenced profile, then record the profile id and the
 * per-color policer actions in the packed stats words.
 */
246 mtr_apply(struct mtr_trtcm_data *data,
247 struct rte_table_action_mtr_params *p,
248 struct rte_table_action_mtr_config *cfg,
249 struct meter_profile_data *mp,
255 /* Check input arguments */
256 status = mtr_apply_check(p, cfg, mp, mp_size);
261 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
262 struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
263 struct mtr_trtcm_data *data_tc = &data[i];
264 struct meter_profile_data *mp_data;
266 if ((p->tc_mask & (1LLU << i)) == 0)
270 mp_data = meter_profile_data_find(mp,
272 p_tc->meter_profile_id);
276 memset(data_tc, 0, sizeof(*data_tc));
279 status = rte_meter_trtcm_config(&data_tc->trtcm,
285 mtr_trtcm_data_meter_profile_id_set(data_tc,
288 /* Policer actions */
289 mtr_trtcm_data_policer_action_set(data_tc,
291 p_tc->policer[RTE_COLOR_GREEN]);
293 mtr_trtcm_data_policer_action_set(data_tc,
295 p_tc->policer[RTE_COLOR_YELLOW]);
297 mtr_trtcm_data_policer_action_set(data_tc,
299 p_tc->policer[RTE_COLOR_RED]);
/*
 * pkt_work_mtr: per-packet meter step — look up input color from the DSCP
 * table, run the color-aware trTCM check, bump the per-color counter, then
 * derive the drop decision and output color from the packed policer config.
 */
305 static __rte_always_inline uint64_t
306 pkt_work_mtr(struct rte_mbuf *mbuf,
307 struct mtr_trtcm_data *data,
308 struct dscp_table_data *dscp_table,
309 struct meter_profile_data *mp,
312 uint16_t total_length)
315 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
316 enum rte_color color_in, color_meter, color_policer;
320 color_in = dscp_entry->color;
322 mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
325 color_meter = rte_meter_trtcm_color_aware_check(
333 MTR_TRTCM_DATA_STATS_INC(data, color_meter);
336 drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
338 MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
339 rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * TM action: map (subport, pipe) plus per-DSCP traffic class/queue into the
 * hierarchical scheduler queue id stored in the mbuf.
 */
141 * RTE_TABLE_ACTION_TM
348 tm_cfg_check(struct rte_table_action_tm_config *tm)
350 if ((tm->n_subports_per_port == 0) ||
351 (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
352 (tm->n_subports_per_port > UINT16_MAX) ||
353 (tm->n_pipes_per_subport == 0) ||
354 (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
363 } __attribute__((__packed__));
366 tm_apply_check(struct rte_table_action_tm_params *p,
367 struct rte_table_action_tm_config *cfg)
369 if ((p->subport_id >= cfg->n_subports_per_port) ||
370 (p->pipe_id >= cfg->n_pipes_per_subport))
377 tm_apply(struct tm_data *data,
378 struct rte_table_action_tm_params *p,
379 struct rte_table_action_tm_config *cfg)
383 /* Check input arguments */
384 status = tm_apply_check(p, cfg);
/*
 * Pre-compute the subport/pipe part of the queue id; the extra +4 bits leave
 * room for the per-packet tc/queue selected from the DSCP table at run time.
 */
389 data->queue_id = p->subport_id <<
390 (__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
/* pkt_work_tm: OR in the DSCP-selected tc queue and tag the mbuf sched. */
396 static __rte_always_inline void
397 pkt_work_tm(struct rte_mbuf *mbuf,
398 struct tm_data *data,
399 struct dscp_table_data *dscp_table,
402 struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
403 uint32_t queue_id = data->queue_id |
404 dscp_entry->tc_queue;
405 rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
406 (uint8_t)dscp_entry->color);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * ENCAP action: packed on-wire header templates, one struct per encap type,
 * plus field-packing macros for VLAN TCI and MPLS label stack entries.
 */
169 * RTE_TABLE_ACTION_ENCAP
413 encap_valid(enum rte_table_action_encap_type encap)
416 case RTE_TABLE_ACTION_ENCAP_ETHER:
417 case RTE_TABLE_ACTION_ENCAP_VLAN:
418 case RTE_TABLE_ACTION_ENCAP_QINQ:
419 case RTE_TABLE_ACTION_ENCAP_MPLS:
420 case RTE_TABLE_ACTION_ENCAP_PPPOE:
421 case RTE_TABLE_ACTION_ENCAP_VXLAN:
422 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
/* Exactly one encap type must be selected in the configuration mask. */
430 encap_cfg_check(struct rte_table_action_encap_config *encap)
432 if ((encap->encap_mask == 0) ||
433 (__builtin_popcountll(encap->encap_mask) != 1))
439 struct encap_ether_data {
440 struct rte_ether_hdr ether;
441 } __attribute__((__packed__));
/* 802.1Q TCI: PCP[15:13] | DEI[12] | VID[11:0]. */
443 #define VLAN(pcp, dei, vid) \
444 ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
445 ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
446 (((uint64_t)(vid)) & 0xFFFLLU)) \
448 struct encap_vlan_data {
449 struct rte_ether_hdr ether;
450 struct rte_vlan_hdr vlan;
451 } __attribute__((__packed__));
453 struct encap_qinq_data {
454 struct rte_ether_hdr ether;
455 struct rte_vlan_hdr svlan;
456 struct rte_vlan_hdr cvlan;
457 } __attribute__((__packed__));
459 #define ETHER_TYPE_MPLS_UNICAST 0x8847
461 #define ETHER_TYPE_MPLS_MULTICAST 0x8848
/* MPLS label stack entry: label[31:12] | TC[11:9] | S[8] | TTL[7:0]. */
463 #define MPLS(label, tc, s, ttl) \
464 ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
465 ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
466 ((((uint64_t)(s)) & 0x1LLU) << 8) | \
467 (((uint64_t)(ttl)) & 0xFFLLU)))
469 struct encap_mpls_data {
470 struct rte_ether_hdr ether;
471 uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
473 } __attribute__((__packed__));
475 #define PPP_PROTOCOL_IP 0x0021
477 struct pppoe_ppp_hdr {
478 uint16_t ver_type_code;
482 } __attribute__((__packed__));
484 struct encap_pppoe_data {
485 struct rte_ether_hdr ether;
486 struct pppoe_ppp_hdr pppoe_ppp;
487 } __attribute__((__packed__));
489 #define IP_PROTO_UDP 17
491 struct encap_vxlan_ipv4_data {
492 struct rte_ether_hdr ether;
493 struct rte_ipv4_hdr ipv4;
494 struct rte_udp_hdr udp;
495 struct rte_vxlan_hdr vxlan;
496 } __attribute__((__packed__));
498 struct encap_vxlan_ipv4_vlan_data {
499 struct rte_ether_hdr ether;
500 struct rte_vlan_hdr vlan;
501 struct rte_ipv4_hdr ipv4;
502 struct rte_udp_hdr udp;
503 struct rte_vxlan_hdr vxlan;
504 } __attribute__((__packed__));
506 struct encap_vxlan_ipv6_data {
507 struct rte_ether_hdr ether;
508 struct rte_ipv6_hdr ipv6;
509 struct rte_udp_hdr udp;
510 struct rte_vxlan_hdr vxlan;
511 } __attribute__((__packed__));
513 struct encap_vxlan_ipv6_vlan_data {
514 struct rte_ether_hdr ether;
515 struct rte_vlan_hdr vlan;
516 struct rte_ipv6_hdr ipv6;
517 struct rte_udp_hdr udp;
518 struct rte_vxlan_hdr vxlan;
519 } __attribute__((__packed__));
521 struct encap_qinq_pppoe_data {
522 struct rte_ether_hdr ether;
523 struct rte_vlan_hdr svlan;
524 struct rte_vlan_hdr cvlan;
525 struct pppoe_ppp_hdr pppoe_ppp;
526 } __attribute__((__packed__));
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * encap_data_size: per-entry action data size for the single encap type
 * selected in encap_mask (VXLAN variant chosen by ip_version/vlan flags).
 */
529 encap_data_size(struct rte_table_action_encap_config *encap)
531 switch (encap->encap_mask) {
532 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
533 return sizeof(struct encap_ether_data);
535 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
536 return sizeof(struct encap_vlan_data);
538 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
539 return sizeof(struct encap_qinq_data);
541 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
542 return sizeof(struct encap_mpls_data);
544 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
545 return sizeof(struct encap_pppoe_data);
547 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
548 if (encap->vxlan.ip_version)
549 if (encap->vxlan.vlan)
550 return sizeof(struct encap_vxlan_ipv4_vlan_data);
552 return sizeof(struct encap_vxlan_ipv4_data);
554 if (encap->vxlan.vlan)
555 return sizeof(struct encap_vxlan_ipv6_vlan_data);
557 return sizeof(struct encap_vxlan_ipv6_data);
559 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
560 return sizeof(struct encap_qinq_pppoe_data);
/*
 * encap_apply_check: the requested type must be valid and enabled in the
 * config mask; MPLS additionally bounds the label count.
 */
568 encap_apply_check(struct rte_table_action_encap_params *p,
569 struct rte_table_action_encap_config *cfg)
571 if ((encap_valid(p->type) == 0) ||
572 ((cfg->encap_mask & (1LLU << p->type)) == 0))
576 case RTE_TABLE_ACTION_ENCAP_ETHER:
579 case RTE_TABLE_ACTION_ENCAP_VLAN:
582 case RTE_TABLE_ACTION_ENCAP_QINQ:
585 case RTE_TABLE_ACTION_ENCAP_MPLS:
586 if ((p->mpls.mpls_count == 0) ||
587 (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
592 case RTE_TABLE_ACTION_ENCAP_PPPOE:
595 case RTE_TABLE_ACTION_ENCAP_VXLAN:
598 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * encap_ether_apply: fill the Ethernet header template; ethertype follows
 * the pipeline's common ip_version (IPv4 when set, IPv6 otherwise —
 * the IPv6 arm is on a missing line, presumably RTE_ETHER_TYPE_IPV6).
 */
607 encap_ether_apply(void *data,
608 struct rte_table_action_encap_params *p,
609 struct rte_table_action_common_config *common_cfg)
611 struct encap_ether_data *d = data;
612 uint16_t ethertype = (common_cfg->ip_version) ?
613 RTE_ETHER_TYPE_IPV4 :
617 rte_ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
618 rte_ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
619 d->ether.ether_type = rte_htons(ethertype);
/* encap_vlan_apply: Ethernet + single 802.1Q tag template. */
625 encap_vlan_apply(void *data,
626 struct rte_table_action_encap_params *p,
627 struct rte_table_action_common_config *common_cfg)
629 struct encap_vlan_data *d = data;
630 uint16_t ethertype = (common_cfg->ip_version) ?
631 RTE_ETHER_TYPE_IPV4 :
635 rte_ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
636 rte_ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
637 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
640 d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
643 d->vlan.eth_proto = rte_htons(ethertype);
/* encap_qinq_apply: Ethernet + outer (svlan, 0x88A8) + inner (cvlan) tags. */
649 encap_qinq_apply(void *data,
650 struct rte_table_action_encap_params *p,
651 struct rte_table_action_common_config *common_cfg)
653 struct encap_qinq_data *d = data;
654 uint16_t ethertype = (common_cfg->ip_version) ?
655 RTE_ETHER_TYPE_IPV4 :
659 rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
660 rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
661 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_QINQ);
664 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
667 d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
670 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
673 d->cvlan.eth_proto = rte_htons(ethertype);
679 encap_qinq_pppoe_apply(void *data,
680 struct rte_table_action_encap_params *p)
682 struct encap_qinq_pppoe_data *d = data;
685 rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
686 rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
687 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
690 d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
693 d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);
696 d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
699 d->cvlan.eth_proto = rte_htons(ETHER_TYPE_PPPOE_SESSION);
702 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
703 d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id);
704 d->pppoe_ppp.length = 0; /* not pre-computed */
705 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * encap_mpls_apply: Ethernet header plus an MPLS label stack; the loop
 * writes all labels but the last, the final store (presumably with the
 * bottom-of-stack S bit set — confirm against full source) writes the last.
 */
711 encap_mpls_apply(void *data,
712 struct rte_table_action_encap_params *p)
714 struct encap_mpls_data *d = data;
715 uint16_t ethertype = (p->mpls.unicast) ?
716 ETHER_TYPE_MPLS_UNICAST :
717 ETHER_TYPE_MPLS_MULTICAST;
721 rte_ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
722 rte_ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
723 d->ether.ether_type = rte_htons(ethertype);
726 for (i = 0; i < p->mpls.mpls_count - 1; i++)
727 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
730 p->mpls.mpls[i].ttl));
732 d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
735 p->mpls.mpls[i].ttl));
737 d->mpls_count = p->mpls.mpls_count;
/*
 * encap_pppoe_apply: Ethernet + PPPoE session header + PPP protocol field.
 * ver_type_code 0x1100 = PPPoE version 1, type 1, code 0 (session data).
 */
742 encap_pppoe_apply(void *data,
743 struct rte_table_action_encap_params *p)
745 struct encap_pppoe_data *d = data;
748 rte_ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
749 rte_ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
750 d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
753 d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
754 d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
755 d->pppoe_ppp.length = 0; /* not pre-computed */
756 d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * encap_vxlan_apply: validate VNI/DSCP/flow-label/VID ranges, then build one
 * of four outer-header templates (IPv4/IPv6 x with/without VLAN). Lengths
 * and UDP checksum are left zero — completed per packet at run time; the
 * IPv4 header checksum is pre-computed here and patched later.
 */
762 encap_vxlan_apply(void *data,
763 struct rte_table_action_encap_params *p,
764 struct rte_table_action_encap_config *cfg)
766 if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
767 (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
768 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
769 (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
770 (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
773 if (cfg->vxlan.ip_version)
774 if (cfg->vxlan.vlan) {
775 struct encap_vxlan_ipv4_vlan_data *d = data;
778 rte_ether_addr_copy(&p->vxlan.ether.da,
780 rte_ether_addr_copy(&p->vxlan.ether.sa,
782 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
785 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
788 d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV4);
791 d->ipv4.version_ihl = 0x45;
792 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
793 d->ipv4.total_length = 0; /* not pre-computed */
794 d->ipv4.packet_id = 0;
795 d->ipv4.fragment_offset = 0;
796 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
797 d->ipv4.next_proto_id = IP_PROTO_UDP;
798 d->ipv4.hdr_checksum = 0;
799 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
800 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
802 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
805 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
806 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
807 d->udp.dgram_len = 0; /* not pre-computed */
808 d->udp.dgram_cksum = 0;
811 d->vxlan.vx_flags = rte_htonl(0x08000000);
812 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
816 struct encap_vxlan_ipv4_data *d = data;
819 rte_ether_addr_copy(&p->vxlan.ether.da,
821 rte_ether_addr_copy(&p->vxlan.ether.sa,
823 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV4);
826 d->ipv4.version_ihl = 0x45;
827 d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
828 d->ipv4.total_length = 0; /* not pre-computed */
829 d->ipv4.packet_id = 0;
830 d->ipv4.fragment_offset = 0;
831 d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
832 d->ipv4.next_proto_id = IP_PROTO_UDP;
833 d->ipv4.hdr_checksum = 0;
834 d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
835 d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
837 d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
840 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
841 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
842 d->udp.dgram_len = 0; /* not pre-computed */
843 d->udp.dgram_cksum = 0;
846 d->vxlan.vx_flags = rte_htonl(0x08000000);
847 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
852 if (cfg->vxlan.vlan) {
853 struct encap_vxlan_ipv6_vlan_data *d = data;
856 rte_ether_addr_copy(&p->vxlan.ether.da,
858 rte_ether_addr_copy(&p->vxlan.ether.sa,
860 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);
863 d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
866 d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV6);
869 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
870 (p->vxlan.ipv6.dscp << 22) |
871 p->vxlan.ipv6.flow_label);
872 d->ipv6.payload_len = 0; /* not pre-computed */
873 d->ipv6.proto = IP_PROTO_UDP;
874 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
875 memcpy(d->ipv6.src_addr,
877 sizeof(p->vxlan.ipv6.sa));
878 memcpy(d->ipv6.dst_addr,
880 sizeof(p->vxlan.ipv6.da));
883 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
884 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
885 d->udp.dgram_len = 0; /* not pre-computed */
886 d->udp.dgram_cksum = 0;
889 d->vxlan.vx_flags = rte_htonl(0x08000000);
890 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
894 struct encap_vxlan_ipv6_data *d = data;
897 rte_ether_addr_copy(&p->vxlan.ether.da,
899 rte_ether_addr_copy(&p->vxlan.ether.sa,
901 d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV6);
904 d->ipv6.vtc_flow = rte_htonl((6 << 28) |
905 (p->vxlan.ipv6.dscp << 22) |
906 p->vxlan.ipv6.flow_label);
907 d->ipv6.payload_len = 0; /* not pre-computed */
908 d->ipv6.proto = IP_PROTO_UDP;
909 d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
910 memcpy(d->ipv6.src_addr,
912 sizeof(p->vxlan.ipv6.sa));
913 memcpy(d->ipv6.dst_addr,
915 sizeof(p->vxlan.ipv6.da));
918 d->udp.src_port = rte_htons(p->vxlan.udp.sp);
919 d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
920 d->udp.dgram_len = 0; /* not pre-computed */
921 d->udp.dgram_cksum = 0;
924 d->vxlan.vx_flags = rte_htonl(0x08000000);
925 d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * encap_apply: validate, then dispatch to the per-type template builder.
 */
932 encap_apply(void *data,
933 struct rte_table_action_encap_params *p,
934 struct rte_table_action_encap_config *cfg,
935 struct rte_table_action_common_config *common_cfg)
939 /* Check input arguments */
940 status = encap_apply_check(p, cfg);
945 case RTE_TABLE_ACTION_ENCAP_ETHER:
946 return encap_ether_apply(data, p, common_cfg);
948 case RTE_TABLE_ACTION_ENCAP_VLAN:
949 return encap_vlan_apply(data, p, common_cfg);
951 case RTE_TABLE_ACTION_ENCAP_QINQ:
952 return encap_qinq_apply(data, p, common_cfg);
954 case RTE_TABLE_ACTION_ENCAP_MPLS:
955 return encap_mpls_apply(data, p);
957 case RTE_TABLE_ACTION_ENCAP_PPPOE:
958 return encap_pppoe_apply(data, p);
960 case RTE_TABLE_ACTION_ENCAP_VXLAN:
961 return encap_vxlan_apply(data, p, cfg);
963 case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
964 return encap_qinq_pppoe_apply(data, p);
/*
 * Incrementally patch the pre-computed IPv4 header checksum after the
 * total_length field changes (RFC 1624 style one's-complement update).
 */
971 static __rte_always_inline uint16_t
972 encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
973 uint16_t total_length)
978 cksum1 = ~cksum1 & 0xFFFF;
980 /* Add total length (one's complement logic) */
981 cksum1 += total_length;
982 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
983 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
985 return (uint16_t)(~cksum1);
/* encap: prepend n bytes of template in front of dst and return new start. */
988 static __rte_always_inline void *
989 encap(void *dst, const void *src, size_t n)
991 dst = ((uint8_t *) dst) - n;
992 return rte_memcpy(dst, src, n);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * pkt_work_encap_vxlan_*: per-packet VXLAN encapsulation. Prepend the outer
 * header template in front of the inner Ethernet frame, then fill in the
 * run-time length fields (IPv4 total_length / IPv6 payload_len, UDP
 * dgram_len) and patch the IPv4 header checksum incrementally.
 */
995 static __rte_always_inline void
996 pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
997 struct encap_vxlan_ipv4_data *vxlan_tbl,
998 struct rte_table_action_encap_config *cfg)
1000 uint32_t ether_offset = cfg->vxlan.data_offset;
1001 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1002 struct encap_vxlan_ipv4_data *vxlan_pkt;
1003 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
1005 ether_length = (uint16_t)mbuf->pkt_len;
1006 ipv4_total_length = ether_length +
1007 (sizeof(struct rte_vxlan_hdr) +
1008 sizeof(struct rte_udp_hdr) +
1009 sizeof(struct rte_ipv4_hdr));
1010 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
1011 rte_htons(ipv4_total_length));
1012 udp_length = ether_length +
1013 (sizeof(struct rte_vxlan_hdr) +
1014 sizeof(struct rte_udp_hdr));
1016 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1017 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1018 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1019 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1021 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1022 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* Same as above, with an 802.1Q tag between Ethernet and IPv4. */
1025 static __rte_always_inline void
1026 pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
1027 struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
1028 struct rte_table_action_encap_config *cfg)
1030 uint32_t ether_offset = cfg->vxlan.data_offset;
1031 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1032 struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
1033 uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
1035 ether_length = (uint16_t)mbuf->pkt_len;
1036 ipv4_total_length = ether_length +
1037 (sizeof(struct rte_vxlan_hdr) +
1038 sizeof(struct rte_udp_hdr) +
1039 sizeof(struct rte_ipv4_hdr));
1040 ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
1041 rte_htons(ipv4_total_length));
1042 udp_length = ether_length +
1043 (sizeof(struct rte_vxlan_hdr) +
1044 sizeof(struct rte_udp_hdr));
1046 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1047 vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
1048 vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
1049 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1051 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1052 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* IPv6 outer header: no checksum to patch; only the two length fields. */
1055 static __rte_always_inline void
1056 pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
1057 struct encap_vxlan_ipv6_data *vxlan_tbl,
1058 struct rte_table_action_encap_config *cfg)
1060 uint32_t ether_offset = cfg->vxlan.data_offset;
1061 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1062 struct encap_vxlan_ipv6_data *vxlan_pkt;
1063 uint16_t ether_length, ipv6_payload_length, udp_length;
1065 ether_length = (uint16_t)mbuf->pkt_len;
1066 ipv6_payload_length = ether_length +
1067 (sizeof(struct rte_vxlan_hdr) +
1068 sizeof(struct rte_udp_hdr));
1069 udp_length = ether_length +
1070 (sizeof(struct rte_vxlan_hdr) +
1071 sizeof(struct rte_udp_hdr));
1073 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1074 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1075 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1077 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1078 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/* IPv6 + VLAN variant. */
1081 static __rte_always_inline void
1082 pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
1083 struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
1084 struct rte_table_action_encap_config *cfg)
1086 uint32_t ether_offset = cfg->vxlan.data_offset;
1087 void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
1088 struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
1089 uint16_t ether_length, ipv6_payload_length, udp_length;
1091 ether_length = (uint16_t)mbuf->pkt_len;
1092 ipv6_payload_length = ether_length +
1093 (sizeof(struct rte_vxlan_hdr) +
1094 sizeof(struct rte_udp_hdr));
1095 udp_length = ether_length +
1096 (sizeof(struct rte_vxlan_hdr) +
1097 sizeof(struct rte_udp_hdr));
1099 vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
1100 vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
1101 vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
1103 mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
1104 mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * pkt_work_encap: per-packet dispatch on the (single-bit) encap_mask —
 * prepend the right template in front of the IP header and adjust
 * data_off/pkt_len/data_len. PPPoE variants also fill the PPP length
 * (+2 covers the PPP protocol field).
 */
1107 static __rte_always_inline void
1108 pkt_work_encap(struct rte_mbuf *mbuf,
1110 struct rte_table_action_encap_config *cfg,
1112 uint16_t total_length,
1115 switch (cfg->encap_mask) {
1116 case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
1117 encap(ip, data, sizeof(struct encap_ether_data));
1118 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1119 sizeof(struct encap_ether_data));
1120 mbuf->pkt_len = mbuf->data_len = total_length +
1121 sizeof(struct encap_ether_data);
1124 case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
1125 encap(ip, data, sizeof(struct encap_vlan_data));
1126 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1127 sizeof(struct encap_vlan_data));
1128 mbuf->pkt_len = mbuf->data_len = total_length +
1129 sizeof(struct encap_vlan_data);
1132 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
1133 encap(ip, data, sizeof(struct encap_qinq_data));
1134 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1135 sizeof(struct encap_qinq_data));
1136 mbuf->pkt_len = mbuf->data_len = total_length +
1137 sizeof(struct encap_qinq_data);
1140 case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
/* MPLS: size depends on the entry's label count (4 bytes per label). */
1142 struct encap_mpls_data *mpls = data;
1143 size_t size = sizeof(struct rte_ether_hdr) +
1144 mpls->mpls_count * 4;
1146 encap(ip, data, size);
1147 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
1148 mbuf->pkt_len = mbuf->data_len = total_length + size;
1152 case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
1154 struct encap_pppoe_data *pppoe =
1155 encap(ip, data, sizeof(struct encap_pppoe_data));
1156 pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1157 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1158 sizeof(struct encap_pppoe_data));
1159 mbuf->pkt_len = mbuf->data_len = total_length +
1160 sizeof(struct encap_pppoe_data);
1164 case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
1166 struct encap_qinq_pppoe_data *qinq_pppoe =
1167 encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
1168 qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
1169 mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
1170 sizeof(struct encap_qinq_pppoe_data));
1171 mbuf->pkt_len = mbuf->data_len = total_length +
1172 sizeof(struct encap_qinq_pppoe_data);
1176 case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
1178 if (cfg->vxlan.ip_version)
1179 if (cfg->vxlan.vlan)
1180 pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
1182 pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
1184 if (cfg->vxlan.vlan)
1185 pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
1187 pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * NAT action: protocol is restricted to TCP (0x06) or UDP (0x11); per-entry
 * data stores the replacement address/port in network byte order.
 */
628 * RTE_TABLE_ACTION_NAT
1199 nat_cfg_check(struct rte_table_action_nat_config *nat)
1201 if ((nat->proto != 0x06) &&
1202 (nat->proto != 0x11))
1208 struct nat_ipv4_data {
1211 } __attribute__((__packed__));
1213 struct nat_ipv6_data {
1216 } __attribute__((__packed__));
1219 nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
1220 struct rte_table_action_common_config *common)
1222 int ip_version = common->ip_version;
1224 return (ip_version) ?
1225 sizeof(struct nat_ipv4_data) :
1226 sizeof(struct nat_ipv6_data);
/* Entry's IP version must match the pipeline-wide configuration. */
1230 nat_apply_check(struct rte_table_action_nat_params *p,
1231 struct rte_table_action_common_config *cfg)
1233 if ((p->ip_version && (cfg->ip_version == 0)) ||
1234 ((p->ip_version == 0) && cfg->ip_version))
1241 nat_apply(void *data,
1242 struct rte_table_action_nat_params *p,
1243 struct rte_table_action_common_config *cfg)
1247 /* Check input arguments */
1248 status = nat_apply_check(p, cfg)
1253 if (p->ip_version) {
1254 struct nat_ipv4_data *d = data;
1256 d->addr = rte_htonl(p->addr.ipv4);
1257 d->port = rte_htons(p->port);
1259 struct nat_ipv6_data *d = data;
1261 memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
1262 d->port = rte_htons(p->port);
/*
 * Incremental one's-complement checksum updates (RFC 1624): fold out the
 * old address/port words, fold in the new ones; two folds per step absorb
 * any carry.
 */
1268 static __rte_always_inline uint16_t
1269 nat_ipv4_checksum_update(uint16_t cksum0,
1276 cksum1 = ~cksum1 & 0xFFFF;
1278 /* Subtract ip0 (one's complement logic) */
1279 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
1280 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1281 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1283 /* Add ip1 (one's complement logic) */
1284 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
1285 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1286 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1288 return (uint16_t)(~cksum1);
1291 static __rte_always_inline uint16_t
1292 nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
1301 cksum1 = ~cksum1 & 0xFFFF;
1303 /* Subtract ip0 and port 0 (one's complement logic) */
1304 cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
1305 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1306 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1308 /* Add ip1 and port1 (one's complement logic) */
1309 cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
1310 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1311 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1313 return (uint16_t)(~cksum1);
/* IPv6 variant: address contributes eight 16-bit words to the fold. */
1316 static __rte_always_inline uint16_t
1317 nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
1326 cksum1 = ~cksum1 & 0xFFFF;
1328 /* Subtract ip0 and port 0 (one's complement logic) */
1329 cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
1330 ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
1331 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1332 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1334 /* Add ip1 and port1 (one's complement logic) */
1335 cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
1336 ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
1337 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1338 cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
1340 return (uint16_t)(~cksum1);
/*
 * NOTE(review): sampled fragment — interior lines missing; code verbatim.
 * pkt_ipv4_work_nat: rewrite src (SNAT) or dst (DNAT) address+port for
 * TCP/UDP, patching the IP header checksum and the L4 checksum
 * incrementally. Assumes the L4 header immediately follows a 20-byte IP
 * header (&ip[1]) — i.e. no IP options. A zero UDP checksum is left as
 * zero ("checksum not computed" per RFC 768).
 */
1343 static __rte_always_inline void
1344 pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
1345 struct nat_ipv4_data *data,
1346 struct rte_table_action_nat_config *cfg)
1348 if (cfg->source_nat) {
1349 if (cfg->proto == 0x6) {
1350 struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1351 uint16_t ip_cksum, tcp_cksum;
1353 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1357 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1363 ip->src_addr = data->addr;
1364 ip->hdr_checksum = ip_cksum;
1365 tcp->src_port = data->port;
1366 tcp->cksum = tcp_cksum;
1368 struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1369 uint16_t ip_cksum, udp_cksum;
1371 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1375 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1381 ip->src_addr = data->addr;
1382 ip->hdr_checksum = ip_cksum;
1383 udp->src_port = data->port;
1384 if (udp->dgram_cksum)
1385 udp->dgram_cksum = udp_cksum;
1388 if (cfg->proto == 0x6) {
1389 struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1390 uint16_t ip_cksum, tcp_cksum;
1392 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1396 tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
1402 ip->dst_addr = data->addr;
1403 ip->hdr_checksum = ip_cksum;
1404 tcp->dst_port = data->port;
1405 tcp->cksum = tcp_cksum;
1407 struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1408 uint16_t ip_cksum, udp_cksum;
1410 ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
1414 udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
1420 ip->dst_addr = data->addr;
1421 ip->hdr_checksum = ip_cksum;
1422 udp->dst_port = data->port;
1423 if (udp->dgram_cksum)
1424 udp->dgram_cksum = udp_cksum;
/*
 * Apply IPv6 NAT to one packet: rewrite source or destination
 * address (16 bytes) and port, patching only the L4 checksum (IPv6 has
 * no IP header checksum). proto 0x6 selects TCP; elided else-branches
 * handle UDP. Unlike the IPv4 path, the UDP checksum is written
 * unconditionally (a checksum is mandatory for UDP over IPv6).
 * NOTE(review): interior lines are elided in this chunk.
 */
1429 static __rte_always_inline void
1430 pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
1431 struct nat_ipv6_data *data,
1432 struct rte_table_action_nat_config *cfg)
1434 if (cfg->source_nat) {
1435 if (cfg->proto == 0x6) {
1436 struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
/* Checksum is updated from old/new addresses before the rewrite. */
1439 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1440 (uint16_t *)ip->src_addr,
1441 (uint16_t *)data->addr,
1445 rte_memcpy(ip->src_addr, data->addr, 16);
1446 tcp->src_port = data->port;
1447 tcp->cksum = tcp_cksum;
1449 struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1452 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1453 (uint16_t *)ip->src_addr,
1454 (uint16_t *)data->addr,
1458 rte_memcpy(ip->src_addr, data->addr, 16);
1459 udp->src_port = data->port;
1460 udp->dgram_cksum = udp_cksum;
/* Destination NAT: mirror image of the source-NAT branch. */
1463 if (cfg->proto == 0x6) {
1464 struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
1467 tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
1468 (uint16_t *)ip->dst_addr,
1469 (uint16_t *)data->addr,
1473 rte_memcpy(ip->dst_addr, data->addr, 16);
1474 tcp->dst_port = data->port;
1475 tcp->cksum = tcp_cksum;
1477 struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
1480 udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
1481 (uint16_t *)ip->dst_addr,
1482 (uint16_t *)data->addr,
1486 rte_memcpy(ip->dst_addr, data->addr, 16);
1487 udp->dst_port = data->port;
1488 udp->dgram_cksum = udp_cksum;
1494 * RTE_TABLE_ACTION_TTL
1497 ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
1507 } __attribute__((__packed__));
/*
 * ttl_data packs two things into the single n_packets field:
 *  - bit 0: the "decrement TTL" flag for this table entry;
 *  - bits 63..1: the counter of packets dropped due to TTL expiry.
 * The macros below read/write those two sub-fields.
 */
1509 #define TTL_INIT(data, decrement) \
1510 ((data)->n_packets = (decrement) ? 1 : 0)
/* Amount to subtract from the packet TTL (0 or 1). */
1512 #define TTL_DEC_GET(data) \
1513 ((uint8_t)((data)->n_packets & 1))
/* Clear the counter, keep the decrement flag. */
1515 #define TTL_STATS_RESET(data) \
1516 ((data)->n_packets = ((data)->n_packets & 1))
1518 #define TTL_STATS_READ(data) \
1519 ((data)->n_packets >> 1)
/* Add @value to the counter without disturbing the flag bit. */
1521 #define TTL_STATS_ADD(data, value) \
1522 ((data)->n_packets = \
1523 (((((data)->n_packets >> 1) + (value)) << 1) | \
1524 ((data)->n_packets & 1)))
/* Store the per-entry TTL action parameters (decrement on/off). */
1527 ttl_apply(void *data,
1528 struct rte_table_action_ttl_params *p)
1530 struct ttl_data *d = data;
1532 TTL_INIT(d, p->decrement);
/*
 * Decrement the IPv4 TTL (when the entry's flag is set) and adjust the
 * header checksum incrementally; returns nonzero drop indication when
 * the TTL reaches zero, and records the drop in the entry stats.
 * NOTE(review): the lines doing the actual ttl/cksum arithmetic are
 * elided in this chunk.
 */
1537 static __rte_always_inline uint64_t
1538 pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
1539 struct ttl_data *data)
1542 uint16_t cksum = ip->hdr_checksum;
1543 uint8_t ttl = ip->time_to_live;
1544 uint8_t ttl_diff = TTL_DEC_GET(data);
1549 ip->hdr_checksum = cksum;
1550 ip->time_to_live = ttl;
1552 drop = (ttl == 0) ? 1 : 0;
1553 TTL_STATS_ADD(data, drop);
/*
 * IPv6 counterpart of pkt_ipv4_work_ttl(): decrement hop_limits (no
 * checksum to fix for IPv6), flag a drop when it reaches zero.
 * NOTE(review): the decrement line itself is elided in this chunk.
 */
1558 static __rte_always_inline uint64_t
1559 pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
1560 struct ttl_data *data)
1563 uint8_t ttl = ip->hop_limits;
1564 uint8_t ttl_diff = TTL_DEC_GET(data);
1568 ip->hop_limits = ttl;
1570 drop = (ttl == 0) ? 1 : 0;
1571 TTL_STATS_ADD(data, drop);
1577 * RTE_TABLE_ACTION_STATS
/* Reject a stats config that enables neither packet nor byte counters. */
1580 stats_cfg_check(struct rte_table_action_stats_config *stats)
1582 if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
1591 } __attribute__((__packed__));
/* Seed the per-entry packet/byte counters from the action parameters. */
1594 stats_apply(struct stats_data *data,
1595 struct rte_table_action_stats_params *p)
1597 data->n_packets = p->n_packets;
1598 data->n_bytes = p->n_bytes;
/*
 * Per-packet stats update: accumulate the IP total length into the byte
 * counter. NOTE(review): the packet-counter increment line appears to be
 * elided in this chunk.
 */
1603 static __rte_always_inline void
1604 pkt_work_stats(struct stats_data *data,
1605 uint16_t total_length)
1608 data->n_bytes += total_length;
1612 * RTE_TABLE_ACTION_TIME
1616 } __attribute__((__packed__));
/* Initialize the per-entry timestamp from the action parameters. */
1619 time_apply(struct time_data *data,
1620 struct rte_table_action_time_params *p)
1622 data->time = p->time;
/* Per-packet handler refreshing the entry timestamp (body elided here). */
1626 static __rte_always_inline void
1627 pkt_work_time(struct time_data *data,
1635 * RTE_TABLE_ACTION_CRYPTO
/* Bit flags recorded in sym_crypto_data.op_mask per configured xform. */
1638 #define CRYPTO_OP_MASK_CIPHER 0x1
1639 #define CRYPTO_OP_MASK_AUTH 0x2
1640 #define CRYPTO_OP_MASK_AEAD 0x4
/*
 * Crypto op layout built inside the mbuf metadata area: the generic op,
 * the symmetric op, then a union of per-mode IV/AAD scratch buffers
 * (cipher+auth IVs, or AEAD IV+AAD). Union member lines are partially
 * elided in this chunk.
 */
1642 struct crypto_op_sym_iv_aad {
1643 struct rte_crypto_op op;
1644 struct rte_crypto_sym_op sym_op;
1648 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1650 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1654 uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
1655 uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
/*
 * Per-table-entry state for the SYM_CRYPTO action: IV/AAD geometry
 * (lengths, packet offsets, update lengths), digest/block sizes, the
 * xform mask, the cryptodev session, and an inline scratch buffer
 * holding the configured IV/AAD bytes.
 */
1661 struct sym_crypto_data {
1666 /** Length of cipher iv. */
1667 uint16_t cipher_iv_len;
1669 /** Offset from start of IP header to the cipher iv. */
1670 uint16_t cipher_iv_data_offset;
1672 /** Length of cipher iv to be updated in the mbuf. */
1673 uint16_t cipher_iv_update_len;
1675 /** Offset from start of IP header to the auth iv. */
1676 uint16_t auth_iv_data_offset;
1678 /** Length of auth iv in the mbuf. */
1679 uint16_t auth_iv_len;
1681 /** Length of auth iv to be updated in the mbuf. */
1682 uint16_t auth_iv_update_len;
1687 /** Length of iv. */
1690 /** Offset from start of IP header to the aead iv. */
1691 uint16_t iv_data_offset;
1693 /** Length of iv to be updated in the mbuf. */
1694 uint16_t iv_update_len;
1696 /** Length of aad */
1699 /** Offset from start of IP header to the aad. */
1700 uint16_t aad_data_offset;
1702 /** Length of aad to updated in the mbuf. */
1703 uint16_t aad_update_len;
1708 /** Offset from start of IP header to the data. */
1709 uint16_t data_offset;
1711 /** Digest length. */
1712 uint16_t digest_len;
1715 uint16_t block_size;
1717 /** Mask of crypto operation */
1720 /** Session pointer. */
1721 struct rte_cryptodev_sym_session *session;
1723 /** Direction of crypto, encrypt or decrypt */
1726 /** Private data size to store cipher iv / aad. */
1727 uint8_t iv_aad_data[32];
1729 } __attribute__((__packed__));
/*
 * Validate the SYM_CRYPTO action config: the cryptodev id must name a
 * valid device and both session mempools must be provided.
 */
1732 sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
1734 if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
1736 if (cfg->mp_create == NULL || cfg->mp_init == NULL)
/*
 * Look up the cipher/AEAD block size for @xform in the capability list
 * of cryptodev @cdev_id. Scans until the UNDEFINED terminator entry;
 * returns the matching algorithm's block size (failure-path lines are
 * elided in this chunk).
 */
1743 get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
1745 struct rte_cryptodev_info dev_info;
1746 const struct rte_cryptodev_capabilities *cap;
1749 rte_cryptodev_info_get(cdev_id, &dev_info);
1751 for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
1753 cap = &dev_info.capabilities[i];
/* Skip capabilities for other xform types. */
1755 if (cap->sym.xform_type != xform->type)
1758 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
1759 (cap->sym.cipher.algo == xform->cipher.algo))
1760 return cap->sym.cipher.block_size;
1762 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
1763 (cap->sym.aead.algo == xform->aead.algo))
1764 return cap->sym.aead.block_size;
1766 if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
/*
 * Build the per-entry SYM_CRYPTO state from the user parameters: walk
 * the xform chain (cipher / auth / AEAD), validate IV/AAD sizes and
 * offsets, record lengths/offsets and the configured IV/AAD bytes in
 * data->iv_aad_data, then create and initialize the cryptodev session.
 * NOTE(review): many interior lines (error returns, loop header,
 * closing braces) are elided in this chunk.
 */
1774 sym_crypto_apply(struct sym_crypto_data *data,
1775 struct rte_table_action_sym_crypto_config *cfg,
1776 struct rte_table_action_sym_crypto_params *p)
1778 const struct rte_crypto_cipher_xform *cipher_xform = NULL;
1779 const struct rte_crypto_auth_xform *auth_xform = NULL;
1780 const struct rte_crypto_aead_xform *aead_xform = NULL;
1781 struct rte_crypto_sym_xform *xform = p->xform;
1782 struct rte_cryptodev_sym_session *session;
1785 memset(data, 0, sizeof(*data));
/* --- Cipher xform: validate IV, cache block size and IV geometry. --- */
1788 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1789 cipher_xform = &xform->cipher;
1791 if (cipher_xform->iv.length >
1792 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
1794 if (cipher_xform->iv.offset !=
1795 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
1798 ret = get_block_size(xform, cfg->cryptodev_id);
1801 data->block_size = (uint16_t)ret;
1802 data->op_mask |= CRYPTO_OP_MASK_CIPHER;
1804 data->cipher_auth.cipher_iv_len =
1805 cipher_xform->iv.length;
1806 data->cipher_auth.cipher_iv_data_offset = (uint16_t)
1807 p->cipher_auth.cipher_iv_update.offset;
1808 data->cipher_auth.cipher_iv_update_len = (uint16_t)
1809 p->cipher_auth.cipher_iv_update.length;
/* Stash the static cipher IV at the start of the scratch buffer. */
1811 rte_memcpy(data->iv_aad_data,
1812 p->cipher_auth.cipher_iv.val,
1813 p->cipher_auth.cipher_iv.length);
1815 data->direction = cipher_xform->op;
/* --- Auth xform: cache IV geometry and digest length. --- */
1817 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1818 auth_xform = &xform->auth;
1819 if (auth_xform->iv.length >
1820 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
1822 data->op_mask |= CRYPTO_OP_MASK_AUTH;
1824 data->cipher_auth.auth_iv_len = auth_xform->iv.length;
1825 data->cipher_auth.auth_iv_data_offset = (uint16_t)
1826 p->cipher_auth.auth_iv_update.offset;
1827 data->cipher_auth.auth_iv_update_len = (uint16_t)
1828 p->cipher_auth.auth_iv_update.length;
1829 data->digest_len = auth_xform->digest_length;
/* Map GENERATE/VERIFY onto the encrypt/decrypt direction flag. */
1831 data->direction = (auth_xform->op ==
1832 RTE_CRYPTO_AUTH_OP_GENERATE) ?
1833 RTE_CRYPTO_CIPHER_OP_ENCRYPT :
1834 RTE_CRYPTO_CIPHER_OP_DECRYPT;
/* --- AEAD xform: validate IV+AAD, cache their geometry. --- */
1836 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1837 aead_xform = &xform->aead;
1839 if ((aead_xform->iv.length >
1840 RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
1841 aead_xform->aad_length >
1842 RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
1844 if (aead_xform->iv.offset !=
1845 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
1848 ret = get_block_size(xform, cfg->cryptodev_id);
1851 data->block_size = (uint16_t)ret;
1852 data->op_mask |= CRYPTO_OP_MASK_AEAD;
1854 data->digest_len = aead_xform->digest_length;
1855 data->aead.iv_len = aead_xform->iv.length;
1856 data->aead.aad_len = aead_xform->aad_length;
1858 data->aead.iv_data_offset = (uint16_t)
1859 p->aead.iv_update.offset;
1860 data->aead.iv_update_len = (uint16_t)
1861 p->aead.iv_update.length;
1862 data->aead.aad_data_offset = (uint16_t)
1863 p->aead.aad_update.offset;
1864 data->aead.aad_update_len = (uint16_t)
1865 p->aead.aad_update.length;
/* Scratch layout for AEAD: IV first, AAD immediately after it. */
1867 rte_memcpy(data->iv_aad_data,
1871 rte_memcpy(data->iv_aad_data + p->aead.iv.length,
1873 p->aead.aad.length);
1875 data->direction = (aead_xform->op ==
1876 RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1877 RTE_CRYPTO_CIPHER_OP_ENCRYPT :
1878 RTE_CRYPTO_CIPHER_OP_DECRYPT;
/* Advance to the next xform in the chain. */
1882 xform = xform->next;
/* Auth IV must follow the cipher IV contiguously in the op area;
 * copy it after the cipher IV (or at the start when no cipher). */
1885 if (auth_xform && auth_xform->iv.length) {
1887 if (auth_xform->iv.offset !=
1888 RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
1889 cipher_xform->iv.length)
1892 rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
1893 p->cipher_auth.auth_iv.val,
1894 p->cipher_auth.auth_iv.length);
1896 rte_memcpy(data->iv_aad_data,
1897 p->cipher_auth.auth_iv.val,
1898 p->cipher_auth.auth_iv.length);
/* Create and initialize the cryptodev session; free it on init error. */
1902 session = rte_cryptodev_sym_session_create(cfg->mp_create);
1906 ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
1907 p->xform, cfg->mp_init);
1909 rte_cryptodev_sym_session_free(session);
1913 data->data_offset = (uint16_t)p->data_offset;
1914 data->session = session;
/*
 * Per-packet SYM_CRYPTO work: build a crypto op in the mbuf metadata
 * area (at cfg->op_offset), pad the payload to the cipher block size on
 * encrypt, then fill in cipher/auth/AEAD fields (data ranges, digest
 * location, IVs/AAD) according to data->op_mask. Returns a drop mask.
 * NOTE(review): many interior lines are elided in this chunk.
 */
1919 static __rte_always_inline uint64_t
1920 pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
1921 struct rte_table_action_sym_crypto_config *cfg,
/* Crypto op lives inside the mbuf private/metadata area. */
1924 struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
1925 RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
1926 struct rte_crypto_op *op = &crypto_op->op;
1927 struct rte_crypto_sym_op *sym = op->sym;
/* Offsets here are measured from the start of the mbuf structure. */
1928 uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
1929 uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
1931 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1932 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1933 op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
1934 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1937 sym->session = data->session;
1939 /** pad the packet */
1940 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
/* Round payload up to the block size and reserve room for digest. */
1941 uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
1942 data->block_size) - payload_len;
1944 if (unlikely(rte_pktmbuf_append(mbuf, append_len +
1945 data->digest_len) == NULL))
1948 payload_len += append_len;
/* Decrypt path: trailing digest is not part of the payload. */
1950 payload_len -= data->digest_len;
1952 if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
1953 /** prepare cipher op */
1954 uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
1956 sym->cipher.data.length = payload_len;
1957 sym->cipher.data.offset = data->data_offset - pkt_offset;
1959 if (data->cipher_auth.cipher_iv_update_len) {
1960 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1961 data->cipher_auth.cipher_iv_data_offset
1964 /** For encryption, update the pkt iv field, otherwise
1965 * update the iv_aad_field
1967 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1968 rte_memcpy(pkt_iv, data->iv_aad_data,
1969 data->cipher_auth.cipher_iv_update_len);
1971 rte_memcpy(data->iv_aad_data, pkt_iv,
1972 data->cipher_auth.cipher_iv_update_len);
/* Copy the working IV into the op's inline IV buffer. */
1976 rte_memcpy(iv, data->iv_aad_data,
1977 data->cipher_auth.cipher_iv_len);
1980 if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
1981 /** authentication always start from IP header. */
1982 sym->auth.data.offset = ip_offset - pkt_offset;
1983 sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
/* Digest sits at the very end of the packet data. */
1985 sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
1986 uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
1988 sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
1989 rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
1991 if (data->cipher_auth.auth_iv_update_len) {
1992 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
1993 data->cipher_auth.auth_iv_data_offset
/* Auth IV is stored after the cipher IV in the scratch buffer. */
1995 uint8_t *data_iv = data->iv_aad_data +
1996 data->cipher_auth.cipher_iv_len;
1998 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1999 rte_memcpy(pkt_iv, data_iv,
2000 data->cipher_auth.auth_iv_update_len);
2002 rte_memcpy(data_iv, pkt_iv,
2003 data->cipher_auth.auth_iv_update_len);
2006 if (data->cipher_auth.auth_iv_len) {
2007 /** prepare cipher op */
2008 uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
2010 rte_memcpy(iv, data->iv_aad_data +
2011 data->cipher_auth.cipher_iv_len,
2012 data->cipher_auth.auth_iv_len);
2016 if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
2017 uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
2018 uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
2020 sym->aead.aad.data = aad;
2021 sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
2022 aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
2023 sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
2024 uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
2026 sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
2027 rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
2028 sym->aead.data.offset = data->data_offset - pkt_offset;
2029 sym->aead.data.length = payload_len;
2031 if (data->aead.iv_update_len) {
2032 uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
2033 data->aead.iv_data_offset + ip_offset);
2034 uint8_t *data_iv = data->iv_aad_data;
2036 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2037 rte_memcpy(pkt_iv, data_iv,
2038 data->aead.iv_update_len);
2040 rte_memcpy(data_iv, pkt_iv,
2041 data->aead.iv_update_len);
2044 rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
2046 if (data->aead.aad_update_len) {
2047 uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
2048 data->aead.aad_data_offset + ip_offset);
2049 uint8_t *data_aad = data->iv_aad_data +
/* NOTE(review): this AAD-update branch copies
 * data->aead.iv_update_len bytes; it looks like it should be
 * data->aead.aad_update_len (copy-paste from the IV branch) —
 * confirm against upstream before changing. */
2052 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2053 rte_memcpy(pkt_aad, data_aad,
2054 data->aead.iv_update_len);
2056 rte_memcpy(data_aad, pkt_aad,
2057 data->aead.iv_update_len);
2060 rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
2061 data->aead.aad_len);
2068 * RTE_TABLE_ACTION_TAG
2072 } __attribute__((__packed__));
/* Store the per-entry tag value (body line elided in this chunk). */
2075 tag_apply(struct tag_data *data,
2076 struct rte_table_action_tag_params *p)
/*
 * Tag one packet: write the entry's tag into the mbuf FDIR hash field
 * and mark the FDIR flags so consumers know the tag is valid.
 */
2082 static __rte_always_inline void
2083 pkt_work_tag(struct rte_mbuf *mbuf,
2084 struct tag_data *data)
2086 mbuf->hash.fdir.hi = data->tag;
2087 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
/* Four-packet unrolled variant of pkt_work_tag(). */
2090 static __rte_always_inline void
2091 pkt4_work_tag(struct rte_mbuf *mbuf0,
2092 struct rte_mbuf *mbuf1,
2093 struct rte_mbuf *mbuf2,
2094 struct rte_mbuf *mbuf3,
2095 struct tag_data *data0,
2096 struct tag_data *data1,
2097 struct tag_data *data2,
2098 struct tag_data *data3)
2100 mbuf0->hash.fdir.hi = data0->tag;
2101 mbuf1->hash.fdir.hi = data1->tag;
2102 mbuf2->hash.fdir.hi = data2->tag;
2103 mbuf3->hash.fdir.hi = data3->tag;
2105 mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2106 mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2107 mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2108 mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2112 * RTE_TABLE_ACTION_DECAP
2116 } __attribute__((__packed__));
/* Store the per-entry decap byte count (body line elided here). */
2119 decap_apply(struct decap_data *data,
2120 struct rte_table_action_decap_params *p)
/*
 * Decapsulate one packet by stripping data->n bytes from the front:
 * advance data_off and shrink data_len/pkt_len accordingly.
 */
2126 static __rte_always_inline void
2127 pkt_work_decap(struct rte_mbuf *mbuf,
2128 struct decap_data *data)
2130 uint16_t data_off = mbuf->data_off;
2131 uint16_t data_len = mbuf->data_len;
2132 uint32_t pkt_len = mbuf->pkt_len;
2133 uint16_t n = data->n;
2135 mbuf->data_off = data_off + n;
2136 mbuf->data_len = data_len - n;
2137 mbuf->pkt_len = pkt_len - n;
/* Four-packet unrolled variant of pkt_work_decap(). */
2140 static __rte_always_inline void
2141 pkt4_work_decap(struct rte_mbuf *mbuf0,
2142 struct rte_mbuf *mbuf1,
2143 struct rte_mbuf *mbuf2,
2144 struct rte_mbuf *mbuf3,
2145 struct decap_data *data0,
2146 struct decap_data *data1,
2147 struct decap_data *data2,
2148 struct decap_data *data3)
/* Load all fields first to keep the stores independent. */
2150 uint16_t data_off0 = mbuf0->data_off;
2151 uint16_t data_len0 = mbuf0->data_len;
2152 uint32_t pkt_len0 = mbuf0->pkt_len;
2154 uint16_t data_off1 = mbuf1->data_off;
2155 uint16_t data_len1 = mbuf1->data_len;
2156 uint32_t pkt_len1 = mbuf1->pkt_len;
2158 uint16_t data_off2 = mbuf2->data_off;
2159 uint16_t data_len2 = mbuf2->data_len;
2160 uint32_t pkt_len2 = mbuf2->pkt_len;
2162 uint16_t data_off3 = mbuf3->data_off;
2163 uint16_t data_len3 = mbuf3->data_len;
2164 uint32_t pkt_len3 = mbuf3->pkt_len;
2166 uint16_t n0 = data0->n;
2167 uint16_t n1 = data1->n;
2168 uint16_t n2 = data2->n;
2169 uint16_t n3 = data3->n;
2171 mbuf0->data_off = data_off0 + n0;
2172 mbuf0->data_len = data_len0 - n0;
2173 mbuf0->pkt_len = pkt_len0 - n0;
2175 mbuf1->data_off = data_off1 + n1;
2176 mbuf1->data_len = data_len1 - n1;
2177 mbuf1->pkt_len = pkt_len1 - n1;
2179 mbuf2->data_off = data_off2 + n2;
2180 mbuf2->data_len = data_len2 - n2;
2181 mbuf2->pkt_len = pkt_len2 - n2;
2183 mbuf3->data_off = data_off3 + n3;
2184 mbuf3->data_len = data_len3 - n3;
2185 mbuf3->pkt_len = pkt_len3 - n3;
/*
 * Whitelist check: is @action one of the action types this library
 * implements? (The return statements are elided in this chunk.)
 */
2192 action_valid(enum rte_table_action_type action)
2195 case RTE_TABLE_ACTION_FWD:
2196 case RTE_TABLE_ACTION_LB:
2197 case RTE_TABLE_ACTION_MTR:
2198 case RTE_TABLE_ACTION_TM:
2199 case RTE_TABLE_ACTION_ENCAP:
2200 case RTE_TABLE_ACTION_NAT:
2201 case RTE_TABLE_ACTION_TTL:
2202 case RTE_TABLE_ACTION_STATS:
2203 case RTE_TABLE_ACTION_TIME:
2204 case RTE_TABLE_ACTION_SYM_CRYPTO:
2205 case RTE_TABLE_ACTION_TAG:
2206 case RTE_TABLE_ACTION_DECAP:
/* Upper bound on action type ids; sized to fit the 64-bit action_mask. */
2214 #define RTE_TABLE_ACTION_MAX 64
/*
 * ap_config: registered-action bitmask plus one config slot per
 * configurable action type.
 */
2217 uint64_t action_mask;
2218 struct rte_table_action_common_config common;
2219 struct rte_table_action_lb_config lb;
2220 struct rte_table_action_mtr_config mtr;
2221 struct rte_table_action_tm_config tm;
2222 struct rte_table_action_encap_config encap;
2223 struct rte_table_action_nat_config nat;
2224 struct rte_table_action_ttl_config ttl;
2225 struct rte_table_action_stats_config stats;
2226 struct rte_table_action_sym_crypto_config sym_crypto;
/*
 * Size of the per-action configuration struct; actions without a config
 * (FWD, TIME, TAG, DECAP) fall through to the elided default.
 */
2230 action_cfg_size(enum rte_table_action_type action)
2233 case RTE_TABLE_ACTION_LB:
2234 return sizeof(struct rte_table_action_lb_config);
2235 case RTE_TABLE_ACTION_MTR:
2236 return sizeof(struct rte_table_action_mtr_config);
2237 case RTE_TABLE_ACTION_TM:
2238 return sizeof(struct rte_table_action_tm_config);
2239 case RTE_TABLE_ACTION_ENCAP:
2240 return sizeof(struct rte_table_action_encap_config);
2241 case RTE_TABLE_ACTION_NAT:
2242 return sizeof(struct rte_table_action_nat_config);
2243 case RTE_TABLE_ACTION_TTL:
2244 return sizeof(struct rte_table_action_ttl_config);
2245 case RTE_TABLE_ACTION_STATS:
2246 return sizeof(struct rte_table_action_stats_config);
2247 case RTE_TABLE_ACTION_SYM_CRYPTO:
2248 return sizeof(struct rte_table_action_sym_crypto_config);
/* Return a pointer to the config slot for @type inside @ap_config. */
2255 action_cfg_get(struct ap_config *ap_config,
2256 enum rte_table_action_type type)
2259 case RTE_TABLE_ACTION_LB:
2260 return &ap_config->lb;
2262 case RTE_TABLE_ACTION_MTR:
2263 return &ap_config->mtr;
2265 case RTE_TABLE_ACTION_TM:
2266 return &ap_config->tm;
2268 case RTE_TABLE_ACTION_ENCAP:
2269 return &ap_config->encap;
2271 case RTE_TABLE_ACTION_NAT:
2272 return &ap_config->nat;
2274 case RTE_TABLE_ACTION_TTL:
2275 return &ap_config->ttl;
2277 case RTE_TABLE_ACTION_STATS:
2278 return &ap_config->stats;
2280 case RTE_TABLE_ACTION_SYM_CRYPTO:
2281 return &ap_config->sym_crypto;
/*
 * Copy @action_cfg into the slot for @type and mark the action as
 * registered in the action mask.
 */
2288 action_cfg_set(struct ap_config *ap_config,
2289 enum rte_table_action_type type,
2292 void *dst = action_cfg_get(ap_config, type);
2295 memcpy(dst, action_cfg, action_cfg_size(type));
2297 ap_config->action_mask |= 1LLU << type;
/* ap_data: byte offset of each action's data within a table entry. */
2301 size_t offset[RTE_TABLE_ACTION_MAX];
/*
 * Size of the per-table-entry data for @action; MTR/ENCAP/NAT sizes
 * depend on their registered configuration.
 */
2306 action_data_size(enum rte_table_action_type action,
2307 struct ap_config *ap_config)
2310 case RTE_TABLE_ACTION_FWD:
2311 return sizeof(struct fwd_data);
2313 case RTE_TABLE_ACTION_LB:
2314 return sizeof(struct lb_data);
2316 case RTE_TABLE_ACTION_MTR:
2317 return mtr_data_size(&ap_config->mtr);
2319 case RTE_TABLE_ACTION_TM:
2320 return sizeof(struct tm_data);
2322 case RTE_TABLE_ACTION_ENCAP:
2323 return encap_data_size(&ap_config->encap);
2325 case RTE_TABLE_ACTION_NAT:
2326 return nat_data_size(&ap_config->nat,
2327 &ap_config->common);
2329 case RTE_TABLE_ACTION_TTL:
2330 return sizeof(struct ttl_data);
2332 case RTE_TABLE_ACTION_STATS:
2333 return sizeof(struct stats_data);
2335 case RTE_TABLE_ACTION_TIME:
2336 return sizeof(struct time_data);
2338 case RTE_TABLE_ACTION_SYM_CRYPTO:
2339 return (sizeof(struct sym_crypto_data));
2341 case RTE_TABLE_ACTION_TAG:
2342 return sizeof(struct tag_data);
2344 case RTE_TABLE_ACTION_DECAP:
2345 return sizeof(struct decap_data);
/*
 * Lay out the per-entry data: assign each registered action (in
 * ascending type order) a byte offset and record the total size.
 */
2354 action_data_offset_set(struct ap_data *ap_data,
2355 struct ap_config *ap_config)
2357 uint64_t action_mask = ap_config->action_mask;
2361 memset(ap_data->offset, 0, sizeof(ap_data->offset));
2364 for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
2365 if (action_mask & (1LLU << action)) {
2366 ap_data->offset[action] = offset;
2367 offset += action_data_size((enum rte_table_action_type)action,
2371 ap_data->total_size = offset;
/* A profile = registered action configs + computed data layout. */
2374 struct rte_table_action_profile {
2375 struct ap_config cfg;
2376 struct ap_data data;
/*
 * Allocate and initialize a profile from the common config.
 * (NULL checks and return lines are elided in this chunk.)
 */
2380 struct rte_table_action_profile *
2381 rte_table_action_profile_create(struct rte_table_action_common_config *common)
2383 struct rte_table_action_profile *ap;
2385 /* Check input arguments */
2389 /* Memory allocation */
2390 ap = calloc(1, sizeof(struct rte_table_action_profile));
2394 /* Initialization */
2395 memcpy(&ap->cfg.common, common, sizeof(*common));
/*
 * Register one action type with a profile: validate arguments (type
 * valid, not already registered, config presence matches whether the
 * action needs one), run the type-specific config check, then store
 * the config. Error-return lines are elided in this chunk.
 */
2402 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
2403 enum rte_table_action_type type,
2404 void *action_config)
2408 /* Check input arguments */
2409 if ((profile == NULL) ||
2411 (action_valid(type) == 0) ||
2412 (profile->cfg.action_mask & (1LLU << type)) ||
2413 ((action_cfg_size(type) == 0) && action_config) ||
2414 (action_cfg_size(type) && (action_config == NULL)))
2418 case RTE_TABLE_ACTION_LB:
2419 status = lb_cfg_check(action_config);
2422 case RTE_TABLE_ACTION_MTR:
2423 status = mtr_cfg_check(action_config);
2426 case RTE_TABLE_ACTION_TM:
2427 status = tm_cfg_check(action_config);
2430 case RTE_TABLE_ACTION_ENCAP:
2431 status = encap_cfg_check(action_config);
2434 case RTE_TABLE_ACTION_NAT:
2435 status = nat_cfg_check(action_config);
2438 case RTE_TABLE_ACTION_TTL:
2439 status = ttl_cfg_check(action_config);
2442 case RTE_TABLE_ACTION_STATS:
2443 status = stats_cfg_check(action_config);
2446 case RTE_TABLE_ACTION_SYM_CRYPTO:
2447 status = sym_crypto_cfg_check(action_config);
/* All checks passed: persist the config and set the mask bit. */
2459 action_cfg_set(&profile->cfg, type, action_config);
/*
 * Freeze a profile: FWD is made mandatory, the per-entry data layout is
 * computed, and no further registrations are accepted.
 */
2465 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
2467 if (profile->frozen)
2470 profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
2471 action_data_offset_set(&profile->data, &profile->cfg);
2472 profile->frozen = 1;
/* Release a profile (free() line elided in this chunk). */
2478 rte_table_action_profile_free(struct rte_table_action_profile *profile)
2480 if (profile == NULL)
/* Max meter profiles cached per action object. */
2490 #define METER_PROFILES_MAX 32
/*
 * Runtime action object: snapshot of the frozen profile plus shared
 * DSCP table and meter profile storage.
 */
2492 struct rte_table_action {
2493 struct ap_config cfg;
2494 struct ap_data data;
2495 struct dscp_table_data dscp_table;
2496 struct meter_profile_data mp[METER_PROFILES_MAX];
/*
 * Instantiate an action object from a frozen profile: cache-aligned
 * zeroed allocation, then copy the profile's config and data layout.
 * (NULL-return lines are elided in this chunk.)
 */
2499 struct rte_table_action *
2500 rte_table_action_create(struct rte_table_action_profile *profile,
2503 struct rte_table_action *action;
2505 /* Check input arguments */
2506 if ((profile == NULL) ||
2507 (profile->frozen == 0))
2510 /* Memory allocation */
2511 action = rte_zmalloc_socket(NULL,
2512 sizeof(struct rte_table_action),
2513 RTE_CACHE_LINE_SIZE,
2518 /* Initialization */
2519 memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
2520 memcpy(&action->data, &profile->data, sizeof(profile->data));
/*
 * Locate action @type's data inside a table entry: base pointer plus
 * the offset computed at profile-freeze time.
 */
2525 static __rte_always_inline void *
2526 action_data_get(void *data,
2527 struct rte_table_action *action,
2528 enum rte_table_action_type type)
2530 size_t offset = action->data.offset[type];
2531 uint8_t *data_bytes = data;
2533 return &data_bytes[offset];
/*
 * Write one action's parameters into a table entry: validate that the
 * action is registered, locate its data slot, and dispatch to the
 * type-specific *_apply() helper. Some argument lines are elided.
 */
2537 rte_table_action_apply(struct rte_table_action *action,
2539 enum rte_table_action_type type,
2540 void *action_params)
2544 /* Check input arguments */
2545 if ((action == NULL) ||
2547 (action_valid(type) == 0) ||
2548 ((action->cfg.action_mask & (1LLU << type)) == 0) ||
2549 (action_params == NULL))
2553 action_data = action_data_get(data, action, type);
2556 case RTE_TABLE_ACTION_FWD:
2557 return fwd_apply(action_data,
2560 case RTE_TABLE_ACTION_LB:
2561 return lb_apply(action_data,
2564 case RTE_TABLE_ACTION_MTR:
2565 return mtr_apply(action_data,
2569 RTE_DIM(action->mp));
2571 case RTE_TABLE_ACTION_TM:
2572 return tm_apply(action_data,
2576 case RTE_TABLE_ACTION_ENCAP:
2577 return encap_apply(action_data,
2580 &action->cfg.common);
2582 case RTE_TABLE_ACTION_NAT:
2583 return nat_apply(action_data,
2585 &action->cfg.common);
2587 case RTE_TABLE_ACTION_TTL:
2588 return ttl_apply(action_data,
2591 case RTE_TABLE_ACTION_STATS:
2592 return stats_apply(action_data,
2595 case RTE_TABLE_ACTION_TIME:
2596 return time_apply(action_data,
2599 case RTE_TABLE_ACTION_SYM_CRYPTO:
2600 return sym_crypto_apply(action_data,
2601 &action->cfg.sym_crypto,
2604 case RTE_TABLE_ACTION_TAG:
2605 return tag_apply(action_data,
2608 case RTE_TABLE_ACTION_DECAP:
2609 return decap_apply(action_data,
/*
 * Update entries of the shared DSCP translation table (used by the MTR
 * and TM actions); only entries selected by dscp_mask are touched.
 */
2618 rte_table_action_dscp_table_update(struct rte_table_action *action,
2620 struct rte_table_action_dscp_table *table)
2624 /* Check input arguments */
2625 if ((action == NULL) ||
2626 ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2627 (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
2632 for (i = 0; i < RTE_DIM(table->entry); i++) {
2633 struct dscp_table_entry_data *data =
2634 &action->dscp_table.entry[i];
2635 struct rte_table_action_dscp_table_entry *entry =
/* Skip DSCP values not selected by the caller's mask. */
2638 if ((dscp_mask & (1LLU << i)) == 0)
2641 data->color = entry->color;
2642 data->tc = entry->tc_id;
2643 data->tc_queue = entry->tc_queue_id;
/*
 * Add a trTCM meter profile under @meter_profile_id: reject duplicates,
 * find a free slot, configure the rte_meter profile, record the id.
 * Error-return lines are elided in this chunk.
 */
2650 rte_table_action_meter_profile_add(struct rte_table_action *action,
2651 uint32_t meter_profile_id,
2652 struct rte_table_action_meter_profile *profile)
2654 struct meter_profile_data *mp_data;
2657 /* Check input arguments */
2658 if ((action == NULL) ||
2659 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
/* Only the trTCM algorithm is supported here. */
2663 if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
2666 mp_data = meter_profile_data_find(action->mp,
2667 RTE_DIM(action->mp),
2672 mp_data = meter_profile_data_find_unused(action->mp,
2673 RTE_DIM(action->mp));
2677 /* Install new profile */
2678 status = rte_meter_trtcm_profile_config(&mp_data->profile,
2683 mp_data->profile_id = meter_profile_id;
/*
 * Remove the meter profile registered under @meter_profile_id (the
 * slot-clearing line is elided in this chunk).
 */
2690 rte_table_action_meter_profile_delete(struct rte_table_action *action,
2691 uint32_t meter_profile_id)
2693 struct meter_profile_data *mp_data;
2695 /* Check input arguments */
2696 if ((action == NULL) ||
2697 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
2700 mp_data = meter_profile_data_find(action->mp,
2701 RTE_DIM(action->mp),
2706 /* Uninstall profile */
/*
 * Read (and optionally clear) per-color meter counters for the traffic
 * classes selected by tc_mask out of a table entry's MTR data.
 * Only packet counters are maintained (n_bytes_valid = 0).
 */
2713 rte_table_action_meter_read(struct rte_table_action *action,
2716 struct rte_table_action_mtr_counters *stats,
2719 struct mtr_trtcm_data *mtr_data;
2722 /* Check input arguments */
2723 if ((action == NULL) ||
2724 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2726 (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
2729 mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
/* Copy-out phase: one counter set per selected traffic class. */
2733 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2734 struct rte_table_action_mtr_counters_tc *dst =
2736 struct mtr_trtcm_data *src = &mtr_data[i];
2738 if ((tc_mask & (1 << i)) == 0)
2741 dst->n_packets[RTE_COLOR_GREEN] =
2742 mtr_trtcm_data_stats_get(src, RTE_COLOR_GREEN);
2744 dst->n_packets[RTE_COLOR_YELLOW] =
2745 mtr_trtcm_data_stats_get(src, RTE_COLOR_YELLOW);
2747 dst->n_packets[RTE_COLOR_RED] =
2748 mtr_trtcm_data_stats_get(src, RTE_COLOR_RED);
2750 dst->n_packets_valid = 1;
2751 dst->n_bytes_valid = 0;
2754 stats->tc_mask = tc_mask;
/* Optional clear phase (guard line elided in this chunk). */
2759 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2760 struct mtr_trtcm_data *src = &mtr_data[i];
2762 if ((tc_mask & (1 << i)) == 0)
2765 mtr_trtcm_data_stats_reset(src, RTE_COLOR_GREEN);
2766 mtr_trtcm_data_stats_reset(src, RTE_COLOR_YELLOW);
2767 mtr_trtcm_data_stats_reset(src, RTE_COLOR_RED);
/*
 * Read (and optionally clear) the TTL-drop packet counter from a table
 * entry's TTL data. Guard lines for the optional phases are elided.
 */
2775 rte_table_action_ttl_read(struct rte_table_action *action,
2777 struct rte_table_action_ttl_counters *stats,
2780 struct ttl_data *ttl_data;
2782 /* Check input arguments */
2783 if ((action == NULL) ||
2784 ((action->cfg.action_mask &
2785 (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
2789 ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
2793 stats->n_packets = TTL_STATS_READ(ttl_data);
2797 TTL_STATS_RESET(ttl_data);
/*
 * Read (and optionally clear) the packet/byte counters from a table
 * entry's STATS data. Guard lines for the optional phases are elided.
 */
2803 rte_table_action_stats_read(struct rte_table_action *action,
2805 struct rte_table_action_stats_counters *stats,
2808 struct stats_data *stats_data;
2810 /* Check input arguments */
2811 if ((action == NULL) ||
2812 ((action->cfg.action_mask &
2813 (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
2817 stats_data = action_data_get(data, action,
2818 RTE_TABLE_ACTION_STATS);
2822 stats->n_packets = stats_data->n_packets;
2823 stats->n_bytes = stats_data->n_bytes;
2824 stats->n_packets_valid = 1;
2825 stats->n_bytes_valid = 1;
2830 stats_data->n_packets = 0;
2831 stats_data->n_bytes = 0;
/* Read the last-hit timestamp from a table entry's TIME data. */
2838 rte_table_action_time_read(struct rte_table_action *action,
2840 uint64_t *timestamp)
2842 struct time_data *time_data;
2844 /* Check input arguments */
2845 if ((action == NULL) ||
2846 ((action->cfg.action_mask &
2847 (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
2849 (timestamp == NULL))
2852 time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
2855 *timestamp = time_data->time;
/*
 * Return the cryptodev session stored in a table entry's SYM_CRYPTO
 * data (NULL-return lines are elided in this chunk).
 */
2860 struct rte_cryptodev_sym_session *
2861 rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
2864 struct sym_crypto_data *sym_crypto_data;
2866 /* Check input arguments */
2867 if ((action == NULL) ||
2868 ((action->cfg.action_mask &
2869 (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
2873 sym_crypto_data = action_data_get(data, action,
2874 RTE_TABLE_ACTION_SYM_CRYPTO);
2876 return sym_crypto_data->session;
2879 static __rte_always_inline uint64_t
2880 pkt_work(struct rte_mbuf *mbuf,
2881 struct rte_pipeline_table_entry *table_entry,
2883 struct rte_table_action *action,
2884 struct ap_config *cfg)
2886 uint64_t drop_mask = 0;
2888 uint32_t ip_offset = action->cfg.common.ip_offset;
2889 void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
2892 uint16_t total_length;
2894 if (cfg->common.ip_version) {
2895 struct rte_ipv4_hdr *hdr = ip;
2897 dscp = hdr->type_of_service >> 2;
2898 total_length = rte_ntohs(hdr->total_length);
2900 struct rte_ipv6_hdr *hdr = ip;
2902 dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
2903 total_length = rte_ntohs(hdr->payload_len) +
2904 sizeof(struct rte_ipv6_hdr);
2907 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2909 action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
2915 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2917 action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
2919 drop_mask |= pkt_work_mtr(mbuf,
2921 &action->dscp_table,
2928 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2930 action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
2934 &action->dscp_table,
2938 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2939 void *data = action_data_get(table_entry,
2941 RTE_TABLE_ACTION_DECAP);
2943 pkt_work_decap(mbuf, data);
2946 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2948 action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
2950 pkt_work_encap(mbuf,
2958 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2960 action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
2962 if (cfg->common.ip_version)
2963 pkt_ipv4_work_nat(ip, data, &cfg->nat);
2965 pkt_ipv6_work_nat(ip, data, &cfg->nat);
2968 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2970 action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
2972 if (cfg->common.ip_version)
2973 drop_mask |= pkt_ipv4_work_ttl(ip, data);
2975 drop_mask |= pkt_ipv6_work_ttl(ip, data);
2978 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2980 action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
2982 pkt_work_stats(data, total_length);
2985 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2987 action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
2989 pkt_work_time(data, time);
2992 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2993 void *data = action_data_get(table_entry, action,
2994 RTE_TABLE_ACTION_SYM_CRYPTO);
2996 drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
3000 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3001 void *data = action_data_get(table_entry,
3003 RTE_TABLE_ACTION_TAG);
3005 pkt_work_tag(mbuf, data);
3011 static __rte_always_inline uint64_t
3012 pkt4_work(struct rte_mbuf **mbufs,
3013 struct rte_pipeline_table_entry **table_entries,
3015 struct rte_table_action *action,
3016 struct ap_config *cfg)
3018 uint64_t drop_mask0 = 0;
3019 uint64_t drop_mask1 = 0;
3020 uint64_t drop_mask2 = 0;
3021 uint64_t drop_mask3 = 0;
3023 struct rte_mbuf *mbuf0 = mbufs[0];
3024 struct rte_mbuf *mbuf1 = mbufs[1];
3025 struct rte_mbuf *mbuf2 = mbufs[2];
3026 struct rte_mbuf *mbuf3 = mbufs[3];
3028 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
3029 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
3030 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
3031 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
3033 uint32_t ip_offset = action->cfg.common.ip_offset;
3034 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
3035 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
3036 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
3037 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
3039 uint32_t dscp0, dscp1, dscp2, dscp3;
3040 uint16_t total_length0, total_length1, total_length2, total_length3;
3042 if (cfg->common.ip_version) {
3043 struct rte_ipv4_hdr *hdr0 = ip0;
3044 struct rte_ipv4_hdr *hdr1 = ip1;
3045 struct rte_ipv4_hdr *hdr2 = ip2;
3046 struct rte_ipv4_hdr *hdr3 = ip3;
3048 dscp0 = hdr0->type_of_service >> 2;
3049 dscp1 = hdr1->type_of_service >> 2;
3050 dscp2 = hdr2->type_of_service >> 2;
3051 dscp3 = hdr3->type_of_service >> 2;
3053 total_length0 = rte_ntohs(hdr0->total_length);
3054 total_length1 = rte_ntohs(hdr1->total_length);
3055 total_length2 = rte_ntohs(hdr2->total_length);
3056 total_length3 = rte_ntohs(hdr3->total_length);
3058 struct rte_ipv6_hdr *hdr0 = ip0;
3059 struct rte_ipv6_hdr *hdr1 = ip1;
3060 struct rte_ipv6_hdr *hdr2 = ip2;
3061 struct rte_ipv6_hdr *hdr3 = ip3;
3063 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
3064 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
3065 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
3066 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;
3068 total_length0 = rte_ntohs(hdr0->payload_len) +
3069 sizeof(struct rte_ipv6_hdr);
3070 total_length1 = rte_ntohs(hdr1->payload_len) +
3071 sizeof(struct rte_ipv6_hdr);
3072 total_length2 = rte_ntohs(hdr2->payload_len) +
3073 sizeof(struct rte_ipv6_hdr);
3074 total_length3 = rte_ntohs(hdr3->payload_len) +
3075 sizeof(struct rte_ipv6_hdr);
3078 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
3080 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
3082 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
3084 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
3086 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
3105 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
3107 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
3109 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
3111 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
3113 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
3115 drop_mask0 |= pkt_work_mtr(mbuf0,
3117 &action->dscp_table,
3123 drop_mask1 |= pkt_work_mtr(mbuf1,
3125 &action->dscp_table,
3131 drop_mask2 |= pkt_work_mtr(mbuf2,
3133 &action->dscp_table,
3139 drop_mask3 |= pkt_work_mtr(mbuf3,
3141 &action->dscp_table,
3148 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
3150 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
3152 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
3154 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
3156 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
3160 &action->dscp_table,
3165 &action->dscp_table,
3170 &action->dscp_table,
3175 &action->dscp_table,
3179 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
3180 void *data0 = action_data_get(table_entry0,
3182 RTE_TABLE_ACTION_DECAP);
3183 void *data1 = action_data_get(table_entry1,
3185 RTE_TABLE_ACTION_DECAP);
3186 void *data2 = action_data_get(table_entry2,
3188 RTE_TABLE_ACTION_DECAP);
3189 void *data3 = action_data_get(table_entry3,
3191 RTE_TABLE_ACTION_DECAP);
3193 pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
3194 data0, data1, data2, data3);
3197 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
3199 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
3201 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
3203 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
3205 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
3207 pkt_work_encap(mbuf0,
3214 pkt_work_encap(mbuf1,
3221 pkt_work_encap(mbuf2,
3228 pkt_work_encap(mbuf3,
3236 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
3238 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
3240 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
3242 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
3244 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
3246 if (cfg->common.ip_version) {
3247 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
3248 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
3249 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
3250 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
3252 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
3253 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
3254 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
3255 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
3259 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
3261 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
3263 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
3265 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
3267 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
3269 if (cfg->common.ip_version) {
3270 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
3271 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
3272 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
3273 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
3275 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
3276 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
3277 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
3278 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
3282 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
3284 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
3286 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
3288 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
3290 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
3292 pkt_work_stats(data0, total_length0);
3293 pkt_work_stats(data1, total_length1);
3294 pkt_work_stats(data2, total_length2);
3295 pkt_work_stats(data3, total_length3);
3298 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
3300 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
3302 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
3304 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
3306 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
3308 pkt_work_time(data0, time);
3309 pkt_work_time(data1, time);
3310 pkt_work_time(data2, time);
3311 pkt_work_time(data3, time);
3314 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
3315 void *data0 = action_data_get(table_entry0, action,
3316 RTE_TABLE_ACTION_SYM_CRYPTO);
3317 void *data1 = action_data_get(table_entry1, action,
3318 RTE_TABLE_ACTION_SYM_CRYPTO);
3319 void *data2 = action_data_get(table_entry2, action,
3320 RTE_TABLE_ACTION_SYM_CRYPTO);
3321 void *data3 = action_data_get(table_entry3, action,
3322 RTE_TABLE_ACTION_SYM_CRYPTO);
3324 drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
3326 drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
3328 drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
3330 drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
3334 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3335 void *data0 = action_data_get(table_entry0,
3337 RTE_TABLE_ACTION_TAG);
3338 void *data1 = action_data_get(table_entry1,
3340 RTE_TABLE_ACTION_TAG);
3341 void *data2 = action_data_get(table_entry2,
3343 RTE_TABLE_ACTION_TAG);
3344 void *data3 = action_data_get(table_entry3,
3346 RTE_TABLE_ACTION_TAG);
3348 pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
3349 data0, data1, data2, data3);
3358 static __rte_always_inline int
3359 ah(struct rte_pipeline *p,
3360 struct rte_mbuf **pkts,
3362 struct rte_pipeline_table_entry **entries,
3363 struct rte_table_action *action,
3364 struct ap_config *cfg)
3366 uint64_t pkts_drop_mask = 0;
3369 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
3370 (1LLU << RTE_TABLE_ACTION_TIME)))
3373 if ((pkts_mask & (pkts_mask + 1)) == 0) {
3374 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
3377 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3380 drop_mask = pkt4_work(&pkts[i],
3386 pkts_drop_mask |= drop_mask << i;
3389 for ( ; i < n_pkts; i++) {
3392 drop_mask = pkt_work(pkts[i],
3398 pkts_drop_mask |= drop_mask << i;
3401 for ( ; pkts_mask; ) {
3402 uint32_t pos = __builtin_ctzll(pkts_mask);
3403 uint64_t pkt_mask = 1LLU << pos;
3406 drop_mask = pkt_work(pkts[pos],
3412 pkts_mask &= ~pkt_mask;
3413 pkts_drop_mask |= drop_mask << pos;
3416 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
3422 ah_default(struct rte_pipeline *p,
3423 struct rte_mbuf **pkts,
3425 struct rte_pipeline_table_entry **entries,
3428 struct rte_table_action *action = arg;
3438 static rte_pipeline_table_action_handler_hit
3439 ah_selector(struct rte_table_action *action)
3441 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
3448 rte_table_action_table_params_get(struct rte_table_action *action,
3449 struct rte_pipeline_table_params *params)
3451 rte_pipeline_table_action_handler_hit f_action_hit;
3452 uint32_t total_size;
3454 /* Check input arguments */
3455 if ((action == NULL) ||
3459 f_action_hit = ah_selector(action);
3460 total_size = rte_align32pow2(action->data.total_size);
3462 /* Fill in params */
3463 params->f_action_hit = f_action_hit;
3464 params->f_action_miss = NULL;
3465 params->arg_ah = (f_action_hit) ? action : NULL;
3466 params->action_data_size = total_size -
3467 sizeof(struct rte_pipeline_table_entry);
3473 rte_table_action_free(struct rte_table_action *action)