/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
#define __INCLUDE_PIPELINE_ACTIONS_COMMON_H__

#include <stdint.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_pipeline.h>
/*
 * Generate a pipeline input port action handler named f_ah.
 * The generated handler walks the packet burst four packets at a time with
 * f_pkt4_work() and finishes the remaining 0..3 packets with f_pkt_work().
 * The pipeline pointer is unused (no drop/hijack performed here).
 */
#define PIPELINE_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work)		\
static int								\
f_ah(									\
	__rte_unused struct rte_pipeline *p,				\
	struct rte_mbuf **pkts,						\
	uint32_t n_pkts,						\
	void *arg)							\
{									\
	uint32_t i;							\
									\
	/* Main loop: groups of 4 packets */				\
	for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)			\
		f_pkt4_work(&pkts[i], arg);				\
									\
	/* Tail: leftover 0..3 packets */				\
	for ( ; i < n_pkts; i++)					\
		f_pkt_work(pkts[i], arg);				\
									\
	return 0;							\
}
/*
 * Generate a pipeline input port action handler named f_ah that first
 * hijacks the whole input burst (removing every packet from further
 * pipeline processing), then runs f_pkt4_work() on groups of 4 packets
 * and f_pkt_work() on the leftover 0..3 packets.
 */
#define PIPELINE_PORT_IN_AH_HIJACK_ALL(f_ah, f_pkt_work, f_pkt4_work)	\
static int								\
f_ah(									\
	struct rte_pipeline *p,						\
	struct rte_mbuf **pkts,						\
	uint32_t n_pkts,						\
	void *arg)							\
{									\
	/* Dense mask with one bit set per input packet */		\
	uint64_t pkt_mask = RTE_LEN2MASK(n_pkts, uint64_t);		\
	uint32_t i;							\
									\
	/* Take all packets away from the pipeline */			\
	rte_pipeline_ah_packet_hijack(p, pkt_mask);			\
									\
	for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)			\
		f_pkt4_work(&pkts[i], arg);				\
									\
	for ( ; i < n_pkts; i++)					\
		f_pkt_work(pkts[i], arg);				\
									\
	return 0;							\
}
/*
 * Generate a table lookup-hit action handler named f_ah, with one table
 * entry per packet.
 * Fast path: when pkts_in_mask is dense (mask + 1 is a power of 2, i.e.
 * all set bits are contiguous from bit 0), process 4 packets per
 * iteration with f_pkt4_work().
 * Slow path: iterate the sparse bit mask one packet at a time with
 * f_pkt_work().
 */
#define PIPELINE_TABLE_AH_HIT(f_ah, f_pkt_work, f_pkt4_work)		\
static int								\
f_ah(									\
	__rte_unused struct rte_pipeline *p,				\
	struct rte_mbuf **pkts,						\
	uint64_t pkts_in_mask,						\
	struct rte_pipeline_table_entry **entries,			\
	void *arg)							\
{									\
	if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) {			\
		uint64_t n_pkts = __builtin_popcountll(pkts_in_mask);	\
		uint32_t i;						\
									\
		for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)		\
			f_pkt4_work(&pkts[i], &entries[i], arg);	\
									\
		for ( ; i < n_pkts; i++)				\
			f_pkt_work(pkts[i], entries[i], arg);		\
	} else								\
		for ( ; pkts_in_mask; ) {				\
			uint32_t pos = __builtin_ctzll(pkts_in_mask);	\
			uint64_t pkt_mask = 1LLU << pos;		\
									\
			pkts_in_mask &= ~pkt_mask;			\
			f_pkt_work(pkts[pos], entries[pos], arg);	\
		}							\
									\
	return 0;							\
}
/*
 * Generate a table lookup-miss action handler named f_ah. All packets
 * share the single table default entry.
 * Fast path: when pkts_in_mask is dense (mask + 1 is a power of 2),
 * process 4 packets per iteration with f_pkt4_work().
 * Slow path: iterate the sparse bit mask one packet at a time with
 * f_pkt_work().
 */
#define PIPELINE_TABLE_AH_MISS(f_ah, f_pkt_work, f_pkt4_work)		\
static int								\
f_ah(									\
	__rte_unused struct rte_pipeline *p,				\
	struct rte_mbuf **pkts,						\
	uint64_t pkts_in_mask,						\
	struct rte_pipeline_table_entry *entry,				\
	void *arg)							\
{									\
	if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) {			\
		uint64_t n_pkts = __builtin_popcountll(pkts_in_mask);	\
		uint32_t i;						\
									\
		for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)		\
			f_pkt4_work(&pkts[i], entry, arg);		\
									\
		for ( ; i < n_pkts; i++)				\
			f_pkt_work(pkts[i], entry, arg);		\
	} else								\
		for ( ; pkts_in_mask; ) {				\
			uint32_t pos = __builtin_ctzll(pkts_in_mask);	\
			uint64_t pkt_mask = 1LLU << pos;		\
									\
			pkts_in_mask &= ~pkt_mask;			\
			f_pkt_work(pkts[pos], entry, arg);		\
		}							\
									\
	return 0;							\
}
/*
 * Generate a table lookup-hit action handler named f_ah (one entry per
 * packet) whose work callbacks take a timestamp and return a drop mask.
 * Each f_pkt_work()/f_pkt4_work() returns a bit mask (relative to its
 * packet position) of packets to drop; these are XOR-folded out of
 * pkts_out_mask, and the accumulated difference against the original
 * pkts_mask is dropped in one rte_pipeline_ah_packet_drop() call.
 * Dense masks use the 4-packets-per-iteration fast path; sparse masks
 * are walked bit by bit.
 */
#define PIPELINE_TABLE_AH_HIT_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work)	\
static int								\
f_ah(									\
	struct rte_pipeline *p,						\
	struct rte_mbuf **pkts,						\
	uint64_t pkts_mask,						\
	struct rte_pipeline_table_entry **entries,			\
	void *arg)							\
{									\
	uint64_t pkts_in_mask = pkts_mask;				\
	uint64_t pkts_out_mask = pkts_mask;				\
	/* Single timestamp shared by the whole burst */		\
	uint64_t time = rte_rdtsc();					\
									\
	if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) {			\
		uint64_t n_pkts = __builtin_popcountll(pkts_in_mask);	\
		uint32_t i;						\
									\
		for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {		\
			uint64_t mask = f_pkt4_work(&pkts[i],		\
				&entries[i], arg, time);		\
			pkts_out_mask ^= mask << i;			\
		}							\
									\
		for ( ; i < n_pkts; i++) {				\
			uint64_t mask = f_pkt_work(pkts[i],		\
				entries[i], arg, time);			\
			pkts_out_mask ^= mask << i;			\
		}							\
	} else								\
		for ( ; pkts_in_mask; ) {				\
			uint32_t pos = __builtin_ctzll(pkts_in_mask);	\
			uint64_t pkt_mask = 1LLU << pos;		\
			uint64_t mask = f_pkt_work(pkts[pos],		\
				entries[pos], arg, time);		\
									\
			pkts_in_mask &= ~pkt_mask;			\
			pkts_out_mask ^= mask << pos;			\
		}							\
									\
	/* Drop exactly the packets the callbacks flagged */		\
	rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask);	\
									\
	return 0;							\
}
/*
 * Generate a table lookup-miss action handler named f_ah (all packets
 * share the single default entry) whose work callbacks take a timestamp
 * and return a drop mask.
 * Each f_pkt_work()/f_pkt4_work() returns a bit mask (relative to its
 * packet position) of packets to drop; these are XOR-folded out of
 * pkts_out_mask, and the accumulated difference against the original
 * pkts_mask is dropped in one rte_pipeline_ah_packet_drop() call.
 * Dense masks use the 4-packets-per-iteration fast path; sparse masks
 * are walked bit by bit.
 */
#define PIPELINE_TABLE_AH_MISS_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work)	\
static int								\
f_ah(									\
	struct rte_pipeline *p,						\
	struct rte_mbuf **pkts,						\
	uint64_t pkts_mask,						\
	struct rte_pipeline_table_entry *entry,				\
	void *arg)							\
{									\
	uint64_t pkts_in_mask = pkts_mask;				\
	uint64_t pkts_out_mask = pkts_mask;				\
	/* Single timestamp shared by the whole burst */		\
	uint64_t time = rte_rdtsc();					\
									\
	if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) {			\
		uint64_t n_pkts = __builtin_popcountll(pkts_in_mask);	\
		uint32_t i;						\
									\
		for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {		\
			uint64_t mask = f_pkt4_work(&pkts[i],		\
				entry, arg, time);			\
			pkts_out_mask ^= mask << i;			\
		}							\
									\
		for ( ; i < n_pkts; i++) {				\
			uint64_t mask = f_pkt_work(pkts[i], entry, arg, time);\
			pkts_out_mask ^= mask << i;			\
		}							\
	} else								\
		for ( ; pkts_in_mask; ) {				\
			uint32_t pos = __builtin_ctzll(pkts_in_mask);	\
			uint64_t pkt_mask = 1LLU << pos;		\
			uint64_t mask = f_pkt_work(pkts[pos],		\
				entry, arg, time);			\
									\
			pkts_in_mask &= ~pkt_mask;			\
			pkts_out_mask ^= mask << pos;			\
		}							\
									\
	/* Drop exactly the packets the callbacks flagged */		\
	rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask);	\
									\
	return 0;							\
}