/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_table_stub.h>
#include <rte_table_hash.h>
#include <rte_pipeline.h>

#include "pipeline_passthrough_be.h"
#include "pipeline_actions_common.h"
#include "hash_func.h"
47 struct pipeline_passthrough {
49 struct pipeline_passthrough_params params;
50 rte_table_hash_op_hash f_hash;
51 } __rte_cache_aligned;
53 static pipeline_msg_req_handler handlers[] = {
54 [PIPELINE_MSG_REQ_PING] =
55 pipeline_msg_req_ping_handler,
56 [PIPELINE_MSG_REQ_STATS_PORT_IN] =
57 pipeline_msg_req_stats_port_in_handler,
58 [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
59 pipeline_msg_req_stats_port_out_handler,
60 [PIPELINE_MSG_REQ_STATS_TABLE] =
61 pipeline_msg_req_stats_table_handler,
62 [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
63 pipeline_msg_req_port_in_enable_handler,
64 [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
65 pipeline_msg_req_port_in_disable_handler,
66 [PIPELINE_MSG_REQ_CUSTOM] =
67 pipeline_msg_req_invalid_handler,
70 static inline __attribute__((always_inline)) void
75 uint32_t hash_enabled)
77 struct pipeline_passthrough *p = arg;
79 uint64_t *dma_dst = RTE_MBUF_METADATA_UINT64_PTR(pkt,
80 p->params.dma_dst_offset);
81 uint64_t *dma_src = RTE_MBUF_METADATA_UINT64_PTR(pkt,
82 p->params.dma_src_offset);
83 uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
84 uint32_t *dma_hash = RTE_MBUF_METADATA_UINT32_PTR(pkt,
85 p->params.dma_hash_offset);
88 /* Read (dma_src), compute (dma_dst), write (dma_dst) */
89 for (i = 0; i < (dma_size / 8); i++)
90 dma_dst[i] = dma_src[i] & dma_mask[i];
92 /* Read (dma_dst), compute (hash), write (hash) */
94 *dma_hash = p->f_hash(dma_dst, dma_size, 0);
97 static inline __attribute__((always_inline)) void
99 struct rte_mbuf **pkts,
102 uint32_t hash_enabled)
104 struct pipeline_passthrough *p = arg;
106 uint64_t *dma_dst0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
107 p->params.dma_dst_offset);
108 uint64_t *dma_dst1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
109 p->params.dma_dst_offset);
110 uint64_t *dma_dst2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
111 p->params.dma_dst_offset);
112 uint64_t *dma_dst3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
113 p->params.dma_dst_offset);
115 uint64_t *dma_src0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
116 p->params.dma_src_offset);
117 uint64_t *dma_src1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
118 p->params.dma_src_offset);
119 uint64_t *dma_src2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
120 p->params.dma_src_offset);
121 uint64_t *dma_src3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
122 p->params.dma_src_offset);
124 uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
126 uint32_t *dma_hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkts[0],
127 p->params.dma_hash_offset);
128 uint32_t *dma_hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkts[1],
129 p->params.dma_hash_offset);
130 uint32_t *dma_hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkts[2],
131 p->params.dma_hash_offset);
132 uint32_t *dma_hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkts[3],
133 p->params.dma_hash_offset);
137 /* Read (dma_src), compute (dma_dst), write (dma_dst) */
138 for (i = 0; i < (dma_size / 8); i++) {
139 dma_dst0[i] = dma_src0[i] & dma_mask[i];
140 dma_dst1[i] = dma_src1[i] & dma_mask[i];
141 dma_dst2[i] = dma_src2[i] & dma_mask[i];
142 dma_dst3[i] = dma_src3[i] & dma_mask[i];
145 /* Read (dma_dst), compute (hash), write (hash) */
147 *dma_hash0 = p->f_hash(dma_dst0, dma_size, 0);
148 *dma_hash1 = p->f_hash(dma_dst1, dma_size, 0);
149 *dma_hash2 = p->f_hash(dma_dst2, dma_size, 0);
150 *dma_hash3 = p->f_hash(dma_dst3, dma_size, 0);
154 #define PKT_WORK(dma_size, hash_enabled) \
156 pkt_work_size##dma_size##_hash##hash_enabled( \
157 struct rte_mbuf *pkt, \
160 pkt_work(pkt, arg, dma_size, hash_enabled); \
163 #define PKT4_WORK(dma_size, hash_enabled) \
165 pkt4_work_size##dma_size##_hash##hash_enabled( \
166 struct rte_mbuf **pkts, \
169 pkt4_work(pkts, arg, dma_size, hash_enabled); \
172 #define port_in_ah(dma_size, hash_enabled) \
173 PKT_WORK(dma_size, hash_enabled) \
174 PKT4_WORK(dma_size, hash_enabled) \
175 PIPELINE_PORT_IN_AH(port_in_ah_size##dma_size##_hash##hash_enabled,\
176 pkt_work_size##dma_size##_hash##hash_enabled, \
177 pkt4_work_size##dma_size##_hash##hash_enabled)
197 static rte_pipeline_port_in_action_handler
198 get_port_in_ah(struct pipeline_passthrough *p)
200 if (p->params.dma_enabled == 0)
203 if (p->params.dma_hash_enabled)
204 switch (p->params.dma_size) {
206 case 8: return port_in_ah_size8_hash1;
207 case 16: return port_in_ah_size16_hash1;
208 case 24: return port_in_ah_size24_hash1;
209 case 32: return port_in_ah_size32_hash1;
210 case 40: return port_in_ah_size40_hash1;
211 case 48: return port_in_ah_size48_hash1;
212 case 56: return port_in_ah_size56_hash1;
213 case 64: return port_in_ah_size64_hash1;
214 default: return NULL;
217 switch (p->params.dma_size) {
219 case 8: return port_in_ah_size8_hash0;
220 case 16: return port_in_ah_size16_hash0;
221 case 24: return port_in_ah_size24_hash0;
222 case 32: return port_in_ah_size32_hash0;
223 case 40: return port_in_ah_size40_hash0;
224 case 48: return port_in_ah_size48_hash0;
225 case 56: return port_in_ah_size56_hash0;
226 case 64: return port_in_ah_size64_hash0;
227 default: return NULL;
232 pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
233 struct pipeline_params *params)
235 uint32_t dma_dst_offset_present = 0;
236 uint32_t dma_src_offset_present = 0;
237 uint32_t dma_src_mask_present = 0;
238 uint32_t dma_size_present = 0;
239 uint32_t dma_hash_offset_present = 0;
244 p->dma_hash_enabled = 0;
245 memset(p->dma_src_mask, 0xFF, sizeof(p->dma_src_mask));
247 for (i = 0; i < params->n_args; i++) {
248 char *arg_name = params->args_name[i];
249 char *arg_value = params->args_value[i];
252 if (strcmp(arg_name, "dma_dst_offset") == 0) {
253 if (dma_dst_offset_present)
255 dma_dst_offset_present = 1;
257 p->dma_dst_offset = atoi(arg_value);
264 if (strcmp(arg_name, "dma_src_offset") == 0) {
265 if (dma_src_offset_present)
267 dma_src_offset_present = 1;
269 p->dma_src_offset = atoi(arg_value);
276 if (strcmp(arg_name, "dma_size") == 0) {
277 if (dma_size_present)
279 dma_size_present = 1;
281 p->dma_size = atoi(arg_value);
282 if ((p->dma_size == 0) ||
283 (p->dma_size > PIPELINE_PASSTHROUGH_DMA_SIZE_MAX) ||
284 ((p->dma_size % 8) != 0))
293 if (strcmp(arg_name, "dma_src_mask") == 0) {
297 if (dma_src_mask_present ||
298 (dma_size_present == 0))
300 dma_src_mask_present = 1;
302 dma_size = p->dma_size;
303 status = parse_hex_string(arg_value,
307 (dma_size != p->dma_size))
316 if (strcmp(arg_name, "dma_dst_offset") == 0) {
317 if (dma_dst_offset_present)
319 dma_dst_offset_present = 1;
321 p->dma_dst_offset = atoi(arg_value);
327 /* dma_hash_offset */
328 if (strcmp(arg_name, "dma_hash_offset") == 0) {
329 if (dma_hash_offset_present)
331 dma_hash_offset_present = 1;
333 p->dma_hash_offset = atoi(arg_value);
334 p->dma_hash_enabled = 1;
344 /* Check correlations between arguments */
345 if ((dma_dst_offset_present != p->dma_enabled) ||
346 (dma_src_offset_present != p->dma_enabled) ||
347 (dma_size_present != p->dma_enabled) ||
348 (dma_hash_offset_present != p->dma_hash_enabled) ||
349 (p->dma_hash_enabled > p->dma_enabled))
356 static rte_table_hash_op_hash
357 get_hash_function(struct pipeline_passthrough *p)
359 switch (p->params.dma_size) {
361 case 8: return hash_default_key8;
362 case 16: return hash_default_key16;
363 case 24: return hash_default_key24;
364 case 32: return hash_default_key32;
365 case 40: return hash_default_key40;
366 case 48: return hash_default_key48;
367 case 56: return hash_default_key56;
368 case 64: return hash_default_key64;
369 default: return NULL;
374 pipeline_passthrough_init(struct pipeline_params *params,
375 __rte_unused void *arg)
378 struct pipeline_passthrough *p_pt;
381 /* Check input arguments */
382 if ((params == NULL) ||
383 (params->n_ports_in == 0) ||
384 (params->n_ports_out == 0) ||
385 (params->n_ports_in < params->n_ports_out) ||
386 (params->n_ports_in % params->n_ports_out))
389 /* Memory allocation */
390 size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_passthrough));
391 p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
392 p_pt = (struct pipeline_passthrough *) p;
396 strcpy(p->name, params->name);
397 p->log_level = params->log_level;
399 PLOG(p, HIGH, "Pass-through");
401 /* Parse arguments */
402 if (pipeline_passthrough_parse_args(&p_pt->params, params))
404 p_pt->f_hash = get_hash_function(p_pt);
408 struct rte_pipeline_params pipeline_params = {
409 .name = "PASS-THROUGH",
410 .socket_id = params->socket_id,
414 p->p = rte_pipeline_create(&pipeline_params);
422 p->n_ports_in = params->n_ports_in;
423 for (i = 0; i < p->n_ports_in; i++) {
424 struct rte_pipeline_port_in_params port_params = {
425 .ops = pipeline_port_in_params_get_ops(
426 ¶ms->port_in[i]),
427 .arg_create = pipeline_port_in_params_convert(
428 ¶ms->port_in[i]),
429 .f_action = get_port_in_ah(p_pt),
431 .burst_size = params->port_in[i].burst_size,
434 int status = rte_pipeline_port_in_create(p->p,
439 rte_pipeline_free(p->p);
446 p->n_ports_out = params->n_ports_out;
447 for (i = 0; i < p->n_ports_out; i++) {
448 struct rte_pipeline_port_out_params port_params = {
449 .ops = pipeline_port_out_params_get_ops(
450 ¶ms->port_out[i]),
451 .arg_create = pipeline_port_out_params_convert(
452 ¶ms->port_out[i]),
454 .f_action_bulk = NULL,
458 int status = rte_pipeline_port_out_create(p->p,
463 rte_pipeline_free(p->p);
470 p->n_tables = p->n_ports_in;
471 for (i = 0; i < p->n_ports_in; i++) {
472 struct rte_pipeline_table_params table_params = {
473 .ops = &rte_table_stub_ops,
475 .f_action_hit = NULL,
476 .f_action_miss = NULL,
478 .action_data_size = 0,
481 int status = rte_pipeline_table_create(p->p,
486 rte_pipeline_free(p->p);
492 /* Connecting input ports to tables */
493 for (i = 0; i < p->n_ports_in; i++) {
494 int status = rte_pipeline_port_in_connect_to_table(p->p,
499 rte_pipeline_free(p->p);
505 /* Add entries to tables */
506 for (i = 0; i < p->n_ports_in; i++) {
507 struct rte_pipeline_table_entry default_entry = {
508 .action = RTE_PIPELINE_ACTION_PORT,
509 {.port_id = p->port_out_id[
510 i / (p->n_ports_in / p->n_ports_out)]},
513 struct rte_pipeline_table_entry *default_entry_ptr;
515 int status = rte_pipeline_table_default_entry_add(p->p,
521 rte_pipeline_free(p->p);
527 /* Enable input ports */
528 for (i = 0; i < p->n_ports_in; i++) {
529 int status = rte_pipeline_port_in_enable(p->p,
533 rte_pipeline_free(p->p);
539 /* Check pipeline consistency */
540 if (rte_pipeline_check(p->p) < 0) {
541 rte_pipeline_free(p->p);
547 p->n_msgq = params->n_msgq;
548 for (i = 0; i < p->n_msgq; i++)
549 p->msgq_in[i] = params->msgq_in[i];
550 for (i = 0; i < p->n_msgq; i++)
551 p->msgq_out[i] = params->msgq_out[i];
553 /* Message handlers */
554 memcpy(p->handlers, handlers, sizeof(p->handlers));
560 pipeline_passthrough_free(void *pipeline)
562 struct pipeline *p = (struct pipeline *) pipeline;
564 /* Check input arguments */
569 rte_pipeline_free(p->p);
575 pipeline_passthrough_timer(void *pipeline)
577 struct pipeline *p = (struct pipeline *) pipeline;
579 pipeline_msg_req_handle(p);
580 rte_pipeline_flush(p->p);
586 pipeline_passthrough_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
588 struct pipeline *p = (struct pipeline *) pipeline;
590 /* Check input arguments */
592 (port_in >= p->n_ports_in) ||
596 *port_out = port_in / p->n_ports_in;
600 struct pipeline_be_ops pipeline_passthrough_be_ops = {
601 .f_init = pipeline_passthrough_init,
602 .f_free = pipeline_passthrough_free,
604 .f_timer = pipeline_passthrough_timer,
605 .f_track = pipeline_passthrough_track,