X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-eventdev%2Ftest_order_common.h;h=92781d95872bfc3ccc62c34bbf8d1123824eae04;hb=3dea1b2693ab8b46f5add0642b08714884634bae;hp=88cb2acd9e36ccb95d509d4e8d3f0787735c8185;hpb=4d04346f1ec13b2afcf754fceaf7d613e85ac46a;p=dpdk.git

diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 88cb2acd9e..92781d9587 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Cavium networks nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
 #ifndef _TEST_ORDER_COMMON_
@@ -41,6 +13,7 @@
 #include <rte_lcore.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
 
 #include "evt_common.h"
 #include "evt_options.h"
@@ -48,6 +21,9 @@
 
 #define BURST_SIZE 16
 
+typedef uint32_t flow_id_t;
+typedef uint32_t seqn_t;
+
 struct test_order;
 
 struct worker_data {
@@ -72,11 +48,13 @@ struct test_order {
 	 * The atomic_* is an expensive operation,Since it is a functional test,
 	 * We are using the atomic_ operation to reduce the code complexity.
*/ - rte_atomic64_t outstand_pkts; + uint64_t outstand_pkts; enum evt_test_result result; uint32_t nb_flows; uint64_t nb_pkts; struct rte_mempool *pool; + int flow_id_dynfield_offset; + int seqn_dynfield_offset; struct prod_data prod; struct worker_data worker[EVT_MAX_PORTS]; uint32_t *producer_flow_seq; @@ -84,25 +62,48 @@ struct test_order { struct evt_options *opt; } __rte_cache_aligned; +static inline void +order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event) +{ + event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf, + t->flow_id_dynfield_offset, flow_id_t *); +} + +static inline void +order_flow_id_save(struct test_order *t, flow_id_t flow_id, + struct rte_mbuf *mbuf, struct rte_event *event) +{ + *RTE_MBUF_DYNFIELD(mbuf, + t->flow_id_dynfield_offset, flow_id_t *) = flow_id; + event->flow_id = flow_id; + event->mbuf = mbuf; +} + +static inline seqn_t * +order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf) +{ + return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *); +} + static inline int order_nb_event_ports(struct evt_options *opt) { return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */; } -static inline __attribute__((always_inline)) void +static __rte_always_inline void order_process_stage_1(struct test_order *const t, struct rte_event *const ev, const uint32_t nb_flows, uint32_t *const expected_flow_seq, - rte_atomic64_t *const outstand_pkts) + uint64_t *const outstand_pkts) { const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows; /* compare the seqn against expected value */ - if (ev->mbuf->seqn != expected_flow_seq[flow]) { + if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) { evt_err("flow=%x seqn mismatch got=%x expected=%x", - flow, ev->mbuf->seqn, expected_flow_seq[flow]); + flow, *order_mbuf_seqn(t, ev->mbuf), + expected_flow_seq[flow]); t->err = true; - rte_smp_wmb(); } /* * Events from an atomic flow of an event queue can be scheduled only to @@ -112,16 +113,15 @@ order_process_stage_1(struct test_order *const t, */ expected_flow_seq[flow]++; rte_pktmbuf_free(ev->mbuf); - rte_atomic64_sub(outstand_pkts, 1); + __atomic_sub_fetch(outstand_pkts, 1, __ATOMIC_RELAXED); } -static inline __attribute__((always_inline)) void +static __rte_always_inline void order_process_stage_invalid(struct test_order *const t, struct rte_event *const ev) { evt_err("invalid queue %d", ev->queue_id); t->err = true; - rte_smp_wmb(); } #define ORDER_WORKER_INIT\ @@ -132,7 +132,7 @@ order_process_stage_invalid(struct test_order *const t, const uint8_t port = w->port_id;\ const uint32_t nb_flows = t->nb_flows;\ uint32_t *expected_flow_seq = t->expected_flow_seq;\ - rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\ + uint64_t *outstand_pkts = &t->outstand_pkts;\ if (opt->verbose_level > 1)\ printf("%s(): lcore %d dev_id %d port=%d\n",\ __func__, rte_lcore_id(), dev_id, port)
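
The helpers introduced above (order_flow_id_copy_from_mbuf, order_flow_id_save, order_mbuf_seqn) read and write the per-mbuf flow id and sequence number through RTE_MBUF_DYNFIELD(), using the flow_id_dynfield_offset and seqn_dynfield_offset members added to struct test_order, but the registration that produces those offsets is not visible in this header. The sketch below shows one way such offsets could be obtained with the rte_mbuf_dyn API (rte_mbuf_dynfield_register()); it is illustrative only, and the function name order_dynfields_register() and the dynamic field name strings are assumptions, not taken from the patch.

#include <rte_errno.h>
#include <rte_mbuf_dyn.h>

#include "test_order_common.h"

/* Register the two dynamic mbuf fields and store their byte offsets in
 * struct test_order. rte_mbuf_dynfield_register() returns the offset of
 * the new field inside struct rte_mbuf, or -1 on failure (rte_errno set).
 */
static int
order_dynfields_register(struct test_order *t)
{
	static const struct rte_mbuf_dynfield flow_id_desc = {
		.name = "test_order_dynfield_flow_id", /* assumed name */
		.size = sizeof(flow_id_t),
		.align = __alignof__(flow_id_t),
	};
	static const struct rte_mbuf_dynfield seqn_desc = {
		.name = "test_order_dynfield_seqn", /* assumed name */
		.size = sizeof(seqn_t),
		.align = __alignof__(seqn_t),
	};

	t->flow_id_dynfield_offset = rte_mbuf_dynfield_register(&flow_id_desc);
	if (t->flow_id_dynfield_offset < 0)
		return -rte_errno;

	t->seqn_dynfield_offset = rte_mbuf_dynfield_register(&seqn_desc);
	if (t->seqn_dynfield_offset < 0)
		return -rte_errno;

	return 0;
}

Such a registration would typically run once during test setup, before the producer starts stamping mbufs via order_flow_id_save() and *order_mbuf_seqn().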