diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 90eac96..92781d9 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -22,6 +22,7 @@
 #define BURST_SIZE 16
 
 typedef uint32_t flow_id_t;
+typedef uint32_t seqn_t;
 
 struct test_order;
 
@@ -47,12 +48,13 @@ struct test_order {
         * The atomic_* is an expensive operation. Since it is a functional test,
         * we are using the atomic_ operation to reduce the code complexity.
         */
-       rte_atomic64_t outstand_pkts;
+       uint64_t outstand_pkts;
        enum evt_test_result result;
        uint32_t nb_flows;
        uint64_t nb_pkts;
        struct rte_mempool *pool;
        int flow_id_dynfield_offset;
+       int seqn_dynfield_offset;
        struct prod_data prod;
        struct worker_data worker[EVT_MAX_PORTS];
        uint32_t *producer_flow_seq;
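
The new seqn_dynfield_offset mirrors the existing flow_id_dynfield_offset: it is resolved once at test setup through the mbuf dynamic-field API and then used by every accessor. A minimal sketch of that registration, assuming a helper name and field name that are purely illustrative:

        #include <rte_errno.h>
        #include <rte_mbuf_dyn.h>

        static int
        order_seqn_dynfield_register(struct test_order *t)
        {
                static const struct rte_mbuf_dynfield seqn_dynfield_desc = {
                        .name = "test_event_dynfield_seqn",
                        .size = sizeof(seqn_t),
                        .align = __alignof__(seqn_t),
                };

                /* Returns the field's byte offset within struct rte_mbuf, or -1. */
                t->seqn_dynfield_offset =
                        rte_mbuf_dynfield_register(&seqn_dynfield_desc);
                return t->seqn_dynfield_offset < 0 ? -rte_errno : 0;
        }
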
@@ -77,6 +79,12 @@ order_flow_id_save(struct test_order *t, flow_id_t flow_id,
        event->mbuf = mbuf;
 }
 
+static inline seqn_t *
+order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf)
+{
+       return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *);
+}
+
 static inline int
 order_nb_event_ports(struct evt_options *opt)
 {
@@ -87,15 +95,15 @@ static __rte_always_inline void
 order_process_stage_1(struct test_order *const t,
                struct rte_event *const ev, const uint32_t nb_flows,
                uint32_t *const expected_flow_seq,
-               rte_atomic64_t *const outstand_pkts)
+               uint64_t *const outstand_pkts)
 {
        const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
        /* compare the seqn against expected value */
-       if (ev->mbuf->seqn != expected_flow_seq[flow]) {
+       if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) {
                evt_err("flow=%x seqn mismatch got=%x expected=%x",
-                       flow, ev->mbuf->seqn, expected_flow_seq[flow]);
+                       flow, *order_mbuf_seqn(t, ev->mbuf),
+                       expected_flow_seq[flow]);
                t->err = true;
-               rte_smp_wmb();
        }
        /*
         * Events from an atomic flow of an event queue can be scheduled only to
@@ -105,7 +113,7 @@ order_process_stage_1(struct test_order *const t,
         */
        expected_flow_seq[flow]++;
        rte_pktmbuf_free(ev->mbuf);
-       rte_atomic64_sub(outstand_pkts, 1);
+       __atomic_sub_fetch(outstand_pkts, 1, __ATOMIC_RELAXED);
 }
 
 static __rte_always_inline void
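
Since rte_atomic64_sub() becomes a relaxed __atomic built-in here, the reader side must switch from rte_atomic64_read() to a matching relaxed load. A sketch, under the assumption that the main lcore polls the counter until the workers drain all outstanding packets:

        /* Main lcore: spin until workers have consumed every packet. */
        while (t->err == false &&
                        __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED) > 0)
                rte_pause();
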
@@ -114,7 +122,6 @@ order_process_stage_invalid(struct test_order *const t,
 {
        evt_err("invalid queue %d", ev->queue_id);
        t->err = true;
-       rte_smp_wmb();
 }
 
 #define ORDER_WORKER_INIT\
@@ -125,7 +132,7 @@ order_process_stage_invalid(struct test_order *const t,
        const uint8_t port = w->port_id;\
        const uint32_t nb_flows = t->nb_flows;\
        uint32_t *expected_flow_seq = t->expected_flow_seq;\
-       rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
+       uint64_t *outstand_pkts = &t->outstand_pkts;\
        if (opt->verbose_level > 1)\
                printf("%s(): lcore %d dev_id %d port=%d\n",\
                        __func__, rte_lcore_id(), dev_id, port)
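
ORDER_WORKER_INIT expands to the local declarations above, so each worker places it at the top of its function body. A sketch of a worker skeleton built on it, assuming the macro's elided first lines bind w and t from the worker argument (as its visible lines suggest); the function name and single-event dequeue are illustrative:

        static int
        order_test_worker(void *arg)
        {
                ORDER_WORKER_INIT;
                struct rte_event ev;

                while (t->err == false) {
                        if (!rte_event_dequeue_burst(dev_id, port, &ev, 1, 0)) {
                                /* Nothing dequeued: stop once all packets drain. */
                                if (__atomic_load_n(outstand_pkts,
                                                __ATOMIC_RELAXED) == 0)
                                        break;
                                rte_pause();
                                continue;
                        }
                        /* Validate the sequence number, free the mbuf, and
                         * decrement the outstanding-packet counter.
                         */
                        order_process_stage_1(t, &ev, nb_flows,
                                        expected_flow_seq, outstand_pkts);
                }
                return 0;
        }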