app/test-eventdev: update evt_common.h (SPDX license tag, producer-type options, service-core setup and eventdev configuration helpers)
[dpdk.git] / app / test-eventdev / evt_common.h
index 7fc1e82..f9d7378 100644 (file)
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium 2017.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
 #ifndef _EVT_COMMON_
@@ -36,6 +8,7 @@
 #include <rte_common.h>
 #include <rte_debug.h>
 #include <rte_eventdev.h>
+#include <rte_service.h>
 
 #define CLNRM  "\x1b[0m"
 #define CLRED  "\x1b[31m"
 #define EVT_MAX_PORTS            256
 #define EVT_MAX_QUEUES           256
 
+enum evt_prod_type {
+       EVT_PROD_TYPE_NONE,
+       EVT_PROD_TYPE_SYNT,          /* Producer type Synthetic i.e. CPU. */
+       EVT_PROD_TYPE_ETH_RX_ADPTR,  /* Producer type Eth Rx Adapter. */
+       EVT_PROD_TYPE_EVENT_TIMER_ADPTR,  /* Producer type Timer Adapter. */
+       EVT_PROD_TYPE_MAX,
+};
+
+struct evt_options {
+#define EVT_TEST_NAME_MAX_LEN     32
+       char test_name[EVT_TEST_NAME_MAX_LEN];
+       bool plcores[RTE_MAX_LCORE];
+       bool wlcores[RTE_MAX_LCORE];
+       int pool_sz;
+       int socket_id;
+       int nb_stages;
+       int verbose_level;
+       uint8_t dev_id;
+       uint8_t timdev_cnt;
+       uint8_t nb_timer_adptrs;
+       uint8_t timdev_use_burst;
+       uint8_t sched_type_list[EVT_MAX_STAGES];
+       uint16_t mbuf_sz;
+       uint16_t wkr_deq_dep;
+       uint32_t nb_flows;
+       uint32_t tx_first;
+       uint32_t max_pkt_sz;
+       uint32_t deq_tmo_nsec;
+       uint32_t q_priority:1;
+       uint32_t fwd_latency:1;
+       uint64_t nb_pkts;
+       uint64_t nb_timers;
+       uint64_t expiry_nsec;
+       uint64_t max_tmo_nsec;
+       uint64_t timer_tick_nsec;
+       uint64_t optm_timer_tick_nsec;
+       enum evt_prod_type prod_type;
+};
+
 static inline bool
 evt_has_distributed_sched(uint8_t dev_id)
 {
@@ -92,25 +104,80 @@ evt_has_all_types_queue(uint8_t dev_id)
                        true : false;
 }
 
-static inline uint32_t
-evt_sched_type2queue_cfg(uint8_t sched_type)
+static inline int
+evt_service_setup(uint32_t service_id)
 {
-       uint32_t ret;
-
-       switch (sched_type) {
-       case RTE_SCHED_TYPE_ATOMIC:
-               ret = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
-               break;
-       case RTE_SCHED_TYPE_ORDERED:
-               ret = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
-               break;
-       case RTE_SCHED_TYPE_PARALLEL:
-               ret = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
-               break;
-       default:
-               rte_panic("Invalid sched_type %d\n", sched_type);
+       int32_t core_cnt;
+       unsigned int lcore = 0;
+       uint32_t core_array[RTE_MAX_LCORE];
+       uint8_t cnt;
+       uint8_t min_cnt = UINT8_MAX;
+
+       if (!rte_service_lcore_count())
+               return -ENOENT;
+
+       core_cnt = rte_service_lcore_list(core_array,
+                       RTE_MAX_LCORE);
+       if (core_cnt < 0)
+               return -ENOENT;
+       /* Get the core which has least number of services running. */
+       while (core_cnt--) {
+               /* Reset default mapping */
+               rte_service_map_lcore_set(service_id,
+                               core_array[core_cnt], 0);
+               cnt = rte_service_lcore_count_services(
+                               core_array[core_cnt]);
+               if (cnt < min_cnt) {
+                       lcore = core_array[core_cnt];
+                       min_cnt = cnt;
+               }
        }
-       return ret;
+       if (rte_service_map_lcore_set(service_id, lcore, 1))
+               return -ENOENT;
+
+       return 0;
+}
+
+static inline int
+evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
+               uint8_t nb_ports)
+{
+       struct rte_event_dev_info info;
+       int ret;
+
+       memset(&info, 0, sizeof(struct rte_event_dev_info));
+       ret = rte_event_dev_info_get(opt->dev_id, &info);
+       if (ret) {
+               evt_err("failed to get eventdev info %d", opt->dev_id);
+               return ret;
+       }
+
+       if (opt->deq_tmo_nsec) {
+               if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+                       opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+                       evt_info("dequeue_timeout_ns too low, using %d",
+                                       opt->deq_tmo_nsec);
+               }
+               if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+                       opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+                       evt_info("dequeue_timeout_ns too high, using %d",
+                                       opt->deq_tmo_nsec);
+               }
+       }
+
+       const struct rte_event_dev_config config = {
+                       .dequeue_timeout_ns = opt->deq_tmo_nsec,
+                       .nb_event_queues = nb_queues,
+                       .nb_event_ports = nb_ports,
+                       .nb_events_limit  = info.max_num_events,
+                       .nb_event_queue_flows = opt->nb_flows,
+                       .nb_event_port_dequeue_depth =
+                               info.max_event_port_dequeue_depth,
+                       .nb_event_port_enqueue_depth =
+                               info.max_event_port_enqueue_depth,
+       };
+
+       return rte_event_dev_configure(opt->dev_id, &config);
 }
 
 #endif /*  _EVT_COMMON_*/