service: introduce service cores concept
author    Harry van Haaren <harry.van.haaren@intel.com>
          Tue, 11 Jul 2017 14:19:27 +0000 (15:19 +0100)
committer Thomas Monjalon <thomas@monjalon.net>
          Sun, 16 Jul 2017 18:31:50 +0000 (20:31 +0200)
Add header files, update .map files with the new service
functions, and add the service header to the doxygen build.

This service header API allows DPDK to treat as a service any
component that requires CPU cycles to operate. An example is a
PMD that schedules events in software, where a hardware version
exists that does not require a CPU.

Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
17 files changed:
MAINTAINERS
doc/api/doxy-api-index.md
doc/guides/prog_guide/index.rst
doc/guides/prog_guide/service_cores.rst [new file with mode: 0644]
doc/guides/rel_notes/release_17_08.rst
lib/librte_eal/bsdapp/eal/Makefile
lib/librte_eal/bsdapp/eal/rte_eal_version.map
lib/librte_eal/common/Makefile
lib/librte_eal/common/eal_common_lcore.c
lib/librte_eal/common/include/rte_eal.h
lib/librte_eal/common/include/rte_lcore.h
lib/librte_eal/common/include/rte_service.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_service_component.h [new file with mode: 0644]
lib/librte_eal/common/rte_service.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/Makefile
lib/librte_eal/linuxapp/eal/eal_thread.c
lib/librte_eal/linuxapp/eal/rte_eal_version.map

index 368973a..28c87fe 100644 (file)
@@ -134,6 +134,13 @@ F: test/test/test_mp_secondary.c
 F: examples/multi_process/
 F: doc/guides/sample_app_ug/multi_process.rst
 
+Service Cores - EXPERIMENTAL
+M: Harry van Haaren <harry.van.haaren@intel.com>
+F: lib/librte_eal/common/include/rte_service.h
+F: lib/librte_eal/common/include/rte_service_component.h
+F: lib/librte_eal/common/rte_service.c
+F: doc/guides/prog_guide/service_cores.rst
+
 ARM v7
 M: Jan Viktorin <viktorin@rehivetech.com>
 M: Jianbo Liu <jianbo.liu@linaro.org>
index 172f356..a68805b 100644 (file)
@@ -86,6 +86,7 @@ There are many libraries, so their headers may be grouped by topics:
   [launch]             (@ref rte_launch.h),
   [lcore]              (@ref rte_lcore.h),
   [per-lcore]          (@ref rte_per_lcore.h),
+  [service cores]      (@ref rte_service.h),
   [power/freq]         (@ref rte_power.h)
 
 - **layers**:
index 7578395..5548aba 100644 (file)
@@ -38,6 +38,7 @@ Programmer's Guide
     intro
     overview
     env_abstraction_layer
+    service_cores
     ring_lib
     mempool_lib
     mbuf_lib
diff --git a/doc/guides/prog_guide/service_cores.rst b/doc/guides/prog_guide/service_cores.rst
new file mode 100644 (file)
index 0000000..3a029ba
--- /dev/null
@@ -0,0 +1,81 @@
+..  BSD LICENSE
+    Copyright(c) 2017 Intel Corporation. All rights reserved.
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions
+    are met:
+
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+    * Neither the name of Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived
+    from this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Service Cores
+=============
+
+DPDK has a concept known as service cores, which enables a dynamic way of
+performing work on DPDK lcores. Service core support is built into the EAL, and
+an API is provided to optionally allow applications to control how the service
+cores are used at runtime.
+
+The service cores concept is built from services (components of DPDK that
+require CPU cycles to operate) and service cores (DPDK lcores tasked with
+running services). The power of the service core concept is that the mapping
+between service cores and services can be configured to abstract away the
+differences between platforms and environments.
+
+For example, the Eventdev has hardware and software PMDs. Of these, the
+software PMD requires an lcore to perform the scheduling operations, while the
+hardware PMD does not. With service cores, the application would not directly
+notice that the scheduling is done in software.
+
+For detailed information about the service core API, please refer to the API
+documentation.
+
+Service Core Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are two methods of having service cores in a DPDK application: using the
+service coremask, or dynamically adding cores using the API. The simpler of the
+two is to pass the `-s` coremask argument to EAL; any cores that are available
+in the main DPDK coremask and also have their bits set in the service coremask
+become service cores instead of DPDK application lcores.
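+
+As a minimal sketch of the dynamic method (lcore 7 below is a hypothetical
+core id, and error handling is shortened for brevity), an application can
+promote a core to service-core duty at runtime:
+
+.. code-block:: c
+
+    /* add lcore 7 as a service core and start it polling its mapped services */
+    if (rte_service_lcore_add(7) == 0)
+        rte_service_lcore_start(7);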
+
+Enabling Services on Cores
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each registered service can be individually mapped to a service core, or to a
+set of service cores. Enabling a service on a particular core means that the
+lcore in question will run the service. Disabling the service on that core
+stops the lcore in question from running the service.
+
+Using this method, it is possible to assign specific workloads to each service
+core, and to map N workloads to M service cores. Each service lcore loops over
+the services that are enabled for that core, and invokes the function to run
+the service.
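+
+The following is an illustrative sketch (the service name and core id are
+assumptions for the example, not part of the API) of mapping a single service
+to service core 7 and switching it on:
+
+.. code-block:: c
+
+    /* look up a service by name, map it to service lcore 7 and enable it */
+    struct rte_service_spec *s = rte_service_get_by_name("example_service");
+    if (s != NULL) {
+        rte_service_enable_on_lcore(s, 7);
+        rte_service_start(s);
+    }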
+
+Service Core Statistics
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The service core library is capable of collecting runtime statistics, such as
+the number of calls to a specific service and the number of cycles used by the
+service. The cycle count collection is dynamically configurable, allowing any
+application to profile the services running on the system at any time.
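+
+A minimal sketch of enabling and inspecting statistics, where ``s`` is a
+service pointer obtained as in the previous example and ``stdout`` is used
+purely for illustration:
+
+.. code-block:: c
+
+    /* enable call and cycle statistics for one service, then print a
+     * summary of all services and service cores
+     */
+    rte_service_set_stats_enable(s, 1);
+    /* ... let the service cores run for a while ... */
+    rte_service_dump(stdout, NULL);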
index 6273098..ef51ebb 100644 (file)
@@ -41,6 +41,14 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Added Service Core functionality.**
+
+  The service core functionality added to EAL allows DPDK to run services such
+  as software PMDs on lcores without the application manually running them. The
+  service core infrastructure allows multiple services to run on the same
+  service lcore, and provides the application with powerful APIs to configure
+  the mapping from service lcores to services.
+
 * **Added Generic Receive Offload API.**
 
   Generic Receive Offload (GRO) API supports to reassemble TCP/IPv4
index a0f9950..05517a2 100644 (file)
@@ -87,6 +87,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_malloc.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += malloc_elem.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += malloc_heap.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_keepalive.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_service.c
 
 # from arch dir
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_cpuflags.c
index 381f895..480ad23 100644 (file)
@@ -209,5 +209,28 @@ EXPERIMENTAL {
        rte_eal_devargs_parse;
        rte_eal_hotplug_add;
        rte_eal_hotplug_remove;
+       rte_service_disable_on_lcore;
+       rte_service_dump;
+       rte_service_enable_on_lcore;
+       rte_service_get_by_id;
+       rte_service_get_by_name;
+       rte_service_get_count;
+       rte_service_get_enabled_on_lcore;
+       rte_service_is_running;
+       rte_service_lcore_add;
+       rte_service_lcore_count;
+       rte_service_lcore_del;
+       rte_service_lcore_list;
+       rte_service_lcore_reset_all;
+       rte_service_lcore_start;
+       rte_service_lcore_stop;
+       rte_service_probe_capability;
+       rte_service_register;
+       rte_service_reset;
+       rte_service_set_stats_enable;
+       rte_service_start;
+       rte_service_start_with_defaults;
+       rte_service_stop;
+       rte_service_unregister;
 
 } DPDK_17.08;
index f2fe052..e8fd67a 100644 (file)
@@ -41,6 +41,7 @@ INC += rte_eal_memconfig.h rte_malloc_heap.h
 INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h rte_vdev.h
 INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
 INC += rte_malloc.h rte_keepalive.h rte_time.h
+INC += rte_service.h rte_service_component.h
 
 GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
 GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
index 84fa0cb..0db1555 100644 (file)
@@ -81,6 +81,7 @@ rte_eal_cpu_init(void)
 
                /* By default, each detected core is enabled */
                config->lcore_role[lcore_id] = ROLE_RTE;
+               lcore_config[lcore_id].core_role = ROLE_RTE;
                lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
                lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
                if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES) {
index 6b7c5ca..0e7363d 100644 (file)
@@ -61,6 +61,7 @@ extern "C" {
 enum rte_lcore_role_t {
        ROLE_RTE,
        ROLE_OFF,
+       ROLE_SERVICE,
 };
 
 /**
@@ -80,6 +81,7 @@ enum rte_proc_type_t {
 struct rte_config {
        uint32_t master_lcore;       /**< Id of the master lcore */
        uint32_t lcore_count;        /**< Number of available logical cores. */
+       uint32_t service_lcore_count;/**< Number of available service cores. */
        enum rte_lcore_role_t lcore_role[RTE_MAX_LCORE]; /**< State of cores. */
 
        /** Primary or secondary configuration */
@@ -185,6 +187,8 @@ int rte_eal_iopl_init(void);
  *
  *     EPROTO indicates that the PCI bus is either not present, or is not
  *            readable by the eal.
+ *
+ *     ENOEXEC indicates that a service core failed to launch successfully.
  */
 int rte_eal_init(int argc, char **argv);
 
index fe7b586..50e0d0f 100644 (file)
@@ -73,6 +73,7 @@ struct lcore_config {
        unsigned core_id;          /**< core number on socket for this lcore */
        int core_index;            /**< relative index, starting from 0 */
        rte_cpuset_t cpuset;       /**< cpu set which the lcore affinity to */
+       uint8_t core_role;         /**< role of core eg: OFF, RTE, SERVICE */
 };
 
 /**
@@ -175,7 +176,7 @@ rte_lcore_is_enabled(unsigned lcore_id)
        struct rte_config *cfg = rte_eal_get_configuration();
        if (lcore_id >= RTE_MAX_LCORE)
                return 0;
-       return cfg->lcore_role[lcore_id] != ROLE_OFF;
+       return cfg->lcore_role[lcore_id] == ROLE_RTE;
 }
 
 /**
diff --git a/lib/librte_eal/common/include/rte_service.h b/lib/librte_eal/common/include/rte_service.h
new file mode 100644 (file)
index 0000000..7c6f738
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SERVICE_H_
+#define _RTE_SERVICE_H_
+
+/**
+ * @file
+ *
+ * Service functions
+ *
+ * The service functionality provided by this header allows a DPDK component
+ * to indicate that it requires a function call in order for it to perform
+ * its processing.
+ *
+ * An example usage of this functionality would be a component that registers
+ * a service to perform a particular packet processing duty: for example the
+ * eventdev software PMD. At startup the application requests all services
+ * that have been registered, and the cores in the service-coremask run the
+ * required services. The EAL removes these cores from the set of available
+ * runtime cores, and dedicates them to performing service-core workloads. The
+ * application has access to the remaining lcores as normal.
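+ *
+ * The following is an illustrative sketch (not a prescribed sequence) of the
+ * application-side flow, assuming a single service core whose lcore id is
+ * held in a variable named service_core_id:
+ *
+ * @code
+ *     uint32_t i;
+ *     for (i = 0; i < rte_service_get_count(); i++) {
+ *         struct rte_service_spec *s = rte_service_get_by_id(i);
+ *         // map each registered service to the service core and enable it
+ *         rte_service_enable_on_lcore(s, service_core_id);
+ *         rte_service_start(s);
+ *     }
+ *     // begin polling the mapped services on the service core
+ *     rte_service_lcore_start(service_core_id);
+ * @endcode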
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_lcore.h>
+
+/* forward declaration only. Definition in rte_service_private.h */
+struct rte_service_spec;
+
+#define RTE_SERVICE_NAME_MAX 32
+
+/* Capabilities of a service.
+ *
+ * Use the *rte_service_probe_capability* function to check whether a service
+ * provides a specific capability.
+ */
+/** When set, the service is capable of having multiple threads run it at the
+ *  same time.
+ */
+#define RTE_SERVICE_CAP_MT_SAFE (1 << 0)
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the number of services registered.
+ *
+ * Valid service ids range from 0 to (count - 1), and can be passed to
+ * *rte_service_get_by_id*, enabling the application to retrieve the
+ * specification of each service.
+ *
+ * @return The number of services registered.
+ */
+uint32_t rte_service_get_count(void);
+
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the specification of a service by integer id.
+ *
+ * This function provides the specification of a service. This can be used by
+ * the application to understand what the service represents. The service
+ * must not be modified by the application directly, only passed to the various
+ * rte_service_* functions.
+ *
+ * @param id The integer id of the service to retrieve
+ * @retval non-zero A valid pointer to the service_spec
+ * @retval NULL Invalid *id* provided.
+ */
+struct rte_service_spec *rte_service_get_by_id(uint32_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the specification of a service by name.
+ *
+ * This function provides the specification of a service using the service name
+ * as lookup key. This can be used by the application to understand what the
+ * service represents. The service must not be modified by the application
+ * directly, only passed to the various rte_service_* functions.
+ *
+ * @param name The name of the service to retrieve
+ * @retval non-zero A valid pointer to the service_spec
+ * @retval NULL Invalid *name* provided.
+ */
+struct rte_service_spec *rte_service_get_by_name(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return the name of the service.
+ *
+ * @return A pointer to the name of the service. The returned pointer remains
+ *         in ownership of the service, and the application must not free it.
+ */
+const char *rte_service_get_name(const struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Check if a service has a specific capability.
+ *
+ * This function returns whether *service* implements *capability*.
+ * See RTE_SERVICE_CAP_* defines for a list of valid capabilities.
+ * @retval 1 Capability supported by this service instance
+ * @retval 0 Capability not supported by this service instance
+ */
+int32_t rte_service_probe_capability(const struct rte_service_spec *service,
+                                    uint32_t capability);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Enable a core to run a service.
+ *
+ * Each core can be added or removed from running specific services. This
+ * function adds *lcore* to the set of cores that will run *service*.
+ *
+ * If multiple cores are enabled on a service, an atomic is used to ensure that
+ * only one core runs the service at a time. The exception to this is when
+ * a service indicates that it is multi-thread safe by setting the capability
+ * called RTE_SERVICE_CAP_MT_SAFE. With the multi-thread safe capability set,
+ * the service function can be run on multiple threads at the same time.
+ *
+ * @retval 0 lcore added successfully
+ * @retval -EINVAL An invalid service or lcore was provided.
+ */
+int32_t rte_service_enable_on_lcore(struct rte_service_spec *service,
+                                  uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Disable a core from running a service.
+ *
+ * Each core can be added or removed from running specific services. This
+ * function removes *lcore* from the set of cores that will run *service*.
+ *
+ * @retval 0 Lcore removed successfully
+ * @retval -EINVAL An invalid service or lcore was provided.
+ */
+int32_t rte_service_disable_on_lcore(struct rte_service_spec *service,
+                                  uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return whether an lcore is enabled for the service.
+ *
+ * This function allows the application to query if *lcore* is currently set to
+ * run *service*.
+ *
+ * @retval 1 Service is enabled on this lcore
+ * @retval 0 Service is disabled on this lcore
+ * @retval -EINVAL An invalid service or lcore was provided.
+ */
+int32_t rte_service_get_enabled_on_lcore(struct rte_service_spec *service,
+                                       uint32_t lcore);
+
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Enable *service* to run.
+ *
+ * This function switches on a service during runtime.
+ * @retval 0 The service was successfully started
+ */
+int32_t rte_service_start(struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Disable *service*.
+ *
+ * Switch off a service, so it is not run until *rte_service_start* is called
+ * on it.
+ * @retval 0 Service successfully switched off
+ */
+int32_t rte_service_stop(struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Return whether *service* is currently running.
+ *
+ * This function returns true if the service has been started using
+ * *rte_service_start*, AND a service core is mapped to the service. This
+ * function can be used to ensure that the service will be run.
+ *
+ * @retval 1 Service is currently running, and has a service lcore mapped
+ * @retval 0 Service is currently stopped, or no service lcore is mapped
+ * @retval -EINVAL Invalid service pointer provided
+ */
+int32_t rte_service_is_running(const struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start a service core.
+ *
+ * Starting a core makes the core begin polling. Any services assigned to it
+ * will be run as fast as possible.
+ *
+ * @retval 0 Success
+ * @retval -EINVAL Failed to start core. The *lcore_id* passed in is not
+ *          currently assigned to be a service core.
+ */
+int32_t rte_service_lcore_start(uint32_t lcore_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop a service core.
+ *
+ * Stopping a core makes the core become idle, but it remains assigned as a
+ * service core.
+ *
+ * @retval 0 Success
+ * @retval -EINVAL Invalid *lcore_id* provided
+ * @retval -EALREADY Already stopped core
+ * @retval -EBUSY Failed to stop core, as it would cause a service to not
+ *          be run, as this is the only core currently running the service.
+ *          The application must stop the service first, and then stop the
+ *          lcore.
+ */
+int32_t rte_service_lcore_stop(uint32_t lcore_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Adds an lcore to the list of service cores.
+ *
+ * This function can be used at runtime in order to modify the service core
+ * mask.
+ *
+ * @retval 0 Success
+ * @retval -EBUSY lcore is busy, and not available for service core duty
+ * @retval -EALREADY lcore is already added to the service core list
+ * @retval -EINVAL Invalid lcore provided
+ */
+int32_t rte_service_lcore_add(uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Removes an lcore from the list of service cores.
+ *
+ * This can fail if the core is not stopped, see *rte_service_lcore_stop*.
+ *
+ * @retval 0 Success
+ * @retval -EBUSY Lcore is not stopped; stop the service core before removing.
+ * @retval -EINVAL Invalid lcore provided.
+ */
+int32_t rte_service_lcore_del(uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the number of service cores currently available.
+ *
+ * This function returns the integer count of service cores available. The
+ * service core count can be used in mapping logic when creating mappings
+ * from service cores to services.
+ *
+ * See *rte_service_lcore_list* for details on retrieving the lcore_id of each
+ * service core.
+ *
+ * @return The number of service cores currently configured.
+ */
+int32_t rte_service_lcore_count(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Resets all service core mappings. This does not remove the service cores
+ * from duty; it just unmaps all services from cores and stops the service
+ * cores. The runstate of services is not modified.
+ *
+ * @retval 0 Success
+ */
+int32_t rte_service_lcore_reset_all(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Enable or disable statistics collection for *service*.
+ *
+ * This function enables or disables per-core, per-service call and cycle
+ * count collection.
+ * @param service The service to enable statistics gathering on.
+ * @param enable Zero to disable statistics, non-zero to enable.
+ * @retval 0 Success
+ * @retval -EINVAL Invalid service pointer passed
+ */
+int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
+                                 int32_t enable);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the list of currently enabled service cores.
+ *
+ * This function fills in an application supplied array, with each element
+ * indicating the lcore_id of a service core.
+ *
+ * Adding and removing service cores can be performed using
+ * *rte_service_lcore_add* and *rte_service_lcore_del*.
+ * @param [out] array An array of at least *rte_service_lcore_count* items.
+ *              If statically allocating the buffer, use RTE_MAX_LCORE.
+ * @param n The size of *array*.
+ * @retval >=0 Number of service cores that have been populated in the array
+ * @retval -ENOMEM The provided array is not large enough to fill in the
+ *          service core list. No items have been populated, call this function
+ *          with a size of at least *rte_service_lcore_count* items.
+ */
+int32_t rte_service_lcore_list(uint32_t array[], uint32_t n);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Dumps any information available about the service. If service is NULL,
+ * dumps info for all services.
+ */
+int32_t rte_service_dump(FILE *f, struct rte_service_spec *service);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_SERVICE_H_ */
diff --git a/lib/librte_eal/common/include/rte_service_component.h b/lib/librte_eal/common/include/rte_service_component.h
new file mode 100644 (file)
index 0000000..7a946a1
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SERVICE_PRIVATE_H_
+#define _RTE_SERVICE_PRIVATE_H_
+
+/* This file specifies the internal service specification.
+ * Include this file if you are writing a component that requires CPU cycles to
+ * operate, and you wish to run the component using service cores.
+ */
+
+#include <rte_service.h>
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Signature of callback function to run a service.
+ */
+typedef int32_t (*rte_service_func)(void *args);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * The specification of a service.
+ *
+ * This struct contains metadata about the service itself, the callback
+ * function to run one iteration of the service, a userdata pointer, flags etc.
+ */
+struct rte_service_spec {
+       /** The name of the service. This should be used by the application to
+        * understand what purpose this service provides.
+        */
+       char name[RTE_SERVICE_NAME_MAX];
+       /** The callback to invoke to run one iteration of the service. */
+       rte_service_func callback;
+       /** The userdata pointer provided to the service callback. */
+       void *callback_userdata;
+       /** Flags to indicate the capabilities of this service. See defines in
+        * the public header file for values of RTE_SERVICE_CAP_*
+        */
+       uint32_t capabilities;
+       /** NUMA socket ID that this service is affinitized to */
+       int socket_id;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a new service.
+ *
+ * A service represents a component that requires CPU time periodically to
+ * achieve its purpose.
+ *
+ * For example, the eventdev SW PMD requires CPU cycles to perform its
+ * scheduling. This can be achieved by registering it as a service, and the
+ * application can then assign CPU resources to it using
+ * *rte_service_enable_on_lcore* on a service core.
+ *
+ * @param spec The specification of the service to register
+ * @retval 0 Successfully registered the service.
+ * @retval -EINVAL Attempted to register an invalid service (e.g. no callback
+ *         set)
+ */
+int32_t rte_service_register(const struct rte_service_spec *spec);
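+
+/*
+ * Illustrative sketch only (not part of the API): a component might register
+ * itself roughly as follows. The callback name, the userdata pointer and the
+ * service name below are assumptions made for this example.
+ *
+ *     static int32_t my_service_run(void *userdata);
+ *
+ *     struct rte_service_spec spec = {
+ *             .name = "my_component_service",
+ *             .callback = my_service_run,
+ *             .callback_userdata = my_state,
+ *             .capabilities = 0,
+ *             .socket_id = rte_socket_id(),
+ *     };
+ *     int32_t ret = rte_service_register(&spec);
+ */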
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Unregister a service.
+ *
+ * The service being removed must be stopped before calling this function.
+ *
+ * @retval 0 The service was successfully unregistered.
+ * @retval -EBUSY The service is currently running, stop the service before
+ *          calling unregister. No action has been taken.
+ */
+int32_t rte_service_unregister(struct rte_service_spec *service);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Private function to allow EAL to initialize default mappings.
+ *
+ * This function iterates over all services, and maps them to the available
+ * cores. Based on the capabilities of the services, they are set to run on the
+ * available cores in a round-robin manner.
+ *
+ * @retval 0 Success
+ * @retval -ENOTSUP No service lcores in use
+ * @retval -EINVAL Error while iterating over services
+ * @retval -ENODEV Error in enabling service lcore on a service
+ * @retval -ENOEXEC Error when starting services
+ */
+int32_t rte_service_start_with_defaults(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Initialize the service library.
+ *
+ * In order to use the service library, it must be initialized. EAL initializes
+ * the library at startup.
+ *
+ * @retval 0 Success
+ * @retval -EALREADY Service library is already initialized
+ */
+int32_t rte_service_init(void);
+
+#endif /* _RTE_SERVICE_PRIVATE_H_ */
diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
new file mode 100644 (file)
index 0000000..e82b9ad
--- /dev/null
@@ -0,0 +1,704 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <string.h>
+#include <dirent.h>
+
+#include <rte_service.h>
+#include "include/rte_service_component.h"
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+
+#define RTE_SERVICE_NUM_MAX 64
+
+#define SERVICE_F_REGISTERED    (1 << 0)
+#define SERVICE_F_STATS_ENABLED (1 << 1)
+
+/* runstates for services and lcores, denoting if they are active or not */
+#define RUNSTATE_STOPPED 0
+#define RUNSTATE_RUNNING 1
+
+/* internal representation of a service */
+struct rte_service_spec_impl {
+       /* public part of the struct */
+       struct rte_service_spec spec;
+
+       /* atomic lock that when set indicates a service core is currently
+        * running this service callback. When not set, a core may take the
+        * lock and then run the service callback.
+        */
+       rte_atomic32_t execute_lock;
+
+       /* API set/get-able variables */
+       int32_t runstate;
+       uint8_t internal_flags;
+
+       /* per service statistics */
+       uint32_t num_mapped_cores;
+       uint64_t calls;
+       uint64_t cycles_spent;
+} __rte_cache_aligned;
+
+/* the internal values of a service core */
+struct core_state {
+       /* bitmap of the service IDs that are mapped to run on this core */
+       uint64_t service_mask;
+       uint8_t runstate; /* running or stopped */
+       uint8_t is_service_core; /* set if core is currently a service core */
+
+       /* per-service call statistics for this core */
+       uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
+} __rte_cache_aligned;
+
+static uint32_t rte_service_count;
+static struct rte_service_spec_impl *rte_services;
+static struct core_state *lcore_states;
+static uint32_t rte_service_library_initialized;
+
+int32_t rte_service_init(void)
+{
+       if (rte_service_library_initialized) {
+               printf("service library init() called, init flag %d\n",
+                       rte_service_library_initialized);
+               return -EALREADY;
+       }
+
+       rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
+                       sizeof(struct rte_service_spec_impl),
+                       RTE_CACHE_LINE_SIZE);
+       if (!rte_services) {
+               printf("error allocating rte services array\n");
+               return -ENOMEM;
+       }
+
+       lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
+                       sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
+       if (!lcore_states) {
+               printf("error allocating core states array\n");
+               return -ENOMEM;
+       }
+
+       int i;
+       int count = 0;
+       struct rte_config *cfg = rte_eal_get_configuration();
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               if (lcore_config[i].core_role == ROLE_SERVICE) {
+                       if ((unsigned int)i == cfg->master_lcore)
+                               continue;
+                       rte_service_lcore_add(i);
+                       count++;
+               }
+       }
+
+       rte_service_library_initialized = 1;
+       return 0;
+}
+
+/* returns 1 if service is registered and has not been unregistered
+ * Returns 0 if service never registered, or has been unregistered
+ */
+static inline int
+service_valid(uint32_t id)
+{
+       return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
+}
+
+/* returns 1 if statistics should be collected for service
+ * Returns 0 if statistics should not be collected for service
+ */
+static inline int
+service_stats_enabled(struct rte_service_spec_impl *impl)
+{
+       return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
+}
+
+static inline int
+service_mt_safe(struct rte_service_spec_impl *s)
+{
+       return s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE;
+}
+
+int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
+                                 int32_t enabled)
+{
+       struct rte_service_spec_impl *impl =
+               (struct rte_service_spec_impl *)service;
+       if (!impl)
+               return -EINVAL;
+
+       if (enabled)
+               impl->internal_flags |= SERVICE_F_STATS_ENABLED;
+       else
+               impl->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
+
+       return 0;
+}
+
+uint32_t
+rte_service_get_count(void)
+{
+       return rte_service_count;
+}
+
+struct rte_service_spec *
+rte_service_get_by_id(uint32_t id)
+{
+       struct rte_service_spec *service = NULL;
+       if (id < rte_service_count)
+               service = (struct rte_service_spec *)&rte_services[id];
+
+       return service;
+}
+
+struct rte_service_spec *rte_service_get_by_name(const char *name)
+{
+       struct rte_service_spec *service = NULL;
+       int i;
+       for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+               if (service_valid(i) &&
+                               strcmp(name, rte_services[i].spec.name) == 0) {
+                       service = (struct rte_service_spec *)&rte_services[i];
+                       break;
+               }
+       }
+
+       return service;
+}
+
+const char *
+rte_service_get_name(const struct rte_service_spec *service)
+{
+       return service->name;
+}
+
+int32_t
+rte_service_probe_capability(const struct rte_service_spec *service,
+                            uint32_t capability)
+{
+       return service->capabilities & capability;
+}
+
+int32_t
+rte_service_is_running(const struct rte_service_spec *spec)
+{
+       const struct rte_service_spec_impl *impl =
+               (const struct rte_service_spec_impl *)spec;
+       if (!impl)
+               return -EINVAL;
+
+       return (impl->runstate == RUNSTATE_RUNNING) &&
+               (impl->num_mapped_cores > 0);
+}
+
+int32_t
+rte_service_register(const struct rte_service_spec *spec)
+{
+       uint32_t i;
+       int32_t free_slot = -1;
+
+       if (spec->callback == NULL || strlen(spec->name) == 0)
+               return -EINVAL;
+
+       for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+               if (!service_valid(i)) {
+                       free_slot = i;
+                       break;
+               }
+       }
+
+       if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
+               return -ENOSPC;
+
+       struct rte_service_spec_impl *s = &rte_services[free_slot];
+       s->spec = *spec;
+       s->internal_flags |= SERVICE_F_REGISTERED;
+
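+       /* write out the new service fully before the count increment below
+        * publishes it to service lcores iterating over rte_service_count
+        */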
+       rte_smp_wmb();
+       rte_service_count++;
+
+       return 0;
+}
+
+int32_t
+rte_service_unregister(struct rte_service_spec *spec)
+{
+       struct rte_service_spec_impl *s = NULL;
+       struct rte_service_spec_impl *spec_impl =
+               (struct rte_service_spec_impl *)spec;
+
+       uint32_t i;
+       uint32_t service_id;
+       for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+               if (&rte_services[i] == spec_impl) {
+                       s = spec_impl;
+                       service_id = i;
+                       break;
+               }
+       }
+
+       if (!s)
+               return -EINVAL;
+
+       rte_service_count--;
+       rte_smp_wmb();
+
+       s->internal_flags &= ~(SERVICE_F_REGISTERED);
+
+       for (i = 0; i < RTE_MAX_LCORE; i++)
+               lcore_states[i].service_mask &= ~(1 << service_id);
+
+       memset(&rte_services[service_id], 0,
+                       sizeof(struct rte_service_spec_impl));
+
+       return 0;
+}
+
+int32_t
+rte_service_start(struct rte_service_spec *service)
+{
+       struct rte_service_spec_impl *s =
+               (struct rte_service_spec_impl *)service;
+       s->runstate = RUNSTATE_RUNNING;
+       rte_smp_wmb();
+       return 0;
+}
+
+int32_t
+rte_service_stop(struct rte_service_spec *service)
+{
+       struct rte_service_spec_impl *s =
+               (struct rte_service_spec_impl *)service;
+       s->runstate = RUNSTATE_STOPPED;
+       rte_smp_wmb();
+       return 0;
+}
+
+static int32_t
+rte_service_runner_func(void *arg)
+{
+       RTE_SET_USED(arg);
+       uint32_t i;
+       const int lcore = rte_lcore_id();
+       struct core_state *cs = &lcore_states[lcore];
+
+       while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
+               const uint64_t service_mask = cs->service_mask;
+               for (i = 0; i < rte_service_count; i++) {
+                       struct rte_service_spec_impl *s = &rte_services[i];
+                       if (s->runstate != RUNSTATE_RUNNING ||
+                                       !(service_mask & (1 << i)))
+                               continue;
+
+                       /* check if we need a cmpset: if MT safe or <= 1 core
+                        * mapped, atomic ops are not required.
+                        */
+                       const int need_cmpset = !((service_mt_safe(s) == 0) &&
+                                               (s->num_mapped_cores > 1));
+                       uint32_t *lock = (uint32_t *)&s->execute_lock;
+
+                       if (need_cmpset || rte_atomic32_cmpset(lock, 0, 1)) {
+                               void *userdata = s->spec.callback_userdata;
+
+                               if (service_stats_enabled(s)) {
+                                       uint64_t start = rte_rdtsc();
+                                       s->spec.callback(userdata);
+                                       uint64_t end = rte_rdtsc();
+                                       s->cycles_spent += end - start;
+                                       cs->calls_per_service[i]++;
+                                       s->calls++;
+                               } else
+                                       s->spec.callback(userdata);
+
+                               if (!need_cmpset)
+                                       rte_atomic32_clear(&s->execute_lock);
+                       }
+               }
+
+               rte_smp_rmb();
+       }
+
+       lcore_config[lcore].state = WAIT;
+
+       return 0;
+}
+
+int32_t
+rte_service_lcore_count(void)
+{
+       int32_t count = 0;
+       uint32_t i;
+       for (i = 0; i < RTE_MAX_LCORE; i++)
+               count += lcore_states[i].is_service_core;
+       return count;
+}
+
+int32_t
+rte_service_lcore_list(uint32_t array[], uint32_t n)
+{
+       uint32_t count = rte_service_lcore_count();
+       if (count > n)
+               return -ENOMEM;
+
+       if (!array)
+               return -EINVAL;
+
+       uint32_t i;
+       uint32_t idx = 0;
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               struct core_state *cs = &lcore_states[i];
+               if (cs->is_service_core) {
+                       array[idx] = i;
+                       idx++;
+               }
+       }
+
+       return count;
+}
+
+int32_t
+rte_service_start_with_defaults(void)
+{
+       /* create a default mapping from cores to services, then start the
+        * services to make them transparent to unaware applications.
+        */
+       uint32_t i;
+       int ret;
+       uint32_t count = rte_service_get_count();
+
+       int32_t lcore_iter = 0;
+       uint32_t ids[RTE_MAX_LCORE];
+       int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
+
+       if (lcore_count == 0)
+               return -ENOTSUP;
+
+       for (i = 0; (int)i < lcore_count; i++)
+               rte_service_lcore_start(ids[i]);
+
+       for (i = 0; i < count; i++) {
+               struct rte_service_spec *s = rte_service_get_by_id(i);
+               if (!s)
+                       return -EINVAL;
+
+               /* do 1:1 core mapping here, with each service getting
+                * assigned a single core by default. Adding multiple services
+                * should multiplex to a single core, or 1:1 if there are the
+                * same amount of services as service-cores
+                */
+               ret = rte_service_enable_on_lcore(s, ids[lcore_iter]);
+               if (ret)
+                       return -ENODEV;
+
+               lcore_iter++;
+               if (lcore_iter >= lcore_count)
+                       lcore_iter = 0;
+
+               ret = rte_service_start(s);
+               if (ret)
+                       return -ENOEXEC;
+       }
+
+       return 0;
+}
+
+static int32_t
+service_update(struct rte_service_spec *service, uint32_t lcore,
+               uint32_t *set, uint32_t *enabled)
+{
+       uint32_t i;
+       int32_t sid = -1;
+
+       for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+               if ((struct rte_service_spec *)&rte_services[i] == service &&
+                               service_valid(i)) {
+                       sid = i;
+                       break;
+               }
+       }
+
+       if (sid == -1 || lcore >= RTE_MAX_LCORE)
+               return -EINVAL;
+
+       if (!lcore_states[lcore].is_service_core)
+               return -EINVAL;
+
+       if (set) {
+               if (*set) {
+                       lcore_states[lcore].service_mask |=  (1 << sid);
+                       rte_services[sid].num_mapped_cores++;
+               } else {
+                       lcore_states[lcore].service_mask &= ~(1 << sid);
+                       rte_services[sid].num_mapped_cores--;
+               }
+       }
+
+       if (enabled)
+               *enabled = (lcore_states[lcore].service_mask & (1 << sid));
+
+       rte_smp_wmb();
+
+       return 0;
+}
+
+int32_t rte_service_get_enabled_on_lcore(struct rte_service_spec *service,
+                                       uint32_t lcore)
+{
+       uint32_t enabled;
+       int ret = service_update(service, lcore, 0, &enabled);
+       if (ret == 0)
+               return enabled;
+       return -EINVAL;
+}
+
+int32_t
+rte_service_enable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+{
+       uint32_t on = 1;
+       return service_update(service, lcore, &on, 0);
+}
+
+int32_t
+rte_service_disable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+{
+       uint32_t off = 0;
+       return service_update(service, lcore, &off, 0);
+}
+
+int32_t rte_service_lcore_reset_all(void)
+{
+       /* loop over cores, reset all to mask 0 */
+       uint32_t i;
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               lcore_states[i].service_mask = 0;
+               lcore_states[i].is_service_core = 0;
+               lcore_states[i].runstate = RUNSTATE_STOPPED;
+       }
+       for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
+               rte_services[i].num_mapped_cores = 0;
+
+       rte_smp_wmb();
+
+       return 0;
+}
+
+static void
+set_lcore_state(uint32_t lcore, int32_t state)
+{
+       /* mark core state in hugepage backed config */
+       struct rte_config *cfg = rte_eal_get_configuration();
+       cfg->lcore_role[lcore] = state;
+
+       /* mark state in process local lcore_config */
+       lcore_config[lcore].core_role = state;
+
+       /* update per-lcore optimized state tracking */
+       lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
+}
+
+int32_t
+rte_service_lcore_add(uint32_t lcore)
+{
+       if (lcore >= RTE_MAX_LCORE)
+               return -EINVAL;
+       if (lcore_states[lcore].is_service_core)
+               return -EALREADY;
+
+       set_lcore_state(lcore, ROLE_SERVICE);
+
+       /* ensure that after adding a core the mask and state are defaults */
+       lcore_states[lcore].service_mask = 0;
+       lcore_states[lcore].runstate = RUNSTATE_STOPPED;
+
+       rte_smp_wmb();
+       return 0;
+}
+
+int32_t
+rte_service_lcore_del(uint32_t lcore)
+{
+       if (lcore >= RTE_MAX_LCORE)
+               return -EINVAL;
+
+       struct core_state *cs = &lcore_states[lcore];
+       if (!cs->is_service_core)
+               return -EINVAL;
+
+       if (cs->runstate != RUNSTATE_STOPPED)
+               return -EBUSY;
+
+       set_lcore_state(lcore, ROLE_RTE);
+
+       rte_smp_wmb();
+       return 0;
+}
+
+int32_t
+rte_service_lcore_start(uint32_t lcore)
+{
+       if (lcore >= RTE_MAX_LCORE)
+               return -EINVAL;
+
+       struct core_state *cs = &lcore_states[lcore];
+       if (!cs->is_service_core)
+               return -EINVAL;
+
+       if (cs->runstate == RUNSTATE_RUNNING)
+               return -EALREADY;
+
+       /* set the core to run state first, and then launch; otherwise the
+        * launched function would return immediately, as the runstate check
+        * keeps it in the service poll loop
+        */
+       lcore_states[lcore].runstate = RUNSTATE_RUNNING;
+
+       int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
+       /* returns -EBUSY if the core is already launched, 0 on success */
+       return ret;
+}
+
+int32_t
+rte_service_lcore_stop(uint32_t lcore)
+{
+       if (lcore >= RTE_MAX_LCORE)
+               return -EINVAL;
+
+       if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
+               return -EALREADY;
+
+       uint32_t i;
+       for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+               int32_t enabled = lcore_states[lcore].service_mask & (1 << i);
+               int32_t service_running = rte_services[i].runstate !=
+                                               RUNSTATE_STOPPED;
+               int32_t only_core = rte_services[i].num_mapped_cores == 1;
+
+               /* if the core is mapped, and the service is running, and this
+                * is the only core that is mapped, the service would cease to
+                * run if this core stopped, so fail instead.
+                */
+               if (enabled && service_running && only_core)
+                       return -EBUSY;
+       }
+
+       lcore_states[lcore].runstate = RUNSTATE_STOPPED;
+
+       return 0;
+}
+
+static void
+rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
+                    uint64_t all_cycles, uint32_t reset)
+{
+       /* avoid divide by zero */
+       if (all_cycles == 0)
+               all_cycles = 1;
+
+       int calls = 1;
+       if (s->calls != 0)
+               calls = s->calls;
+
+       fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
+                       PRIu64"\tavg: %"PRIu64"\n",
+                       s->spec.name, service_stats_enabled(s), s->calls,
+                       s->cycles_spent, s->cycles_spent / calls);
+
+       if (reset) {
+               s->cycles_spent = 0;
+               s->calls = 0;
+       }
+}
+
+static void
+service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
+{
+       uint32_t i;
+       struct core_state *cs = &lcore_states[lcore];
+
+       fprintf(f, "%02d\t", lcore);
+       for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+               if (!service_valid(i))
+                       continue;
+               fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
+               if (reset)
+                       cs->calls_per_service[i] = 0;
+       }
+       fprintf(f, "\n");
+}
+
+int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
+{
+       uint32_t i;
+
+       uint64_t total_cycles = 0;
+       for (i = 0; i < rte_service_count; i++) {
+               if (!service_valid(i))
+                       continue;
+               total_cycles += rte_services[i].cycles_spent;
+       }
+
+       if (service) {
+               struct rte_service_spec_impl *s =
+                       (struct rte_service_spec_impl *)service;
+               fprintf(f, "Service %s Summary\n", s->spec.name);
+               uint32_t reset = 0;
+               rte_service_dump_one(f, s, total_cycles, reset);
+               return 0;
+       }
+
+       fprintf(f, "Services Summary\n");
+       for (i = 0; i < rte_service_count; i++) {
+               uint32_t reset = 1;
+               rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
+       }
+
+       fprintf(f, "Service Cores Summary\n");
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               if (lcore_config[i].core_role != ROLE_SERVICE)
+                       continue;
+
+               uint32_t reset = 0;
+               service_dump_calls_per_lcore(f, i, reset);
+       }
+
+       return 0;
+}
index 8651e27..e6ab6c3 100644 (file)
@@ -99,6 +99,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_malloc.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_elem.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_heap.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_keepalive.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_service.c
 
 # from arch dir
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_cpuflags.c
index fba1a00..6481eee 100644 (file)
@@ -183,7 +183,14 @@ eal_thread_loop(__attribute__((unused)) void *arg)
                ret = lcore_config[lcore_id].f(fct_arg);
                lcore_config[lcore_id].ret = ret;
                rte_wmb();
-               lcore_config[lcore_id].state = FINISHED;
+
+               /* when a service core returns, it should go directly to WAIT
+                * state, because the application will not lcore_wait() for it.
+                */
+               if (lcore_config[lcore_id].core_role == ROLE_SERVICE)
+                       lcore_config[lcore_id].state = WAIT;
+               else
+                       lcore_config[lcore_id].state = FINISHED;
        }
 
        /* never reached */
index 0f9e009..fbaec39 100644 (file)
@@ -214,5 +214,28 @@ EXPERIMENTAL {
        rte_eal_devargs_parse;
        rte_eal_hotplug_add;
        rte_eal_hotplug_remove;
+       rte_service_disable_on_lcore;
+       rte_service_dump;
+       rte_service_enable_on_lcore;
+       rte_service_get_by_id;
+       rte_service_get_by_name;
+       rte_service_get_count;
+       rte_service_get_enabled_on_lcore;
+       rte_service_is_running;
+       rte_service_lcore_add;
+       rte_service_lcore_count;
+       rte_service_lcore_del;
+       rte_service_lcore_list;
+       rte_service_lcore_reset_all;
+       rte_service_lcore_start;
+       rte_service_lcore_stop;
+       rte_service_probe_capability;
+       rte_service_register;
+       rte_service_reset;
+       rte_service_set_stats_enable;
+       rte_service_start;
+       rte_service_start_with_defaults;
+       rte_service_stop;
+       rte_service_unregister;
 
 } DPDK_17.08;