#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
+#include <rte_sched.h>
#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"
#define DEV_HARD(p) \
(&rte_eth_devices[p->hard.port_id])
+#define PMD_PARAM_SOFT_TM "soft_tm"
+#define PMD_PARAM_SOFT_TM_RATE "soft_tm_rate"
+#define PMD_PARAM_SOFT_TM_NB_QUEUES "soft_tm_nb_queues"
+#define PMD_PARAM_SOFT_TM_QSIZE0 "soft_tm_qsize0"
+#define PMD_PARAM_SOFT_TM_QSIZE1 "soft_tm_qsize1"
+#define PMD_PARAM_SOFT_TM_QSIZE2 "soft_tm_qsize2"
+#define PMD_PARAM_SOFT_TM_QSIZE3 "soft_tm_qsize3"
+#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz"
+#define PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz"
+
#define PMD_PARAM_HARD_NAME "hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id"
static const char *pmd_valid_args[] = {
+ PMD_PARAM_SOFT_TM,
+ PMD_PARAM_SOFT_TM_RATE,
+ PMD_PARAM_SOFT_TM_NB_QUEUES,
+ PMD_PARAM_SOFT_TM_QSIZE0,
+ PMD_PARAM_SOFT_TM_QSIZE1,
+ PMD_PARAM_SOFT_TM_QSIZE2,
+ PMD_PARAM_SOFT_TM_QSIZE3,
+ PMD_PARAM_SOFT_TM_ENQ_BSZ,
+ PMD_PARAM_SOFT_TM_DEQ_BSZ,
PMD_PARAM_HARD_NAME,
PMD_PARAM_HARD_TX_QUEUE_ID,
NULL
};

static int
pmd_dev_start(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
+ if (tm_used(dev)) {
+ int status = tm_start(p);
+
+ if (status)
+ return status;
+ }
+
dev->data->dev_link.link_status = ETH_LINK_UP;
if (p->params.soft.intrusive) {
static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
+ struct pmd_internals *p = dev->data->dev_private;
+
dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ if (tm_used(dev))
+ tm_stop(p);
}
static void
return 0;
}
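+/* Soft device run function when TM is enabled: drain the soft device TXQs
+ * into the scheduler, then dequeue from the scheduler and transmit on the
+ * hard device TXQ.
+ */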
+static __rte_always_inline int
+run_tm(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* Persistent context: Read Only (update not required) */
+ struct rte_sched_port *sched = p->soft.tm.sched;
+ struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
+ struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
+ uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
+ uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
+ uint16_t nb_tx_queues = dev->data->nb_tx_queues;
+
+ /* Persistent context: Read - Write (update required) */
+ uint32_t txq_pos = p->soft.tm.txq_pos;
+ uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
+ uint32_t flush_count = p->soft.tm.flush_count;
+
+ /* Not part of the persistent context */
+ uint32_t pkts_deq_len, pos;
+ uint16_t i;
+
+ /* Soft device TXQ read, TM enqueue */
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct rte_ring *txq = dev->data->tx_queues[txq_pos];
+
+ /* Read TXQ burst to packet enqueue buffer */
+ pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
+ (void **)&pkts_enq[pkts_enq_len],
+ enq_bsz,
+ NULL);
+
+ /* Increment TXQ */
+ txq_pos++;
+ if (txq_pos >= nb_tx_queues)
+ txq_pos = 0;
+
+ /* TM enqueue when complete burst is available */
+ if (pkts_enq_len >= enq_bsz) {
+ rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+
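+	/* Flush: if no complete burst has been enqueued for
+	 * FLUSH_COUNT_THRESHOLD consecutive invocations, push the partial
+	 * burst into the scheduler so buffered packets are not held back
+	 * indefinitely.
+	 */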
+ if (flush_count >= FLUSH_COUNT_THRESHOLD) {
+ if (pkts_enq_len)
+ rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+
+ p->soft.tm.txq_pos = txq_pos;
+ p->soft.tm.pkts_enq_len = pkts_enq_len;
+ p->soft.tm.flush_count = flush_count + 1;
+
+ /* TM dequeue, Hard device TXQ write */
+ pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);
+
+ for (pos = 0; pos < pkts_deq_len; )
+ pos += rte_eth_tx_burst(p->hard.port_id,
+ p->params.hard.tx_queue_id,
+ &pkts_deq[pos],
+ (uint16_t)(pkts_deq_len - pos));
+
+ return 0;
+}
+
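+/*
+ * Illustrative usage sketch (not part of this patch; soft_port_id, pkts,
+ * n_pkts and force_quit are hypothetical application names): the soft device
+ * only makes progress when polled, so the application is expected to call
+ * rte_pmd_softnic_run() periodically, e.g.:
+ *
+ *	while (!force_quit) {
+ *		rte_eth_tx_burst(soft_port_id, 0, pkts, n_pkts);
+ *		rte_pmd_softnic_run(soft_port_id);
+ *	}
+ */
+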
int
rte_pmd_softnic_run(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif
- return run_default(dev);
+ return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}
static struct ether_addr eth_addr = { .addr_bytes = {0} };
return NULL;
}
+	/* Traffic Management (TM) */
+ if (params->soft.flags & PMD_FEATURE_TM) {
+ status = tm_init(p, params, numa_node);
+ if (status) {
+ default_free(p);
+ free(p->params.hard.name);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
return p;
}
static void
pmd_free(struct pmd_internals *p)
{
+ if (p->params.soft.flags & PMD_FEATURE_TM)
+ tm_free(p);
+
default_free(p);
free(p->params.hard.name);
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
struct rte_kvargs *kvlist;
- int ret;
+ int i, ret;
kvlist = rte_kvargs_parse(params, pmd_valid_args);
if (kvlist == NULL)
memset(p, 0, sizeof(*p));
p->soft.name = name;
p->soft.intrusive = INTRUSIVE;
+ p->soft.tm.rate = 0;
+ p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
+ p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
+ p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
+ /* SOFT: TM (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
+ char *s;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
+ &get_string, &s);
+ if (ret < 0)
+ goto out_free;
+
+ if (strcmp(s, "on") == 0)
+ p->soft.flags |= PMD_FEATURE_TM;
+ else if (strcmp(s, "off") == 0)
+ p->soft.flags &= ~PMD_FEATURE_TM;
+ else
+ ret = -EINVAL;
+
+ free(s);
+ if (ret)
+ goto out_free;
+ }
+
+ /* SOFT: TM rate (measured in bytes/second) (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
+ &get_uint32, &p->soft.tm.rate);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM number of queues (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
+ &get_uint32, &p->soft.tm.nb_queues);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM queue size 0 .. 3 (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[0] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[1] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[2] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[3] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM enqueue burst size (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
+ &get_uint32, &p->soft.tm.enq_bsz);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM dequeue burst size (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
+ &get_uint32, &p->soft.tm.deq_bsz);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
/* HARD: name (mandatory) */
if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
int status;
struct rte_eth_dev_info hard_info;
+ uint32_t hard_speed;
uint16_t hard_port_id;
int numa_node;
void *dev_private;
return -EINVAL;
rte_eth_dev_info_get(hard_port_id, &hard_info);
+ hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
numa_node = rte_eth_dev_socket_id(hard_port_id);
if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
return -EINVAL;
+ if (p.soft.flags & PMD_FEATURE_TM) {
+ status = tm_params_check(&p, hard_speed);
+
+ if (status)
+ return status;
+ }
+
/* Allocate and initialize soft ethdev private data */
dev_private = pmd_init(&p, numa_node);
if (dev_private == NULL)
RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
+ PMD_PARAM_SOFT_TM "=on|off "
+ PMD_PARAM_SOFT_TM_RATE "=<int> "
+ PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
+ PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
+ PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
PMD_PARAM_HARD_NAME "=<string> "
PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
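+
+/*
+ * Illustrative devargs (the device name and all values below are
+ * placeholders, not defaults from this patch):
+ *
+ *   --vdev 'net_softnic0,hard_name=0000:02:00.0,soft_tm=on,soft_tm_rate=1250000000'
+ */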
#include <stdint.h>
#include <rte_mbuf.h>
+#include <rte_sched.h>
#include <rte_ethdev.h>
#include "rte_eth_softnic.h"
+/**
+ * PMD Parameters
+ */
+
+enum pmd_feature {
+ PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
+};
+
#ifndef INTRUSIVE
#define INTRUSIVE 0
#endif
* (potentially faster).
*/
int intrusive;
+
+ /** Traffic Management (TM) */
+ struct {
+ uint32_t rate; /**< Rate (bytes/second) */
+ uint32_t nb_queues; /**< Number of queues */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ /**< Queue size per traffic class */
+ uint32_t enq_bsz; /**< Enqueue burst size */
+ uint32_t deq_bsz; /**< Dequeue burst size */
+ } tm;
} soft;
/** Parameters for the hard device (existing) */
uint32_t flush_count;
};
+/**
+ * Traffic Management (TM) Internals
+ */
+
+#ifndef TM_MAX_SUBPORTS
+#define TM_MAX_SUBPORTS 8
+#endif
+
+#ifndef TM_MAX_PIPES_PER_SUBPORT
+#define TM_MAX_PIPES_PER_SUBPORT 4096
+#endif
+
+struct tm_params {
+ struct rte_sched_port_params port_params;
+
+ struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];
+
+ struct rte_sched_pipe_params
+ pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ uint32_t n_pipe_profiles;
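+	/* Flattened (subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id) to pipe
+	 * profile index mapping; consumed by tm_start().
+	 */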
+ uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
+};
+
+/* TM Levels */
+enum tm_node_level {
+ TM_NODE_LEVEL_PORT = 0,
+ TM_NODE_LEVEL_SUBPORT,
+ TM_NODE_LEVEL_PIPE,
+ TM_NODE_LEVEL_TC,
+ TM_NODE_LEVEL_QUEUE,
+ TM_NODE_LEVEL_MAX,
+};
+
+/* TM Hierarchy Specification */
+struct tm_hierarchy {
+ uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
+};
+
+struct tm_internals {
+ /** Hierarchy specification
+ *
+	 * - Hierarchy is unfrozen at init and when the port is stopped.
+	 * - Hierarchy is frozen on successful hierarchy commit.
+	 * - Run-time hierarchy changes are not allowed, therefore it makes
+	 *   sense to keep the hierarchy frozen after the port is started.
+ */
+ struct tm_hierarchy h;
+
+ /** Blueprints */
+ struct tm_params params;
+
+ /** Run-time */
+ struct rte_sched_port *sched;
+ struct rte_mbuf **pkts_enq;
+ struct rte_mbuf **pkts_deq;
+ uint32_t pkts_enq_len;
+ uint32_t txq_pos;
+ uint32_t flush_count;
+};
+
/**
* PMD Internals
*/
/** Soft device */
struct {
struct default_internals def; /**< Default */
+ struct tm_internals tm; /**< Traffic Management */
} soft;
/** Hard device */
} hard;
};
+int
+tm_params_check(struct pmd_params *params, uint32_t hard_rate);
+
+int
+tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);
+
+void
+tm_free(struct pmd_internals *p);
+
+int
+tm_start(struct pmd_internals *p);
+
+void
+tm_stop(struct pmd_internals *p);
+
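+/* TM is in use only when the TM feature flag is set and a port-level
+ * hierarchy node exists, i.e. a hierarchy has been successfully committed.
+ */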
+static inline int
+tm_used(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ return (p->params.soft.flags & PMD_FEATURE_TM) &&
+ p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
+}
+
#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+#define BYTES_IN_MBPS (1000 * 1000 / 8)
+
+int
+tm_params_check(struct pmd_params *params, uint32_t hard_rate)
+{
+	/* 64-bit cast avoids 32-bit overflow for multi-gigabit rates */
+	uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
+ uint32_t i;
+
+ /* rate */
+ if (params->soft.tm.rate) {
+ if (params->soft.tm.rate > hard_rate_bytes_per_sec)
+ return -EINVAL;
+ } else {
+ params->soft.tm.rate =
+ (hard_rate_bytes_per_sec > UINT32_MAX) ?
+ UINT32_MAX : hard_rate_bytes_per_sec;
+ }
+
+ /* nb_queues */
+ if (params->soft.tm.nb_queues == 0)
+ return -EINVAL;
+
+ if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
+ params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
+
+ params->soft.tm.nb_queues =
+ rte_align32pow2(params->soft.tm.nb_queues);
+
+ /* qsize */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ if (params->soft.tm.qsize[i] == 0)
+ return -EINVAL;
+
+ params->soft.tm.qsize[i] =
+ rte_align32pow2(params->soft.tm.qsize[i]);
+ }
+
+ /* enq_bsz, deq_bsz */
+ if (params->soft.tm.enq_bsz == 0 ||
+ params->soft.tm.deq_bsz == 0 ||
+		params->soft.tm.deq_bsz > params->soft.tm.enq_bsz)
+ return -EINVAL;
+
+ return 0;
+}
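+
+/*
+ * Worked example (illustrative): for a 10 Gbps hard port, hard_rate is
+ * 10000 Mbps, so hard_rate_bytes_per_sec = 10000 * 125000 = 1250000000
+ * bytes/sec, which fits in uint32_t and becomes the default TM rate when
+ * soft_tm_rate is not specified.
+ */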
+
+int
+tm_init(struct pmd_internals *p,
+ struct pmd_params *params,
+ int numa_node)
+{
+ uint32_t enq_bsz = params->soft.tm.enq_bsz;
+ uint32_t deq_bsz = params->soft.tm.deq_bsz;
+
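+	/* The enqueue buffer must hold up to (enq_bsz - 1) packets left over
+	 * from previous run_tm() invocations plus a fresh burst of up to
+	 * enq_bsz packets, hence the 2 * enq_bsz sizing.
+	 */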
+ p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
+ 2 * enq_bsz * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.tm.pkts_enq == NULL)
+ return -ENOMEM;
+
+ p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
+ deq_bsz * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.tm.pkts_deq == NULL) {
+ rte_free(p->soft.tm.pkts_enq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+tm_free(struct pmd_internals *p)
+{
+ rte_free(p->soft.tm.pkts_enq);
+ rte_free(p->soft.tm.pkts_deq);
+}
+
+int
+tm_start(struct pmd_internals *p)
+{
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_subports, subport_id;
+ int status;
+
+ /* Port */
+ p->soft.tm.sched = rte_sched_port_config(&t->port_params);
+ if (p->soft.tm.sched == NULL)
+ return -1;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport =
+ t->port_params.n_pipes_per_subport;
+ uint32_t pipe_id;
+
+ status = rte_sched_subport_config(p->soft.tm.sched,
+ subport_id,
+ &t->subport_params[subport_id]);
+ if (status) {
+ rte_sched_port_free(p->soft.tm.sched);
+ return -1;
+ }
+
+ /* Pipe */
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
+ pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
+
+ status = rte_sched_pipe_config(p->soft.tm.sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(p->soft.tm.sched);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+tm_stop(struct pmd_internals *p)
+{
+ if (p->soft.tm.sched)
+ rte_sched_port_free(p->soft.tm.sched);
+}
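+
+/* Lifecycle note: tm_start() instantiates the rte_sched port, subports and
+ * pipes from the tm_params blueprints on each port start; tm_stop() frees
+ * the scheduler, so a subsequent start rebuilds it from the same blueprints.
+ */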