*/
#define RTE_SCHED_TIME_SHIFT 8
-struct rte_sched_subport {
- /* Token bucket (TB) */
- uint64_t tb_time; /* time of last update */
- uint32_t tb_period;
- uint32_t tb_credits_per_period;
- uint32_t tb_size;
- uint32_t tb_credits;
-
- /* Traffic classes (TCs) */
- uint64_t tc_time; /* time of next update */
- uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- uint32_t tc_period;
-
- /* TC oversubscription */
- uint32_t tc_ov_wm;
- uint32_t tc_ov_wm_min;
- uint32_t tc_ov_wm_max;
- uint8_t tc_ov_period_id;
- uint8_t tc_ov;
- uint32_t tc_ov_n;
- double tc_ov_rate;
-
- /* Statistics */
- struct rte_sched_subport_stats stats;
-};
-
struct rte_sched_pipe_profile {
/* Token bucket (TB) */
uint32_t tb_period;
/* TC oversubscription */
uint32_t tc_ov_credits;
uint8_t tc_ov_period_id;
- uint8_t reserved[3];
} __rte_cache_aligned;
struct rte_sched_queue {
uint8_t wrr_cost[RTE_SCHED_BE_QUEUES_PER_PIPE];
};
+/* Run-time state of one scheduler subport. The variable-size memory[]
+ * region at the end holds the pipe/queue/pipe-profile/bitmap/queue
+ * arrays whose offsets are computed by rte_sched_subport_get_array_base().
+ */
+struct rte_sched_subport {
+ /* Token bucket (TB) */
+ uint64_t tb_time; /* time of last update */
+ uint32_t tb_period;
+ uint32_t tb_credits_per_period;
+ uint32_t tb_size;
+ uint32_t tb_credits;
+
+ /* Traffic classes (TCs) */
+ uint64_t tc_time; /* time of next update */
+ uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_period;
+
+ /* TC oversubscription */
+ uint32_t tc_ov_wm;
+ uint32_t tc_ov_wm_min;
+ uint32_t tc_ov_wm_max;
+ uint8_t tc_ov_period_id;
+ uint8_t tc_ov;
+ uint32_t tc_ov_n;
+ double tc_ov_rate;
+
+ /* Statistics */
+ struct rte_sched_subport_stats stats;
+
+ /* Subport pipes */
+ uint32_t n_pipes_per_subport_enabled;
+ uint32_t n_pipe_profiles;
+ uint32_t n_max_pipe_profiles;
+
+ /* Pipe best-effort TC rate */
+ uint32_t pipe_tc_be_rate_max;
+
+ /* Pipe queues size */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+
+#ifdef RTE_SCHED_RED
+ struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
+#endif
+
+ /* Scheduling loop detection */
+ uint32_t pipe_loop;
+ uint32_t pipe_exhaustion;
+
+ /* Bitmap */
+ struct rte_bitmap *bmp;
+ uint32_t grinder_base_bmp_pos[RTE_SCHED_PORT_N_GRINDERS] __rte_aligned_16;
+
+ /* Grinders */
+ struct rte_sched_grinder grinder[RTE_SCHED_PORT_N_GRINDERS];
+ uint32_t busy_grinders;
+
+ /* Queue base calculation */
+ uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
+ uint32_t qsize_sum;
+
+ /* Pointers into memory[] below; set up in rte_sched_subport_config() */
+ struct rte_sched_pipe *pipe;
+ struct rte_sched_queue *queue;
+ struct rte_sched_queue_extra *queue_extra;
+ struct rte_sched_pipe_profile *pipe_profiles;
+ uint8_t *bmp_array;
+ struct rte_mbuf **queue_array;
+ uint8_t memory[0] __rte_cache_aligned;
+} __rte_cache_aligned;
+
struct rte_sched_port {
/* User parameters */
uint32_t n_subports_per_port;
uint32_t rate;
uint32_t mtu;
uint32_t frame_overhead;
+ int socket;
uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
uint32_t n_pipe_profiles;
uint32_t n_max_pipe_profiles;
uint32_t qsize_sum;
/* Large data structures */
+ struct rte_sched_subport *subports[0];
struct rte_sched_subport *subport;
struct rte_sched_pipe *pipe;
struct rte_sched_queue *queue;
e_RTE_SCHED_PORT_ARRAY_TOTAL,
};
+/* Selectors for rte_sched_subport_get_array_base(): layout order of the
+ * arrays carved out of struct rte_sched_subport's memory[] region.
+ * e_RTE_SCHED_SUBPORT_ARRAY_TOTAL yields the total region size.
+ */
+enum rte_sched_subport_array {
+ e_RTE_SCHED_SUBPORT_ARRAY_PIPE = 0,
+ e_RTE_SCHED_SUBPORT_ARRAY_QUEUE,
+ e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA,
+ e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES,
+ e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY,
+ e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY,
+ e_RTE_SCHED_SUBPORT_ARRAY_TOTAL,
+};
+
#ifdef RTE_SCHED_COLLECT_STATS
static inline uint32_t
#endif
+/* Total number of queues owned by this subport: queues per pipe times
+ * the number of enabled pipes.
+ */
+static inline uint32_t
+rte_sched_subport_pipe_queues(struct rte_sched_subport *subport)
+{
+ return RTE_SCHED_QUEUES_PER_PIPE * subport->n_pipes_per_subport_enabled;
+}
+
+/* Base pointer of the mbuf slot array for subport queue qindex within
+ * subport->queue_array.
+ * NOTE(review): the >> 4 pipe-index shift assumes
+ * RTE_SCHED_QUEUES_PER_PIPE == 16, matching the mask below -- confirm.
+ */
+static inline struct rte_mbuf **
+rte_sched_subport_pipe_qbase(struct rte_sched_subport *subport, uint32_t qindex)
+{
+ uint32_t pindex = qindex >> 4;
+ uint32_t qpos = qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1);
+
+ return (subport->queue_array + pindex *
+ subport->qsize_sum + subport->qsize_add[qpos]);
+}
+
+/* Size of subport queue qindex: the subport qsize of the traffic class
+ * that the queue's position within its pipe maps to (via port->pipe_tc).
+ */
+static inline uint16_t
+rte_sched_subport_pipe_qsize(struct rte_sched_port *port,
+struct rte_sched_subport *subport, uint32_t qindex)
+{
+ uint32_t tc = port->pipe_tc[qindex & (RTE_SCHED_QUEUES_PER_PIPE - 1)];
+
+ return subport->qsize[tc];
+}
+
static inline uint32_t
rte_sched_port_queues_per_port(struct rte_sched_port *port)
{
uint32_t i;
/* Pipe parameters */
- if (params == NULL)
- return -10;
+ if (params == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter params\n", __func__);
+ return -EINVAL;
+ }
/* TB rate: non-zero, not greater than port rate */
if (params->tb_rate == 0 ||
- params->tb_rate > rate)
- return -11;
+ params->tb_rate > rate) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tb rate\n", __func__);
+ return -EINVAL;
+ }
/* TB size: non-zero */
- if (params->tb_size == 0)
- return -12;
+ if (params->tb_size == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tb size\n", __func__);
+ return -EINVAL;
+ }
/* TC rate: non-zero if qsize non-zero, less than pipe rate */
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
if ((qsize[i] == 0 && params->tc_rate[i] != 0) ||
(qsize[i] != 0 && (params->tc_rate[i] == 0 ||
- params->tc_rate[i] > params->tb_rate)))
- return -13;
+ params->tc_rate[i] > params->tb_rate))) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for qsize or tc_rate\n", __func__);
+ return -EINVAL;
+ }
}
+
if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
- qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0)
- return -13;
+ qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for be traffic class rate\n", __func__);
+ return -EINVAL;
+ }
/* TC period: non-zero */
- if (params->tc_period == 0)
- return -14;
+ if (params->tc_period == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tc period\n", __func__);
+ return -EINVAL;
+ }
/* Best effort tc oversubscription weight: non-zero */
- if (params->tc_ov_weight == 0)
- return -15;
+ if (params->tc_ov_weight == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tc ov weight\n", __func__);
+ return -EINVAL;
+ }
/* Queue WRR weights: non-zero */
for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) {
- if (params->wrr_weights[i] == 0)
- return -16;
+ if (params->wrr_weights[i] == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for wrr weight\n", __func__);
+ return -EINVAL;
+ }
}
return 0;
static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
- uint32_t i;
-
- if (params == NULL)
- return -1;
+ if (params == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter params\n", __func__);
+ return -EINVAL;
+ }
/* socket */
- if (params->socket < 0)
- return -3;
+ if (params->socket < 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for socket id\n", __func__);
+ return -EINVAL;
+ }
/* rate */
- if (params->rate == 0)
- return -4;
+ if (params->rate == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for rate\n", __func__);
+ return -EINVAL;
+ }
/* mtu */
- if (params->mtu == 0)
- return -5;
+ if (params->mtu == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for mtu\n", __func__);
+ return -EINVAL;
+ }
/* n_subports_per_port: non-zero, limited to 16 bits, power of 2 */
if (params->n_subports_per_port == 0 ||
params->n_subports_per_port > 1u << 16 ||
- !rte_is_power_of_2(params->n_subports_per_port))
- return -6;
+ !rte_is_power_of_2(params->n_subports_per_port)) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for number of subports\n", __func__);
+ return -EINVAL;
+ }
/* n_pipes_per_subport: non-zero, power of 2 */
if (params->n_pipes_per_subport == 0 ||
- !rte_is_power_of_2(params->n_pipes_per_subport))
- return -7;
-
- /* qsize: non-zero, power of 2,
- * no bigger than 32K (due to 16-bit read/write pointers)
- */
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- uint16_t qsize = params->qsize[i];
-
- if ((qsize != 0 && !rte_is_power_of_2(qsize)) ||
- ((i == RTE_SCHED_TRAFFIC_CLASS_BE) && (qsize == 0)))
- return -8;
- }
-
- /* pipe_profiles and n_pipe_profiles */
- if (params->pipe_profiles == NULL ||
- params->n_pipe_profiles == 0 ||
- params->n_pipe_profiles > params->n_max_pipe_profiles)
- return -9;
-
- for (i = 0; i < params->n_pipe_profiles; i++) {
- struct rte_sched_pipe_params *p = params->pipe_profiles + i;
- int status;
-
- status = pipe_profile_check(p, params->rate, &params->qsize[0]);
- if (status != 0)
- return status;
+ !rte_is_power_of_2(params->n_pipes_per_subport)) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for maximum pipes number\n", __func__);
+ return -EINVAL;
}
return 0;
return base;
}
-uint32_t
-rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+/* Cache-line-aligned byte offset of the given array inside the memory[]
+ * region of struct rte_sched_subport; pass e_RTE_SCHED_SUBPORT_ARRAY_TOTAL
+ * to obtain the total size of that region. Best-effort TC queue storage
+ * is replicated RTE_SCHED_MAX_QUEUES_PER_TC times per pipe.
+ */
+static uint32_t
+rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
+ enum rte_sched_subport_array array)
{
- uint32_t size0, size1;
- int status;
+ uint32_t n_pipes_per_subport = params->n_pipes_per_subport_enabled;
+ uint32_t n_subport_pipe_queues =
+ RTE_SCHED_QUEUES_PER_PIPE * n_pipes_per_subport;
- status = rte_sched_port_check_params(params);
- if (status != 0) {
- RTE_LOG(NOTICE, SCHED,
- "Port scheduler params check failed (%d)\n", status);
+ uint32_t size_pipe = n_pipes_per_subport * sizeof(struct rte_sched_pipe);
+ uint32_t size_queue =
+ n_subport_pipe_queues * sizeof(struct rte_sched_queue);
+ uint32_t size_queue_extra
+ = n_subport_pipe_queues * sizeof(struct rte_sched_queue_extra);
+ uint32_t size_pipe_profiles = params->n_max_pipe_profiles *
+ sizeof(struct rte_sched_pipe_profile);
+ uint32_t size_bmp_array =
+ rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
+ uint32_t size_per_pipe_queue_array, size_queue_array;
- return 0;
+ uint32_t base, i;
+
+ size_per_pipe_queue_array = 0;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ if (i < RTE_SCHED_TRAFFIC_CLASS_BE)
+ size_per_pipe_queue_array +=
+ params->qsize[i] * sizeof(struct rte_mbuf *);
+ else
+ size_per_pipe_queue_array += RTE_SCHED_MAX_QUEUES_PER_TC *
+ params->qsize[i] * sizeof(struct rte_mbuf *);
}
+ size_queue_array = n_pipes_per_subport * size_per_pipe_queue_array;
- size0 = sizeof(struct rte_sched_port);
- size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
+ base = 0;
- return size0 + size1;
+ /* Accumulate rounded-up sizes until the requested array is reached */
+ if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
+ return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
+
+ if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
+ return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_queue);
+
+ if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
+ return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
+
+ if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
+ return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
+
+ if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
+ return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
+
+ if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
+ return base;
+ base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
+
+ return base;
}
+/* Precompute subport->qsize_add[] -- the offset (in queue slots) of each
+ * pipe queue within its pipe's slice of queue_array -- and qsize_sum,
+ * the total slots per pipe. The four best-effort queues all use
+ * qsize[RTE_SCHED_TRAFFIC_CLASS_BE].
+ */
static void
-rte_sched_port_config_qsize(struct rte_sched_port *port)
+rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
{
uint32_t i;
- port->qsize_add[0] = 0;
+ subport->qsize_add[0] = 0;
/* Strict prority traffic class */
for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
- port->qsize_add[i] = port->qsize_add[i-1] + port->qsize[i-1];
+ subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
/* Best-effort traffic class */
- port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
- port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
- port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
- port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
- port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
- port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
- port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
- port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
- port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
-
- port->qsize_sum = port->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
- port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
+ subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] =
+ subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE] +
+ subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
+ subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] =
+ subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 1] +
+ subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
+ subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] =
+ subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 2] +
+ subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
+
+ subport->qsize_sum = subport->qsize_add[RTE_SCHED_TRAFFIC_CLASS_BE + 3] +
+ subport->qsize[RTE_SCHED_TRAFFIC_CLASS_BE];
}
static void
dst->wrr_cost[3] = (uint8_t) wrr_cost[3];
}
-static void
-rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port,
- struct rte_sched_port_params *params)
+/* Validate a subport configuration against the port rate and the
+ * port-level pipe limit, including every attached pipe profile.
+ * Returns 0 on success or -EINVAL (with an error log) on the first
+ * violated constraint.
+ */
+static int
+rte_sched_subport_check_params(struct rte_sched_subport_params *params,
+ uint32_t n_max_pipes_per_subport,
+ uint32_t rate)
{
uint32_t i;
- for (i = 0; i < port->n_pipe_profiles; i++) {
- struct rte_sched_pipe_params *src = params->pipe_profiles + i;
- struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
+ /* Check user parameters */
+ if (params == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter params\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->tb_rate == 0 || params->tb_rate > rate) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tb rate\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->tb_size == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tb size\n", __func__);
+ return -EINVAL;
+ }
+
+ /* qsize: if non-zero, power of 2,
+ * no bigger than 32K (due to 16-bit read/write pointers)
+ */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ uint16_t qsize = params->qsize[i];
+
+ if (qsize != 0 && !rte_is_power_of_2(qsize)) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for qsize\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ /* TC rate: non-zero iff the TC's queue size is non-zero, and
+ * never above the subport token-bucket rate
+ */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ uint32_t tc_rate = params->tc_rate[i];
+ uint16_t qsize = params->qsize[i];
+
+ if ((qsize == 0 && tc_rate != 0) ||
+ (qsize != 0 && tc_rate == 0) ||
+ (tc_rate > params->tb_rate)) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tc rate\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ /* Best-effort TC must always be configured */
+ if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
+ params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect qsize or tc rate(best effort)\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->tc_period == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tc period\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_pipes_per_subport: non-zero, power of 2 */
+ if (params->n_pipes_per_subport_enabled == 0 ||
+ params->n_pipes_per_subport_enabled > n_max_pipes_per_subport ||
+ !rte_is_power_of_2(params->n_pipes_per_subport_enabled)) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for pipes number\n", __func__);
+ return -EINVAL;
+ }
- rte_sched_pipe_profile_convert(port, src, dst, params->rate);
- rte_sched_port_log_pipe_profile(port, i);
+ /* pipe_profiles and n_pipe_profiles */
+ if (params->pipe_profiles == NULL ||
+ params->n_pipe_profiles == 0 ||
+ params->n_max_pipe_profiles == 0 ||
+ params->n_pipe_profiles > params->n_max_pipe_profiles) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for pipe profiles\n", __func__);
+ return -EINVAL;
}
- port->pipe_tc_be_rate_max = 0;
- for (i = 0; i < port->n_pipe_profiles; i++) {
- struct rte_sched_pipe_params *src = params->pipe_profiles + i;
- uint32_t pipe_tc_be_rate = src->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE];
+ for (i = 0; i < params->n_pipe_profiles; i++) {
+ struct rte_sched_pipe_params *p = params->pipe_profiles + i;
+ int status;
- if (port->pipe_tc_be_rate_max < pipe_tc_be_rate)
- port->pipe_tc_be_rate_max = pipe_tc_be_rate;
+ status = pipe_profile_check(p, rate, &params->qsize[0]);
+ if (status != 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Pipe profile check failed(%d)\n", __func__, status);
+ return -EINVAL;
+ }
}
+
+ return 0;
+}
+
+/* Size in bytes of the allocation needed by rte_sched_port_config():
+ * the port struct plus its trailing arrays. Returns 0 (after logging)
+ * if the port parameters fail validation.
+ */
+uint32_t
+rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+{
+ uint32_t size0, size1;
+ int status;
+
+ status = rte_sched_port_check_params(params);
+ if (status != 0) {
+ RTE_LOG(NOTICE, SCHED,
+ "Port scheduler params check failed (%d)\n", status);
+
+ return 0;
+ }
+
+ size0 = sizeof(struct rte_sched_port);
+ size1 = rte_sched_port_get_array_base(params,
+ e_RTE_SCHED_PORT_ARRAY_TOTAL);
+
+ return size0 + size1;
}
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params)
{
struct rte_sched_port *port = NULL;
- uint32_t mem_size, bmp_mem_size, n_queues_per_port, i, j, cycles_per_byte;
+ uint32_t size0, size1;
+ uint32_t cycles_per_byte;
+ uint32_t i, j;
+ int status;
- /* Check user parameters. Determine the amount of memory to allocate */
- mem_size = rte_sched_port_get_memory_footprint(params);
- if (mem_size == 0)
+ status = rte_sched_port_check_params(params);
+ if (status != 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Port scheduler params check failed (%d)\n",
+ __func__, status);
return NULL;
+ }
+
+ size0 = sizeof(struct rte_sched_port);
+ size1 = params->n_subports_per_port * sizeof(struct rte_sched_subport *);
/* Allocate memory to store the data structures */
- port = rte_zmalloc_socket("qos_params", mem_size, RTE_CACHE_LINE_SIZE,
+ port = rte_zmalloc_socket("qos_params", size0 + size1, RTE_CACHE_LINE_SIZE,
params->socket);
- if (port == NULL)
- return NULL;
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
- /* compile time checks */
- RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
- RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS & (RTE_SCHED_PORT_N_GRINDERS - 1));
+ return NULL;
+ }
/* User parameters */
port->n_subports_per_port = params->n_subports_per_port;
port->n_pipes_per_subport = params->n_pipes_per_subport;
port->n_pipes_per_subport_log2 =
__builtin_ctz(params->n_pipes_per_subport);
+ port->socket = params->socket;
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
port->pipe_queue[i] = i;
port->rate = params->rate;
port->mtu = params->mtu + params->frame_overhead;
port->frame_overhead = params->frame_overhead;
- memcpy(port->qsize, params->qsize, sizeof(params->qsize));
- port->n_pipe_profiles = params->n_pipe_profiles;
- port->n_max_pipe_profiles = params->n_max_pipe_profiles;
-
-#ifdef RTE_SCHED_RED
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- uint32_t j;
-
- for (j = 0; j < RTE_COLORS; j++) {
- /* if min/max are both zero, then RED is disabled */
- if ((params->red_params[i][j].min_th |
- params->red_params[i][j].max_th) == 0) {
- continue;
- }
-
- if (rte_red_config_init(&port->red_config[i][j],
- params->red_params[i][j].wq_log2,
- params->red_params[i][j].min_th,
- params->red_params[i][j].max_th,
- params->red_params[i][j].maxp_inv) != 0) {
- rte_free(port);
- return NULL;
- }
- }
- }
-#endif
/* Timing */
port->time_cpu_cycles = rte_get_tsc_cycles();
/ params->rate;
port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
- /* Scheduling loop detection */
- port->pipe_loop = RTE_SCHED_PIPE_INVALID;
- port->pipe_exhaustion = 0;
-
/* Grinders */
- port->busy_grinders = 0;
port->pkts_out = NULL;
port->n_pkts_out = 0;
- /* Queue base calculation */
- rte_sched_port_config_qsize(port);
-
- /* Large data structures */
- port->subport = (struct rte_sched_subport *)
- (port->memory + rte_sched_port_get_array_base(params,
- e_RTE_SCHED_PORT_ARRAY_SUBPORT));
- port->pipe = (struct rte_sched_pipe *)
- (port->memory + rte_sched_port_get_array_base(params,
- e_RTE_SCHED_PORT_ARRAY_PIPE));
- port->queue = (struct rte_sched_queue *)
- (port->memory + rte_sched_port_get_array_base(params,
- e_RTE_SCHED_PORT_ARRAY_QUEUE));
- port->queue_extra = (struct rte_sched_queue_extra *)
- (port->memory + rte_sched_port_get_array_base(params,
- e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
- port->pipe_profiles = (struct rte_sched_pipe_profile *)
- (port->memory + rte_sched_port_get_array_base(params,
- e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
- port->bmp_array = port->memory
- + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
- port->queue_array = (struct rte_mbuf **)
- (port->memory + rte_sched_port_get_array_base(params,
- e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
-
- /* Pipe profile table */
- rte_sched_port_config_pipe_profile_table(port, params);
-
- /* Bitmap */
- n_queues_per_port = rte_sched_port_queues_per_port(port);
- bmp_mem_size = rte_bitmap_get_memory_footprint(n_queues_per_port);
- port->bmp = rte_bitmap_init(n_queues_per_port, port->bmp_array,
- bmp_mem_size);
- if (port->bmp == NULL) {
- RTE_LOG(ERR, SCHED, "Bitmap init error\n");
- rte_free(port);
- return NULL;
- }
-
- for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
- port->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
-
-
return port;
}
-void
-rte_sched_port_free(struct rte_sched_port *port)
+static inline void
+rte_sched_subport_free(struct rte_sched_port *port,
+ struct rte_sched_subport *subport)
{
+ uint32_t n_subport_pipe_queues;
uint32_t qindex;
- uint32_t n_queues_per_port;
- /* Check user parameters */
- if (port == NULL)
+ if (subport == NULL)
return;
- n_queues_per_port = rte_sched_port_queues_per_port(port);
+ n_subport_pipe_queues = rte_sched_subport_pipe_queues(subport);
/* Free enqueued mbufs */
- for (qindex = 0; qindex < n_queues_per_port; qindex++) {
- struct rte_mbuf **mbufs = rte_sched_port_qbase(port, qindex);
- uint16_t qsize = rte_sched_port_qsize(port, qindex);
+ for (qindex = 0; qindex < n_subport_pipe_queues; qindex++) {
+ struct rte_mbuf **mbufs =
+ rte_sched_subport_pipe_qbase(subport, qindex);
+ uint16_t qsize = rte_sched_subport_pipe_qsize(port, subport, qindex);
if (qsize != 0) {
- struct rte_sched_queue *queue = port->queue + qindex;
+ struct rte_sched_queue *queue = subport->queue + qindex;
uint16_t qr = queue->qr & (qsize - 1);
uint16_t qw = queue->qw & (qsize - 1);
}
}
- rte_bitmap_free(port->bmp);
+ rte_bitmap_free(subport->bmp);
+}
+
+/* Free a port and every subport it holds; a NULL port is a no-op. */
+void
+rte_sched_port_free(struct rte_sched_port *port)
+{
+ uint32_t i;
+
+ /* Check user parameters */
+ if (port == NULL)
+ return;
+
+ for (i = 0; i < port->n_subports_per_port; i++)
+ rte_sched_subport_free(port, port->subports[i]);
+
+ rte_free(port);
+}
static void
rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
{
- struct rte_sched_subport *s = port->subport + i;
+ struct rte_sched_subport *s = port->subports[i];
RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
" Token bucket: period = %u, credits per period = %u, size = %u\n"
s->tc_ov_wm_max);
}
+/* Error-path teardown: release the first n_subports already-configured
+ * subports, then the port itself.
+ */
+static void
+rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
+{
+ uint32_t i;
+
+ for (i = 0; i < n_subports; i++) {
+ struct rte_sched_subport *subport = port->subports[i];
+
+ rte_sched_subport_free(port, subport);
+ }
+
+ rte_free(port);
+}
+
int
rte_sched_subport_config(struct rte_sched_port *port,
uint32_t subport_id,
struct rte_sched_subport_params *params)
{
- struct rte_sched_subport *s;
- uint32_t i;
+ struct rte_sched_subport *s = NULL;
+ uint32_t n_subports = subport_id;
+ uint32_t n_subport_pipe_queues, i;
+ uint32_t size0, size1, bmp_mem_size;
+ int status;
/* Check user parameters */
- if (port == NULL ||
- subport_id >= port->n_subports_per_port ||
- params == NULL)
- return -1;
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter port\n", __func__);
+ return 0;
+ }
- if (params->tb_rate == 0 || params->tb_rate > port->rate)
- return -2;
+ if (subport_id >= port->n_subports_per_port) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for subport id\n", __func__);
- if (params->tb_size == 0)
- return -3;
+ rte_sched_free_memory(port, n_subports);
+ return -EINVAL;
+ }
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- uint32_t tc_rate = params->tc_rate[i];
- uint16_t qsize = port->qsize[i];
+ status = rte_sched_subport_check_params(params,
+ port->n_pipes_per_subport,
+ port->rate);
+ if (status != 0) {
+ RTE_LOG(NOTICE, SCHED,
+ "%s: Port scheduler params check failed (%d)\n",
+ __func__, status);
- if ((qsize == 0 && tc_rate != 0) ||
- (qsize != 0 && tc_rate == 0) ||
- (tc_rate > params->tb_rate))
- return -4;
+ rte_sched_free_memory(port, n_subports);
+ return -EINVAL;
}
- if (port->qsize[RTE_SCHED_TRAFFIC_CLASS_BE] == 0 ||
- params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] == 0)
- return -4;
+ /* Determine the amount of memory to allocate */
+ size0 = sizeof(struct rte_sched_subport);
+ size1 = rte_sched_subport_get_array_base(params,
+ e_RTE_SCHED_SUBPORT_ARRAY_TOTAL);
- if (params->tc_period == 0)
- return -5;
+ /* Allocate memory to store the data structures */
+ s = rte_zmalloc_socket("subport_params", size0 + size1,
+ RTE_CACHE_LINE_SIZE, port->socket);
+ if (s == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Memory allocation fails\n", __func__);
+
+ rte_sched_free_memory(port, n_subports);
+ return -ENOMEM;
+ }
- s = port->subport + subport_id;
+ n_subports++;
+
+ /* Port */
+ port->subports[subport_id] = s;
/* Token Bucket (TB) */
if (params->tb_rate == port->rate) {
/* Traffic Classes (TCs) */
s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- if (port->qsize[i])
+ if (params->qsize[i])
s->tc_credits_per_period[i]
= rte_sched_time_ms_to_bytes(params->tc_period,
- params->tc_rate[i]);
-
+ params->tc_rate[i]);
}
s->tc_time = port->time + s->tc_period;
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
- if (port->qsize[i])
+ if (params->qsize[i])
s->tc_credits[i] = s->tc_credits_per_period[i];
+ /* compile time checks */
+ RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
+ RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
+ (RTE_SCHED_PORT_N_GRINDERS - 1));
+
+ /* User parameters */
+ s->n_pipes_per_subport_enabled = params->n_pipes_per_subport_enabled;
+ memcpy(s->qsize, params->qsize, sizeof(params->qsize));
+ s->n_pipe_profiles = params->n_pipe_profiles;
+ s->n_max_pipe_profiles = params->n_max_pipe_profiles;
+
+#ifdef RTE_SCHED_RED
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ uint32_t j;
+
+ for (j = 0; j < RTE_COLORS; j++) {
+ /* if min/max are both zero, then RED is disabled */
+ if ((params->red_params[i][j].min_th |
+ params->red_params[i][j].max_th) == 0) {
+ continue;
+ }
+
+ if (rte_red_config_init(&s->red_config[i][j],
+ params->red_params[i][j].wq_log2,
+ params->red_params[i][j].min_th,
+ params->red_params[i][j].max_th,
+ params->red_params[i][j].maxp_inv) != 0) {
+ rte_sched_free_memory(port, n_subports);
+
+ RTE_LOG(NOTICE, SCHED,
+ "%s: RED configuration init fails\n", __func__);
+ return -EINVAL;
+ }
+ }
+ }
+#endif
+
+ /* Scheduling loop detection */
+ s->pipe_loop = RTE_SCHED_PIPE_INVALID;
+ s->pipe_exhaustion = 0;
+
+ /* Grinders */
+ s->busy_grinders = 0;
+
+ /* Queue base calculation */
+ rte_sched_subport_config_qsize(s);
+
+ /* Large data structures */
+ s->pipe = (struct rte_sched_pipe *)
+ (s->memory + rte_sched_subport_get_array_base(params,
+ e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
+ s->queue = (struct rte_sched_queue *)
+ (s->memory + rte_sched_subport_get_array_base(params,
+ e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
+ s->queue_extra = (struct rte_sched_queue_extra *)
+ (s->memory + rte_sched_subport_get_array_base(params,
+ e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
+ s->pipe_profiles = (struct rte_sched_pipe_profile *)
+ (s->memory + rte_sched_subport_get_array_base(params,
+ e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
+ s->bmp_array = s->memory + rte_sched_subport_get_array_base(params,
+ e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
+ s->queue_array = (struct rte_mbuf **)
+ (s->memory + rte_sched_subport_get_array_base(params,
+ e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
+
+ /* Bitmap */
+ n_subport_pipe_queues = rte_sched_subport_pipe_queues(s);
+ bmp_mem_size = rte_bitmap_get_memory_footprint(n_subport_pipe_queues);
+ s->bmp = rte_bitmap_init(n_subport_pipe_queues, s->bmp_array,
+ bmp_mem_size);
+ if (s->bmp == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Subport bitmap init error\n", __func__);
+
+ rte_sched_free_memory(port, n_subports);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
+ s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
/* TC oversubscription */
s->tc_ov_wm_min = port->mtu;
s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
- port->pipe_tc_be_rate_max);
+ s->pipe_tc_be_rate_max);
s->tc_ov_wm = s->tc_ov_wm_max;
s->tc_ov_period_id = 0;
s->tc_ov = 0;
s->tc_ov_n = 0;
s->tc_ov_rate = 0;
+#endif
rte_sched_port_log_subport_config(port, subport_id);
profile = (uint32_t) pipe_profile;
deactivate = (pipe_profile < 0);
- if (port == NULL ||
- subport_id >= port->n_subports_per_port ||
- pipe_id >= port->n_pipes_per_subport ||
- (!deactivate && profile >= port->n_pipe_profiles))
- return -1;
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter port\n", __func__);
+ return -EINVAL;
+ }
+
+ if (subport_id >= port->n_subports_per_port) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter subport id\n", __func__);
+ return -EINVAL;
+ }
+ if (pipe_id >= port->n_pipes_per_subport) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter pipe id\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!deactivate && profile >= port->n_pipe_profiles) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter pipe profile\n", __func__);
+ return -EINVAL;
+ }
/* Check that subport configuration is valid */
s = port->subport + subport_id;
- if (s->tb_period == 0)
- return -2;
+ if (s->tb_period == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Subport configuration invalid\n", __func__);
+ return -EINVAL;
+ }
p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);
int status;
/* Port */
- if (port == NULL)
- return -1;
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter port\n", __func__);
+ return -EINVAL;
+ }
/* Pipe profiles not exceeds the max limit */
- if (port->n_pipe_profiles >= port->n_max_pipe_profiles)
- return -2;
+ if (port->n_pipe_profiles >= port->n_max_pipe_profiles) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Number of pipe profiles exceeds the max limit\n", __func__);
+ return -EINVAL;
+ }
/* Pipe params */
status = pipe_profile_check(params, port->rate, &port->qsize[0]);
- if (status != 0)
- return status;
+ if (status != 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Pipe profile check failed(%d)\n", __func__, status);
+ return -EINVAL;
+ }
pp = &port->pipe_profiles[port->n_pipe_profiles];
rte_sched_pipe_profile_convert(port, params, pp, port->rate);
/* Pipe profile not exists */
for (i = 0; i < port->n_pipe_profiles; i++)
- if (memcmp(port->pipe_profiles + i, pp, sizeof(*pp)) == 0)
- return -3;
+ if (memcmp(port->pipe_profiles + i, pp, sizeof(*pp)) == 0) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Pipe profile doesn't exist\n", __func__);
+ return -EINVAL;
+ }
/* Pipe profile commit */
*pipe_profile_id = port->n_pipe_profiles;
struct rte_sched_subport *s;
/* Check user parameters */
- if (port == NULL || subport_id >= port->n_subports_per_port ||
- stats == NULL || tc_ov == NULL)
- return -1;
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter port\n", __func__);
+ return -EINVAL;
+ }
+
+ if (subport_id >= port->n_subports_per_port) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for subport id\n", __func__);
+ return -EINVAL;
+ }
+
+ if (stats == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter stats\n", __func__);
+ return -EINVAL;
+ }
+
+ if (tc_ov == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for tc_ov\n", __func__);
+ return -EINVAL;
+ }
s = port->subport + subport_id;
struct rte_sched_queue_extra *qe;
/* Check user parameters */
- if ((port == NULL) ||
- (queue_id >= rte_sched_port_queues_per_port(port)) ||
- (stats == NULL) ||
- (qlen == NULL)) {
- return -1;
+ if (port == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter port\n", __func__);
+ return -EINVAL;
+ }
+
+ if (queue_id >= rte_sched_port_queues_per_port(port)) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for queue id\n", __func__);
+ return -EINVAL;
+ }
+
+ if (stats == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter stats\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qlen == NULL) {
+ RTE_LOG(ERR, SCHED,
+ "%s: Incorrect value for parameter qlen\n", __func__);
+ return -EINVAL;
}
q = port->queue + queue_id;
qe = port->queue_extra + queue_id;