-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <stdio.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
+#include <rte_bitmap.h>
+#include <rte_reciprocal.h>
#include "rte_sched.h"
-#include "rte_bitmap.h"
#include "rte_sched_common.h"
#include "rte_approx.h"
#endif
#ifdef RTE_SCHED_VECTOR
-#include <immintrin.h>
+#include <rte_vect.h>
+
+#ifdef RTE_ARCH_X86
+#define SCHED_VECTOR_SSE4
+#elif defined(RTE_MACHINE_CPUFLAG_NEON)
+#define SCHED_VECTOR_NEON
+#endif
+
#endif
#define RTE_SCHED_TB_RATE_CONFIG_ERR (1e-7)
#define RTE_SCHED_PIPE_INVALID UINT32_MAX
#define RTE_SCHED_BMP_POS_INVALID UINT32_MAX
+/* Scaling for cycles_per_byte calculation
+ * Chosen so that minimum rate is 480 bit/sec
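+ * (assuming a TSC of roughly 1 GHz: (tsc_hz << 8) / rate is stored
+ * in a uint32_t, which bounds the rate from below at ~60 bytes/sec)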
+ */
+#define RTE_SCHED_TIME_SHIFT 8
+
struct rte_sched_subport {
/* Token bucket (TB) */
uint64_t tb_time; /* time of last update */
e_GRINDER_READ_MBUF
};
-/*
- * Path through the scheduler hierarchy used by the scheduler enqueue
- * operation to identify the destination queue for the current
- * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
- * each packet, typically written by the classification stage and read
- * by scheduler enqueue.
- */
-struct rte_sched_port_hierarchy {
- uint16_t queue:2; /**< Queue ID (0 .. 3) */
- uint16_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
- uint32_t color:2; /**< Color */
- uint16_t unused:10;
- uint16_t subport; /**< Subport ID */
- uint32_t pipe; /**< Pipe ID */
-};
-
struct rte_sched_grinder {
/* Pipe cache */
uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
/* User parameters */
uint32_t n_subports_per_port;
uint32_t n_pipes_per_subport;
+ uint32_t n_pipes_per_subport_log2;
uint32_t rate;
uint32_t mtu;
uint32_t frame_overhead;
uint32_t n_pipe_profiles;
uint32_t pipe_tc3_rate_max;
#ifdef RTE_SCHED_RED
- struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][e_RTE_METER_COLORS];
+ struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
#endif
/* Timing */
uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */
uint64_t time; /* Current NIC TX time measured in bytes */
- double cycles_per_byte; /* CPU cycles per byte */
+ struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
/* Scheduling loop detection */
uint32_t pipe_loop;
return port->qsize[tc];
}
+static int
+pipe_profile_check(struct rte_sched_pipe_params *params,
+ uint32_t rate)
+{
+ uint32_t i;
+
+ /* Pipe parameters */
+ if (params == NULL)
+ return -10;
+
+ /* TB rate: non-zero, not greater than port rate */
+ if (params->tb_rate == 0 ||
+ params->tb_rate > rate)
+ return -11;
+
+ /* TB size: non-zero */
+ if (params->tb_size == 0)
+ return -12;
+
+ /* TC rate: non-zero, not greater than pipe rate */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ if (params->tc_rate[i] == 0 ||
+ params->tc_rate[i] > params->tb_rate)
+ return -13;
+ }
+
+ /* TC period: non-zero */
+ if (params->tc_period == 0)
+ return -14;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ /* TC3 oversubscription weight: non-zero */
+ if (params->tc_ov_weight == 0)
+ return -15;
+#endif
+
+ /* Queue WRR weights: non-zero */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+ if (params->wrr_weights[i] == 0)
+ return -16;
+ }
+
+ return 0;
+}
+
static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
- uint32_t i, j;
+ uint32_t i;
if (params == NULL)
return -1;
/* socket */
- if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES))
+ if (params->socket < 0)
return -3;
/* rate */
for (i = 0; i < params->n_pipe_profiles; i++) {
struct rte_sched_pipe_params *p = params->pipe_profiles + i;
+ int status;
- /* TB rate: non-zero, not greater than port rate */
- if (p->tb_rate == 0 || p->tb_rate > params->rate)
- return -10;
-
- /* TB size: non-zero */
- if (p->tb_size == 0)
- return -11;
-
- /* TC rate: non-zero, less than pipe rate */
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
- if (p->tc_rate[j] == 0 || p->tc_rate[j] > p->tb_rate)
- return -12;
- }
-
- /* TC period: non-zero */
- if (p->tc_period == 0)
- return -13;
-
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- /* TC3 oversubscription weight: non-zero */
- if (p->tc_ov_weight == 0)
- return -14;
-#endif
-
- /* Queue WRR weights: non-zero */
- for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
- if (p->wrr_weights[j] == 0)
- return -15;
- }
+ status = pipe_profile_check(p, params->rate);
+ if (status != 0)
+ return status;
}
return 0;
size0 = sizeof(struct rte_sched_port);
size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
- return (size0 + size1);
+ return size0 + size1;
}
static void
}
static void
-rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte_sched_port_params *params)
+rte_sched_pipe_profile_convert(struct rte_sched_pipe_params *src,
+ struct rte_sched_pipe_profile *dst,
+ uint32_t rate)
{
- uint32_t i, j;
+ uint32_t i;
- for (i = 0; i < port->n_pipe_profiles; i++) {
- struct rte_sched_pipe_params *src = params->pipe_profiles + i;
- struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
+ /* Token Bucket */
+ if (src->tb_rate == rate) {
+ dst->tb_credits_per_period = 1;
+ dst->tb_period = 1;
+ } else {
+ double tb_rate = (double) src->tb_rate
+ / (double) rate;
+ double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
- /* Token Bucket */
- if (src->tb_rate == params->rate) {
- dst->tb_credits_per_period = 1;
- dst->tb_period = 1;
- } else {
- double tb_rate = (double) src->tb_rate
- / (double) params->rate;
- double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
-
- rte_approx(tb_rate, d,
- &dst->tb_credits_per_period, &dst->tb_period);
- }
- dst->tb_size = src->tb_size;
+ rte_approx(tb_rate, d,
+ &dst->tb_credits_per_period, &dst->tb_period);
+ }
+
+ dst->tb_size = src->tb_size;
- /* Traffic Classes */
- dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
- params->rate);
+ /* Traffic Classes */
+ dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
+ rate);
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++)
- dst->tc_credits_per_period[j]
- = rte_sched_time_ms_to_bytes(src->tc_period,
- src->tc_rate[j]);
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ dst->tc_credits_per_period[i]
+ = rte_sched_time_ms_to_bytes(src->tc_period,
+ src->tc_rate[i]);
#ifdef RTE_SCHED_SUBPORT_TC_OV
- dst->tc_ov_weight = src->tc_ov_weight;
+ dst->tc_ov_weight = src->tc_ov_weight;
#endif
- /* WRR */
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
- uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
- uint32_t lcd, lcd1, lcd2;
- uint32_t qindex;
-
- qindex = j * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
-
- wrr_cost[0] = src->wrr_weights[qindex];
- wrr_cost[1] = src->wrr_weights[qindex + 1];
- wrr_cost[2] = src->wrr_weights[qindex + 2];
- wrr_cost[3] = src->wrr_weights[qindex + 3];
-
- lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
- lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
- lcd = rte_get_lcd(lcd1, lcd2);
-
- wrr_cost[0] = lcd / wrr_cost[0];
- wrr_cost[1] = lcd / wrr_cost[1];
- wrr_cost[2] = lcd / wrr_cost[2];
- wrr_cost[3] = lcd / wrr_cost[3];
-
- dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
- dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
- dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
- dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
- }
+ /* WRR */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
+ uint32_t lcd, lcd1, lcd2;
+ uint32_t qindex;
+
+ qindex = i * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+
+ wrr_cost[0] = src->wrr_weights[qindex];
+ wrr_cost[1] = src->wrr_weights[qindex + 1];
+ wrr_cost[2] = src->wrr_weights[qindex + 2];
+ wrr_cost[3] = src->wrr_weights[qindex + 3];
+
+ lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
+ lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
+ lcd = rte_get_lcd(lcd1, lcd2);
+
+ wrr_cost[0] = lcd / wrr_cost[0];
+ wrr_cost[1] = lcd / wrr_cost[1];
+ wrr_cost[2] = lcd / wrr_cost[2];
+ wrr_cost[3] = lcd / wrr_cost[3];
+
+ dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
+ dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
+ dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
+ dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
+ }
+}
+static void
+rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port,
+ struct rte_sched_port_params *params)
+{
+ uint32_t i;
+
+ for (i = 0; i < port->n_pipe_profiles; i++) {
+ struct rte_sched_pipe_params *src = params->pipe_profiles + i;
+ struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
+
+ rte_sched_pipe_profile_convert(src, dst, params->rate);
rte_sched_port_log_pipe_profile(port, i);
}
rte_sched_port_config(struct rte_sched_port_params *params)
{
struct rte_sched_port *port = NULL;
- uint32_t mem_size, bmp_mem_size, n_queues_per_port, i;
+ uint32_t mem_size, bmp_mem_size, n_queues_per_port, i, cycles_per_byte;
/* Check user parameters. Determine the amount of memory to allocate */
mem_size = rte_sched_port_get_memory_footprint(params);
return NULL;
/* Allocate memory to store the data structures */
- port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
+ port = rte_zmalloc_socket("qos_params", mem_size, RTE_CACHE_LINE_SIZE,
+ params->socket);
if (port == NULL)
return NULL;
/* User parameters */
port->n_subports_per_port = params->n_subports_per_port;
port->n_pipes_per_subport = params->n_pipes_per_subport;
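+ /* n_pipes_per_subport is validated to be a power of 2, so
+ * __builtin_ctz() yields its log2 (used to pack/unpack queue IDs)
+ */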
+ port->n_pipes_per_subport_log2 =
+ __builtin_ctz(params->n_pipes_per_subport);
port->rate = params->rate;
port->mtu = params->mtu + params->frame_overhead;
port->frame_overhead = params->frame_overhead;
for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
uint32_t j;
- for (j = 0; j < e_RTE_METER_COLORS; j++) {
+ for (j = 0; j < RTE_COLORS; j++) {
/* if min/max are both zero, then RED is disabled */
if ((params->red_params[i][j].min_th |
params->red_params[i][j].max_th) == 0) {
params->red_params[i][j].min_th,
params->red_params[i][j].max_th,
params->red_params[i][j].maxp_inv) != 0) {
+ rte_free(port);
return NULL;
}
}
port->time_cpu_cycles = rte_get_tsc_cycles();
port->time_cpu_bytes = 0;
port->time = 0;
- port->cycles_per_byte = ((double) rte_get_tsc_hz()) / ((double) params->rate);
+
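+ /* Convert the CPU clock to (cycles << RTE_SCHED_TIME_SHIFT) per
+ * byte and precompute its reciprocal, so the hot path can turn
+ * elapsed cycles into bytes without a division
+ */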
+ cycles_per_byte = (rte_get_tsc_hz() << RTE_SCHED_TIME_SHIFT)
+ / params->rate;
+ port->inv_cycles_per_byte = rte_reciprocal_value(cycles_per_byte);
/* Scheduling loop detection */
port->pipe_loop = RTE_SCHED_PIPE_INVALID;
bmp_mem_size);
if (port->bmp == NULL) {
RTE_LOG(ERR, SCHED, "Bitmap init error\n");
+ rte_free(port);
return NULL;
}
void
rte_sched_port_free(struct rte_sched_port *port)
{
- unsigned int queue;
+ uint32_t qindex;
+ uint32_t n_queues_per_port;
/* Check user parameters */
if (port == NULL)
return;
- /* Free enqueued mbufs */
- for (queue = 0; queue < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; queue++) {
- struct rte_mbuf **mbufs = rte_sched_port_qbase(port, queue);
- unsigned int i;
+ n_queues_per_port = rte_sched_port_queues_per_port(port);
- for (i = 0; i < rte_sched_port_qsize(port, queue); i++)
- rte_pktmbuf_free(mbufs[i]);
+ /* Free enqueued mbufs */
+ for (qindex = 0; qindex < n_queues_per_port; qindex++) {
+ struct rte_mbuf **mbufs = rte_sched_port_qbase(port, qindex);
+ uint16_t qsize = rte_sched_port_qsize(port, qindex);
+ struct rte_sched_queue *queue = port->queue + qindex;
+ uint16_t qr = queue->qr & (qsize - 1);
+ uint16_t qw = queue->qw & (qsize - 1);
+
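+ /* qsize is a power of 2, so masking wraps the ring indices;
+ * walk from the read pointer to the write pointer and free
+ * only the mbufs still in flight
+ */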
+ for (; qr != qw; qr = (qr + 1) & (qsize - 1))
+ rte_pktmbuf_free(mbufs[qr]);
}
rte_bitmap_free(port->bmp);
return 0;
}
-void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
- uint32_t subport, uint32_t pipe, uint32_t traffic_class,
- uint32_t queue, enum rte_meter_color color)
+int
+rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
+ struct rte_sched_pipe_params *params,
+ uint32_t *pipe_profile_id)
{
- struct rte_sched_port_hierarchy *sched
- = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ struct rte_sched_pipe_profile *pp;
+ uint32_t i;
+ int status;
+
+ /* Port */
+ if (port == NULL)
+ return -1;
+
+ /* Number of pipe profiles must not exceed the max limit */
+ if (port->n_pipe_profiles >= RTE_SCHED_PIPE_PROFILES_PER_PORT)
+ return -2;
+
+ /* Pipe params */
+ status = pipe_profile_check(params, port->rate);
+ if (status != 0)
+ return status;
+
+ pp = &port->pipe_profiles[port->n_pipe_profiles];
+ rte_sched_pipe_profile_convert(params, pp, port->rate);
+
+ /* Pipe profile must not already exist */
+ for (i = 0; i < port->n_pipe_profiles; i++)
+ if (memcmp(port->pipe_profiles + i, pp, sizeof(*pp)) == 0)
+ return -3;
+
+ /* Pipe profile commit */
+ *pipe_profile_id = port->n_pipe_profiles;
+ port->n_pipe_profiles++;
+
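+ /* Keep the largest TC3 rate across all profiles; the TC3
+ * oversubscription logic reads it when subports are configured
+ */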
+ if (port->pipe_tc3_rate_max < params->tc_rate[3])
+ port->pipe_tc3_rate_max = params->tc_rate[3];
+
+ rte_sched_port_log_pipe_profile(port, *pipe_profile_id);
+
+ return 0;
+}
- RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));
+static inline uint32_t
+rte_sched_port_qindex(struct rte_sched_port *port,
+ uint32_t subport,
+ uint32_t pipe,
+ uint32_t traffic_class,
+ uint32_t queue)
+{
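+ /* Pack the path into one queue ID, LSB to MSB: 2 bits queue,
+ * 2 bits traffic class, log2(n_pipes_per_subport) bits pipe,
+ * then subport; the masks clamp out-of-range inputs
+ */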
+ return ((subport & (port->n_subports_per_port - 1)) <<
+ (port->n_pipes_per_subport_log2 + 4)) |
+ ((pipe & (port->n_pipes_per_subport - 1)) << 4) |
+ ((traffic_class &
+ (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)) << 2) |
+ (queue & (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1));
+}
- sched->color = (uint32_t) color;
- sched->subport = subport;
- sched->pipe = pipe;
- sched->traffic_class = traffic_class;
- sched->queue = queue;
+void
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+ struct rte_mbuf *pkt,
+ uint32_t subport, uint32_t pipe,
+ uint32_t traffic_class,
+ uint32_t queue, enum rte_color color)
+{
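+ /* Store the precomputed queue ID in the mbuf's sched field;
+ * this replaces the rte_sched_port_hierarchy overlay removed above
+ */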
+ uint32_t queue_id = rte_sched_port_qindex(port, subport, pipe,
+ traffic_class, queue);
+ rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
}
void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+ const struct rte_mbuf *pkt,
uint32_t *subport, uint32_t *pipe,
uint32_t *traffic_class, uint32_t *queue)
{
- const struct rte_sched_port_hierarchy *sched
- = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
- *subport = sched->subport;
- *pipe = sched->pipe;
- *traffic_class = sched->traffic_class;
- *queue = sched->queue;
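+ /* Decode mirrors rte_sched_port_qindex(): shift and mask each
+ * hierarchy level back out of the packed queue ID
+ */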
+ *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
+ *pipe = (queue_id >> 4) & (port->n_pipes_per_subport - 1);
+ *traffic_class = (queue_id >> 2) &
+ (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1);
+ *queue = queue_id & (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1);
}
-enum rte_meter_color
+enum rte_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
- const struct rte_sched_port_hierarchy *sched
- = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- return (enum rte_meter_color) sched->color;
+ return (enum rte_color)rte_mbuf_sched_color_get(pkt);
}
int
memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
- /* Subport TC ovesubscription status */
+ /* Subport TC oversubscription status */
*tc_ov = s->tc_ov;
return 0;
return 0;
}
-static inline uint32_t
-rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
-{
- uint32_t result;
-
- result = subport * port->n_pipes_per_subport + pipe;
- result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
- result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
-
- return result;
-}
-
#ifdef RTE_SCHED_DEBUG
static inline int
{
struct rte_sched_queue *queue = port->queue + qindex;
- return (queue->qr == queue->qw);
-}
-
-static inline int
-rte_sched_port_queue_is_full(struct rte_sched_port *port, uint32_t qindex)
-{
- struct rte_sched_queue *queue = port->queue + qindex;
- uint16_t qsize = rte_sched_port_qsize(port, qindex);
- uint16_t qlen = queue->qw - queue->qr;
-
- return (qlen >= qsize);
+ return queue->qr == queue->qw;
}
#endif /* RTE_SCHED_DEBUG */
s->stats.n_bytes_tc[tc_index] += pkt_len;
}
+#ifdef RTE_SCHED_RED
+static inline void
+rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, uint32_t red)
+#else
static inline void
-rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, __rte_unused uint32_t red)
+#endif
{
struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
uint32_t tc_index = (qindex >> 2) & 0x3;
s->stats.n_pkts_tc_dropped[tc_index] += 1;
s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
+#ifdef RTE_SCHED_RED
+ s->stats.n_pkts_red_dropped[tc_index] += red;
+#endif
}
static inline void
qe->stats.n_bytes += pkt_len;
}
+#ifdef RTE_SCHED_RED
static inline void
-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, uint32_t red)
+#else
+static inline void
+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, __rte_unused uint32_t red)
+#endif
{
struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
uint32_t pkt_len = pkt->pkt_len;
qe->stats.n_pkts_dropped += 1;
qe->stats.n_bytes_dropped += pkt_len;
+#ifdef RTE_SCHED_RED
+ qe->stats.n_pkts_red_dropped += red;
+#endif
}
#endif /* RTE_SCHED_COLLECT_STATS */
struct rte_red_config *red_cfg;
struct rte_red *red;
uint32_t tc_index;
- enum rte_meter_color color;
+ enum rte_color color;
tc_index = (qindex >> 2) & 0x3;
color = rte_sched_port_pkt_read_color(pkt);
#ifdef RTE_SCHED_DEBUG
-static inline int
-debug_pipe_is_empty(struct rte_sched_port *port, uint32_t pindex)
-{
- uint32_t qindex, i;
-
- qindex = pindex << 4;
-
- for (i = 0; i < 16; i++) {
- uint32_t queue_empty = rte_sched_port_queue_is_empty(port, qindex + i);
- uint32_t bmp_bit_clear = (rte_bitmap_get(port->bmp, qindex + i) == 0);
-
- if (queue_empty != bmp_bit_clear)
- rte_panic("Queue status mismatch for queue %u of pipe %u\n", i, pindex);
-
- if (!queue_empty)
- return 0;
- }
-
- return 1;
-}
-
static inline void
debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos,
uint64_t bmp_slab)
#ifdef RTE_SCHED_COLLECT_STATS
struct rte_sched_queue_extra *qe;
#endif
- uint32_t subport, pipe, traffic_class, queue, qindex;
+ uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
- rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
-
- qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
q = port->queue + qindex;
rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
(qlen >= qsize))) {
rte_pktmbuf_free(pkt);
#ifdef RTE_SCHED_COLLECT_STATS
- rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt);
- rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt);
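+ /* qlen < qsize means the queue was not full, so this drop was
+ * decided by RED rather than by queue overflow
+ */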
+ rte_sched_port_update_subport_stats_on_drop(port, qindex, pkt,
+ qlen < qsize);
+ rte_sched_port_update_queue_stats_on_drop(port, qindex, pkt,
+ qlen < qsize);
#endif
return 0;
}
return 1;
}
-#ifdef RTE_SCHED_VECTOR
+#ifdef SCHED_VECTOR_SSE4
static inline int
grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
return 1;
}
+#elif defined(SCHED_VECTOR_NEON)
+
+static inline int
+grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
+{
+ uint32x4_t index, pipes;
+ uint32_t *pos = (uint32_t *)port->grinder_base_bmp_pos;
+
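+ /* XOR each cached pipe position with base_pipe: a matching lane
+ * becomes 0, so a zero minimum across lanes signals a hit
+ */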
+ index = vmovq_n_u32(base_pipe);
+ pipes = vld1q_u32(pos);
+ if (!vminvq_u32(veorq_u32(pipes, index)))
+ return 1;
+
+ pipes = vld1q_u32(pos + 4);
+ if (!vminvq_u32(veorq_u32(pipes, index)))
+ return 1;
+
+ return 0;
+}
+
#else
static inline int
{
uint64_t cycles = rte_get_tsc_cycles();
uint64_t cycles_diff = cycles - port->time_cpu_cycles;
- double bytes_diff = ((double) cycles_diff) / port->cycles_per_byte;
+ uint64_t bytes_diff;
+
+ /* Compute elapsed time in bytes */
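+ /* rte_reciprocal_divide() is a multiply-and-shift by the
+ * precomputed inverse, replacing the floating-point division
+ * used previously on this hot path
+ */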
+ bytes_diff = rte_reciprocal_divide(cycles_diff << RTE_SCHED_TIME_SHIFT,
+ port->inv_cycles_per_byte);
/* Advance port time */
port->time_cpu_cycles = cycles;
- port->time_cpu_bytes += (uint64_t) bytes_diff;
+ port->time_cpu_bytes += bytes_diff;
if (port->time < port->time_cpu_bytes)
port->time = port->time_cpu_bytes;