structure would be made internal (or removed if all dependencies are cleared)
in future releases.
-* mbuf: The opaque ``mbuf->hash.sched`` field will be updated to support generic
- definition in line with the ethdev TM and MTR APIs. Currently, this field
- is defined in librte_sched in a non-generic way. The new generic format
- will contain: queue ID, traffic class, color. Field size will not change.
-
-* sched: Some API functions will change prototype due to the above
- deprecation note for mbuf->hash.sched, e.g. ``rte_sched_port_pkt_write()``
- and ``rte_sched_port_pkt_read()`` will likely have an additional parameter
- of type ``struct rte_sched_port``.
-
* mbuf: the macro ``RTE_MBUF_INDIRECT()`` will be removed in v18.08 or later and
replaced with ``RTE_MBUF_CLONED()`` which is already added in v18.05. As
``EXT_ATTACHED_MBUF`` is newly introduced in v18.05, ``RTE_MBUF_INDIRECT()``
``rte_malloc_get_socket_stats()`` are no longer safe to call concurrently with
``rte_malloc_heap_create()`` or ``rte_malloc_heap_destroy()`` function calls.
+* sched: As a result of the new format of the mbuf sched field, the
+ functions ``rte_sched_port_pkt_write()`` and
+ ``rte_sched_port_pkt_read_tree_path()`` gained an additional parameter
+ of type ``struct rte_sched_port *``.
+
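+ A minimal sketch of the updated call, assuming ``port`` is an existing
+ handle returned by ``rte_sched_port_config()``::
+
+     rte_sched_port_pkt_write(port, pkt, subport, pipe,
+         traffic_class, queue, color);
+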
* pdump: The ``rte_pdump_set_socket_dir()``, the parameter ``path`` of
``rte_pdump_init()`` and enum ``rte_pdump_socktype`` were deprecated
since 18.05 and are removed in this release.
Also, make sure to start the actual text at the margin.
=========================================================
+* mbuf: The format of the sched field of ``rte_mbuf`` has been changed
+ to include the following fields: ``queue ID``, ``traffic class``, ``color``.
+
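+ A minimal usage sketch of the accessors added alongside the new format,
+ where ``m`` is any valid mbuf::
+
+     rte_mbuf_sched_set(m, queue_id, traffic_class, color);
+     queue_id = rte_mbuf_sched_queue_get(m);
+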
Shared Library Versions
-----------------------
librte_kvargs.so.1
librte_latencystats.so.1
librte_lpm.so.2
- librte_mbuf.so.4
+ librte_mbuf.so.5
librte_member.so.1
librte_mempool.so.5
librte_meter.so.2
librte_rawdev.so.1
librte_reorder.so.1
librte_ring.so.2
- librte_sched.so.1
+ librte_sched.so.2
librte_security.so.1
librte_table.so.3
librte_timer.so.1
for(i = 0; i < nb_rx; i++) {
get_pkt_sched(rx_mbufs[i],
&subport, &pipe, &traffic_class, &queue, &color);
- rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
- traffic_class, queue, (enum rte_meter_color) color);
+ rte_sched_port_pkt_write(conf->sched_port,
+ rx_mbufs[i],
+ subport, pipe,
+ traffic_class, queue,
+ (enum rte_meter_color) color);
}
if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
flow->rx_thread.rx_port = flow->rx_port;
flow->rx_thread.rx_ring = flow->rx_ring;
flow->rx_thread.rx_queue = flow->rx_queue;
+ flow->rx_thread.sched_port = flow->sched_port;
rx_confs[rx_idx++] = &flow->rx_thread;
* function can be used to retrieve the adapter's service function ID.
*
* The ethernet port and transmit queue index to transmit the mbuf on are
- * specified using the mbuf port and the higher 16 bits of
- * struct rte_mbuf::hash::sched:hi. The application should use the
- * rte_event_eth_tx_adapter_txq_set() and rte_event_eth_tx_adapter_txq_get()
- * functions to access the transmit queue index since it is expected that the
- * transmit queue will be eventually defined within struct rte_mbuf and using
- * these macros will help with minimizing application impact due to
- * a change in how the transmit queue index is specified.
+ * specified using the mbuf port and the struct rte_mbuf::hash::txadapter::txq
+ * field. The application should use the rte_event_eth_tx_adapter_txq_set()
+ * and rte_event_eth_tx_adapter_txq_get() functions to access the transmit
+ * queue index; using these functions helps minimize application impact from
+ * any future change in how the transmit queue index is specified.
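+ *
+ * A minimal usage sketch, where ``m`` is a hypothetical mbuf destined for
+ * transmit queue 3:
+ * @code
+ *	rte_event_eth_tx_adapter_txq_set(m, 3);
+ *	txq = rte_event_eth_tx_adapter_txq_get(m);
+ * @endcode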
*/
#ifdef __cplusplus
static __rte_always_inline void __rte_experimental
rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
{
- uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
- p[1] = queue;
+ pkt->hash.txadapter.txq = queue;
}
/**
static __rte_always_inline uint16_t __rte_experimental
rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
{
- uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
- return p[1];
+ return pkt->hash.txadapter.txq;
}
/**
EXPORT_MAP := rte_mbuf_version.map
-LIBABIVER := 4
+LIBABIVER := 5
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c rte_mbuf_ptype.c rte_mbuf_pool_ops.c
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-version = 4
+version = 5
sources = files('rte_mbuf.c', 'rte_mbuf_ptype.c', 'rte_mbuf_pool_ops.c')
headers = files('rte_mbuf.h', 'rte_mbuf_ptype.h', 'rte_mbuf_pool_ops.h')
deps += ['mempool']
* on PKT_RX_FDIR_* flag in ol_flags.
*/
} fdir; /**< Filter identifier if FDIR enabled */
+ struct rte_mbuf_sched {
+ uint32_t queue_id; /**< Queue ID. */
+ uint8_t traffic_class;
+ /**< Traffic class ID. Traffic class 0
+ * is the highest priority traffic class.
+ */
+ uint8_t color;
+ /**< Color. @see enum rte_color. */
+ uint16_t reserved; /**< Reserved. */
+ } sched; /**< Hierarchical scheduler */
struct {
- uint32_t lo;
- uint32_t hi;
+ uint32_t reserved1;
+ uint16_t reserved2;
+ uint16_t txq;
/**< The event eth Tx adapter uses this field
* to store Tx queue id.
* @see rte_event_eth_tx_adapter_txq_set()
*/
- } sched; /**< Hierarchical scheduler */
+ } txadapter; /**< Eventdev ethdev Tx adapter */
/**< User defined tags. See rte_distributor_process() */
uint32_t usr;
} hash; /**< hash information */
*/
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
+/**
+ * Get the value of mbuf sched queue_id field.
+ */
+static inline uint32_t
+rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
+{
+ return m->hash.sched.queue_id;
+}
+
+/**
+ * Get the value of mbuf sched traffic_class field.
+ */
+static inline uint8_t
+rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
+{
+ return m->hash.sched.traffic_class;
+}
+
+/**
+ * Get the value of mbuf sched color field.
+ */
+static inline uint8_t
+rte_mbuf_sched_color_get(const struct rte_mbuf *m)
+{
+ return m->hash.sched.color;
+}
+
+/**
+ * Get the values of mbuf sched queue_id, traffic_class and color.
+ *
+ * @param m
+ * Mbuf to read
+ * @param queue_id
+ * Returns the queue id
+ * @param traffic_class
+ * Returns the traffic class id
+ * @param color
+ * Returns the color id
+ */
+static inline void
+rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
+ uint8_t *traffic_class,
+ uint8_t *color)
+{
+ struct rte_mbuf_sched sched = m->hash.sched;
+
+ *queue_id = sched.queue_id;
+ *traffic_class = sched.traffic_class;
+ *color = sched.color;
+}
+
+/**
+ * Set the mbuf sched queue_id to the defined value.
+ */
+static inline void
+rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
+{
+ m->hash.sched.queue_id = queue_id;
+}
+
+/**
+ * Set the mbuf sched traffic_class id to the defined value.
+ */
+static inline void
+rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
+{
+ m->hash.sched.traffic_class = traffic_class;
+}
+
+/**
+ * Set the mbuf sched color id to the defined value.
+ */
+static inline void
+rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
+{
+ m->hash.sched.color = color;
+}
+
+/**
+ * Set the mbuf sched queue_id, traffic_class and color.
+ *
+ * @param m
+ * Mbuf to set
+ * @param queue_id
+ * Queue id value to be set
+ * @param traffic_class
+ * Traffic class id value to be set
+ * @param color
+ * Color id to be set
+ */
+static inline void
+rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
+ uint8_t traffic_class,
+ uint8_t color)
+{
+ m->hash.sched = (struct rte_mbuf_sched){
+ .queue_id = queue_id,
+ .traffic_class = traffic_class,
+ .color = color,
+ };
+}
+
#ifdef __cplusplus
}
#endif
return 0;
}
-#define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
- ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
- ((((uint64_t)(tc)) & 0x3) << 2) | \
- ((((uint64_t)(color)) & 0x3) << 4)))
-
-#define MBUF_SCHED_COLOR(sched, color) \
- (((sched) & (~0x30LLU)) | ((color) << 4))
-
struct mtr_trtcm_data {
struct rte_meter_trtcm trtcm;
uint64_t stats[e_RTE_METER_COLORS];
struct dscp_table_entry_data {
enum rte_meter_color color;
uint16_t tc;
- uint16_t queue_tc_color;
+ uint16_t tc_queue;
};
struct dscp_table_data {
uint32_t dscp,
uint16_t total_length)
{
- uint64_t drop_mask, sched;
- uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
+ uint64_t drop_mask;
struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
enum rte_meter_color color_in, color_meter, color_policer;
uint32_t tc, mp_id;
color_in = dscp_entry->color;
data += tc;
mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
- sched = *sched_ptr;
/* Meter */
color_meter = rte_meter_trtcm_color_aware_check(
drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
color_policer =
MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
- *sched_ptr = MBUF_SCHED_COLOR(sched, color_policer);
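+ /* Update only the color field in the mbuf sched area with the
+  * post-policer color; the queue ID and traffic class are untouched.
+  */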
+ rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);
return drop_mask;
}
}
struct tm_data {
- uint16_t queue_tc_color;
- uint16_t subport;
- uint32_t pipe;
+ uint32_t queue_id;
+ uint32_t reserved;
} __attribute__((__packed__));
static int
return status;
/* Apply */
- data->queue_tc_color = 0;
- data->subport = (uint16_t) p->subport_id;
- data->pipe = p->pipe_id;
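+ /* Pack the subport and pipe IDs into the upper bits of queue_id;
+  * the low 4 bits are filled in later, per packet, from the DSCP
+  * table entry (traffic class and in-class queue, 2 bits each).
+  */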
+ data->queue_id = p->subport_id <<
+ (__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
+ p->pipe_id << 4;
return 0;
}
uint32_t dscp)
{
struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
- struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
- struct tm_data sched;
-
- sched = *data;
- sched.queue_tc_color = dscp_entry->queue_tc_color;
- *sched_ptr = sched;
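+ /* Complete the queue ID: the low 4 bits carry the per-DSCP traffic
+  * class (bits 3:2) and in-class queue (bits 1:0).
+  */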
+ uint32_t queue_id = data->queue_id |
+ (dscp_entry->tc << 2) |
+ dscp_entry->tc_queue;
+ rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
+ (uint8_t)dscp_entry->color);
}
/**
&action->dscp_table.entry[i];
struct rte_table_action_dscp_table_entry *entry =
&table->entry[i];
- uint16_t queue_tc_color =
- MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
- entry->tc_id,
- entry->color);
if ((dscp_mask & (1LLU << i)) == 0)
continue;
data->color = entry->color;
data->tc = entry->tc_id;
- data->queue_tc_color = queue_tc_color;
+ data->tc_queue = entry->tc_queue_id;
}
return 0;
EXPORT_MAP := rte_sched_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_sched.c', 'rte_red.c', 'rte_approx.c')
headers = files('rte_sched.h', 'rte_sched_common.h',
'rte_red.h', 'rte_approx.h')
e_GRINDER_READ_MBUF
};
-/*
- * Path through the scheduler hierarchy used by the scheduler enqueue
- * operation to identify the destination queue for the current
- * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
- * each packet, typically written by the classification stage and read
- * by scheduler enqueue.
- */
-struct rte_sched_port_hierarchy {
- uint16_t queue:2; /**< Queue ID (0 .. 3) */
- uint16_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
- uint32_t color:2; /**< Color */
- uint16_t unused:10;
- uint16_t subport; /**< Subport ID */
- uint32_t pipe; /**< Pipe ID */
-};
-
struct rte_sched_grinder {
/* Pipe cache */
uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
/* User parameters */
uint32_t n_subports_per_port;
uint32_t n_pipes_per_subport;
+ uint32_t n_pipes_per_subport_log2;
uint32_t rate;
uint32_t mtu;
uint32_t frame_overhead;
/* User parameters */
port->n_subports_per_port = params->n_subports_per_port;
port->n_pipes_per_subport = params->n_pipes_per_subport;
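+ /* n_pipes_per_subport is validated to be a power of two, so its
+  * log2 equals the count of trailing zeros.
+  */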
+ port->n_pipes_per_subport_log2 =
+ __builtin_ctz(params->n_pipes_per_subport);
port->rate = params->rate;
port->mtu = params->mtu + params->frame_overhead;
port->frame_overhead = params->frame_overhead;
return 0;
}
+static inline uint32_t
+rte_sched_port_qindex(struct rte_sched_port *port,
+ uint32_t subport,
+ uint32_t pipe,
+ uint32_t traffic_class,
+ uint32_t queue)
+{
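+ /* Pack the scheduler tree path into a single queue index; from MSB
+  * to LSB: subport ID, pipe ID, traffic class (2 bits), queue (2 bits).
+  */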
+ return ((subport & (port->n_subports_per_port - 1)) <<
+ (port->n_pipes_per_subport_log2 + 4)) |
+ ((pipe & (port->n_pipes_per_subport - 1)) << 4) |
+ ((traffic_class &
+ (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)) << 2) |
+ (queue & (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1));
+}
+
void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
- uint32_t subport, uint32_t pipe, uint32_t traffic_class,
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+ struct rte_mbuf *pkt,
+ uint32_t subport, uint32_t pipe,
+ uint32_t traffic_class,
uint32_t queue, enum rte_meter_color color)
{
- struct rte_sched_port_hierarchy *sched
- = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));
-
- sched->color = (uint32_t) color;
- sched->subport = subport;
- sched->pipe = pipe;
- sched->traffic_class = traffic_class;
- sched->queue = queue;
+ uint32_t queue_id = rte_sched_port_qindex(port, subport, pipe,
+ traffic_class, queue);
+ rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
}
void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+ const struct rte_mbuf *pkt,
uint32_t *subport, uint32_t *pipe,
uint32_t *traffic_class, uint32_t *queue)
{
- const struct rte_sched_port_hierarchy *sched
- = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
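+
+ /* Unpack the tree path in the reverse order of rte_sched_port_qindex(). */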
- *subport = sched->subport;
- *pipe = sched->pipe;
- *traffic_class = sched->traffic_class;
- *queue = sched->queue;
+ *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
+ *pipe = (queue_id >> 4) & (port->n_pipes_per_subport - 1);
+ *traffic_class = (queue_id >> 2) &
+ (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1);
+ *queue = queue_id & (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1);
}
enum rte_meter_color
rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
{
- const struct rte_sched_port_hierarchy *sched
- = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
- return (enum rte_meter_color) sched->color;
+ return (enum rte_meter_color)rte_mbuf_sched_color_get(pkt);
}
int
return 0;
}
-static inline uint32_t
-rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
-{
- uint32_t result;
-
- result = subport * port->n_pipes_per_subport + pipe;
- result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
- result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
-
- return result;
-}
-
#ifdef RTE_SCHED_DEBUG
static inline int
#ifdef RTE_SCHED_COLLECT_STATS
struct rte_sched_queue_extra *qe;
#endif
- uint32_t subport, pipe, traffic_class, queue, qindex;
-
- rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
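+ /* The classification stage now writes the final queue index into the
+  * mbuf sched field, so enqueue reads it directly instead of
+  * re-computing it from the tree path.
+  */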
+ uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
- qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
q = port->queue + qindex;
rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
* Scheduler hierarchy path write to packet descriptor. Typically
* called by the packet classification stage.
*
+ * @param port
+ * Handle to port scheduler instance
* @param pkt
* Packet descriptor handle
* @param subport
* Packet color set
*/
void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+ struct rte_mbuf *pkt,
uint32_t subport, uint32_t pipe, uint32_t traffic_class,
uint32_t queue, enum rte_meter_color color);
* enqueue operation. The subport, pipe, traffic class and queue
* parameters need to be pre-allocated by the caller.
*
+ * @param port
+ * Handle to port scheduler instance
* @param pkt
* Packet descriptor handle
* @param subport
*
*/
void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+ const struct rte_mbuf *pkt,
uint32_t *subport, uint32_t *pipe,
uint32_t *traffic_class, uint32_t *queue);
}
static void
-prepare_pkt(struct rte_mbuf *mbuf)
+prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)
{
struct ether_hdr *eth_hdr;
struct vlan_hdr *vlan1, *vlan2;
ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);
- rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
+ rte_sched_port_pkt_write(port, mbuf, SUBPORT, PIPE, TC, QUEUE,
+ e_RTE_METER_YELLOW);
/* 64 byte packet */
mbuf->pkt_len = 60;
for (i = 0; i < 10; i++) {
in_mbufs[i] = rte_pktmbuf_alloc(mp);
TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
- prepare_pkt(in_mbufs[i]);
+ prepare_pkt(port, in_mbufs[i]);
}
color = rte_sched_port_pkt_read_color(out_mbufs[i]);
TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
- rte_sched_port_pkt_read_tree_path(out_mbufs[i],
+ rte_sched_port_pkt_read_tree_path(port, out_mbufs[i],
&subport, &pipe, &traffic_class, &queue);
TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");