mbuf: implement generic format for sched field
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 587d5e6..dd77391 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -128,22 +128,6 @@ enum grinder_state {
        e_GRINDER_READ_MBUF
 };
 
-/*
- * Path through the scheduler hierarchy used by the scheduler enqueue
- * operation to identify the destination queue for the current
- * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
- * each packet, typically written by the classification stage and read
- * by scheduler enqueue.
- */
-struct rte_sched_port_hierarchy {
-       uint16_t queue:2;                /**< Queue ID (0 .. 3) */
-       uint16_t traffic_class:2;        /**< Traffic class ID (0 .. 3)*/
-       uint32_t color:2;                /**< Color */
-       uint16_t unused:10;
-       uint16_t subport;                /**< Subport ID */
-       uint32_t pipe;                   /**< Pipe ID */
-};
-
 struct rte_sched_grinder {
        /* Pipe cache */
        uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
@@ -185,6 +169,7 @@ struct rte_sched_port {
        /* User parameters */
        uint32_t n_subports_per_port;
        uint32_t n_pipes_per_subport;
+       uint32_t n_pipes_per_subport_log2;
        uint32_t rate;
        uint32_t mtu;
        uint32_t frame_overhead;
@@ -645,6 +630,8 @@ rte_sched_port_config(struct rte_sched_port_params *params)
        /* User parameters */
        port->n_subports_per_port = params->n_subports_per_port;
        port->n_pipes_per_subport = params->n_pipes_per_subport;
+       port->n_pipes_per_subport_log2 =
+                       __builtin_ctz(params->n_pipes_per_subport);
        port->rate = params->rate;
        port->mtu = params->mtu + params->frame_overhead;
        port->frame_overhead = params->frame_overhead;
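
Note on the added n_pipes_per_subport_log2 field: the sched library requires n_pipes_per_subport to be a non-zero power of two (checked at port configuration time), so __builtin_ctz() of that value is exactly its base-2 logarithm, which the new rte_sched_port_qindex() below uses as the shift amount for the pipe and subport bits. A minimal standalone sketch of the same computation, with an illustrative pipe count that is not taken from this patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t n_pipes_per_subport = 4096;	/* must be a power of two */
	/* __builtin_ctz() counts trailing zero bits, i.e. log2 for powers of two */
	uint32_t n_pipes_per_subport_log2 = __builtin_ctz(n_pipes_per_subport);

	assert(n_pipes_per_subport_log2 == 12);
	assert((1u << n_pipes_per_subport_log2) == n_pipes_per_subport);
	return 0;
}
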
@@ -1006,44 +993,52 @@ rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
        return 0;
 }
 
+static inline uint32_t
+rte_sched_port_qindex(struct rte_sched_port *port,
+       uint32_t subport,
+       uint32_t pipe,
+       uint32_t traffic_class,
+       uint32_t queue)
+{
+       return ((subport & (port->n_subports_per_port - 1)) <<
+                       (port->n_pipes_per_subport_log2 + 4)) |
+                       ((pipe & (port->n_pipes_per_subport - 1)) << 4) |
+                       ((traffic_class &
+                           (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)) << 2) |
+                       (queue & (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1));
+}
+
 void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
-                        uint32_t subport, uint32_t pipe, uint32_t traffic_class,
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+                        struct rte_mbuf *pkt,
+                        uint32_t subport, uint32_t pipe,
+                        uint32_t traffic_class,
                         uint32_t queue, enum rte_meter_color color)
 {
-       struct rte_sched_port_hierarchy *sched
-               = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
-       RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));
-
-       sched->color = (uint32_t) color;
-       sched->subport = subport;
-       sched->pipe = pipe;
-       sched->traffic_class = traffic_class;
-       sched->queue = queue;
+       uint32_t queue_id = rte_sched_port_qindex(port, subport, pipe,
+                       traffic_class, queue);
+       rte_mbuf_sched_set(pkt, queue_id, traffic_class, (uint8_t)color);
 }
 
 void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+                                 const struct rte_mbuf *pkt,
                                  uint32_t *subport, uint32_t *pipe,
                                  uint32_t *traffic_class, uint32_t *queue)
 {
-       const struct rte_sched_port_hierarchy *sched
-               = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+       uint32_t queue_id = rte_mbuf_sched_queue_get(pkt);
 
-       *subport = sched->subport;
-       *pipe = sched->pipe;
-       *traffic_class = sched->traffic_class;
-       *queue = sched->queue;
+       *subport = queue_id >> (port->n_pipes_per_subport_log2 + 4);
+       *pipe = (queue_id >> 4) & (port->n_pipes_per_subport - 1);
+       *traffic_class = (queue_id >> 2) &
+                               (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1);
+       *queue = queue_id & (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1);
 }
 
 enum rte_meter_color
 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
 {
-       const struct rte_sched_port_hierarchy *sched
-               = (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
-       return (enum rte_meter_color) sched->color;
+       return (enum rte_meter_color)rte_mbuf_sched_color_get(pkt);
 }
 
 int
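
With this hunk the per-packet path through the hierarchy is packed into the single 32-bit queue ID stored via rte_mbuf_sched_set(): queue in bits 0-1, traffic class in bits 2-3, pipe in the next n_pipes_per_subport_log2 bits, and subport above that. Below is a hedged, standalone sketch of the encode/decode round trip; the subport/pipe counts and the path values are made up for illustration, and the two macros mirror the four traffic classes and four queues per traffic class assumed by this version of rte_sched:

#include <assert.h>
#include <stdint.h>

#define TRAFFIC_CLASSES_PER_PIPE 4	/* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE */
#define QUEUES_PER_TRAFFIC_CLASS 4	/* RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS */

int main(void)
{
	uint32_t n_subports = 8, n_pipes = 4096, n_pipes_log2 = 12;
	uint32_t subport = 5, pipe = 1000, tc = 2, queue = 3;

	/* Encode: same bit packing as rte_sched_port_qindex() above */
	uint32_t qindex =
		((subport & (n_subports - 1)) << (n_pipes_log2 + 4)) |
		((pipe & (n_pipes - 1)) << 4) |
		((tc & (TRAFFIC_CLASSES_PER_PIPE - 1)) << 2) |
		(queue & (QUEUES_PER_TRAFFIC_CLASS - 1));

	/* Decode: same unpacking as rte_sched_port_pkt_read_tree_path() */
	assert((qindex >> (n_pipes_log2 + 4)) == subport);
	assert(((qindex >> 4) & (n_pipes - 1)) == pipe);
	assert(((qindex >> 2) & (TRAFFIC_CLASSES_PER_PIPE - 1)) == tc);
	assert((qindex & (QUEUES_PER_TRAFFIC_CLASS - 1)) == queue);
	return 0;
}
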
@@ -1100,18 +1095,6 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
        return 0;
 }
 
-static inline uint32_t
-rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
-{
-       uint32_t result;
-
-       result = subport * port->n_pipes_per_subport + pipe;
-       result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
-       result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
-
-       return result;
-}
-
 #ifdef RTE_SCHED_DEBUG
 
 static inline int
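
The multiplicative rte_sched_port_qindex() removed here computed ((subport * n_pipes_per_subport + pipe) * 4 + traffic_class) * 4 + queue. For a power-of-two n_pipes_per_subport this yields exactly the same value as the shift/mask version added earlier in the patch, so the queue numbering seen by the rest of the library is preserved. A quick standalone check under that power-of-two assumption, with arbitrary in-range values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t n_pipes = 1024, n_pipes_log2 = 10;	/* power of two */
	uint32_t subport = 3, pipe = 77, tc = 1, queue = 2;

	/* Old formula: nested multiplications */
	uint32_t old_qindex = ((subport * n_pipes + pipe) * 4 + tc) * 4 + queue;

	/* New formula: shifts and ORs (inputs already in range, masks omitted) */
	uint32_t new_qindex = (subport << (n_pipes_log2 + 4)) |
			      (pipe << 4) | (tc << 2) | queue;

	assert(old_qindex == new_qindex);
	return 0;
}
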
@@ -1272,11 +1255,8 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port,
 #ifdef RTE_SCHED_COLLECT_STATS
        struct rte_sched_queue_extra *qe;
 #endif
-       uint32_t subport, pipe, traffic_class, queue, qindex;
-
-       rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
+       uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
 
-       qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
        q = port->queue + qindex;
        rte_prefetch0(q);
 #ifdef RTE_SCHED_COLLECT_STATS
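
With the port-aware signatures, a typical flow looks like the sketch below: the classification stage writes the hierarchy path into the generic mbuf sched field, and the scheduler enqueue (or any other reader) recovers it through the same port. The helper names and the subport/pipe/traffic_class/queue values are placeholders for illustration, not part of this patch:

#include <rte_mbuf.h>
#include <rte_meter.h>
#include <rte_sched.h>

/* Hypothetical helper: classify one packet and tag it for the scheduler.
 * 'port' must be the same rte_sched_port the packet will be enqueued to,
 * since the queue ID layout now depends on the port's pipe/subport counts. */
static void
classify_and_tag(struct rte_sched_port *port, struct rte_mbuf *pkt)
{
	uint32_t subport = 0, pipe = 12, traffic_class = 1, queue = 3;

	/* Packs the path into the mbuf sched field via rte_mbuf_sched_set() */
	rte_sched_port_pkt_write(port, pkt, subport, pipe,
				 traffic_class, queue, e_RTE_METER_GREEN);
}

/* Hypothetical helper: read the path back, e.g. for stats or debugging. */
static void
dump_path(struct rte_sched_port *port, const struct rte_mbuf *pkt)
{
	uint32_t subport, pipe, traffic_class, queue;

	rte_sched_port_pkt_read_tree_path(port, pkt, &subport, &pipe,
					  &traffic_class, &queue);
	(void)rte_sched_port_pkt_read_color(pkt);
}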