X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fsched%2Frte_sched.c;h=62b3d2e3153ba6fc5bd35607be6c35e92de60d44;hb=edcf22c6d3898efd6ce346f1c6fa1264181c8d09;hp=cd87e688e489feee8ee4f1ddef4b5e151372fd75;hpb=3a91d2d138988e208b96a98a859fc3a0d9a9fe4d;p=dpdk.git diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c index cd87e688e4..62b3d2e315 100644 --- a/lib/sched/rte_sched.c +++ b/lib/sched/rte_sched.c @@ -89,8 +89,12 @@ struct rte_sched_queue { struct rte_sched_queue_extra { struct rte_sched_queue_stats stats; -#ifdef RTE_SCHED_RED - struct rte_red red; +#ifdef RTE_SCHED_CMAN + RTE_STD_C11 + union { + struct rte_red red; + struct rte_pie pie; + }; #endif }; @@ -183,8 +187,15 @@ struct rte_sched_subport { /* Pipe queues size */ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; -#ifdef RTE_SCHED_RED - struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS]; +#ifdef RTE_SCHED_CMAN + bool cman_enabled; + enum rte_sched_cman_mode cman; + + RTE_STD_C11 + union { + struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS]; + struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + }; #endif /* Scheduling loop detection */ @@ -228,7 +239,7 @@ struct rte_sched_port { int socket; /* Timing */ - uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cyles */ + uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */ uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */ uint64_t time; /* Current NIC TX time measured in bytes */ struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */ @@ -579,7 +590,7 @@ rte_sched_subport_config_qsize(struct rte_sched_subport *subport) subport->qsize_add[0] = 0; - /* Strict prority traffic class */ + /* Strict priority traffic class */ for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1]; @@ -961,9 +972,9 @@ rte_sched_port_config(struct rte_sched_port_params *params) /* Allocate memory to store the subport profile */ port->subport_profiles = rte_zmalloc_socket("subport_profile", size2, RTE_CACHE_LINE_SIZE, params->socket); - if (port == NULL) { + if (port->subport_profiles == NULL) { RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__); - + rte_free(port); return NULL; } @@ -1078,6 +1089,90 @@ rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports) rte_free(port); } +#ifdef RTE_SCHED_CMAN +static int +rte_sched_red_config(struct rte_sched_port *port, + struct rte_sched_subport *s, + struct rte_sched_subport_params *params, + uint32_t n_subports) +{ + uint32_t i; + + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { + + uint32_t j; + + for (j = 0; j < RTE_COLORS; j++) { + /* if min/max are both zero, then RED is disabled */ + if ((params->cman_params->red_params[i][j].min_th | + params->cman_params->red_params[i][j].max_th) == 0) { + continue; + } + + if (rte_red_config_init(&s->red_config[i][j], + params->cman_params->red_params[i][j].wq_log2, + params->cman_params->red_params[i][j].min_th, + params->cman_params->red_params[i][j].max_th, + params->cman_params->red_params[i][j].maxp_inv) != 0) { + rte_sched_free_memory(port, n_subports); + + RTE_LOG(NOTICE, SCHED, + "%s: RED configuration init fails\n", __func__); + return -EINVAL; + } + } + } + s->cman = RTE_SCHED_CMAN_RED; + return 0; +} + +static int +rte_sched_pie_config(struct rte_sched_port *port, + struct rte_sched_subport *s, + struct rte_sched_subport_params *params, + uint32_t 
n_subports) +{ + uint32_t i; + + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { + if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) { + RTE_LOG(NOTICE, SCHED, + "%s: PIE tailq threshold incorrect\n", __func__); + return -EINVAL; + } + + if (rte_pie_config_init(&s->pie_config[i], + params->cman_params->pie_params[i].qdelay_ref, + params->cman_params->pie_params[i].dp_update_interval, + params->cman_params->pie_params[i].max_burst, + params->cman_params->pie_params[i].tailq_th) != 0) { + rte_sched_free_memory(port, n_subports); + + RTE_LOG(NOTICE, SCHED, + "%s: PIE configuration init fails\n", __func__); + return -EINVAL; + } + } + s->cman = RTE_SCHED_CMAN_PIE; + return 0; +} + +static int +rte_sched_cman_config(struct rte_sched_port *port, + struct rte_sched_subport *s, + struct rte_sched_subport_params *params, + uint32_t n_subports) +{ + if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED) + return rte_sched_red_config(port, s, params, n_subports); + + else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE) + return rte_sched_pie_config(port, s, params, n_subports); + + return -EINVAL; +} +#endif + int rte_sched_subport_config(struct rte_sched_port *port, uint32_t subport_id, @@ -1090,6 +1185,7 @@ rte_sched_subport_config(struct rte_sched_port *port, uint32_t n_subport_pipe_queues, i; uint32_t size0, size1, bmp_mem_size; int status; + int ret; /* Check user parameters */ if (port == NULL) { @@ -1101,17 +1197,16 @@ rte_sched_subport_config(struct rte_sched_port *port, if (subport_id >= port->n_subports_per_port) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for subport id\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } if (subport_profile_id >= port->n_max_subport_profiles) { RTE_LOG(ERR, SCHED, "%s: " "Number of subport profile exceeds the max limit\n", __func__); - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } /** Memory is allocated only on first invocation of the api for a @@ -1127,9 +1222,8 @@ rte_sched_subport_config(struct rte_sched_port *port, RTE_LOG(NOTICE, SCHED, "%s: Port scheduler params check failed (%d)\n", __func__, status); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } /* Determine the amount of memory to allocate */ @@ -1143,9 +1237,8 @@ rte_sched_subport_config(struct rte_sched_port *port, if (s == NULL) { RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -ENOMEM; + ret = -ENOMEM; + goto out; } n_subports++; @@ -1169,30 +1262,17 @@ rte_sched_subport_config(struct rte_sched_port *port, s->n_pipe_profiles = params->n_pipe_profiles; s->n_max_pipe_profiles = params->n_max_pipe_profiles; -#ifdef RTE_SCHED_RED - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { - uint32_t j; - - for (j = 0; j < RTE_COLORS; j++) { - /* if min/max are both zero, then RED is disabled */ - if ((params->red_params[i][j].min_th | - params->red_params[i][j].max_th) == 0) { - continue; - } - - if (rte_red_config_init(&s->red_config[i][j], - params->red_params[i][j].wq_log2, - params->red_params[i][j].min_th, - params->red_params[i][j].max_th, - params->red_params[i][j].maxp_inv) != 0) { - rte_sched_free_memory(port, n_subports); - - RTE_LOG(NOTICE, SCHED, - "%s: RED configuration init fails\n", - __func__); - return -EINVAL; - } +#ifdef RTE_SCHED_CMAN + if (params->cman_params != NULL) { + s->cman_enabled = true; + status = 
rte_sched_cman_config(port, s, params, n_subports); + if (status) { + RTE_LOG(NOTICE, SCHED, + "%s: CMAN configuration fails\n", __func__); + return status; } + } else { + s->cman_enabled = false; } #endif @@ -1238,9 +1318,8 @@ rte_sched_subport_config(struct rte_sched_port *port, if (s->bmp == NULL) { RTE_LOG(ERR, SCHED, "%s: Subport bitmap init error\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) @@ -1285,6 +1364,11 @@ rte_sched_subport_config(struct rte_sched_port *port, rte_sched_port_log_subport_profile(port, subport_profile_id); return 0; + +out: + rte_sched_free_memory(port, n_subports); + + return ret; } int @@ -1299,6 +1383,7 @@ rte_sched_pipe_config(struct rte_sched_port *port, struct rte_sched_pipe_profile *params; uint32_t n_subports = subport_id + 1; uint32_t deactivate, profile, i; + int ret; /* Check user parameters */ profile = (uint32_t) pipe_profile; @@ -1313,26 +1398,23 @@ rte_sched_pipe_config(struct rte_sched_port *port, if (subport_id >= port->n_subports_per_port) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for parameter subport id\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } s = port->subports[subport_id]; if (pipe_id >= s->n_pipes_per_subport_enabled) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for parameter pipe id\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } if (!deactivate && profile >= s->n_pipe_profiles) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for parameter pipe profile\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } sp = port->subport_profiles + s->profile; @@ -1406,6 +1488,11 @@ rte_sched_pipe_config(struct rte_sched_port *port, } return 0; + +out: + rte_sched_free_memory(port, n_subports); + + return ret; } int @@ -1714,30 +1801,19 @@ rte_sched_port_update_subport_stats(struct rte_sched_port *port, subport->stats.n_bytes_tc[tc_index] += pkt_len; } -#ifdef RTE_SCHED_RED -static inline void -rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, - struct rte_sched_subport *subport, - uint32_t qindex, - struct rte_mbuf *pkt, - uint32_t red) -#else static inline void rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, struct rte_sched_subport *subport, uint32_t qindex, struct rte_mbuf *pkt, - __rte_unused uint32_t red) -#endif + __rte_unused uint32_t n_pkts_cman_dropped) { uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex); uint32_t pkt_len = pkt->pkt_len; subport->stats.n_pkts_tc_dropped[tc_index] += 1; subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len; -#ifdef RTE_SCHED_RED - subport->stats.n_pkts_red_dropped[tc_index] += red; -#endif + subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped; } static inline void @@ -1752,73 +1828,99 @@ rte_sched_port_update_queue_stats(struct rte_sched_subport *subport, qe->stats.n_bytes += pkt_len; } -#ifdef RTE_SCHED_RED -static inline void -rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport, - uint32_t qindex, - struct rte_mbuf *pkt, - uint32_t red) -#else static inline void rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport, uint32_t qindex, struct rte_mbuf *pkt, - __rte_unused uint32_t red) -#endif + __rte_unused uint32_t n_pkts_cman_dropped) { struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; 
uint32_t pkt_len = pkt->pkt_len; qe->stats.n_pkts_dropped += 1; qe->stats.n_bytes_dropped += pkt_len; -#ifdef RTE_SCHED_RED - qe->stats.n_pkts_red_dropped += red; +#ifdef RTE_SCHED_CMAN + if (subport->cman_enabled) + qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped; #endif } #endif /* RTE_SCHED_COLLECT_STATS */ -#ifdef RTE_SCHED_RED +#ifdef RTE_SCHED_CMAN static inline int -rte_sched_port_red_drop(struct rte_sched_port *port, +rte_sched_port_cman_drop(struct rte_sched_port *port, struct rte_sched_subport *subport, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen) { + if (!subport->cman_enabled) + return 0; + struct rte_sched_queue_extra *qe; - struct rte_red_config *red_cfg; - struct rte_red *red; uint32_t tc_index; - enum rte_color color; tc_index = rte_sched_port_pipe_tc(port, qindex); - color = rte_sched_port_pkt_read_color(pkt); - red_cfg = &subport->red_config[tc_index][color]; + qe = subport->queue_extra + qindex; - if ((red_cfg->min_th | red_cfg->max_th) == 0) - return 0; + /* RED */ + if (subport->cman == RTE_SCHED_CMAN_RED) { + struct rte_red_config *red_cfg; + struct rte_red *red; + enum rte_color color; - qe = subport->queue_extra + qindex; - red = &qe->red; + color = rte_sched_port_pkt_read_color(pkt); + red_cfg = &subport->red_config[tc_index][color]; + + if ((red_cfg->min_th | red_cfg->max_th) == 0) + return 0; + + red = &qe->red; + + return rte_red_enqueue(red_cfg, red, qlen, port->time); + } - return rte_red_enqueue(red_cfg, red, qlen, port->time); + /* PIE */ + struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index]; + struct rte_pie *pie = &qe->pie; + + return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles); } static inline void -rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, +rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port, struct rte_sched_subport *subport, uint32_t qindex) { - struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; - struct rte_red *red = &qe->red; + if (subport->cman_enabled) { + struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; + if (subport->cman == RTE_SCHED_CMAN_RED) { + struct rte_red *red = &qe->red; + + rte_red_mark_queue_empty(red, port->time); + } + } +} - rte_red_mark_queue_empty(red, port->time); +static inline void +rte_sched_port_pie_dequeue(struct rte_sched_subport *subport, +uint32_t qindex, uint32_t pkt_len, uint64_t time) { + if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) { + struct rte_sched_queue_extra *qe = subport->queue_extra + qindex; + struct rte_pie *pie = &qe->pie; + + /* Update queue length */ + pie->qlen -= 1; + pie->qlen_bytes -= pkt_len; + + rte_pie_dequeue(pie, pkt_len, time); + } } #else -static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unused, +static inline int rte_sched_port_cman_drop(struct rte_sched_port *port __rte_unused, struct rte_sched_subport *subport __rte_unused, struct rte_mbuf *pkt __rte_unused, uint32_t qindex __rte_unused, @@ -1827,9 +1929,17 @@ static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unus return 0; } -#define rte_sched_port_set_queue_empty_timestamp(port, subport, qindex) +#define rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex) -#endif /* RTE_SCHED_RED */ +static inline void +rte_sched_port_pie_dequeue(struct rte_sched_subport *subport __rte_unused, + uint32_t qindex __rte_unused, + uint32_t pkt_len __rte_unused, + uint64_t time __rte_unused) { + /* do-nothing when RTE_SCHED_CMAN not 
defined */ +} + +#endif /* RTE_SCHED_CMAN */ #ifdef RTE_SCHED_DEBUG @@ -1925,7 +2035,7 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port, qlen = q->qw - q->qr; /* Drop the packet (and update drop stats) when queue is full */ - if (unlikely(rte_sched_port_red_drop(port, subport, pkt, qindex, qlen) || + if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) || (qlen >= qsize))) { rte_pktmbuf_free(pkt); #ifdef RTE_SCHED_COLLECT_STATS @@ -2398,6 +2508,7 @@ grinder_schedule(struct rte_sched_port *port, { struct rte_sched_grinder *grinder = subport->grinder + pos; struct rte_sched_queue *queue = grinder->queue[grinder->qpos]; + uint32_t qindex = grinder->qindex[grinder->qpos]; struct rte_mbuf *pkt = grinder->pkt; uint32_t pkt_len = pkt->pkt_len + port->frame_overhead; uint32_t be_tc_active; @@ -2417,15 +2528,16 @@ grinder_schedule(struct rte_sched_port *port, (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active; if (queue->qr == queue->qw) { - uint32_t qindex = grinder->qindex[grinder->qpos]; - rte_bitmap_clear(subport->bmp, qindex); grinder->qmask &= ~(1 << grinder->qpos); if (be_tc_active) grinder->wrr_mask[grinder->qpos] = 0; - rte_sched_port_set_queue_empty_timestamp(port, subport, qindex); + + rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex); } + rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles); + /* Reset pipe loop detection */ subport->pipe_loop = RTE_SCHED_PIPE_INVALID; grinder->productive = 1;