X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fsw%2Fsw_evdev_scheduler.c;h=cff747da8913b38a173b671e6500f35d0b1d67bf;hb=3a22f3877c9d8b13d7d1cdbd4c130b38cdbc8bcb;hp=17bd4c0a6463f6d165c6fa519532ea419027f76c;hpb=63ddc002391359e5b2bb375b63939eb4e911a709;p=dpdk.git diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c index 17bd4c0a64..cff747da89 100644 --- a/drivers/event/sw/sw_evdev_scheduler.c +++ b/drivers/event/sw/sw_evdev_scheduler.c @@ -51,9 +51,11 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, int cq = fid->cq; if (cq < 0) { - uint32_t cq_idx = qid->cq_next_tx++; - if (qid->cq_next_tx == qid->cq_num_mapped_cqs) + uint32_t cq_idx; + if (qid->cq_next_tx >= qid->cq_num_mapped_cqs) qid->cq_next_tx = 0; + cq_idx = qid->cq_next_tx++; + cq = qid->cq_map[cq_idx]; /* find least used */ @@ -140,9 +142,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, do { if (++cq_check_count > qid->cq_num_mapped_cqs) goto exit; - cq = qid->cq_map[cq_idx]; - if (++cq_idx == qid->cq_num_mapped_cqs) + if (cq_idx >= qid->cq_num_mapped_cqs) cq_idx = 0; + cq = qid->cq_map[cq_idx++]; + } while (rte_event_ring_free_count( sw->ports[cq].cq_worker_ring) == 0 || sw->ports[cq].inflights == SW_PORT_HIST_LIST); @@ -220,7 +223,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw) int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask); /* zero mapped CQs indicates directed */ - if (iq_num >= SW_IQS_MAX) + if (iq_num >= SW_IQS_MAX || qid->cq_num_mapped_cqs == 0) continue; uint32_t pkts_done = 0; @@ -517,13 +520,18 @@ sw_event_schedule(struct rte_eventdev *dev) /* Pull from rx_ring for ports */ do { in_pkts = 0; - for (i = 0; i < sw->port_count; i++) + for (i = 0; i < sw->port_count; i++) { + /* ack the unlinks in progress as done */ + if (sw->ports[i].unlinks_in_progress) + sw->ports[i].unlinks_in_progress = 0; + if (sw->ports[i].is_directed) in_pkts += sw_schedule_pull_port_dir(sw, i); else if (sw->ports[i].num_ordered_qids > 0) in_pkts += sw_schedule_pull_port_lb(sw, i); else in_pkts += sw_schedule_pull_port_no_reorder(sw, i); + } /* QID scan for re-ordered */ in_pkts += sw_schedule_reorder(sw, 0, @@ -532,8 +540,7 @@ sw_event_schedule(struct rte_eventdev *dev) } while (in_pkts > 4 && (int)in_pkts_this_iteration < sched_quanta); - out_pkts = 0; - out_pkts += sw_schedule_qid_to_cq(sw); + out_pkts = sw_schedule_qid_to_cq(sw); out_pkts_total += out_pkts; in_pkts_total += in_pkts_this_iteration; @@ -541,6 +548,12 @@ sw_event_schedule(struct rte_eventdev *dev) break; } while ((int)out_pkts_total < sched_quanta); + sw->stats.tx_pkts += out_pkts_total; + sw->stats.rx_pkts += in_pkts_total; + + sw->sched_no_iq_enqueues += (in_pkts_total == 0); + sw->sched_no_cq_enqueues += (out_pkts_total == 0); + /* push all the internal buffered QEs in port->cq_ring to the * worker cores: aka, do the ring transfers batched. */ @@ -552,10 +565,4 @@ sw_event_schedule(struct rte_eventdev *dev) sw->ports[i].cq_buf_count = 0; } - sw->stats.tx_pkts += out_pkts_total; - sw->stats.rx_pkts += in_pkts_total; - - sw->sched_no_iq_enqueues += (in_pkts_total == 0); - sw->sched_no_cq_enqueues += (out_pkts_total == 0); - }
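
Note on the core fix in this patch: in sw_schedule_atomic_to_cq() and sw_schedule_parallel_to_cq(), the round-robin CQ selection now wraps the index before using it, instead of post-incrementing first and wrapping afterwards. The old order goes wrong when qid->cq_num_mapped_cqs shrinks at runtime (for example after a port unlink): cq_next_tx can already be at or past the new count, the "==" wrap test never fires, and cq_map[] is read out of bounds. Below is a minimal standalone sketch of the two orderings; it reuses the field names from the patch, but the trimmed-down struct is hypothetical, not the driver's real struct sw_qid.

#include <stdint.h>

/* Hypothetical stand-in for struct sw_qid: only the fields used by the
 * round-robin CQ selection are kept, with an arbitrary map size. */
struct rr_qid {
	uint32_t cq_next_tx;          /* next CQ index to try */
	uint32_t cq_num_mapped_cqs;   /* may shrink at runtime (unlink) */
	uint8_t  cq_map[8];           /* mapped CQ (port) ids */
};

/* Old order: consume the index first, wrap afterwards.  If
 * cq_num_mapped_cqs shrank since the last call, cq_next_tx can already
 * be >= the new count: the equality test never resets it and cq_map[]
 * is read past the currently mapped CQs. */
static uint8_t
pick_cq_old(struct rr_qid *qid)
{
	uint32_t cq_idx = qid->cq_next_tx++;
	if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
		qid->cq_next_tx = 0;
	return qid->cq_map[cq_idx];
}

/* Patched order: clamp first, then consume the index, so the lookup
 * always stays inside the currently mapped CQs. */
static uint8_t
pick_cq_new(struct rr_qid *qid)
{
	uint32_t cq_idx;
	if (qid->cq_next_tx >= qid->cq_num_mapped_cqs)
		qid->cq_next_tx = 0;
	cq_idx = qid->cq_next_tx++;
	return qid->cq_map[cq_idx];
}

The added "qid->cq_num_mapped_cqs == 0" test in sw_schedule_qid_to_cq() closes the same hole from the other side: a queue that currently has no mapped CQs is skipped for that iteration, so cq_map[] is never indexed for it.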
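
The hunks in sw_event_schedule() adjust the scheduler loop itself: at the start of each port's pull, a non-zero unlinks_in_progress counter is reset to zero to acknowledge that pending unlinks have taken effect; out_pkts is assigned directly from sw_schedule_qid_to_cq(); and the rx/tx packet statistics together with the sched_no_iq_enqueues / sched_no_cq_enqueues counters are now updated before the batched flush of the per-port cq_buf rather than after it. The sketch below is a hedged, caller-side illustration of what the unlink acknowledgement enables, assuming the generic rte_event_port_unlink() / rte_event_port_unlinks_in_progress() eventdev API; the device, port and queue ids are placeholders and the busy-wait is only for illustration.

#include <rte_eventdev.h>
#include <rte_pause.h>

/* Unlink one queue from a port and wait until the PMD has acknowledged
 * it.  For the sw PMD, the acknowledgement is the scheduler clearing
 * the port's unlinks_in_progress counter on its next iteration (added
 * in this patch), so the poll below completes once the service core
 * has run another scheduling pass. */
static void
unlink_queue_and_wait(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
	uint8_t queues[1] = { queue_id };

	if (rte_event_port_unlink(dev_id, port_id, queues, 1) != 1)
		return; /* unlink not accepted; nothing to wait for */

	/* Returns the number of unlink operations not yet completed. */
	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
		rte_pause();
}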