X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fsw%2Fsw_evdev_scheduler.c;h=cff747da8913b38a173b671e6500f35d0b1d67bf;hb=c2c15f769ab978c8ec5f9e17bd7ae2d63176f276;hp=aa2971f9a875a047f467aed96103f036adf509d4;hpb=5566a3e35866ce9e5eacf886c27b460ebfcd6ee9;p=dpdk.git

diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index aa2971f9a8..cff747da89 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -6,7 +6,7 @@
 #include <rte_hash_crc.h>
 #include <rte_event_ring.h>
 #include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
 
 #define SW_IQS_MASK (SW_IQS_MAX-1)
 
@@ -43,7 +43,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 	 */
 	uint32_t qid_id = qid->id;
 
-	iq_ring_dequeue_burst(qid->iq[iq_num], qes, count);
+	iq_dequeue_burst(sw, &qid->iq[iq_num], qes, count);
 	for (i = 0; i < count; i++) {
 		const struct rte_event *qe = &qes[i];
 		const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id);
@@ -51,9 +51,11 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 		int cq = fid->cq;
 
 		if (cq < 0) {
-			uint32_t cq_idx = qid->cq_next_tx++;
-			if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
+			uint32_t cq_idx;
+			if (qid->cq_next_tx >= qid->cq_num_mapped_cqs)
 				qid->cq_next_tx = 0;
+			cq_idx = qid->cq_next_tx++;
+
 			cq = qid->cq_map[cq_idx];
 
 			/* find least used */
@@ -102,7 +104,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 			p->cq_buf_count = 0;
 		}
 	}
-	iq_ring_put_back(qid->iq[iq_num], blocked_qes, nb_blocked);
+	iq_put_back(sw, &qid->iq[iq_num], blocked_qes, nb_blocked);
 
 	return count - nb_blocked;
 }
@@ -128,7 +130,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 				rte_ring_count(qid->reorder_buffer_freelist));
 
 	for (i = 0; i < count; i++) {
-		const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
+		const struct rte_event *qe = iq_peek(&qid->iq[iq_num]);
 		uint32_t cq_check_count = 0;
 		uint32_t cq;
 
@@ -140,9 +142,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 		do {
 			if (++cq_check_count > qid->cq_num_mapped_cqs)
 				goto exit;
-			cq = qid->cq_map[cq_idx];
-			if (++cq_idx == qid->cq_num_mapped_cqs)
+			if (cq_idx >= qid->cq_num_mapped_cqs)
 				cq_idx = 0;
+			cq = qid->cq_map[cq_idx++];
+
 		} while (rte_event_ring_free_count(
 				sw->ports[cq].cq_worker_ring) == 0 ||
 				sw->ports[cq].inflights == SW_PORT_HIST_LIST);
@@ -165,7 +168,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 					(void *)&p->hist_list[head].rob_entry);
 
 		sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
-		iq_ring_pop(qid->iq[iq_num]);
+		iq_pop(sw, &qid->iq[iq_num]);
 
 		rte_compiler_barrier();
 		p->inflights++;
@@ -190,8 +193,8 @@ sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
 		return 0;
 
 	/* burst dequeue from the QID IQ ring */
-	struct iq_ring *ring = qid->iq[iq_num];
-	uint32_t ret = iq_ring_dequeue_burst(ring,
+	struct sw_iq *iq = &qid->iq[iq_num];
+	uint32_t ret = iq_dequeue_burst(sw, iq,
 			&port->cq_buf[port->cq_buf_count], count_free);
 	port->cq_buf_count += ret;
 
@@ -220,11 +223,11 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw)
 		int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask);
 
 		/* zero mapped CQs indicates directed */
-		if (iq_num >= SW_IQS_MAX)
+		if (iq_num >= SW_IQS_MAX || qid->cq_num_mapped_cqs == 0)
 			continue;
 
 		uint32_t pkts_done = 0;
-		uint32_t count = iq_ring_count(qid->iq[iq_num]);
+		uint32_t count = iq_count(&qid->iq[iq_num]);
 
 		if (count > 0) {
 			if (type == SW_SCHED_TYPE_DIRECT)
@@ -296,22 +299,15 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
 				continue;
 			}
 
-			struct sw_qid *dest_qid_ptr =
-					&sw->qids[dest_qid];
-			const struct iq_ring *dest_iq_ptr =
-					dest_qid_ptr->iq[dest_iq];
-			if (iq_ring_free_count(dest_iq_ptr) == 0)
-				break;
-
 			pkts_iter++;
 
 			struct sw_qid *q = &sw->qids[dest_qid];
-			struct iq_ring *r = q->iq[dest_iq];
+			struct sw_iq *iq = &q->iq[dest_iq];
 
 			/* we checked for space above, so enqueue must
 			 * succeed
 			 */
-			iq_ring_enqueue(r, qe);
+			iq_enqueue(sw, iq, qe);
 			q->iq_pkt_mask |= (1 << (dest_iq));
 			q->iq_pkt_count[dest_iq]++;
 			q->stats.rx_pkts++;
@@ -376,10 +372,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
 		uint32_t iq_num = PRIO_TO_IQ(qe->priority);
 		struct sw_qid *qid = &sw->qids[qe->queue_id];
 
-		if ((flags & QE_FLAG_VALID) &&
-				iq_ring_free_count(qid->iq[iq_num]) == 0)
-			break;
-
 		/* now process based on flags. Note that for directed
 		 * queues, the enqueue_flush masks off all but the
 		 * valid flag. This makes FWD and PARTIAL enqueues just
@@ -443,7 +435,7 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
 			 */
 
 			qid->iq_pkt_mask |= (1 << (iq_num));
-			iq_ring_enqueue(qid->iq[iq_num], qe);
+			iq_enqueue(sw, &qid->iq[iq_num], qe);
 			qid->iq_pkt_count[iq_num]++;
 			qid->stats.rx_pkts++;
 			pkts_iter++;
@@ -488,10 +480,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
 
 		uint32_t iq_num = PRIO_TO_IQ(qe->priority);
 		struct sw_qid *qid = &sw->qids[qe->queue_id];
-		struct iq_ring *iq_ring = qid->iq[iq_num];
-
-		if (iq_ring_free_count(iq_ring) == 0)
-			break; /* move to next port */
+		struct sw_iq *iq = &qid->iq[iq_num];
 
 		port->stats.rx_pkts++;
 
@@ -499,7 +488,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
 		 * into the qid at the right priority
 		 */
 		qid->iq_pkt_mask |= (1 << (iq_num));
-		iq_ring_enqueue(iq_ring, qe);
+		iq_enqueue(sw, iq, qe);
 		qid->iq_pkt_count[iq_num]++;
 		qid->stats.rx_pkts++;
 		pkts_iter++;
@@ -522,7 +511,7 @@ sw_event_schedule(struct rte_eventdev *dev)
 	uint32_t i;
 
 	sw->sched_called++;
-	if (!sw->started)
+	if (unlikely(!sw->started))
 		return;
 
 	do {
@@ -531,13 +520,18 @@ sw_event_schedule(struct rte_eventdev *dev)
 		/* Pull from rx_ring for ports */
 		do {
 			in_pkts = 0;
-			for (i = 0; i < sw->port_count; i++)
+			for (i = 0; i < sw->port_count; i++) {
+				/* ack the unlinks in progress as done */
+				if (sw->ports[i].unlinks_in_progress)
+					sw->ports[i].unlinks_in_progress = 0;
+
 				if (sw->ports[i].is_directed)
 					in_pkts += sw_schedule_pull_port_dir(sw, i);
 				else if (sw->ports[i].num_ordered_qids > 0)
 					in_pkts += sw_schedule_pull_port_lb(sw, i);
 				else
 					in_pkts += sw_schedule_pull_port_no_reorder(sw, i);
+			}
 
 			/* QID scan for re-ordered */
 			in_pkts += sw_schedule_reorder(sw, 0,
@@ -546,8 +540,7 @@ sw_event_schedule(struct rte_eventdev *dev)
 		} while (in_pkts > 4 &&
 				(int)in_pkts_this_iteration < sched_quanta);
 
-		out_pkts = 0;
-		out_pkts += sw_schedule_qid_to_cq(sw);
+		out_pkts = sw_schedule_qid_to_cq(sw);
 		out_pkts_total += out_pkts;
 		in_pkts_total += in_pkts_this_iteration;
 
@@ -555,6 +548,12 @@ sw_event_schedule(struct rte_eventdev *dev)
 			break;
 	} while ((int)out_pkts_total < sched_quanta);
 
+	sw->stats.tx_pkts += out_pkts_total;
+	sw->stats.rx_pkts += in_pkts_total;
+
+	sw->sched_no_iq_enqueues += (in_pkts_total == 0);
+	sw->sched_no_cq_enqueues += (out_pkts_total == 0);
+
 	/* push all the internal buffered QEs in port->cq_ring to the
 	 * worker cores: aka, do the ring transfers batched.
 	 */
@@ -566,10 +565,4 @@ sw_event_schedule(struct rte_eventdev *dev)
 		sw->ports[i].cq_buf_count = 0;
 	}
 
-	sw->stats.tx_pkts += out_pkts_total;
-	sw->stats.rx_pkts += in_pkts_total;
-
-	sw->sched_no_iq_enqueues += (in_pkts_total == 0);
-	sw->sched_no_cq_enqueues += (out_pkts_total == 0);
-
 }
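
The hunks above replace the fixed-size iq_ring with the chunk-based sw_iq from iq_chunk.h; the new iq_enqueue()/iq_dequeue_burst()/iq_pop()/iq_put_back() helpers take the sw device pointer, and the sw_iq appears to grow on demand, which would explain why the iq_ring_free_count() checks are removed rather than converted. The round-robin CQ selection in sw_schedule_atomic_to_cq() and sw_schedule_parallel_to_cq() also changes: the cursor is wrapped before it is used, so a value left at or beyond cq_num_mapped_cqs (for example after the CQ mapping shrinks) is reset instead of indexing past the end of cq_map. With the old order (use, post-increment, reset only on exact equality) an already out-of-range cursor was never brought back into bounds. A minimal, self-contained sketch of that wrap-before-use pattern follows; next_cq() and its parameters are illustrative only, not driver API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only (not part of the driver): round-robin selection that
 * wraps the cursor before using it, mirroring the reworked hunks above.
 * If num_mapped shrank since the previous call, a stale cursor is reset
 * instead of reading past the end of cq_map.
 */
static uint8_t
next_cq(uint32_t *cursor, const uint8_t *cq_map, uint32_t num_mapped)
{
	if (*cursor >= num_mapped)
		*cursor = 0;
	return cq_map[(*cursor)++];
}

int
main(void)
{
	const uint8_t cq_map[] = { 3, 5, 7 };
	uint32_t cursor = 5; /* pretend the mapping shrank under us */
	int i;

	for (i = 0; i < 4; i++)
		printf("%d ", next_cq(&cursor, cq_map, 3));
	printf("\n"); /* prints: 3 5 7 3 */
	return 0;
}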