	sw->chunk_list_head = chunk;
}
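+/* Walk a chunk list, returning every chunk to the device-wide free list. */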
+static __rte_always_inline void
+iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
+{
+	while (head) {
+		struct sw_queue_chunk *next;
+		next = head->next;
+		iq_free_chunk(sw, head);
+		head = next;
+	}
+}
+
static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->tail = iq->head;
	iq->head_idx = 0;
	iq->tail_idx = 0;
+	iq->count = 0;
}
static __rte_always_inline void
done:
	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
-		struct sw_queue_chunk *next = iq->head->next;
+		struct sw_queue_chunk *next = current->next;
		iq_free_chunk(sw, current);
		iq->head = next;
		iq->head_idx = 0;
	char buf[IQ_ROB_NAMESIZE];
	struct sw_qid *qid = &sw->qids[idx];
-	for (i = 0; i < SW_IQS_MAX; i++)
-		iq_init(sw, &qid->iq[i]);
-
	/* Initialize the FID structures to no pinning (-1), and zero packets */
	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
	for (i = 0; i < RTE_DIM(qid->fids); i++)
return 0;
cleanup:
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (qid->iq[i].head)
-			iq_free_chunk(sw, qid->iq[i].head);
-	}
-
	if (qid->reorder_buffer) {
		rte_free(qid->reorder_buffer);
		qid->reorder_buffer = NULL;
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_qid *qid = &sw->qids[id];
-	uint32_t i;
-
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (!qid->iq[i].head)
-			continue;
-		iq_free_chunk(sw, qid->iq[i].head);
-	}
	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		rte_free(qid->reorder_buffer);
	return qid_init(sw, queue_id, type, conf);
}
+static void
+sw_init_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Initialize the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		if (!qid->initialized)
+			continue;
+
+		for (j = 0; j < SW_IQS_MAX; j++)
+			iq_init(sw, &qid->iq[j]);
+	}
+}
+
+static void
+sw_clean_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Release the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (!qid->iq[j].head)
+				continue;
+			iq_free_chunk_list(sw, qid->iq[j].head);
+			qid->iq[j].head = NULL;
+		}
+	}
+}
+
static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
struct rte_event_queue_conf *conf)
	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
			sw->qid_count*SW_IQS_MAX*2;
-	/* If this is a reconfiguration, free the previous IQ allocation */
+	/* If this is a reconfiguration, free the previous IQ allocation. All
+	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+	 * will be reinitialized in sw_start().
+	 */
	if (sw->chunks)
		rte_free(sw->chunks);
	/* check all queues are configured and mapped to ports */
	for (i = 0; i < sw->qid_count; i++)
-		if (sw->qids[i].iq[0].head == NULL ||
-				sw->qids[i].cq_num_mapped_cqs == 0) {
+		if (!sw->qids[i].initialized ||
+				sw->qids[i].cq_num_mapped_cqs == 0) {
			SW_LOG_ERR("Queue %d not configured\n", i);
			return -ENOLINK;
		}
}
}
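+	/* IQ chunk references are dropped in sw_stop(), so (re)initialize the
+	 * IQs of all configured qids here at start time.
+	 */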
+	sw_init_qid_iqs(sw);
+
	if (sw_xstats_init(sw) < 0)
		return -EINVAL;
sw_stop(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
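+	/* Drop the qids' IQ chunk references so a later reconfiguration can
+	 * safely free and reallocate the chunk memory.
+	 */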
+	sw_clean_qid_iqs(sw);
	sw_xstats_uninit(sw);
	sw->started = 0;
	rte_smp_wmb();