eal: rename power monitor condition member
[dpdk.git] / drivers / event / dlb / dlb.c
index 1aab7ce..a65f708 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -72,16 +72,24 @@ static struct rte_event_dev_info evdev_dlb_default_info = {
 struct process_local_port_data
 dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
 
-uint32_t
-dlb_get_queue_depth(struct dlb_eventdev *dlb,
-                   struct dlb_eventdev_queue *queue)
-{
-       /* DUMMY FOR NOW So "xstats" patch compiles */
-       RTE_SET_USED(dlb);
-       RTE_SET_USED(queue);
+static inline uint16_t
+dlb_event_enqueue_delayed(void *event_port,
+                         const struct rte_event events[]);
 
-       return 0;
-}
+static inline uint16_t
+dlb_event_enqueue_burst_delayed(void *event_port,
+                               const struct rte_event events[],
+                               uint16_t num);
+
+static inline uint16_t
+dlb_event_enqueue_new_burst_delayed(void *event_port,
+                                   const struct rte_event events[],
+                                   uint16_t num);
+
+static inline uint16_t
+dlb_event_enqueue_forward_burst_delayed(void *event_port,
+                                       const struct rte_event events[],
+                                       uint16_t num);
 
 static int
 dlb_hw_query_resources(struct dlb_eventdev *dlb)
@@ -151,6 +159,9 @@ dlb_free_qe_mem(struct dlb_port *qm_port)
 
        rte_free(qm_port->consume_qe);
        qm_port->consume_qe = NULL;
+
+       rte_memzone_free(dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz);
+       dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
 }
 
 static int
@@ -989,7 +1000,6 @@ dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
        /* The credit window is one high water mark of QEs */
        qm_port->dir_pushcount_at_credit_expiry = 0;
        qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
-       qm_port->cq_depth = cfg.cq_depth;
        /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
         * the effective depth is smaller.
         */
@@ -1014,6 +1024,33 @@ dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
 
        qm_port->dequeue_depth = dequeue_depth;
 
+       /* When using the reserved token scheme, token_pop_thresh is
+        * initially 2 * dequeue_depth. Once the tokens are reserved,
+        * the enqueue code re-assigns it to dequeue_depth.
+        */
+       qm_port->token_pop_thresh = cq_depth;
+
+       /* When the deferred scheduling vdev arg is selected, use deferred pop
+        * for all single-entry CQs.
+        */
+       if (cfg.cq_depth == 1 || (cfg.cq_depth == 2 && use_rsvd_token_scheme)) {
+               if (dlb->defer_sched)
+                       qm_port->token_pop_mode = DEFERRED_POP;
+       }
+
+       /* The default enqueue functions do not include delayed-pop support for
+        * performance reasons.
+        */
+       if (qm_port->token_pop_mode == DELAYED_POP) {
+               dlb->event_dev->enqueue = dlb_event_enqueue_delayed;
+               dlb->event_dev->enqueue_burst =
+                       dlb_event_enqueue_burst_delayed;
+               dlb->event_dev->enqueue_new_burst =
+                       dlb_event_enqueue_new_burst_delayed;
+               dlb->event_dev->enqueue_forward_burst =
+                       dlb_event_enqueue_forward_burst_delayed;
+       }
+
        qm_port->owed_tokens = 0;
        qm_port->issued_releases = 0;
 
@@ -1174,6 +1211,8 @@ dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
 
        qm_port->dequeue_depth = dequeue_depth;
 
+       /* Directed ports are auto-pop, by default. */
+       qm_port->token_pop_mode = AUTO_POP;
        qm_port->owed_tokens = 0;
        qm_port->issued_releases = 0;
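The token_pop_mode values used in this and the previous hunk select when a port returns CQ tokens to the device. A minimal sketch of the assumed policy enum (the real definition presumably lives in the driver's private header; the comments are an interpretation of this diff, not the driver's documentation):

    enum dlb_token_pop_mode {
        AUTO_POP,     /* tokens are returned immediately after each dequeue */
        DELAYED_POP,  /* a pop QE is batched into the enqueue/release path once
                       * issued releases approach token_pop_thresh
                       */
        DEFERRED_POP, /* owed tokens are returned at the start of the next dequeue */
    };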
 
@@ -2583,6 +2622,30 @@ dlb_event_build_hcws(struct dlb_port *qm_port,
        }
 }
 
+static inline void
+dlb_construct_token_pop_qe(struct dlb_port *qm_port, int idx)
+{
+       struct dlb_cq_pop_qe *qe = (void *)qm_port->qe4;
+       int num = qm_port->owed_tokens;
+
+       if (qm_port->use_rsvd_token_scheme) {
+               /* Check if there's a deficit of reserved tokens, and return
+                * early if there are no (unreserved) tokens to consume.
+                */
+               if (num <= qm_port->cq_rsvd_token_deficit) {
+                       qm_port->cq_rsvd_token_deficit -= num;
+                       qm_port->owed_tokens = 0;
+                       return;
+               }
+               num -= qm_port->cq_rsvd_token_deficit;
+               qm_port->cq_rsvd_token_deficit = 0;
+       }
+
+       qe[idx].cmd_byte = DLB_POP_CMD_BYTE;
+       qe[idx].tokens = num - 1;
+       qm_port->owed_tokens = 0;
+}
+
 static __rte_always_inline void
 dlb_pp_write(struct dlb_enqueue_qe *qe4,
             struct process_local_port_data *port_data)
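A short worked example of the reserved-token bookkeeping in dlb_construct_token_pop_qe() above, with hypothetical counts:

    /* Assuming use_rsvd_token_scheme is set:
     *
     *   owed_tokens = 3, cq_rsvd_token_deficit = 5
     *     -> 3 <= 5: the deficit drops to 2, owed_tokens is cleared, and no
     *        pop QE is written (the tokens repay the reserved deficit).
     *
     *   owed_tokens = 8, cq_rsvd_token_deficit = 2
     *     -> the deficit is cleared, num becomes 6, and the pop QE is written
     *        with tokens = num - 1 = 5 (the field appears to be encoded as
     *        count minus one).
     */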
@@ -2649,7 +2712,8 @@ dlb_consume_qe_immediate(struct dlb_port *qm_port, int num)
 static inline uint16_t
 __dlb_event_enqueue_burst(void *event_port,
                          const struct rte_event events[],
-                         uint16_t num)
+                         uint16_t num,
+                         bool use_delayed)
 {
        struct dlb_eventdev_port *ev_port = event_port;
        struct dlb_port *qm_port = &ev_port->qm_port;
@@ -2677,6 +2741,35 @@ __dlb_event_enqueue_burst(void *event_port,
 
                for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
                        const struct rte_event *ev = &events[i + j];
+                       int16_t thresh = qm_port->token_pop_thresh;
+
+                       if (use_delayed &&
+                           qm_port->token_pop_mode == DELAYED_POP &&
+                           (ev->op == RTE_EVENT_OP_FORWARD ||
+                            ev->op == RTE_EVENT_OP_RELEASE) &&
+                           qm_port->issued_releases >= thresh - 1) {
+                               /* Insert the token pop QE and break out. This
+                                * may result in a partial HCW, but that is
+                                * simpler than supporting arbitrary QE
+                                * insertion.
+                                */
+                               dlb_construct_token_pop_qe(qm_port, j);
+
+                               /* Reset the releases for the next QE batch */
+                               qm_port->issued_releases -= thresh;
+
+                               /* When using delayed token pop mode, the
+                                * initial token threshold is the full CQ
+                                * depth. After the first token pop, we need to
+                                * reset it to the dequeue_depth.
+                                */
+                               qm_port->token_pop_thresh =
+                                       qm_port->dequeue_depth;
+
+                               pop_offs = 1;
+                               j++;
+                               break;
+                       }
 
                        if (dlb_event_enqueue_prep(ev_port, qm_port, ev,
                                                   port_data, &sched_types[j],
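Putting the delayed-pop pieces together (the token_pop_thresh initialization in dlb_hw_create_ldb_port() and the check above), the sequence appears to be:

    /* Interpretation of the code, not a statement of hardware behaviour:
     *   - issued_releases counts RELEASE/FORWARD HCWs written so far
     *   - once it reaches token_pop_thresh - 1, the next RELEASE/FORWARD slot
     *     in the 4-QE cache line carries a token pop QE instead; the displaced
     *     event is retried in the next batch (pop_offs = 1)
     *   - issued_releases is rebased by -token_pop_thresh, and the threshold
     *     drops from its initial cq_depth to dequeue_depth from then on
     */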
@@ -2712,14 +2805,29 @@ dlb_event_enqueue_burst(void *event_port,
                        const struct rte_event events[],
                        uint16_t num)
 {
-       return __dlb_event_enqueue_burst(event_port, events, num);
+       return __dlb_event_enqueue_burst(event_port, events, num, false);
+}
+
+static inline uint16_t
+dlb_event_enqueue_burst_delayed(void *event_port,
+                               const struct rte_event events[],
+                               uint16_t num)
+{
+       return __dlb_event_enqueue_burst(event_port, events, num, true);
 }
 
 static inline uint16_t
 dlb_event_enqueue(void *event_port,
                  const struct rte_event events[])
 {
-       return __dlb_event_enqueue_burst(event_port, events, 1);
+       return __dlb_event_enqueue_burst(event_port, events, 1, false);
+}
+
+static inline uint16_t
+dlb_event_enqueue_delayed(void *event_port,
+                         const struct rte_event events[])
+{
+       return __dlb_event_enqueue_burst(event_port, events, 1, true);
 }
 
 static uint16_t
@@ -2727,7 +2835,15 @@ dlb_event_enqueue_new_burst(void *event_port,
                            const struct rte_event events[],
                            uint16_t num)
 {
-       return __dlb_event_enqueue_burst(event_port, events, num);
+       return __dlb_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb_event_enqueue_new_burst_delayed(void *event_port,
+                                   const struct rte_event events[],
+                                   uint16_t num)
+{
+       return __dlb_event_enqueue_burst(event_port, events, num, true);
 }
 
 static uint16_t
@@ -2735,7 +2851,15 @@ dlb_event_enqueue_forward_burst(void *event_port,
                                const struct rte_event events[],
                                uint16_t num)
 {
-       return __dlb_event_enqueue_burst(event_port, events, num);
+       return __dlb_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb_event_enqueue_forward_burst_delayed(void *event_port,
+                                       const struct rte_event events[],
+                                       uint16_t num)
+{
+       return __dlb_event_enqueue_burst(event_port, events, num, true);
 }
 
 static __rte_always_inline int
@@ -3037,6 +3161,7 @@ dlb_dequeue_wait(struct dlb_eventdev *dlb,
                /* Interrupts not supported by PF PMD */
                return 1;
        } else if (dlb->umwait_allowed) {
+               struct rte_power_monitor_cond pmc;
                volatile struct dlb_dequeue_qe *cq_base;
                union {
                        uint64_t raw_qe[2];
@@ -3057,9 +3182,12 @@ dlb_dequeue_wait(struct dlb_eventdev *dlb,
                else
                        expected_value = 0;
 
-               rte_power_monitor(monitor_addr, expected_value,
-                                 qe_mask.raw_qe[1], timeout + start_ticks,
-                                 sizeof(uint64_t));
+               pmc.addr = monitor_addr;
+               pmc.val = expected_value;
+               pmc.mask = qe_mask.raw_qe[1];
+               pmc.size = sizeof(uint64_t);
+
+               rte_power_monitor(&pmc, timeout + start_ticks);
 
                DLB_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
        } else {
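For reference, a minimal standalone sketch of the condition-struct form of rte_power_monitor() used above. It assumes the post-rename member names shown in this diff (addr/val/mask/size), a WAITPKG-capable CPU, and a caller-chosen TSC deadline; wait_for_cq_write() is a hypothetical helper, not part of the driver:

    #include <stdint.h>
    #include <rte_cycles.h>
    #include <rte_power_intrinsics.h>

    /* Sleep on a cache line until it is written, until the masked value stops
     * matching "expected", or until roughly timeout_us microseconds elapse.
     */
    static void
    wait_for_cq_write(volatile uint64_t *addr, uint64_t expected,
                      uint64_t timeout_us)
    {
        struct rte_power_monitor_cond pmc = {
            .addr = addr,
            .val  = expected,          /* skip the wait if already changed */
            .mask = UINT64_MAX,        /* compare the whole 64-bit word */
            .size = sizeof(uint64_t),
        };
        uint64_t deadline = rte_get_tsc_cycles() +
                            timeout_us * rte_get_tsc_hz() / 1E6;

        rte_power_monitor(&pmc, deadline);
    }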
@@ -3135,7 +3263,8 @@ dlb_hw_dequeue(struct dlb_eventdev *dlb,
 
        qm_port->owed_tokens += num;
 
-       dlb_consume_qe_immediate(qm_port, num);
+       if (num && qm_port->token_pop_mode == AUTO_POP)
+               dlb_consume_qe_immediate(qm_port, num);
 
        ev_port->outstanding_releases += num;
 
@@ -3260,7 +3389,8 @@ dlb_hw_dequeue_sparse(struct dlb_eventdev *dlb,
 
        qm_port->owed_tokens += num;
 
-       dlb_consume_qe_immediate(qm_port, num);
+       if (num && qm_port->token_pop_mode == AUTO_POP)
+               dlb_consume_qe_immediate(qm_port, num);
 
        ev_port->outstanding_releases += num;
 
@@ -3304,6 +3434,28 @@ dlb_event_release(struct dlb_eventdev *dlb, uint8_t port_id, int n)
                qm_port->qe4[3].cmd_byte = 0;
 
                for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
+                       int16_t thresh = qm_port->token_pop_thresh;
+
+                       if (qm_port->token_pop_mode == DELAYED_POP &&
+                           qm_port->issued_releases >= thresh - 1) {
+                               /* Insert the token pop QE */
+                               dlb_construct_token_pop_qe(qm_port, j);
+
+                               /* Reset the releases for the next QE batch */
+                               qm_port->issued_releases -= thresh;
+
+                               /* When using delayed token pop mode, the
+                                * initial token threshold is the full CQ
+                                * depth. After the first token pop, we need to
+                                * reset it to the dequeue_depth.
+                                */
+                               qm_port->token_pop_thresh =
+                                       qm_port->dequeue_depth;
+
+                               pop_offs = 1;
+                               j++;
+                               break;
+                       }
 
                        qm_port->qe4[j].cmd_byte = DLB_COMP_CMD_BYTE;
                        qm_port->issued_releases++;
@@ -3336,6 +3488,7 @@ dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
                        uint64_t wait)
 {
        struct dlb_eventdev_port *ev_port = event_port;
+       struct dlb_port *qm_port = &ev_port->qm_port;
        struct dlb_eventdev *dlb = ev_port->dlb;
        uint16_t cnt;
        int ret;
@@ -3355,6 +3508,10 @@ dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
                DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
        }
 
+       if (qm_port->token_pop_mode == DEFERRED_POP &&
+                       qm_port->owed_tokens)
+               dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+
        cnt = dlb_hw_dequeue(dlb, ev_port, ev, num, wait);
 
        DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
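Taken together with the AUTO_POP-only guard added to dlb_hw_dequeue() above, the assumed DEFERRED_POP flow is:

    /* Interpretation of the deferred-pop changes:
     *   dequeue N   -> events are received and owed_tokens accumulates, but
     *                  dlb_consume_qe_immediate() is skipped (mode != AUTO_POP)
     *   dequeue N+1 -> the tokens owed from dequeue N are returned up front,
     *                  after the application has had a chance to process and
     *                  release the events that held them
     */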
@@ -3373,6 +3530,7 @@ dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
                               uint16_t num, uint64_t wait)
 {
        struct dlb_eventdev_port *ev_port = event_port;
+       struct dlb_port *qm_port = &ev_port->qm_port;
        struct dlb_eventdev *dlb = ev_port->dlb;
        uint16_t cnt;
        int ret;
@@ -3392,6 +3550,10 @@ dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
                DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
        }
 
+       if (qm_port->token_pop_mode == DEFERRED_POP &&
+           qm_port->owed_tokens)
+               dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+
        cnt = dlb_hw_dequeue_sparse(dlb, ev_port, ev, num, wait);
 
        DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
@@ -3405,6 +3567,283 @@ dlb_event_dequeue_sparse(void *event_port, struct rte_event *ev, uint64_t wait)
        return dlb_event_dequeue_burst_sparse(event_port, ev, 1, wait);
 }
 
+static uint32_t
+dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb,
+                       struct dlb_eventdev_queue *queue)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_get_ldb_queue_depth_args cfg;
+       struct dlb_cmd_response response;
+       int ret;
+
+       cfg.queue_id = queue->qm_queue.id;
+       cfg.response = (uintptr_t)&response;
+
+       ret = dlb_iface_get_ldb_queue_depth(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: get_ldb_queue_depth ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return ret;
+       }
+
+       return response.id;
+}
+
+static uint32_t
+dlb_get_dir_queue_depth(struct dlb_eventdev *dlb,
+                       struct dlb_eventdev_queue *queue)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_get_dir_queue_depth_args cfg;
+       struct dlb_cmd_response response;
+       int ret;
+
+       cfg.queue_id = queue->qm_queue.id;
+       cfg.response = (uintptr_t)&response;
+
+       ret = dlb_iface_get_dir_queue_depth(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: get_dir_queue_depth ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return ret;
+       }
+
+       return response.id;
+}
+
+uint32_t
+dlb_get_queue_depth(struct dlb_eventdev *dlb,
+                   struct dlb_eventdev_queue *queue)
+{
+       if (queue->qm_queue.is_directed)
+               return dlb_get_dir_queue_depth(dlb, queue);
+       else
+               return dlb_get_ldb_queue_depth(dlb, queue);
+}
+
+static bool
+dlb_queue_is_empty(struct dlb_eventdev *dlb,
+                  struct dlb_eventdev_queue *queue)
+{
+       return dlb_get_queue_depth(dlb, queue) == 0;
+}
+
+static bool
+dlb_linked_queues_empty(struct dlb_eventdev *dlb)
+{
+       int i;
+
+       for (i = 0; i < dlb->num_queues; i++) {
+               if (dlb->ev_queues[i].num_links == 0)
+                       continue;
+               if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+                       return false;
+       }
+
+       return true;
+}
+
+static bool
+dlb_queues_empty(struct dlb_eventdev *dlb)
+{
+       int i;
+
+       for (i = 0; i < dlb->num_queues; i++) {
+               if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+                       return false;
+       }
+
+       return true;
+}
+
+static void
+dlb_flush_port(struct rte_eventdev *dev, int port_id)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       eventdev_stop_flush_t flush;
+       struct rte_event ev;
+       uint8_t dev_id;
+       void *arg;
+       int i;
+
+       flush = dev->dev_ops->dev_stop_flush;
+       dev_id = dev->data->dev_id;
+       arg = dev->data->dev_stop_flush_arg;
+
+       while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
+               if (flush)
+                       flush(dev_id, ev, arg);
+
+               if (dlb->ev_ports[port_id].qm_port.is_directed)
+                       continue;
+
+               ev.op = RTE_EVENT_OP_RELEASE;
+
+               rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+       }
+
+       /* Enqueue any additional outstanding releases */
+       ev.op = RTE_EVENT_OP_RELEASE;
+
+       for (i = dlb->ev_ports[port_id].outstanding_releases; i > 0; i--)
+               rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+}
+
+static void
+dlb_drain(struct rte_eventdev *dev)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       struct dlb_eventdev_port *ev_port = NULL;
+       uint8_t dev_id;
+       int i;
+
+       dev_id = dev->data->dev_id;
+
+       while (!dlb_linked_queues_empty(dlb)) {
+               /* Flush all the ev_ports, which will drain all their connected
+                * queues.
+                */
+               for (i = 0; i < dlb->num_ports; i++)
+                       dlb_flush_port(dev, i);
+       }
+
+       /* The queues are empty, but there may be events left in the ports. */
+       for (i = 0; i < dlb->num_ports; i++)
+               dlb_flush_port(dev, i);
+
+       /* If the domain's queues are empty, we're done. */
+       if (dlb_queues_empty(dlb))
+               return;
+
+       /* Else, there must be at least one unlinked load-balanced queue.
+        * Select a load-balanced port with which to drain the unlinked
+        * queue(s).
+        */
+       for (i = 0; i < dlb->num_ports; i++) {
+               ev_port = &dlb->ev_ports[i];
+
+               if (!ev_port->qm_port.is_directed)
+                       break;
+       }
+
+       if (i == dlb->num_ports) {
+               DLB_LOG_ERR("internal error: no LDB ev_ports\n");
+               return;
+       }
+
+       rte_errno = 0;
+       rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
+
+       if (rte_errno) {
+               DLB_LOG_ERR("internal error: failed to unlink ev_port %d\n",
+                           ev_port->id);
+               return;
+       }
+
+       for (i = 0; i < dlb->num_queues; i++) {
+               uint8_t qid, prio;
+               int ret;
+
+               if (dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+                       continue;
+
+               qid = i;
+               prio = 0;
+
+               /* Link the ev_port to the queue */
+               ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
+               if (ret != 1) {
+                       DLB_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
+                                   ev_port->id, qid);
+                       return;
+               }
+
+               /* Flush the queue */
+               while (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+                       dlb_flush_port(dev, ev_port->id);
+
+               /* Drain any extant events in the ev_port. */
+               dlb_flush_port(dev, ev_port->id);
+
+               /* Unlink the ev_port from the queue */
+               ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
+               if (ret != 1) {
+                       DLB_LOG_ERR("internal error: failed to unlink ev_port %d from queue %d\n",
+                                   ev_port->id, qid);
+                       return;
+               }
+       }
+}
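A brief summary of the drain sequence implemented above (a reading of the code, not separately documented behaviour):

    /* 1. While any linked queue is non-empty, dequeue-and-release through every
     *    ev_port (dlb_flush_port), invoking dev_stop_flush on each event.
     * 2. Flush once more to clear events still held in the ports themselves.
     * 3. If unlinked load-balanced queues still hold events, pick one
     *    load-balanced ev_port, unlink it, then link/flush/unlink it against
     *    each remaining non-empty queue in turn.
     */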
+
+static void
+dlb_eventdev_stop(struct rte_eventdev *dev)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+
+       rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+
+       if (dlb->run_state == DLB_RUN_STATE_STOPPED) {
+               DLB_LOG_DBG("Internal error: already stopped\n");
+               rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+               return;
+       } else if (dlb->run_state != DLB_RUN_STATE_STARTED) {
+               DLB_LOG_ERR("Internal error: bad state %d for dev_stop\n",
+                           (int)dlb->run_state);
+               rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+               return;
+       }
+
+       dlb->run_state = DLB_RUN_STATE_STOPPING;
+
+       rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+       dlb_drain(dev);
+
+       dlb->run_state = DLB_RUN_STATE_STOPPED;
+}
+
+static int
+dlb_eventdev_close(struct rte_eventdev *dev)
+{
+       dlb_hw_reset_sched_domain(dev, false);
+
+       return 0;
+}
+
+static void
+dlb_eventdev_port_release(void *port)
+{
+       struct dlb_eventdev_port *ev_port = port;
+
+       if (ev_port) {
+               struct dlb_port *qm_port = &ev_port->qm_port;
+
+               if (qm_port->config_state == DLB_CONFIGURED)
+                       dlb_free_qe_mem(qm_port);
+       }
+}
+
+static void
+dlb_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+       RTE_SET_USED(dev);
+       RTE_SET_USED(id);
+
+       /* This function intentionally left blank. */
+}
+
+static int
+dlb_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+                          uint64_t *timeout_ticks)
+{
+       RTE_SET_USED(dev);
+       uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
+
+       *timeout_ticks = ns * cycles_per_ns;
+
+       return 0;
+}
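A quick worked example of the dlb_eventdev_timeout_ticks() conversion above, using a hypothetical 2 GHz timer:

    /* rte_get_timer_hz() == 2000000000  ->  cycles_per_ns = 2e9 / 1E9 = 2,
     * so a 5000 ns dequeue timeout becomes *timeout_ticks = 10000 cycles.
     * Note the integer truncation: a 2.3 GHz clock also yields 2 cycles/ns.
     */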
+
 void
 dlb_entry_points_init(struct rte_eventdev *dev)
 {
@@ -3414,19 +3853,25 @@ dlb_entry_points_init(struct rte_eventdev *dev)
                .dev_infos_get    = dlb_eventdev_info_get,
                .dev_configure    = dlb_eventdev_configure,
                .dev_start        = dlb_eventdev_start,
+               .dev_stop         = dlb_eventdev_stop,
+               .dev_close        = dlb_eventdev_close,
                .queue_def_conf   = dlb_eventdev_queue_default_conf_get,
                .port_def_conf    = dlb_eventdev_port_default_conf_get,
                .queue_setup      = dlb_eventdev_queue_setup,
+               .queue_release    = dlb_eventdev_queue_release,
                .port_setup       = dlb_eventdev_port_setup,
+               .port_release     = dlb_eventdev_port_release,
                .port_link        = dlb_eventdev_port_link,
                .port_unlink      = dlb_eventdev_port_unlink,
                .port_unlinks_in_progress =
                                    dlb_eventdev_port_unlinks_in_progress,
+               .timeout_ticks    = dlb_eventdev_timeout_ticks,
                .dump             = dlb_eventdev_dump,
                .xstats_get       = dlb_eventdev_xstats_get,
                .xstats_get_names = dlb_eventdev_xstats_get_names,
                .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
                .xstats_reset       = dlb_eventdev_xstats_reset,
+               .dev_selftest     = test_dlb_eventdev,
        };
 
        /* Expose PMD's eventdev interface */
@@ -3453,7 +3898,7 @@ dlb_primary_eventdev_probe(struct rte_eventdev *dev,
                           struct dlb_devargs *dlb_args)
 {
        struct dlb_eventdev *dlb;
-       int err;
+       int err, i;
 
        dlb = dev->data->dev_private;
 
@@ -3502,6 +3947,10 @@ dlb_primary_eventdev_probe(struct rte_eventdev *dev,
                return err;
        }
 
+       /* Initialize each port's token pop mode */
+       for (i = 0; i < DLB_MAX_NUM_PORTS; i++)
+               dlb->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
+
        rte_spinlock_init(&dlb->qm_instance.resource_lock);
 
        dlb_iface_low_level_io_init(dlb);