#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
+#include <rte_power_intrinsics.h>
#include <rte_prefetch.h>
#include <rte_ring.h>
#include <rte_string_fns.h>
struct process_local_port_data
dlb_port[DLB_MAX_NUM_PORTS][NUM_DLB_PORT_TYPES];
-uint32_t
-dlb_get_queue_depth(struct dlb_eventdev *dlb,
- struct dlb_eventdev_queue *queue)
-{
- /* DUMMY FOR NOW So "xstats" patch compiles */
- RTE_SET_USED(dlb);
- RTE_SET_USED(queue);
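+/* The delayed-pop enqueue variants are forward-declared here: they are
+ * installed as the device enqueue functions at port setup time when
+ * DELAYED_POP is selected, but are defined with the rest of the enqueue
+ * path further below.
+ */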
+static inline uint16_t
+dlb_event_enqueue_delayed(void *event_port,
+ const struct rte_event events[]);
- return 0;
-}
+static inline uint16_t
+dlb_event_enqueue_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num);
+
+static inline uint16_t
+dlb_event_enqueue_new_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num);
+
+static inline uint16_t
+dlb_event_enqueue_forward_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num);
static int
dlb_hw_query_resources(struct dlb_eventdev *dlb)
rte_free(qm_port->consume_qe);
qm_port->consume_qe = NULL;
+
+ rte_memzone_free(dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz);
+ dlb_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
}
static int
/* The credit window is one high water mark of QEs */
qm_port->dir_pushcount_at_credit_expiry = 0;
qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
- qm_port->cq_depth = cfg.cq_depth;
/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
* the effective depth is smaller.
*/
qm_port->dequeue_depth = dequeue_depth;
+ /* When using the reserved token scheme, token_pop_thresh is
+ * initially 2 * dequeue_depth. Once the tokens are reserved,
+ * the enqueue code re-assigns it to dequeue_depth.
+ */
+ qm_port->token_pop_thresh = cq_depth;
+
+ /* When the deferred scheduling vdev arg is selected, use deferred pop
+ * for all single-entry CQs.
+ */
+ if (cfg.cq_depth == 1 || (cfg.cq_depth == 2 && use_rsvd_token_scheme)) {
+ if (dlb->defer_sched)
+ qm_port->token_pop_mode = DEFERRED_POP;
+ }
+
+ /* The default enqueue functions do not include delayed-pop support for
+ * performance reasons.
+ */
+ if (qm_port->token_pop_mode == DELAYED_POP) {
+ dlb->event_dev->enqueue = dlb_event_enqueue_delayed;
+ dlb->event_dev->enqueue_burst =
+ dlb_event_enqueue_burst_delayed;
+ dlb->event_dev->enqueue_new_burst =
+ dlb_event_enqueue_new_burst_delayed;
+ dlb->event_dev->enqueue_forward_burst =
+ dlb_event_enqueue_forward_burst_delayed;
+ }
+
qm_port->owed_tokens = 0;
qm_port->issued_releases = 0;
qm_port->dequeue_depth = dequeue_depth;
+ /* Directed ports are auto-pop, by default. */
+ qm_port->token_pop_mode = AUTO_POP;
qm_port->owed_tokens = 0;
qm_port->issued_releases = 0;
return 0;
}
+static int
+dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ int ret, i;
+
+ /* If an event queue or port was previously configured, but hasn't been
+ * reconfigured, reapply its original configuration.
+ */
+ for (i = 0; i < dlb->num_queues; i++) {
+ struct dlb_eventdev_queue *ev_queue;
+
+ ev_queue = &dlb->ev_queues[i];
+
+ if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
+ continue;
+
+ ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < dlb->num_ports; i++) {
+ struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+
+ if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
+ continue;
+
+ ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
+ i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int
set_dev_id(const char *key __rte_unused,
const char *value,
return 0;
}
+static int32_t
+dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
+{
+ struct dlb_hw_dev *handle = &dlb->qm_instance;
+ struct dlb_create_dir_queue_args cfg;
+ struct dlb_cmd_response response;
+ int32_t ret;
+
+ cfg.response = (uintptr_t)&response;
+
+ /* The directed port is always configured before its queue */
+ cfg.port_id = qm_port_id;
+
+ ret = dlb_iface_dir_queue_create(handle, &cfg);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
+ ret, dlb_error_strings[response.status]);
+ return -EINVAL;
+ }
+
+ return response.id;
+}
+
+static int
+dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_queue *ev_queue,
+ struct dlb_eventdev_port *ev_port)
+{
+ int32_t qm_qid;
+
+ qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
+
+ if (qm_qid < 0) {
+ DLB_LOG_ERR("Failed to create the DIR queue\n");
+ return qm_qid;
+ }
+
+ dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+ ev_queue->qm_queue.id = qm_qid;
+
+ return 0;
+}
+
static int16_t
dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
uint32_t qm_port_id,
return ret;
}
-static int32_t
-dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
-{
- struct dlb_hw_dev *handle = &dlb->qm_instance;
- struct dlb_create_dir_queue_args cfg;
- struct dlb_cmd_response response;
- int32_t ret;
-
- cfg.response = (uintptr_t)&response;
-
- /* The directed port is always configured before its queue */
- cfg.port_id = qm_port_id;
-
- ret = dlb_iface_dir_queue_create(handle, &cfg);
- if (ret < 0) {
- DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
- ret, dlb_error_strings[response.status]);
- return -EINVAL;
- }
-
- return response.id;
-}
-
-static int
-dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
- struct dlb_eventdev_queue *ev_queue,
- struct dlb_eventdev_port *ev_port)
-{
- int32_t qm_qid;
-
- qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
-
- if (qm_qid < 0) {
- DLB_LOG_ERR("Failed to create the DIR queue\n");
- return qm_qid;
- }
-
- dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
-
- ev_queue->qm_queue.id = qm_qid;
-
- return 0;
-}
-
static int
dlb_do_port_link(struct rte_eventdev *dev,
struct dlb_eventdev_queue *ev_queue,
return 0;
}
+static int
+dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ int i;
+
+ /* Perform requested port->queue links */
+ for (i = 0; i < dlb->num_ports; i++) {
+ struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+ int j;
+
+ for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+ struct dlb_eventdev_queue *ev_queue;
+ uint8_t prio, queue_id;
+
+ if (!ev_port->link[j].valid)
+ continue;
+
+ prio = ev_port->link[j].priority;
+ queue_id = ev_port->link[j].queue_id;
+
+ if (dlb_validate_port_link(ev_port, queue_id, true, j))
+ return -EINVAL;
+
+ ev_queue = &dlb->ev_queues[queue_id];
+
+ if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
const uint8_t queues[], const uint8_t priorities[],
return i;
}
-void
-dlb_entry_points_init(struct rte_eventdev *dev)
+static int
+dlb_eventdev_start(struct rte_eventdev *dev)
{
- static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
- .dev_infos_get = dlb_eventdev_info_get,
- .dev_configure = dlb_eventdev_configure,
- .queue_def_conf = dlb_eventdev_queue_default_conf_get,
- .port_def_conf = dlb_eventdev_port_default_conf_get,
- .queue_setup = dlb_eventdev_queue_setup,
- .port_setup = dlb_eventdev_port_setup,
- .port_link = dlb_eventdev_port_link,
- .port_unlink = dlb_eventdev_port_unlink,
- .port_unlinks_in_progress =
- dlb_eventdev_port_unlinks_in_progress,
- .dump = dlb_eventdev_dump,
- .xstats_get = dlb_eventdev_xstats_get,
- .xstats_get_names = dlb_eventdev_xstats_get_names,
- .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
- .xstats_reset = dlb_eventdev_xstats_reset,
- };
-
- /* Expose PMD's eventdev interface */
- dev->dev_ops = &dlb_eventdev_entry_ops;
-}
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ struct dlb_hw_dev *handle = &dlb->qm_instance;
+ struct dlb_start_domain_args cfg;
+ struct dlb_cmd_response response;
+ int ret, i;
-int
-dlb_primary_eventdev_probe(struct rte_eventdev *dev,
- const char *name,
- struct dlb_devargs *dlb_args)
-{
- struct dlb_eventdev *dlb;
- int err;
+ rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+ if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
+ DLB_LOG_ERR("bad state %d for dev_start\n",
+ (int)dlb->run_state);
+ rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+ return -EINVAL;
+ }
+ dlb->run_state = DLB_RUN_STATE_STARTING;
+ rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
- dlb = dev->data->dev_private;
+ /* If the device was configured more than once, some event ports and/or
+ * queues may need to be reconfigured.
+ */
+ ret = dlb_eventdev_reapply_configuration(dev);
+ if (ret)
+ return ret;
- dlb->event_dev = dev; /* backlink */
+ /* The DLB PMD delays port links until the device is started. */
+ ret = dlb_eventdev_apply_port_links(dev);
+ if (ret)
+ return ret;
- evdev_dlb_default_info.driver_name = name;
+ cfg.response = (uintptr_t)&response;
- dlb->max_num_events_override = dlb_args->max_num_events;
- dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
- dlb->defer_sched = dlb_args->defer_sched;
- dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
+ for (i = 0; i < dlb->num_ports; i++) {
+ if (!dlb->ev_ports[i].setup_done) {
+ DLB_LOG_ERR("dlb: port %d not setup", i);
+ return -ESTALE;
+ }
+ }
- /* Open the interface.
- * For vdev mode, this means open the dlb kernel module.
- */
- err = dlb_iface_open(&dlb->qm_instance, name);
- if (err < 0) {
- DLB_LOG_ERR("could not open event hardware device, err=%d\n",
- err);
- return err;
+ for (i = 0; i < dlb->num_queues; i++) {
+ if (dlb->ev_queues[i].num_links == 0) {
+ DLB_LOG_ERR("dlb: queue %d is not linked", i);
+ return -ENOLINK;
+ }
}
- err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
- if (err < 0) {
- DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
- err);
- return err;
+ ret = dlb_iface_sched_domain_start(handle, &cfg);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
+ ret, dlb_error_strings[response.status]);
+ return ret;
}
- err = dlb_hw_query_resources(dlb);
- if (err) {
- DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
- return err;
+ dlb->run_state = DLB_RUN_STATE_STARTED;
+ DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
+
+ return 0;
+}
+
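+/* Check that a software credit (new-event inflight slot) is available for
+ * this enqueue, refilling the port's local credit cache from the
+ * device-wide new_event_limit pool if necessary. Returns nonzero and sets
+ * rte_errno if no credit can be obtained.
+ */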
+static inline int
+dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_port *ev_port)
+{
+ uint32_t sw_inflights = __atomic_load_n(&dlb->inflights,
+ __ATOMIC_SEQ_CST);
+ const int num = 1;
+
+ if (unlikely(ev_port->inflight_max < sw_inflights)) {
+ DLB_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
+ rte_errno = -ENOSPC;
+ return 1;
}
- err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
- if (err < 0) {
- DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
- return err;
+ if (ev_port->inflight_credits < num) {
+ /* check if event enqueue brings ev_port over max threshold */
+ uint32_t credit_update_quanta = ev_port->credit_update_quanta;
+
+ if (sw_inflights + credit_update_quanta >
+ dlb->new_event_limit) {
+ DLB_INC_STAT(
+ ev_port->stats.traffic.tx_nospc_new_event_limit,
+ 1);
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+
+ __atomic_fetch_add(&dlb->inflights, credit_update_quanta,
+ __ATOMIC_SEQ_CST);
+ ev_port->inflight_credits += (credit_update_quanta);
+
+ if (ev_port->inflight_credits < num) {
+ DLB_INC_STAT(
+ ev_port->stats.traffic.tx_nospc_inflight_credits,
+ 1);
+ rte_errno = -ENOSPC;
+ return 1;
+ }
}
- /* Complete xtstats runtime initialization */
- err = dlb_xstats_init(dlb);
- if (err) {
- DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
- return err;
+ return 0;
+}
+
+static inline void
+dlb_replenish_sw_credits(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_port *ev_port)
+{
+ uint16_t quanta = ev_port->credit_update_quanta;
+
+ if (ev_port->inflight_credits >= quanta * 2) {
+ /* Replenish credits, saving one quanta for enqueues */
+ uint16_t val = ev_port->inflight_credits - quanta;
+
+ __atomic_fetch_sub(&dlb->inflights, val, __ATOMIC_SEQ_CST);
+ ev_port->inflight_credits -= val;
}
+}
+
+static __rte_always_inline uint16_t
+dlb_read_pc(struct process_local_port_data *port_data, bool ldb)
+{
+ volatile uint16_t *popcount;
+
+ if (ldb)
+ popcount = port_data->ldb_popcount;
+ else
+ popcount = port_data->dir_popcount;
+
+ return *popcount;
+}
+
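+/* Hardware load-balanced credits are refreshed lazily: when the cached
+ * count reaches zero, read the hardware-maintained count and take its
+ * delta from the value recorded the last time the cache was exhausted.
+ * Returns nonzero if no credits are available.
+ */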
+static inline int
+dlb_check_enqueue_hw_ldb_credits(struct dlb_port *qm_port,
+ struct process_local_port_data *port_data)
+{
+ if (unlikely(qm_port->cached_ldb_credits == 0)) {
+ uint16_t pc;
+
+ pc = dlb_read_pc(port_data, true);
+
+ qm_port->cached_ldb_credits = pc -
+ qm_port->ldb_pushcount_at_credit_expiry;
+ if (unlikely(qm_port->cached_ldb_credits == 0)) {
+ DLB_INC_STAT(
+ qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
+ 1);
+
+ DLB_LOG_DBG("ldb credits exhausted\n");
+ return 1;
+ }
+ qm_port->ldb_pushcount_at_credit_expiry +=
+ qm_port->cached_ldb_credits;
+ }
+
+ return 0;
+}
+
+static inline int
+dlb_check_enqueue_hw_dir_credits(struct dlb_port *qm_port,
+ struct process_local_port_data *port_data)
+{
+ if (unlikely(qm_port->cached_dir_credits == 0)) {
+ uint16_t pc;
+
+ pc = dlb_read_pc(port_data, false);
+
+ qm_port->cached_dir_credits = pc -
+ qm_port->dir_pushcount_at_credit_expiry;
+
+ if (unlikely(qm_port->cached_dir_credits == 0)) {
+ DLB_INC_STAT(
+ qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
+ 1);
+
+ DLB_LOG_DBG("dir credits exhausted\n");
+ return 1;
+ }
+ qm_port->dir_pushcount_at_credit_expiry +=
+ qm_port->cached_dir_credits;
+ }
+
+ return 0;
+}
+
+static inline int
+dlb_event_enqueue_prep(struct dlb_eventdev_port *ev_port,
+ struct dlb_port *qm_port,
+ const struct rte_event ev[],
+ struct process_local_port_data *port_data,
+ uint8_t *sched_type,
+ uint8_t *queue_id)
+{
+ struct dlb_eventdev *dlb = ev_port->dlb;
+ struct dlb_eventdev_queue *ev_queue;
+ uint16_t *cached_credits = NULL;
+ struct dlb_queue *qm_queue;
+
+ ev_queue = &dlb->ev_queues[ev->queue_id];
+ qm_queue = &ev_queue->qm_queue;
+ *queue_id = qm_queue->id;
+
+ /* Ignore sched_type and hardware credits on release events */
+ if (ev->op == RTE_EVENT_OP_RELEASE)
+ goto op_check;
+
+ if (!qm_queue->is_directed) {
+ /* Load balanced destination queue */
+
+ if (dlb_check_enqueue_hw_ldb_credits(qm_port, port_data)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_ldb_credits;
+
+ switch (ev->sched_type) {
+ case RTE_SCHED_TYPE_ORDERED:
+ DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ORDERED\n");
+ if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
+ DLB_LOG_ERR("dlb: tried to send ordered event to unordered queue %d\n",
+ *queue_id);
+ rte_errno = -EINVAL;
+ return 1;
+ }
+ *sched_type = DLB_SCHED_ORDERED;
+ break;
+ case RTE_SCHED_TYPE_ATOMIC:
+ DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
+ *sched_type = DLB_SCHED_ATOMIC;
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
+ DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
+ if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
+ *sched_type = DLB_SCHED_ORDERED;
+ else
+ *sched_type = DLB_SCHED_UNORDERED;
+ break;
+ default:
+ DLB_LOG_ERR("Unsupported LDB sched type in put_qe\n");
+ DLB_INC_STAT(ev_port->stats.tx_invalid, 1);
+ rte_errno = -EINVAL;
+ return 1;
+ }
+ } else {
+ /* Directed destination queue */
+
+ if (dlb_check_enqueue_hw_dir_credits(qm_port, port_data)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_dir_credits;
+
+ DLB_LOG_DBG("dlb: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
+
+ *sched_type = DLB_SCHED_DIRECTED;
+ }
+
+op_check:
+ switch (ev->op) {
+ case RTE_EVENT_OP_NEW:
+ /* Check that a sw credit is available */
+ if (dlb_check_enqueue_sw_credits(dlb, ev_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ ev_port->inflight_credits--;
+ (*cached_credits)--;
+ break;
+ case RTE_EVENT_OP_FORWARD:
+ /* Check for outstanding_releases underflow. If this occurs,
+ * the application is not using the EVENT_OPs correctly; for
+ * example, forwarding or releasing events that were not
+ * dequeued.
+ */
+ RTE_ASSERT(ev_port->outstanding_releases > 0);
+ ev_port->outstanding_releases--;
+ qm_port->issued_releases++;
+ (*cached_credits)--;
+ break;
+ case RTE_EVENT_OP_RELEASE:
+ ev_port->inflight_credits++;
+ /* Check for outstanding_releases underflow. If this occurs,
+ * the application is not using the EVENT_OPs correctly; for
+ * example, forwarding or releasing events that were not
+ * dequeued.
+ */
+ RTE_ASSERT(ev_port->outstanding_releases > 0);
+ ev_port->outstanding_releases--;
+ qm_port->issued_releases++;
+ /* Replenish s/w credits if enough are cached */
+ dlb_replenish_sw_credits(dlb, ev_port);
+ break;
+ }
+
+ DLB_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
+ DLB_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
+
+#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
+ if (ev->op != RTE_EVENT_OP_RELEASE) {
+ DLB_INC_STAT(ev_port->stats.enq_ok[ev->queue_id], 1);
+ DLB_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
+ }
+#endif
+
+ return 0;
+}
+
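+/* Map an event op to its QE command byte, indexed by port type. Directed
+ * ports encode FORWARD as a plain new enqueue and RELEASE as a no-op,
+ * since a directed event has no load-balanced scheduling state to
+ * complete.
+ */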
+static uint8_t cmd_byte_map[NUM_DLB_PORT_TYPES][DLB_NUM_HW_SCHED_TYPES] = {
+ {
+ /* Load-balanced cmd bytes */
+ [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
+ [RTE_EVENT_OP_FORWARD] = DLB_FWD_CMD_BYTE,
+ [RTE_EVENT_OP_RELEASE] = DLB_COMP_CMD_BYTE,
+ },
+ {
+ /* Directed cmd bytes */
+ [RTE_EVENT_OP_NEW] = DLB_NEW_CMD_BYTE,
+ [RTE_EVENT_OP_FORWARD] = DLB_NEW_CMD_BYTE,
+ [RTE_EVENT_OP_RELEASE] = DLB_NOOP_CMD_BYTE,
+ },
+};
+
+static inline void
+dlb_event_build_hcws(struct dlb_port *qm_port,
+ const struct rte_event ev[],
+ int num,
+ uint8_t *sched_type,
+ uint8_t *queue_id)
+{
+ struct dlb_enqueue_qe *qe;
+ uint16_t sched_word[4];
+ __m128i sse_qe[2];
+ int i;
+
+ qe = qm_port->qe4;
+
+ sse_qe[0] = _mm_setzero_si128();
+ sse_qe[1] = _mm_setzero_si128();
+
+ switch (num) {
+ case 4:
+ /* Construct the metadata portion of two HCWs in one 128b SSE
+ * register. HCW metadata is constructed in the SSE registers
+ * like so:
+ * sse_qe[0][63:0]: qe[0]'s metadata
+ * sse_qe[0][127:64]: qe[1]'s metadata
+ * sse_qe[1][63:0]: qe[2]'s metadata
+ * sse_qe[1][127:64]: qe[3]'s metadata
+ */
+
+ /* Convert the event operation into a command byte and store it
+ * in the metadata:
+ * sse_qe[0][63:56] = cmd_byte_map[is_directed][ev[0].op]
+ * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
+ * sse_qe[1][63:56] = cmd_byte_map[is_directed][ev[2].op]
+ * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
+ */
+#define DLB_QE_CMD_BYTE 7
+ sse_qe[0] = _mm_insert_epi8(sse_qe[0],
+ cmd_byte_map[qm_port->is_directed][ev[0].op],
+ DLB_QE_CMD_BYTE);
+ sse_qe[0] = _mm_insert_epi8(sse_qe[0],
+ cmd_byte_map[qm_port->is_directed][ev[1].op],
+ DLB_QE_CMD_BYTE + 8);
+ sse_qe[1] = _mm_insert_epi8(sse_qe[1],
+ cmd_byte_map[qm_port->is_directed][ev[2].op],
+ DLB_QE_CMD_BYTE);
+ sse_qe[1] = _mm_insert_epi8(sse_qe[1],
+ cmd_byte_map[qm_port->is_directed][ev[3].op],
+ DLB_QE_CMD_BYTE + 8);
+
+ /* Store priority, scheduling type, and queue ID in the sched
+ * word array because these values are re-used when the
+ * destination is a directed queue.
+ */
+ sched_word[0] = EV_TO_DLB_PRIO(ev[0].priority) << 10 |
+ sched_type[0] << 8 |
+ queue_id[0];
+ sched_word[1] = EV_TO_DLB_PRIO(ev[1].priority) << 10 |
+ sched_type[1] << 8 |
+ queue_id[1];
+ sched_word[2] = EV_TO_DLB_PRIO(ev[2].priority) << 10 |
+ sched_type[2] << 8 |
+ queue_id[2];
+ sched_word[3] = EV_TO_DLB_PRIO(ev[3].priority) << 10 |
+ sched_type[3] << 8 |
+ queue_id[3];
+
+ /* Store the event priority, scheduling type, and queue ID in
+ * the metadata:
+ * sse_qe[0][31:16] = sched_word[0]
+ * sse_qe[0][95:80] = sched_word[1]
+ * sse_qe[1][31:16] = sched_word[2]
+ * sse_qe[1][95:80] = sched_word[3]
+ */
+#define DLB_QE_QID_SCHED_WORD 1
+ sse_qe[0] = _mm_insert_epi16(sse_qe[0],
+ sched_word[0],
+ DLB_QE_QID_SCHED_WORD);
+ sse_qe[0] = _mm_insert_epi16(sse_qe[0],
+ sched_word[1],
+ DLB_QE_QID_SCHED_WORD + 4);
+ sse_qe[1] = _mm_insert_epi16(sse_qe[1],
+ sched_word[2],
+ DLB_QE_QID_SCHED_WORD);
+ sse_qe[1] = _mm_insert_epi16(sse_qe[1],
+ sched_word[3],
+ DLB_QE_QID_SCHED_WORD + 4);
+
+ /* If the destination is a load-balanced queue, store the lock
+ * ID. If it is a directed queue, DLB places this field in
+ * bytes 10-11 of the received QE, so we format it accordingly:
+ * sse_qe[0][47:32] = dir queue ? sched_word[0] : flow_id[0]
+ * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
+ * sse_qe[1][47:32] = dir queue ? sched_word[2] : flow_id[2]
+ * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
+ */
+#define DLB_QE_LOCK_ID_WORD 2
+ sse_qe[0] = _mm_insert_epi16(sse_qe[0],
+ (sched_type[0] == DLB_SCHED_DIRECTED) ?
+ sched_word[0] : ev[0].flow_id,
+ DLB_QE_LOCK_ID_WORD);
+ sse_qe[0] = _mm_insert_epi16(sse_qe[0],
+ (sched_type[1] == DLB_SCHED_DIRECTED) ?
+ sched_word[1] : ev[1].flow_id,
+ DLB_QE_LOCK_ID_WORD + 4);
+ sse_qe[1] = _mm_insert_epi16(sse_qe[1],
+ (sched_type[2] == DLB_SCHED_DIRECTED) ?
+ sched_word[2] : ev[2].flow_id,
+ DLB_QE_LOCK_ID_WORD);
+ sse_qe[1] = _mm_insert_epi16(sse_qe[1],
+ (sched_type[3] == DLB_SCHED_DIRECTED) ?
+ sched_word[3] : ev[3].flow_id,
+ DLB_QE_LOCK_ID_WORD + 4);
+
+ /* Store the event type and sub event type in the metadata:
+ * sse_qe[0][15:0] = flow_id[0]
+ * sse_qe[0][79:64] = flow_id[1]
+ * sse_qe[1][15:0] = flow_id[2]
+ * sse_qe[1][79:64] = flow_id[3]
+ */
+#define DLB_QE_EV_TYPE_WORD 0
+ sse_qe[0] = _mm_insert_epi16(sse_qe[0],
+ ev[0].sub_event_type << 8 |
+ ev[0].event_type,
+ DLB_QE_EV_TYPE_WORD);
+ sse_qe[0] = _mm_insert_epi16(sse_qe[0],
+ ev[1].sub_event_type << 8 |
+ ev[1].event_type,
+ DLB_QE_EV_TYPE_WORD + 4);
+ sse_qe[1] = _mm_insert_epi16(sse_qe[1],
+ ev[2].sub_event_type << 8 |
+ ev[2].event_type,
+ DLB_QE_EV_TYPE_WORD);
+ sse_qe[1] = _mm_insert_epi16(sse_qe[1],
+ ev[3].sub_event_type << 8 |
+ ev[3].event_type,
+ DLB_QE_EV_TYPE_WORD + 4);
+
+ /* Store the metadata to memory (use the double-precision
+ * _mm_storeh_pd because there is no integer function for
+ * storing the upper 64b):
+ * qe[0] metadata = sse_qe[0][63:0]
+ * qe[1] metadata = sse_qe[0][127:64]
+ * qe[2] metadata = sse_qe[1][63:0]
+ * qe[3] metadata = sse_qe[1][127:64]
+ */
+ _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
+ _mm_storeh_pd((double *)&qe[1].u.opaque_data,
+ (__m128d) sse_qe[0]);
+ _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
+ _mm_storeh_pd((double *)&qe[3].u.opaque_data,
+ (__m128d) sse_qe[1]);
+
+ qe[0].data = ev[0].u64;
+ qe[1].data = ev[1].u64;
+ qe[2].data = ev[2].u64;
+ qe[3].data = ev[3].u64;
+
+ break;
+ case 3:
+ case 2:
+ case 1:
+ for (i = 0; i < num; i++) {
+ qe[i].cmd_byte =
+ cmd_byte_map[qm_port->is_directed][ev[i].op];
+ qe[i].sched_type = sched_type[i];
+ qe[i].data = ev[i].u64;
+ qe[i].qid = queue_id[i];
+ qe[i].priority = EV_TO_DLB_PRIO(ev[i].priority);
+ qe[i].lock_id = ev[i].flow_id;
+ if (sched_type[i] == DLB_SCHED_DIRECTED) {
+ struct dlb_msg_info *info =
+ (struct dlb_msg_info *)&qe[i].lock_id;
+
+ info->qid = queue_id[i];
+ info->sched_type = DLB_SCHED_DIRECTED;
+ info->priority = qe[i].priority;
+ }
+ qe[i].u.event_type.major = ev[i].event_type;
+ qe[i].u.event_type.sub = ev[i].sub_event_type;
+ }
+ break;
+ case 0:
+ break;
+ }
+}
+
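+/* Build a CQ token pop QE in slot 'idx' of the port's 4-QE enqueue buffer,
+ * returning all currently owed tokens (less any reserved-token deficit).
+ */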
+static inline void
+dlb_construct_token_pop_qe(struct dlb_port *qm_port, int idx)
+{
+ struct dlb_cq_pop_qe *qe = (void *)qm_port->qe4;
+ int num = qm_port->owed_tokens;
+
+ if (qm_port->use_rsvd_token_scheme) {
+ /* Check if there's a deficit of reserved tokens, and return
+ * early if there are no (unreserved) tokens to consume.
+ */
+ if (num <= qm_port->cq_rsvd_token_deficit) {
+ qm_port->cq_rsvd_token_deficit -= num;
+ qm_port->owed_tokens = 0;
+ return;
+ }
+ num -= qm_port->cq_rsvd_token_deficit;
+ qm_port->cq_rsvd_token_deficit = 0;
+ }
+
+ qe[idx].cmd_byte = DLB_POP_CMD_BYTE;
+ qe[idx].tokens = num - 1;
+ qm_port->owed_tokens = 0;
+}
+
+static __rte_always_inline void
+dlb_pp_write(struct dlb_enqueue_qe *qe4,
+ struct process_local_port_data *port_data)
+{
+ dlb_movdir64b(port_data->pp_addr, qe4);
+}
+
+static inline void
+dlb_hw_do_enqueue(struct dlb_port *qm_port,
+ bool do_sfence,
+ struct process_local_port_data *port_data)
+{
+ DLB_LOG_DBG("dlb: Flushing QE(s) to DLB\n");
+
+ /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
+ * application writes complete before enqueueing the release HCW.
+ */
+ if (do_sfence)
+ rte_wmb();
+
+ dlb_pp_write(qm_port->qe4, port_data);
+}
+
+static inline int
+dlb_consume_qe_immediate(struct dlb_port *qm_port, int num)
+{
+ struct process_local_port_data *port_data;
+ struct dlb_cq_pop_qe *qe;
+
+ RTE_ASSERT(qm_port->config_state == DLB_CONFIGURED);
+
+ if (qm_port->use_rsvd_token_scheme) {
+ /* Check if there's a deficit of reserved tokens, and return
+ * early if there are no (unreserved) tokens to consume.
+ */
+ if (num <= qm_port->cq_rsvd_token_deficit) {
+ qm_port->cq_rsvd_token_deficit -= num;
+ qm_port->owed_tokens = 0;
+ return 0;
+ }
+ num -= qm_port->cq_rsvd_token_deficit;
+ qm_port->cq_rsvd_token_deficit = 0;
+ }
+
+ qe = qm_port->consume_qe;
+
+ qe->tokens = num - 1;
+ qe->int_arm = 0;
+
+ /* No store fence needed since no pointer is being sent, and CQ token
+ * pops can be safely reordered with other HCWs.
+ */
+ port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ dlb_movntdq_single(port_data->pp_addr, qe);
+
+ DLB_LOG_DBG("dlb: consume immediate - %d QEs\n", num);
+
+ qm_port->owed_tokens = 0;
+
+ return 0;
+}
+
+static inline uint16_t
+__dlb_event_enqueue_burst(void *event_port,
+ const struct rte_event events[],
+ uint16_t num,
+ bool use_delayed)
+{
+ struct dlb_eventdev_port *ev_port = event_port;
+ struct dlb_port *qm_port = &ev_port->qm_port;
+ struct process_local_port_data *port_data;
+ int i;
+
+ RTE_ASSERT(ev_port->enq_configured);
+ RTE_ASSERT(events != NULL);
+
+ rte_errno = 0;
+ i = 0;
+
+ port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ while (i < num) {
+ uint8_t sched_types[DLB_NUM_QES_PER_CACHE_LINE];
+ uint8_t queue_ids[DLB_NUM_QES_PER_CACHE_LINE];
+ int pop_offs = 0;
+ int j = 0;
+
+ memset(qm_port->qe4,
+ 0,
+ DLB_NUM_QES_PER_CACHE_LINE *
+ sizeof(struct dlb_enqueue_qe));
+
+ for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
+ const struct rte_event *ev = &events[i + j];
+ int16_t thresh = qm_port->token_pop_thresh;
+
+ if (use_delayed &&
+ qm_port->token_pop_mode == DELAYED_POP &&
+ (ev->op == RTE_EVENT_OP_FORWARD ||
+ ev->op == RTE_EVENT_OP_RELEASE) &&
+ qm_port->issued_releases >= thresh - 1) {
+ /* Insert the token pop QE and break out. This
+ * may result in a partial HCW, but that is
+ * simpler than supporting arbitrary QE
+ * insertion.
+ */
+ dlb_construct_token_pop_qe(qm_port, j);
+
+ /* Reset the releases for the next QE batch */
+ qm_port->issued_releases -= thresh;
+
+ /* When using delayed token pop mode, the
+ * initial token threshold is the full CQ
+ * depth. After the first token pop, we need to
+ * reset it to the dequeue_depth.
+ */
+ qm_port->token_pop_thresh =
+ qm_port->dequeue_depth;
+
+ pop_offs = 1;
+ j++;
+ break;
+ }
+
+ if (dlb_event_enqueue_prep(ev_port, qm_port, ev,
+ port_data, &sched_types[j],
+ &queue_ids[j]))
+ break;
+ }
+
+ if (j == 0)
+ break;
+
+ dlb_event_build_hcws(qm_port, &events[i], j - pop_offs,
+ sched_types, queue_ids);
+
+ dlb_hw_do_enqueue(qm_port, i == 0, port_data);
+
+ /* Don't include the token pop QE in the enqueue count */
+ i += j - pop_offs;
+
+ /* Don't interpret j < DLB_NUM_... as out-of-credits if
+ * pop_offs != 0
+ */
+ if (j < DLB_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
+ break;
+ }
+
+ RTE_ASSERT(!(i == 0 && rte_errno != -ENOSPC));
+
+ return i;
+}
+
+static inline uint16_t
+dlb_event_enqueue_burst(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb_event_enqueue_burst(event_port, events, num, false);
+}
+
+static inline uint16_t
+dlb_event_enqueue_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb_event_enqueue_burst(event_port, events, num, true);
+}
+
+static inline uint16_t
+dlb_event_enqueue(void *event_port,
+ const struct rte_event events[])
+{
+ return __dlb_event_enqueue_burst(event_port, events, 1, false);
+}
+
+static inline uint16_t
+dlb_event_enqueue_delayed(void *event_port,
+ const struct rte_event events[])
+{
+ return __dlb_event_enqueue_burst(event_port, events, 1, true);
+}
+
+static uint16_t
+dlb_event_enqueue_new_burst(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb_event_enqueue_new_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb_event_enqueue_burst(event_port, events, num, true);
+}
+
+static uint16_t
+dlb_event_enqueue_forward_burst(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb_event_enqueue_forward_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb_event_enqueue_burst(event_port, events, num, true);
+}
+
+static __rte_always_inline int
+dlb_recv_qe(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe,
+ uint8_t *offset)
+{
+ uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
+ {0x00, 0x01, 0x03, 0x07} };
+ uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
+ volatile struct dlb_dequeue_qe *cq_addr;
+ __m128i *qes = (__m128i *)qe;
+ uint64_t *cache_line_base;
+ uint8_t gen_bits;
+
+ cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
+ cq_addr = &cq_addr[qm_port->cq_idx];
+
+ cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
+ *offset = ((uintptr_t)cq_addr & 0x30) >> 4;
+
+ /* Load the next CQ cache line from memory. Pack these reads as tight
+ * as possible to reduce the chance that DLB invalidates the line while
+ * the CPU is reading it. Read the cache line backwards to ensure that
+ * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
+ *
+ * (Valid QEs start at &qe[offset])
+ */
+ qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
+ qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
+ qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
+ qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
+
+ /* Evict the cache line ASAP */
+ rte_cldemote(cache_line_base);
+
+ /* Extract and combine the gen bits */
+ gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
+ ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
+ ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
+ ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
+
+ /* XOR the combined bits such that a 1 represents a valid QE */
+ gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
+
+ /* Mask off gen bits we don't care about */
+ gen_bits &= and_mask[*offset];
+
+ return __builtin_popcount(gen_bits);
+}
+
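+/* Advance the CQ index by 'cnt' entries. The index wraps at the CQ depth,
+ * and the gen bit toggles on every wrap so that valid QEs can be
+ * distinguished from stale ones.
+ */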
+static inline void
+dlb_inc_cq_idx(struct dlb_port *qm_port, int cnt)
+{
+ uint16_t idx = qm_port->cq_idx_unmasked + cnt;
+
+ qm_port->cq_idx_unmasked = idx;
+ qm_port->cq_idx = idx & qm_port->cq_depth_mask;
+ qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
+}
+
+static inline int
+dlb_process_dequeue_qes(struct dlb_eventdev_port *ev_port,
+ struct dlb_port *qm_port,
+ struct rte_event *events,
+ struct dlb_dequeue_qe *qes,
+ int cnt)
+{
+ uint8_t *qid_mappings = qm_port->qid_mappings;
+ int i, num;
+
+ RTE_SET_USED(ev_port); /* avoids unused variable error */
+
+ for (i = 0, num = 0; i < cnt; i++) {
+ struct dlb_dequeue_qe *qe = &qes[i];
+ int sched_type_map[4] = {
+ [DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
+ [DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
+ [DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
+ [DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
+ };
+
+ DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
+ (long long)qe->data, qe->qid,
+ qe->u.event_type.major,
+ qe->u.event_type.sub,
+ qe->pp_id, qe->sched_type, qe->qid, qe->error);
+
+ /* Fill in event information.
+ * Note that flow_id must be embedded in the data by
+ * the app, such as the mbuf RSS hash field if the data
+ * buffer is an mbuf.
+ */
+ if (unlikely(qe->error)) {
+ DLB_LOG_ERR("QE error bit ON\n");
+ DLB_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
+ dlb_consume_qe_immediate(qm_port, 1);
+ continue; /* Ignore */
+ }
+
+ events[num].u64 = qe->data;
+ events[num].queue_id = qid_mappings[qe->qid];
+ events[num].priority = DLB_TO_EV_PRIO((uint8_t)qe->priority);
+ events[num].event_type = qe->u.event_type.major;
+ events[num].sub_event_type = qe->u.event_type.sub;
+ events[num].sched_type = sched_type_map[qe->sched_type];
+ DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
+ num++;
+ }
+ DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num);
+
+ return num;
+}
+
+static inline int
+dlb_process_dequeue_four_qes(struct dlb_eventdev_port *ev_port,
+ struct dlb_port *qm_port,
+ struct rte_event *events,
+ struct dlb_dequeue_qe *qes)
+{
+ int sched_type_map[] = {
+ [DLB_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
+ [DLB_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
+ [DLB_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
+ [DLB_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
+ };
+ const int num_events = DLB_NUM_QES_PER_CACHE_LINE;
+ uint8_t *qid_mappings = qm_port->qid_mappings;
+ __m128i sse_evt[2];
+ int i;
+
+ /* In the unlikely case that any of the QE error bits are set, process
+ * them one at a time.
+ */
+ if (unlikely(qes[0].error || qes[1].error ||
+ qes[2].error || qes[3].error))
+ return dlb_process_dequeue_qes(ev_port, qm_port, events,
+ qes, num_events);
+
+ for (i = 0; i < DLB_NUM_QES_PER_CACHE_LINE; i++) {
+ DLB_LOG_DBG("dequeue success, data = 0x%llx, qid=%d, event_type=%d, subevent=%d\npp_id = %d, sched_type = %d, qid = %d, err=%d\n",
+ (long long)qes[i].data, qes[i].qid,
+ qes[i].u.event_type.major,
+ qes[i].u.event_type.sub,
+ qes[i].pp_id, qes[i].sched_type, qes[i].qid,
+ qes[i].error);
+ }
+
+ events[0].u64 = qes[0].data;
+ events[1].u64 = qes[1].data;
+ events[2].u64 = qes[2].data;
+ events[3].u64 = qes[3].data;
+
+ /* Construct the metadata portion of two struct rte_events
+ * in one 128b SSE register. Event metadata is constructed in the SSE
+ * registers like so:
+ * sse_evt[0][63:0]: event[0]'s metadata
+ * sse_evt[0][127:64]: event[1]'s metadata
+ * sse_evt[1][63:0]: event[2]'s metadata
+ * sse_evt[1][127:64]: event[3]'s metadata
+ */
+ sse_evt[0] = _mm_setzero_si128();
+ sse_evt[1] = _mm_setzero_si128();
+
+ /* Convert the hardware queue ID to an event queue ID and store it in
+ * the metadata:
+ * sse_evt[0][47:40] = qid_mappings[qes[0].qid]
+ * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
+ * sse_evt[1][47:40] = qid_mappings[qes[2].qid]
+ * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
+ */
+#define DLB_EVENT_QUEUE_ID_BYTE 5
+ sse_evt[0] = _mm_insert_epi8(sse_evt[0],
+ qid_mappings[qes[0].qid],
+ DLB_EVENT_QUEUE_ID_BYTE);
+ sse_evt[0] = _mm_insert_epi8(sse_evt[0],
+ qid_mappings[qes[1].qid],
+ DLB_EVENT_QUEUE_ID_BYTE + 8);
+ sse_evt[1] = _mm_insert_epi8(sse_evt[1],
+ qid_mappings[qes[2].qid],
+ DLB_EVENT_QUEUE_ID_BYTE);
+ sse_evt[1] = _mm_insert_epi8(sse_evt[1],
+ qid_mappings[qes[3].qid],
+ DLB_EVENT_QUEUE_ID_BYTE + 8);
+
+ /* Convert the hardware priority to an event priority and store it in
+ * the metadata:
+ * sse_evt[0][55:48] = DLB_TO_EV_PRIO(qes[0].priority)
+ * sse_evt[0][119:112] = DLB_TO_EV_PRIO(qes[1].priority)
+ * sse_evt[1][55:48] = DLB_TO_EV_PRIO(qes[2].priority)
+ * sse_evt[1][119:112] = DLB_TO_EV_PRIO(qes[3].priority)
+ */
+#define DLB_EVENT_PRIO_BYTE 6
+ sse_evt[0] = _mm_insert_epi8(sse_evt[0],
+ DLB_TO_EV_PRIO((uint8_t)qes[0].priority),
+ DLB_EVENT_PRIO_BYTE);
+ sse_evt[0] = _mm_insert_epi8(sse_evt[0],
+ DLB_TO_EV_PRIO((uint8_t)qes[1].priority),
+ DLB_EVENT_PRIO_BYTE + 8);
+ sse_evt[1] = _mm_insert_epi8(sse_evt[1],
+ DLB_TO_EV_PRIO((uint8_t)qes[2].priority),
+ DLB_EVENT_PRIO_BYTE);
+ sse_evt[1] = _mm_insert_epi8(sse_evt[1],
+ DLB_TO_EV_PRIO((uint8_t)qes[3].priority),
+ DLB_EVENT_PRIO_BYTE + 8);
+
+ /* Write the event type and sub event type to the event metadata. Leave
+ * flow ID unspecified, since the hardware does not maintain it during
+ * scheduling:
+ * sse_evt[0][31:0] = qes[0].u.event_type.major << 28 |
+ * qes[0].u.event_type.sub << 20;
+ * sse_evt[0][95:64] = qes[1].u.event_type.major << 28 |
+ * qes[1].u.event_type.sub << 20;
+ * sse_evt[1][31:0] = qes[2].u.event_type.major << 28 |
+ * qes[2].u.event_type.sub << 20;
+ * sse_evt[1][95:64] = qes[3].u.event_type.major << 28 |
+ * qes[3].u.event_type.sub << 20;
+ */
+#define DLB_EVENT_EV_TYPE_DW 0
+#define DLB_EVENT_EV_TYPE_SHIFT 28
+#define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
+ sse_evt[0] = _mm_insert_epi32(sse_evt[0],
+ qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
+ qes[0].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
+ DLB_EVENT_EV_TYPE_DW);
+ sse_evt[0] = _mm_insert_epi32(sse_evt[0],
+ qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
+ qes[1].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
+ DLB_EVENT_EV_TYPE_DW + 2);
+ sse_evt[1] = _mm_insert_epi32(sse_evt[1],
+ qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
+ qes[2].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
+ DLB_EVENT_EV_TYPE_DW);
+ sse_evt[1] = _mm_insert_epi32(sse_evt[1],
+ qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
+ qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
+ DLB_EVENT_EV_TYPE_DW + 2);
+
+ /* Write the sched type to the event metadata. 'op' and 'rsvd' are not
+ * set:
+ * sse_evt[0][39:32] = sched_type_map[qes[0].sched_type] << 6
+ * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
+ * sse_evt[1][39:32] = sched_type_map[qes[2].sched_type] << 6
+ * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
+ */
+#define DLB_EVENT_SCHED_TYPE_BYTE 4
+#define DLB_EVENT_SCHED_TYPE_SHIFT 6
+ sse_evt[0] = _mm_insert_epi8(sse_evt[0],
+ sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
+ DLB_EVENT_SCHED_TYPE_BYTE);
+ sse_evt[0] = _mm_insert_epi8(sse_evt[0],
+ sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
+ DLB_EVENT_SCHED_TYPE_BYTE + 8);
+ sse_evt[1] = _mm_insert_epi8(sse_evt[1],
+ sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
+ DLB_EVENT_SCHED_TYPE_BYTE);
+ sse_evt[1] = _mm_insert_epi8(sse_evt[1],
+ sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
+ DLB_EVENT_SCHED_TYPE_BYTE + 8);
+
+ /* Store the metadata to the event (use the double-precision
+ * _mm_storeh_pd because there is no integer function for storing the
+ * upper 64b):
+ * events[0].event = sse_evt[0][63:0]
+ * events[1].event = sse_evt[0][127:64]
+ * events[2].event = sse_evt[1][63:0]
+ * events[3].event = sse_evt[1][127:64]
+ */
+ _mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
+ _mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
+ _mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
+ _mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
+
+ DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
+ DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
+ DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
+ DLB_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
+
+ DLB_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
+
+ return num_events;
+}
+
+static inline int
+dlb_dequeue_wait(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_port *ev_port,
+ struct dlb_port *qm_port,
+ uint64_t timeout,
+ uint64_t start_ticks)
+{
+ struct process_local_port_data *port_data;
+ uint64_t elapsed_ticks;
+
+ port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ elapsed_ticks = rte_get_timer_cycles() - start_ticks;
+
+ /* Wait/poll time expired */
+ if (elapsed_ticks >= timeout) {
+ /* Interrupts not supported by PF PMD */
+ return 1;
+ } else if (dlb->umwait_allowed) {
+ struct rte_power_monitor_cond pmc;
+ volatile struct dlb_dequeue_qe *cq_base;
+ union {
+ uint64_t raw_qe[2];
+ struct dlb_dequeue_qe qe;
+ } qe_mask;
+ uint64_t expected_value;
+ volatile uint64_t *monitor_addr;
+
+ qe_mask.qe.cq_gen = 1; /* set mask */
+
+ cq_base = port_data->cq_base;
+ monitor_addr = (volatile uint64_t *)(volatile void *)
+ &cq_base[qm_port->cq_idx];
+ monitor_addr++; /* cq_gen bit is in second 64bit location */
+
+ if (qm_port->gen_bit)
+ expected_value = qe_mask.raw_qe[1];
+ else
+ expected_value = 0;
+
+ pmc.addr = monitor_addr;
+ pmc.val = expected_value;
+ pmc.mask = qe_mask.raw_qe[1];
+ pmc.size = sizeof(uint64_t);
+
+ rte_power_monitor(&pmc, timeout + start_ticks);
+
+ DLB_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
+ } else {
+ uint64_t poll_interval = RTE_LIBRTE_PMD_DLB_POLL_INTERVAL;
+ uint64_t curr_ticks = rte_get_timer_cycles();
+ uint64_t init_ticks = curr_ticks;
+
+ while ((curr_ticks - start_ticks < timeout) &&
+ (curr_ticks - init_ticks < poll_interval))
+ curr_ticks = rte_get_timer_cycles();
+ }
+
+ return 0;
+}
+
+static inline int16_t
+dlb_hw_dequeue(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_port *ev_port,
+ struct rte_event *events,
+ uint16_t max_num,
+ uint64_t dequeue_timeout_ticks)
+{
+ uint64_t timeout;
+ uint64_t start_ticks = 0ULL;
+ struct dlb_port *qm_port;
+ int num = 0;
+
+ qm_port = &ev_port->qm_port;
+
+ /* If configured for per-dequeue wait, use the wait value provided to
+ * this API. Otherwise, use the global value set at eventdev
+ * configuration time.
+ */
+ if (!dlb->global_dequeue_wait)
+ timeout = dequeue_timeout_ticks;
+ else
+ timeout = dlb->global_dequeue_wait_ticks;
+
+ if (timeout)
+ start_ticks = rte_get_timer_cycles();
+
+ while (num < max_num) {
+ struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
+ uint8_t offset;
+ int num_avail;
+
+ /* Copy up to 4 QEs from the current cache line into qes */
+ num_avail = dlb_recv_qe(qm_port, qes, &offset);
+
+ /* But don't process more than the user requested */
+ num_avail = RTE_MIN(num_avail, max_num - num);
+
+ dlb_inc_cq_idx(qm_port, num_avail);
+
+ if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
+ num += dlb_process_dequeue_four_qes(ev_port,
+ qm_port,
+ &events[num],
+ &qes[offset]);
+ else if (num_avail)
+ num += dlb_process_dequeue_qes(ev_port,
+ qm_port,
+ &events[num],
+ &qes[offset],
+ num_avail);
+ else if ((timeout == 0) || (num > 0))
+ /* Not waiting in any form, or 1+ events received? */
+ break;
+ else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
+ timeout, start_ticks))
+ break;
+ }
+
+ qm_port->owed_tokens += num;
+
+ if (num && qm_port->token_pop_mode == AUTO_POP)
+ dlb_consume_qe_immediate(qm_port, num);
+
+ ev_port->outstanding_releases += num;
+
+ return num;
+}
+
+static __rte_always_inline int
+dlb_recv_qe_sparse(struct dlb_port *qm_port, struct dlb_dequeue_qe *qe)
+{
+ volatile struct dlb_dequeue_qe *cq_addr;
+ uint8_t xor_mask[2] = {0x0F, 0x00};
+ const uint8_t and_mask = 0x0F;
+ __m128i *qes = (__m128i *)qe;
+ uint8_t gen_bits, gen_bit;
+ uintptr_t addr[4];
+ uint16_t idx;
+
+ cq_addr = dlb_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
+
+ idx = qm_port->cq_idx;
+
+ /* Load the next 4 QEs */
+ addr[0] = (uintptr_t)&cq_addr[idx];
+ addr[1] = (uintptr_t)&cq_addr[(idx + 4) & qm_port->cq_depth_mask];
+ addr[2] = (uintptr_t)&cq_addr[(idx + 8) & qm_port->cq_depth_mask];
+ addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
+
+ /* Prefetch next batch of QEs (all CQs occupy minimum 8 cache lines) */
+ rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
+ rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
+ rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
+ rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
+
+ /* Correct the xor_mask for wrap-around QEs */
+ gen_bit = qm_port->gen_bit;
+ xor_mask[gen_bit] ^= !!((idx + 4) > qm_port->cq_depth_mask) << 1;
+ xor_mask[gen_bit] ^= !!((idx + 8) > qm_port->cq_depth_mask) << 2;
+ xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
+
+ /* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
+ * valid, then QEs[0:N-1] are too.
+ */
+ qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
+ rte_compiler_barrier();
+ qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
+ rte_compiler_barrier();
+ qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
+ rte_compiler_barrier();
+ qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
+
+ /* Extract and combine the gen bits */
+ gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
+ ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
+ ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
+ ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
+
+ /* XOR the combined bits such that a 1 represents a valid QE */
+ gen_bits ^= xor_mask[gen_bit];
+
+ /* Mask off gen bits we don't care about */
+ gen_bits &= and_mask;
+
+ return __builtin_popcount(gen_bits);
+}
+
+static inline int16_t
+dlb_hw_dequeue_sparse(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_port *ev_port,
+ struct rte_event *events,
+ uint16_t max_num,
+ uint64_t dequeue_timeout_ticks)
+{
+ uint64_t timeout;
+ uint64_t start_ticks = 0ULL;
+ struct dlb_port *qm_port;
+ int num = 0;
+
+ qm_port = &ev_port->qm_port;
+
+ /* If configured for per-dequeue wait, use the wait value provided to
+ * this API. Otherwise, use the global value set at eventdev
+ * configuration time.
+ */
+ if (!dlb->global_dequeue_wait)
+ timeout = dequeue_timeout_ticks;
+ else
+ timeout = dlb->global_dequeue_wait_ticks;
+
+ if (timeout)
+ start_ticks = rte_get_timer_cycles();
+
+ while (num < max_num) {
+ struct dlb_dequeue_qe qes[DLB_NUM_QES_PER_CACHE_LINE];
+ int num_avail;
+
+ /* Copy up to 4 QEs from the current cache line into qes */
+ num_avail = dlb_recv_qe_sparse(qm_port, qes);
+
+ /* But don't process more than the user requested */
+ num_avail = RTE_MIN(num_avail, max_num - num);
+
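+ /* In sparse CQ mode the hardware writes one QE per cache line, so
+ * advance the CQ index by four entries per QE received.
+ */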
+ dlb_inc_cq_idx(qm_port, num_avail << 2);
+
+ if (num_avail == DLB_NUM_QES_PER_CACHE_LINE)
+ num += dlb_process_dequeue_four_qes(ev_port,
+ qm_port,
+ &events[num],
+ &qes[0]);
+ else if (num_avail)
+ num += dlb_process_dequeue_qes(ev_port,
+ qm_port,
+ &events[num],
+ &qes[0],
+ num_avail);
+ else if ((timeout == 0) || (num > 0))
+ /* Not waiting in any form, or 1+ events received? */
+ break;
+ else if (dlb_dequeue_wait(dlb, ev_port, qm_port,
+ timeout, start_ticks))
+ break;
+ }
+
+ qm_port->owed_tokens += num;
+
+ if (num && qm_port->token_pop_mode == AUTO_POP)
+ dlb_consume_qe_immediate(qm_port, num);
+
+ ev_port->outstanding_releases += num;
+
+ return num;
+}
+
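+/* Return 'n' releases to the hardware by issuing completion HCWs on the
+ * given port. Directed ports skip the hardware step and only update the
+ * software credit accounting.
+ */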
+static int
+dlb_event_release(struct dlb_eventdev *dlb, uint8_t port_id, int n)
+{
+ struct process_local_port_data *port_data;
+ struct dlb_eventdev_port *ev_port;
+ struct dlb_port *qm_port;
+ int i;
+
+ if (port_id > dlb->num_ports) {
+ DLB_LOG_ERR("Invalid port id %d in dlb-event_release\n",
+ port_id);
+ rte_errno = -EINVAL;
+ return rte_errno;
+ }
+
+ ev_port = &dlb->ev_ports[port_id];
+ qm_port = &ev_port->qm_port;
+ port_data = &dlb_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ i = 0;
+
+ if (qm_port->is_directed) {
+ i = n;
+ goto sw_credit_update;
+ }
+
+ while (i < n) {
+ int pop_offs = 0;
+ int j = 0;
+
+ /* Zero-out QEs */
+ qm_port->qe4[0].cmd_byte = 0;
+ qm_port->qe4[1].cmd_byte = 0;
+ qm_port->qe4[2].cmd_byte = 0;
+ qm_port->qe4[3].cmd_byte = 0;
+
+ for (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
+ int16_t thresh = qm_port->token_pop_thresh;
+
+ if (qm_port->token_pop_mode == DELAYED_POP &&
+ qm_port->issued_releases >= thresh - 1) {
+ /* Insert the token pop QE */
+ dlb_construct_token_pop_qe(qm_port, j);
+
+ /* Reset the releases for the next QE batch */
+ qm_port->issued_releases -= thresh;
+
+ /* When using delayed token pop mode, the
+ * initial token threshold is the full CQ
+ * depth. After the first token pop, we need to
+ * reset it to the dequeue_depth.
+ */
+ qm_port->token_pop_thresh =
+ qm_port->dequeue_depth;
+
+ pop_offs = 1;
+ j++;
+ break;
+ }
+
+ qm_port->qe4[j].cmd_byte = DLB_COMP_CMD_BYTE;
+ qm_port->issued_releases++;
+ }
+
+ dlb_hw_do_enqueue(qm_port, i == 0, port_data);
+
+ /* Don't include the token pop QE in the release count */
+ i += j - pop_offs;
+ }
+
+sw_credit_update:
+ /* each release returns one credit */
+ if (!ev_port->outstanding_releases) {
+ DLB_LOG_ERR("Unrecoverable application error. Outstanding releases underflowed.\n");
+ rte_errno = -ENOTRECOVERABLE;
+ return rte_errno;
+ }
+
+ ev_port->outstanding_releases -= i;
+ ev_port->inflight_credits += i;
+
+ /* Replenish s/w credits if enough releases are performed */
+ dlb_replenish_sw_credits(dlb, ev_port);
+ return 0;
+}
+
+static uint16_t
+dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
+ uint64_t wait)
+{
+ struct dlb_eventdev_port *ev_port = event_port;
+ struct dlb_port *qm_port = &ev_port->qm_port;
+ struct dlb_eventdev *dlb = ev_port->dlb;
+ uint16_t cnt;
+ int ret;
+
+ rte_errno = 0;
+
+ RTE_ASSERT(ev_port->setup_done);
+ RTE_ASSERT(ev != NULL);
+
+ if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
+ uint16_t out_rels = ev_port->outstanding_releases;
+
+ ret = dlb_event_release(dlb, ev_port->id, out_rels);
+ if (ret)
+ return ret;
+
+ DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
+ }
+
+ if (qm_port->token_pop_mode == DEFERRED_POP &&
+ qm_port->owed_tokens)
+ dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+
+ cnt = dlb_hw_dequeue(dlb, ev_port, ev, num, wait);
+
+ DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
+ DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
+ return cnt;
+}
+
+static uint16_t
+dlb_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
+{
+ return dlb_event_dequeue_burst(event_port, ev, 1, wait);
+}
+
+static uint16_t
+dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
+ uint16_t num, uint64_t wait)
+{
+ struct dlb_eventdev_port *ev_port = event_port;
+ struct dlb_port *qm_port = &ev_port->qm_port;
+ struct dlb_eventdev *dlb = ev_port->dlb;
+ uint16_t cnt;
+ int ret;
+
+ rte_errno = 0;
+
+ RTE_ASSERT(ev_port->setup_done);
+ RTE_ASSERT(ev != NULL);
+
+ if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
+ uint16_t out_rels = ev_port->outstanding_releases;
+
+ ret = dlb_event_release(dlb, ev_port->id, out_rels);
+ if (ret)
+ return ret;
+
+ DLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
+ }
+
+ if (qm_port->token_pop_mode == DEFERRED_POP &&
+ qm_port->owed_tokens)
+ dlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+
+ cnt = dlb_hw_dequeue_sparse(dlb, ev_port, ev, num, wait);
+
+ DLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);
+ DLB_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
+ return cnt;
+}
+
+static uint16_t
+dlb_event_dequeue_sparse(void *event_port, struct rte_event *ev, uint64_t wait)
+{
+ return dlb_event_dequeue_burst_sparse(event_port, ev, 1, wait);
+}
+
+static uint32_t
+dlb_get_ldb_queue_depth(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_queue *queue)
+{
+ struct dlb_hw_dev *handle = &dlb->qm_instance;
+ struct dlb_get_ldb_queue_depth_args cfg;
+ struct dlb_cmd_response response;
+ int ret;
+
+ cfg.queue_id = queue->qm_queue.id;
+ cfg.response = (uintptr_t)&response;
+
+ ret = dlb_iface_get_ldb_queue_depth(handle, &cfg);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: get_ldb_queue_depth ret=%d (driver status: %s)\n",
+ ret, dlb_error_strings[response.status]);
+ return ret;
+ }
+
+ return response.id;
+}
+
+static uint32_t
+dlb_get_dir_queue_depth(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_queue *queue)
+{
+ struct dlb_hw_dev *handle = &dlb->qm_instance;
+ struct dlb_get_dir_queue_depth_args cfg;
+ struct dlb_cmd_response response;
+ int ret;
+
+ cfg.queue_id = queue->qm_queue.id;
+ cfg.response = (uintptr_t)&response;
+
+ ret = dlb_iface_get_dir_queue_depth(handle, &cfg);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: get_dir_queue_depth ret=%d (driver status: %s)\n",
+ ret, dlb_error_strings[response.status]);
+ return ret;
+ }
+
+ return response.id;
+}
+
+uint32_t
+dlb_get_queue_depth(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_queue *queue)
+{
+ if (queue->qm_queue.is_directed)
+ return dlb_get_dir_queue_depth(dlb, queue);
+ else
+ return dlb_get_ldb_queue_depth(dlb, queue);
+}
+
+static bool
+dlb_queue_is_empty(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_queue *queue)
+{
+ return dlb_get_queue_depth(dlb, queue) == 0;
+}
+
+static bool
+dlb_linked_queues_empty(struct dlb_eventdev *dlb)
+{
+ int i;
+
+ for (i = 0; i < dlb->num_queues; i++) {
+ if (dlb->ev_queues[i].num_links == 0)
+ continue;
+ if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+dlb_queues_empty(struct dlb_eventdev *dlb)
+{
+ int i;
+
+ for (i = 0; i < dlb->num_queues; i++) {
+ if (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+ return false;
+ }
+
+ return true;
+}
+
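+/* Dequeue and discard all events held by a port, invoking the
+ * application's dev_stop_flush callback (if registered) on each one and
+ * releasing load-balanced events as they drain.
+ */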
+static void
+dlb_flush_port(struct rte_eventdev *dev, int port_id)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ eventdev_stop_flush_t flush;
+ struct rte_event ev;
+ uint8_t dev_id;
+ void *arg;
+ int i;
+
+ flush = dev->dev_ops->dev_stop_flush;
+ dev_id = dev->data->dev_id;
+ arg = dev->data->dev_stop_flush_arg;
+
+ while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
+ if (flush)
+ flush(dev_id, ev, arg);
+
+ if (dlb->ev_ports[port_id].qm_port.is_directed)
+ continue;
+
+ ev.op = RTE_EVENT_OP_RELEASE;
+
+ rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+ }
+
+ /* Enqueue any additional outstanding releases */
+ ev.op = RTE_EVENT_OP_RELEASE;
+
+ for (i = dlb->ev_ports[port_id].outstanding_releases; i > 0; i--)
+ rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+}
+
+static void
+dlb_drain(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ struct dlb_eventdev_port *ev_port = NULL;
+ uint8_t dev_id;
+ int i;
+
+ dev_id = dev->data->dev_id;
+
+ while (!dlb_linked_queues_empty(dlb)) {
+ /* Flush all the ev_ports, which will drain all their connected
+ * queues.
+ */
+ for (i = 0; i < dlb->num_ports; i++)
+ dlb_flush_port(dev, i);
+ }
+
+ /* The queues are empty, but there may be events left in the ports. */
+ for (i = 0; i < dlb->num_ports; i++)
+ dlb_flush_port(dev, i);
+
+ /* If the domain's queues are empty, we're done. */
+ if (dlb_queues_empty(dlb))
+ return;
+
+ /* Else, there must be at least one unlinked load-balanced queue.
+ * Select a load-balanced port with which to drain the unlinked
+ * queue(s).
+ */
+ for (i = 0; i < dlb->num_ports; i++) {
+ ev_port = &dlb->ev_ports[i];
+
+ if (!ev_port->qm_port.is_directed)
+ break;
+ }
+
+ if (i == dlb->num_ports) {
+ DLB_LOG_ERR("internal error: no LDB ev_ports\n");
+ return;
+ }
+
+ rte_errno = 0;
+ rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
+
+ if (rte_errno) {
+ DLB_LOG_ERR("internal error: failed to unlink ev_port %d\n",
+ ev_port->id);
+ return;
+ }
+
+ for (i = 0; i < dlb->num_queues; i++) {
+ uint8_t qid, prio;
+ int ret;
+
+ if (dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+ continue;
+
+ qid = i;
+ prio = 0;
+
+ /* Link the ev_port to the queue */
+ ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
+ if (ret != 1) {
+ DLB_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
+ ev_port->id, qid);
+ return;
+ }
+
+ /* Flush the queue */
+ while (!dlb_queue_is_empty(dlb, &dlb->ev_queues[i]))
+ dlb_flush_port(dev, ev_port->id);
+
+ /* Drain any extant events in the ev_port. */
+ dlb_flush_port(dev, ev_port->id);
+
+ /* Unlink the ev_port from the queue */
+ ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
+ if (ret != 1) {
+ DLB_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
+ ev_port->id, qid);
+ return;
+ }
+ }
+}
+
+static void
+dlb_eventdev_stop(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+
+ rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+
+ if (dlb->run_state == DLB_RUN_STATE_STOPPED) {
+ DLB_LOG_DBG("Internal error: already stopped\n");
+ rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+ return;
+ } else if (dlb->run_state != DLB_RUN_STATE_STARTED) {
+ DLB_LOG_ERR("Internal error: bad state %d for dev_stop\n",
+ (int)dlb->run_state);
+ rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+ return;
+ }
+
+ dlb->run_state = DLB_RUN_STATE_STOPPING;
+
+ rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+ dlb_drain(dev);
+
+ dlb->run_state = DLB_RUN_STATE_STOPPED;
+}
+
+static int
+dlb_eventdev_close(struct rte_eventdev *dev)
+{
+ dlb_hw_reset_sched_domain(dev, false);
+
+ return 0;
+}
+
+static void
+dlb_eventdev_port_release(void *port)
+{
+ struct dlb_eventdev_port *ev_port = port;
+
+ if (ev_port) {
+ struct dlb_port *qm_port = &ev_port->qm_port;
+
+ if (qm_port->config_state == DLB_CONFIGURED)
+ dlb_free_qe_mem(qm_port);
+ }
+}
+
+static void
+dlb_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(id);
+
+ /* This function intentionally left blank. */
+}
+
+static int
+dlb_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ RTE_SET_USED(dev);
+ uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
+
+ *timeout_ticks = ns * cycles_per_ns;
+
+ return 0;
+}
+
+void
+dlb_entry_points_init(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb;
+
+ static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
+ .dev_infos_get = dlb_eventdev_info_get,
+ .dev_configure = dlb_eventdev_configure,
+ .dev_start = dlb_eventdev_start,
+ .dev_stop = dlb_eventdev_stop,
+ .dev_close = dlb_eventdev_close,
+ .queue_def_conf = dlb_eventdev_queue_default_conf_get,
+ .port_def_conf = dlb_eventdev_port_default_conf_get,
+ .queue_setup = dlb_eventdev_queue_setup,
+ .queue_release = dlb_eventdev_queue_release,
+ .port_setup = dlb_eventdev_port_setup,
+ .port_release = dlb_eventdev_port_release,
+ .port_link = dlb_eventdev_port_link,
+ .port_unlink = dlb_eventdev_port_unlink,
+ .port_unlinks_in_progress =
+ dlb_eventdev_port_unlinks_in_progress,
+ .timeout_ticks = dlb_eventdev_timeout_ticks,
+ .dump = dlb_eventdev_dump,
+ .xstats_get = dlb_eventdev_xstats_get,
+ .xstats_get_names = dlb_eventdev_xstats_get_names,
+ .xstats_get_by_name = dlb_eventdev_xstats_get_by_name,
+ .xstats_reset = dlb_eventdev_xstats_reset,
+ .dev_selftest = test_dlb_eventdev,
+ };
+
+ /* Expose PMD's eventdev interface */
+ dev->dev_ops = &dlb_eventdev_entry_ops;
+
+ dev->enqueue = dlb_event_enqueue;
+ dev->enqueue_burst = dlb_event_enqueue_burst;
+ dev->enqueue_new_burst = dlb_event_enqueue_new_burst;
+ dev->enqueue_forward_burst = dlb_event_enqueue_forward_burst;
+ dev->dequeue = dlb_event_dequeue;
+ dev->dequeue_burst = dlb_event_dequeue_burst;
+
+ dlb = dev->data->dev_private;
+
+ if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE) {
+ dev->dequeue = dlb_event_dequeue_sparse;
+ dev->dequeue_burst = dlb_event_dequeue_burst_sparse;
+ }
+}
+
+int
+dlb_primary_eventdev_probe(struct rte_eventdev *dev,
+ const char *name,
+ struct dlb_devargs *dlb_args)
+{
+ struct dlb_eventdev *dlb;
+ int err, i;
+
+ dlb = dev->data->dev_private;
+
+ dlb->event_dev = dev; /* backlink */
+
+ evdev_dlb_default_info.driver_name = name;
+
+ dlb->max_num_events_override = dlb_args->max_num_events;
+ dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
+ dlb->defer_sched = dlb_args->defer_sched;
+ dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
+
+ /* Open the interface.
+ * For vdev mode, this means open the dlb kernel module.
+ */
+ err = dlb_iface_open(&dlb->qm_instance, name);
+ if (err < 0) {
+ DLB_LOG_ERR("could not open event hardware device, err=%d\n",
+ err);
+ return err;
+ }
+
+ err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
+ if (err < 0) {
+ DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
+ err);
+ return err;
+ }
+
+ err = dlb_hw_query_resources(dlb);
+ if (err) {
+ DLB_LOG_ERR("get resources err=%d for %s\n", err, name);
+ return err;
+ }
+
+ err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
+ if (err < 0) {
+ DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n", err);
+ return err;
+ }
+
+ /* Complete xstats runtime initialization */
+ err = dlb_xstats_init(dlb);
+ if (err) {
+ DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
+ return err;
+ }
+
+ /* Initialize each port's token pop mode */
+ for (i = 0; i < DLB_MAX_NUM_PORTS; i++)
+ dlb->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
rte_spinlock_init(&dlb->qm_instance.resource_lock);