#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
-#include <sys/fcntl.h>
+#include <fcntl.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_eventdev.h>
-#include <rte_eventdev_pmd.h>
+#include <eventdev_pmd.h>
#include <rte_io.h>
#include <rte_kvargs.h>
#include <rte_log.h>
.max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
.max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
- .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
+ .max_single_link_event_port_queue_pairs =
+ DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
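+ /* These defaults advertise v2 limits; dlb2_hw_query_resources()
+ * overwrites the port and event counts with the probed device's
+ * actual resources.
+ */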
.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
RTE_EVENT_DEV_CAP_EVENT_QOS |
RTE_EVENT_DEV_CAP_BURST_MODE |
};
struct process_local_port_data
-dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
+dlb2_port[DLB2_MAX_NUM_PORTS_ALL][DLB2_NUM_PORT_TYPES];
static void
dlb2_free_qe_mem(struct dlb2_port *qm_port)
{
int q;
- for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
+ for (q = 0; q < DLB2_MAX_NUM_QUEUES(dlb2->version); q++) {
if (qid_depth_thresholds[q] != 0)
dlb2->ev_queues[q].depth_threshold =
qid_depth_thresholds[q];
evdev_dlb2_default_info.max_event_ports =
dlb2->hw_rsrc_query_results.num_ldb_ports;
- evdev_dlb2_default_info.max_num_events =
- dlb2->hw_rsrc_query_results.num_ldb_credits;
-
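+ /* DLB 2.5 exposes a single combined credit pool (num_credits);
+ * v2 reports separate load-balanced and directed pools.
+ */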
+ if (dlb2->version == DLB2_HW_V2_5) {
+ evdev_dlb2_default_info.max_num_events =
+ dlb2->hw_rsrc_query_results.num_credits;
+ } else {
+ evdev_dlb2_default_info.max_num_events =
+ dlb2->hw_rsrc_query_results.num_ldb_credits;
+ }
/* Save off values used when creating the scheduling domain. */
handle->info.num_sched_domains =
dlb2->hw_rsrc_query_results.num_sched_domains;
- handle->info.hw_rsrc_max.nb_events_limit =
- dlb2->hw_rsrc_query_results.num_ldb_credits;
-
+ if (dlb2->version == DLB2_HW_V2_5) {
+ handle->info.hw_rsrc_max.nb_events_limit =
+ dlb2->hw_rsrc_query_results.num_credits;
+ } else {
+ handle->info.hw_rsrc_max.nb_events_limit =
+ dlb2->hw_rsrc_query_results.num_ldb_credits;
+ }
handle->info.hw_rsrc_max.num_queues =
dlb2->hw_rsrc_query_results.num_ldb_queues +
dlb2->hw_rsrc_query_results.num_dir_ports;
return ret;
if (*num_dir_credits < 0 ||
- *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
+ *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
- DLB2_MAX_NUM_DIR_CREDITS);
+ DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
return -EINVAL;
}
return 0;
}
-
static int
set_qid_depth_thresh(const char *key __rte_unused,
const char *value,
*/
if (sscanf(value, "all:%d", &thresh) == 1) {
first = 0;
- last = DLB2_MAX_NUM_QUEUES - 1;
+ last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2) - 1;
+ } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
+ /* we have everything we need */
+ } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
+ last = first;
+ } else {
+ DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
+ return -EINVAL;
+ }
+
+ if (first > last || first < 0 ||
+ last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
+ DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+ return -EINVAL;
+ }
+
+ if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
+ DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+ DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
+ return -EINVAL;
+ }
+
+ for (i = first; i <= last; i++)
+ qid_thresh->val[i] = thresh; /* indexed by qid */
+
+ return 0;
+}
+
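+/* Same parsing as set_qid_depth_thresh(), but bounded by the DLB 2.5
+ * queue count.
+ */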
+static int
+set_qid_depth_thresh_v2_5(const char *key __rte_unused,
+ const char *value,
+ void *opaque)
+{
+ struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
+ int first, last, thresh, i;
+
+ if (value == NULL || opaque == NULL) {
+ DLB2_LOG_ERR("NULL pointer\n");
+ return -EINVAL;
+ }
+
+ /* command line override may take one of the following 3 forms:
+ * qid_depth_thresh=all:<threshold_value> ... all queues
+ * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
+ * qid_depth_thresh=qid:<threshold_value> ... just one queue
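+ * e.g. qid_depth_thresh=all:256 or qid_depth_thresh=0-3:128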
+ */
+ if (sscanf(value, "all:%d", &thresh) == 1) {
+ first = 0;
+ last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) - 1;
} else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
/* we have everything we need */
} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
return -EINVAL;
}
- if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
+ if (first > last || first < 0 ||
+ last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
return -EINVAL;
}
*/
evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
- evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
-
+ if (dlb2->version == DLB2_HW_V2_5) {
+ evdev_dlb2_default_info.max_num_events +=
+ dlb2->max_credits;
+ } else {
+ evdev_dlb2_default_info.max_num_events +=
+ dlb2->max_ldb_credits;
+ }
evdev_dlb2_default_info.max_event_queues =
RTE_MIN(evdev_dlb2_default_info.max_event_queues,
RTE_EVENT_MAX_QUEUES_PER_DEV);
static int
dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
- const struct dlb2_hw_rsrcs *resources_asked)
+ const struct dlb2_hw_rsrcs *resources_asked,
+ uint8_t device_version)
{
int ret = 0;
struct dlb2_create_sched_domain_args *cfg;
/* DIR ports and queues */
cfg->num_dir_ports = resources_asked->num_dir_ports;
-
- cfg->num_dir_credits = resources_asked->num_dir_credits;
+ if (device_version == DLB2_HW_V2_5)
+ cfg->num_credits = resources_asked->num_credits;
+ else
+ cfg->num_dir_credits = resources_asked->num_dir_credits;
/* LDB queues */
break;
}
- cfg->num_ldb_credits =
- resources_asked->num_ldb_credits;
+ if (device_version == DLB2_HW_V2)
+ cfg->num_ldb_credits = resources_asked->num_ldb_credits;
cfg->num_atomic_inflights =
DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
- DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
- cfg->num_ldb_queues,
- resources_asked->num_ldb_ports,
- cfg->num_dir_ports,
- cfg->num_atomic_inflights,
- cfg->num_hist_list_entries,
- cfg->num_ldb_credits,
- cfg->num_dir_credits);
+ if (device_version == DLB2_HW_V2_5) {
+ DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n",
+ cfg->num_ldb_queues,
+ resources_asked->num_ldb_ports,
+ cfg->num_dir_ports,
+ cfg->num_atomic_inflights,
+ cfg->num_hist_list_entries,
+ cfg->num_credits);
+ } else {
+ DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
+ cfg->num_ldb_queues,
+ resources_asked->num_ldb_ports,
+ cfg->num_dir_ports,
+ cfg->num_atomic_inflights,
+ cfg->num_hist_list_entries,
+ cfg->num_ldb_credits,
+ cfg->num_dir_credits);
+ }
/* Configure the QM */
for (i = 0; i < dlb2->num_queues; i++)
dlb2->ev_queues[i].qm_queue.config_state = config_state;
- for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
+ for (i = 0; i < DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5); i++)
dlb2->ev_queues[i].setup_done = false;
dlb2->num_ports = 0;
*/
if (dlb2->configured) {
dlb2_hw_reset_sched_domain(dev, true);
-
ret = dlb2_hw_query_resources(dlb2);
if (ret) {
DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
/* 1 dir queue per dir port */
rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
- /* Scale down nb_events_limit by 4 for directed credits, since there
- * are 4x as many load-balanced credits.
- */
- rsrcs->num_ldb_credits = 0;
- rsrcs->num_dir_credits = 0;
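+ /* With a combined pool there is no LDB/DIR split: the whole
+ * nb_events_limit becomes credits.
+ */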
+ if (dlb2->version == DLB2_HW_V2_5) {
+ rsrcs->num_credits = 0;
+ if (rsrcs->num_ldb_queues || rsrcs->num_dir_ports)
+ rsrcs->num_credits = config->nb_events_limit;
+ } else {
+ /* Scale down nb_events_limit by 4 for directed credits,
+ * since there are 4x as many load-balanced credits.
+ */
+ rsrcs->num_ldb_credits = 0;
+ rsrcs->num_dir_credits = 0;
- if (rsrcs->num_ldb_queues)
- rsrcs->num_ldb_credits = config->nb_events_limit;
- if (rsrcs->num_dir_ports)
- rsrcs->num_dir_credits = config->nb_events_limit / 4;
- if (dlb2->num_dir_credits_override != -1)
- rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
+ if (rsrcs->num_ldb_queues)
+ rsrcs->num_ldb_credits = config->nb_events_limit;
+ if (rsrcs->num_dir_ports)
+ rsrcs->num_dir_credits = config->nb_events_limit / 4;
+ if (dlb2->num_dir_credits_override != -1)
+ rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
+ }
- if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
+ if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) {
DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
return -ENODEV;
}
dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
dlb2->num_dir_queues = dlb2->num_dir_ports;
- dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
- dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
- dlb2->dir_credit_pool = rsrcs->num_dir_credits;
- dlb2->max_dir_credits = rsrcs->num_dir_credits;
+ if (dlb2->version == DLB2_HW_V2_5) {
+ dlb2->credit_pool = rsrcs->num_credits;
+ dlb2->max_credits = rsrcs->num_credits;
+ } else {
+ dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
+ dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
+ dlb2->dir_credit_pool = rsrcs->num_dir_credits;
+ dlb2->max_dir_credits = rsrcs->num_dir_credits;
+ }
dlb2->configured = true;
return ret;
}
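+/* Delayed-pop enqueue variants; these replace the default entry points
+ * when a port is configured for DELAYED_POP token pop mode (see
+ * dlb2_hw_create_ldb_port()).
+ */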
+static inline uint16_t
+dlb2_event_enqueue_delayed(void *event_port,
+ const struct rte_event events[]);
+
+static inline uint16_t
+dlb2_event_enqueue_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num);
+
+static inline uint16_t
+dlb2_event_enqueue_new_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num);
+
+static inline uint16_t
+dlb2_event_enqueue_forward_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num);
+
static int
dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port,
struct dlb2_port *qm_port = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t qm_port_id;
- uint16_t ldb_credit_high_watermark;
- uint16_t dir_credit_high_watermark;
+ uint16_t ldb_credit_high_watermark = 0;
+ uint16_t dir_credit_high_watermark = 0;
+ uint16_t credit_high_watermark = 0;
if (handle == NULL)
return -EINVAL;
/* User controls the LDB high watermark via enqueue depth. The DIR high
* watermark is equal, unless the directed credit pool is too small.
*/
- ldb_credit_high_watermark = enqueue_depth;
-
- /* If there are no directed ports, the kernel driver will ignore this
- * port's directed credit settings. Don't use enqueue_depth if it would
- * require more directed credits than are available.
- */
- dir_credit_high_watermark =
- RTE_MIN(enqueue_depth,
- handle->cfg.num_dir_credits / dlb2->num_ports);
+ if (dlb2->version == DLB2_HW_V2) {
+ ldb_credit_high_watermark = enqueue_depth;
+ /* If there are no directed ports, the kernel driver will
+ * ignore this port's directed credit settings. Don't use
+ * enqueue_depth if it would require more directed credits
+ * than are available.
+ */
+ dir_credit_high_watermark =
+ RTE_MIN(enqueue_depth,
+ handle->cfg.num_dir_credits / dlb2->num_ports);
+ } else
+ credit_high_watermark = enqueue_depth;
/* Per QM values */
qm_port->id = qm_port_id;
- qm_port->cached_ldb_credits = 0;
- qm_port->cached_dir_credits = 0;
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_ldb_credits = 0;
+ qm_port->cached_dir_credits = 0;
+ } else
+ qm_port->cached_credits = 0;
+
/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
* the effective depth is smaller.
*/
qm_port->dequeue_depth = dequeue_depth;
qm_port->token_pop_thresh = dequeue_depth;
+
+ /* The default enqueue functions do not include delayed-pop support for
+ * performance reasons.
+ */
+ if (qm_port->token_pop_mode == DELAYED_POP) {
+ dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
+ dlb2->event_dev->enqueue_burst =
+ dlb2_event_enqueue_burst_delayed;
+ dlb2->event_dev->enqueue_new_burst =
+ dlb2_event_enqueue_new_burst_delayed;
+ dlb2->event_dev->enqueue_forward_burst =
+ dlb2_event_enqueue_forward_burst_delayed;
+ }
+
qm_port->owed_tokens = 0;
qm_port->issued_releases = 0;
qm_port->state = PORT_STARTED; /* enabled at create time */
qm_port->config_state = DLB2_CONFIGURED;
- qm_port->dir_credits = dir_credit_high_watermark;
- qm_port->ldb_credits = ldb_credit_high_watermark;
- qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
- qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
-
- DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
- qm_port_id,
- dequeue_depth,
- qm_port->ldb_credits,
- qm_port->dir_credits);
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->dir_credits = dir_credit_high_watermark;
+ qm_port->ldb_credits = ldb_credit_high_watermark;
+ qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
+ qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
+
+ DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
+ qm_port_id,
+ dequeue_depth,
+ qm_port->ldb_credits,
+ qm_port->dir_credits);
+ } else {
+ qm_port->credits = credit_high_watermark;
+ qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
+ DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d\n",
+ qm_port_id,
+ dequeue_depth,
+ qm_port->credits);
+ }
rte_spinlock_unlock(&handle->resource_lock);
return 0;
struct dlb2_port *qm_port = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t qm_port_id;
- uint16_t ldb_credit_high_watermark;
- uint16_t dir_credit_high_watermark;
+ uint16_t ldb_credit_high_watermark = 0;
+ uint16_t dir_credit_high_watermark = 0;
+ uint16_t credit_high_watermark = 0;
if (dlb2 == NULL || handle == NULL)
return -EINVAL;
/* User controls the LDB high watermark via enqueue depth. The DIR high
* watermark is equal, unless the directed credit pool is too small.
*/
- ldb_credit_high_watermark = enqueue_depth;
-
- /* Don't use enqueue_depth if it would require more directed credits
- * than are available.
- */
- dir_credit_high_watermark =
- RTE_MIN(enqueue_depth,
- handle->cfg.num_dir_credits / dlb2->num_ports);
+ if (dlb2->version == DLB2_HW_V2) {
+ ldb_credit_high_watermark = enqueue_depth;
+ /* Don't use enqueue_depth if it would require more directed
+ * credits than are available.
+ */
+ dir_credit_high_watermark =
+ RTE_MIN(enqueue_depth,
+ handle->cfg.num_dir_credits / dlb2->num_ports);
+ } else
+ credit_high_watermark = enqueue_depth;
/* Per QM values */
qm_port->id = qm_port_id;
- qm_port->cached_ldb_credits = 0;
- qm_port->cached_dir_credits = 0;
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_ldb_credits = 0;
+ qm_port->cached_dir_credits = 0;
+ } else
+ qm_port->cached_credits = 0;
+
/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
* the effective depth is smaller.
*/
qm_port->state = PORT_STARTED; /* enabled at create time */
qm_port->config_state = DLB2_CONFIGURED;
- qm_port->dir_credits = dir_credit_high_watermark;
- qm_port->ldb_credits = ldb_credit_high_watermark;
- qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
- qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
-
- DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
- qm_port_id,
- dequeue_depth,
- dir_credit_high_watermark,
- ldb_credit_high_watermark);
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->dir_credits = dir_credit_high_watermark;
+ qm_port->ldb_credits = ldb_credit_high_watermark;
+ qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
+ qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
+
+ DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
+ qm_port_id,
+ dequeue_depth,
+ dir_credit_high_watermark,
+ ldb_credit_high_watermark);
+ } else {
+ qm_port->credits = credit_high_watermark;
+ qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
+ DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d\n",
+ qm_port_id,
+ dequeue_depth,
+ credit_high_watermark);
+ }
rte_spinlock_unlock(&handle->resource_lock);
return 0;
dlb2 = dlb2_pmd_priv(dev);
- if (ev_port_id >= DLB2_MAX_NUM_PORTS)
+ if (ev_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version))
return -EINVAL;
if (port_conf->dequeue_depth >
return 0;
}
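+/* Combined-pool (DLB 2.5) counterpart of the LDB/DIR hw credit checks:
+ * refill the port's cached credits from the single pool and report
+ * exhaustion to the caller.
+ */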
+static inline int
+dlb2_check_enqueue_hw_credits(struct dlb2_port *qm_port)
+{
+ if (unlikely(qm_port->cached_credits == 0)) {
+ qm_port->cached_credits =
+ dlb2_port_credits_get(qm_port,
+ DLB2_COMBINED_POOL);
+ if (unlikely(qm_port->cached_credits == 0)) {
+ DLB2_INC_STAT(
+ qm_port->ev_port->stats.traffic.tx_nospc_hw_credits, 1);
+ DLB2_LOG_DBG("credits exhausted\n");
+ return 1; /* credits exhausted */
+ }
+ }
+
+ return 0;
+}
+
static __rte_always_inline void
dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
struct process_local_port_data *port_data)
case 3:
case 2:
case 1:
- /* At least one QE will be valid, so only zero out three */
- qe[1].cmd_byte = 0;
- qe[2].cmd_byte = 0;
- qe[3].cmd_byte = 0;
-
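+ /* The callers in this patch zero qm_port->qe4 before building
+ * HCWs, so unused QEs are already invalid here.
+ */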
for (i = 0; i < num; i++) {
qe[i].cmd_byte =
cmd_byte_map[qm_port->is_directed][ev[i].op];
qe[i].u.event_type.sub = ev[i].sub_event_type;
}
break;
+ case 0:
+ break;
}
}
if (!qm_queue->is_directed) {
/* Load balanced destination queue */
- if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
- rte_errno = -ENOSPC;
- return 1;
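+ /* Select the credit cache matching the device's pool layout
+ * before checking for available hardware credits.
+ */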
+ if (dlb2->version == DLB2_HW_V2) {
+ if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_ldb_credits;
+ } else {
+ if (dlb2_check_enqueue_hw_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_credits;
}
- cached_credits = &qm_port->cached_ldb_credits;
-
switch (ev->sched_type) {
case RTE_SCHED_TYPE_ORDERED:
DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
} else {
/* Directed destination queue */
- if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
- rte_errno = -ENOSPC;
- return 1;
+ if (dlb2->version == DLB2_HW_V2) {
+ if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_dir_credits;
+ } else {
+ if (dlb2_check_enqueue_hw_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_credits;
}
- cached_credits = &qm_port->cached_dir_credits;
-
DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
*sched_type = DLB2_SCHED_DIRECTED;
}
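+/* Common enqueue body. "use_delayed" is a literal in every caller, so
+ * the compiler can drop the delayed-pop handling from the non-delayed
+ * entry points.
+ */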
static inline uint16_t
-dlb2_event_enqueue_burst(void *event_port,
- const struct rte_event events[],
- uint16_t num)
+__dlb2_event_enqueue_burst(void *event_port,
+ const struct rte_event events[],
+ uint16_t num,
+ bool use_delayed)
{
struct dlb2_eventdev_port *ev_port = event_port;
struct dlb2_port *qm_port = &ev_port->qm_port;
struct process_local_port_data *port_data;
- int i, cnt;
+ int i;
RTE_ASSERT(ev_port->enq_configured);
RTE_ASSERT(events != NULL);
- cnt = 0;
+ i = 0;
port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
- for (i = 0; i < num; i += DLB2_NUM_QES_PER_CACHE_LINE) {
+ while (i < num) {
uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
+ int pop_offs = 0;
int j = 0;
+ memset(qm_port->qe4,
+ 0,
+ DLB2_NUM_QES_PER_CACHE_LINE *
+ sizeof(struct dlb2_enqueue_qe));
+
for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
const struct rte_event *ev = &events[i + j];
+ int16_t thresh = qm_port->token_pop_thresh;
+
+ if (use_delayed &&
+ qm_port->token_pop_mode == DELAYED_POP &&
+ (ev->op == RTE_EVENT_OP_FORWARD ||
+ ev->op == RTE_EVENT_OP_RELEASE) &&
+ qm_port->issued_releases >= thresh - 1) {
+ /* Insert the token pop QE and break out. This
+ * may result in a partial HCW, but that is
+ * simpler than supporting arbitrary QE
+ * insertion.
+ */
+ dlb2_construct_token_pop_qe(qm_port, j);
+
+ /* Reset the releases for the next QE batch */
+ qm_port->issued_releases -= thresh;
+
+ pop_offs = 1;
+ j++;
+ break;
+ }
if (dlb2_event_enqueue_prep(ev_port, qm_port, ev,
&sched_types[j],
if (j == 0)
break;
- dlb2_event_build_hcws(qm_port, &events[i], j,
+ dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
sched_types, queue_ids);
- if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
- qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
- dlb2_construct_token_pop_qe(qm_port, j);
-
- /* Reset the releases counter for the next QE batch */
- qm_port->issued_releases -= qm_port->token_pop_thresh;
- }
-
dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
- cnt += j;
+ /* Don't include the token pop QE in the enqueue count */
+ i += j - pop_offs;
- if (j < DLB2_NUM_QES_PER_CACHE_LINE)
+ /* Don't interpret j < DLB2_NUM_... as out-of-credits if
+ * pop_offs != 0
+ */
+ if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
break;
}
- if (qm_port->token_pop_mode == DELAYED_POP &&
- qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
- dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
- qm_port->issued_releases -= qm_port->token_pop_thresh;
- }
- return cnt;
+ return i;
+}
+
+static uint16_t
+dlb2_event_enqueue_burst(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb2_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb2_event_enqueue_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb2_event_enqueue_burst(event_port, events, num, true);
}
static inline uint16_t
dlb2_event_enqueue(void *event_port,
const struct rte_event events[])
{
- return dlb2_event_enqueue_burst(event_port, events, 1);
+ return __dlb2_event_enqueue_burst(event_port, events, 1, false);
+}
+
+static inline uint16_t
+dlb2_event_enqueue_delayed(void *event_port,
+ const struct rte_event events[])
+{
+ return __dlb2_event_enqueue_burst(event_port, events, 1, true);
}
static uint16_t
const struct rte_event events[],
uint16_t num)
{
- return dlb2_event_enqueue_burst(event_port, events, num);
+ return __dlb2_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb2_event_enqueue_new_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb2_event_enqueue_burst(event_port, events, num, true);
}
static uint16_t
const struct rte_event events[],
uint16_t num)
{
- return dlb2_event_enqueue_burst(event_port, events, num);
+ return __dlb2_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb2_event_enqueue_forward_burst_delayed(void *event_port,
+ const struct rte_event events[],
+ uint16_t num)
+{
+ return __dlb2_event_enqueue_burst(event_port, events, num, true);
+}
+
+static void
+dlb2_event_release(struct dlb2_eventdev *dlb2,
+ uint8_t port_id,
+ int n)
+{
+ struct process_local_port_data *port_data;
+ struct dlb2_eventdev_port *ev_port;
+ struct dlb2_port *qm_port;
+ int i;
+
+ if (port_id >= dlb2->num_ports) {
+ DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n",
+ port_id);
+ rte_errno = -EINVAL;
+ return;
+ }
+
+ ev_port = &dlb2->ev_ports[port_id];
+ qm_port = &ev_port->qm_port;
+ port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
+
+ i = 0;
+
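+ /* Directed ports don't send release QEs to hardware; only the
+ * software credit accounting below applies.
+ */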
+ if (qm_port->is_directed) {
+ i = n;
+ goto sw_credit_update;
+ }
+
+ while (i < n) {
+ int pop_offs = 0;
+ int j = 0;
+
+ /* Zero-out QEs */
+ qm_port->qe4[0].cmd_byte = 0;
+ qm_port->qe4[1].cmd_byte = 0;
+ qm_port->qe4[2].cmd_byte = 0;
+ qm_port->qe4[3].cmd_byte = 0;
+
+ for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
+ int16_t thresh = qm_port->token_pop_thresh;
+
+ if (qm_port->token_pop_mode == DELAYED_POP &&
+ qm_port->issued_releases >= thresh - 1) {
+ /* Insert the token pop QE */
+ dlb2_construct_token_pop_qe(qm_port, j);
+
+ /* Reset the releases for the next QE batch */
+ qm_port->issued_releases -= thresh;
+
+ pop_offs = 1;
+ j++;
+ break;
+ }
+
+ qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
+ qm_port->issued_releases++;
+ }
+
+ dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
+
+ /* Don't include the token pop QE in the release count */
+ i += j - pop_offs;
+ }
+
+sw_credit_update:
+ /* each release returns one credit */
+ if (!ev_port->outstanding_releases) {
+ DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
+ __func__);
+ return;
+ }
+ ev_port->outstanding_releases -= i;
+ ev_port->inflight_credits += i;
+
+ /* Replenish s/w credits if enough releases are performed */
+ dlb2_replenish_sw_credits(dlb2, ev_port);
}
static inline void
/* increment port credits, and return to pool if exceeds threshold */
if (!qm_port->is_directed) {
- qm_port->cached_ldb_credits += num;
- if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
- qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
- qm_port->cached_ldb_credits -= batch_size;
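+ /* Cache up to 2x batch_size credits on the port; beyond that,
+ * return one batch to the shared pool.
+ */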
+ if (qm_port->dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_ldb_credits += num;
+ if (qm_port->cached_ldb_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_LDB_QUEUE],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_ldb_credits -= batch_size;
+ }
+ } else {
+ qm_port->cached_credits += num;
+ if (qm_port->cached_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_COMBINED_POOL],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_credits -= batch_size;
+ }
}
} else {
- qm_port->cached_dir_credits += num;
- if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
- qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
- qm_port->cached_dir_credits -= batch_size;
+ if (qm_port->dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_dir_credits += num;
+ if (qm_port->cached_dir_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_DIR_QUEUE],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_dir_credits -= batch_size;
+ }
+ } else {
+ qm_port->cached_credits += num;
+ if (qm_port->cached_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_COMBINED_POOL],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_credits -= batch_size;
+ }
}
}
}
if (elapsed_ticks >= timeout) {
return 1;
} else if (dlb2->umwait_allowed) {
+ struct rte_power_monitor_cond pmc;
volatile struct dlb2_dequeue_qe *cq_base;
union {
uint64_t raw_qe[2];
else
expected_value = 0;
- rte_power_monitor(monitor_addr, expected_value,
- qe_mask.raw_qe[1], timeout + start_ticks,
- sizeof(uint64_t));
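+ /* rte_power_monitor() now takes its wake-up condition packed in
+ * struct rte_power_monitor_cond rather than as discrete arguments.
+ */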
+ pmc.addr = monitor_addr;
+ pmc.val = expected_value;
+ pmc.mask = qe_mask.raw_qe[1];
+ pmc.size = sizeof(uint64_t);
+
+ rte_power_monitor(&pmc, timeout + start_ticks);
DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
} else {
qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
}
-static int
-dlb2_event_release(struct dlb2_eventdev *dlb2,
- uint8_t port_id,
- int n)
-{
- struct process_local_port_data *port_data;
- struct dlb2_eventdev_port *ev_port;
- struct dlb2_port *qm_port;
- int i, cnt;
-
- if (port_id > dlb2->num_ports) {
- DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n",
- port_id);
- rte_errno = -EINVAL;
- return rte_errno;
- }
-
- ev_port = &dlb2->ev_ports[port_id];
- qm_port = &ev_port->qm_port;
- port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
-
- cnt = 0;
-
- if (qm_port->is_directed) {
- cnt = n;
- goto sw_credit_update;
- }
-
- for (i = 0; i < n; i += DLB2_NUM_QES_PER_CACHE_LINE) {
- int j;
-
- /* Zero-out QEs */
- qm_port->qe4[0].cmd_byte = 0;
- qm_port->qe4[1].cmd_byte = 0;
- qm_port->qe4[2].cmd_byte = 0;
- qm_port->qe4[3].cmd_byte = 0;
-
- for (j = 0; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++)
- qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
-
- qm_port->issued_releases += j;
-
- if (j == 0)
- break;
-
- if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
- qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
- dlb2_construct_token_pop_qe(qm_port, j);
-
- /* Reset the releases counter for the next QE batch */
- qm_port->issued_releases -= qm_port->token_pop_thresh;
- }
-
- dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
-
- cnt += j;
- }
-
- if (qm_port->token_pop_mode == DELAYED_POP &&
- qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
- dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
- qm_port->issued_releases -= qm_port->token_pop_thresh;
- }
-
-sw_credit_update:
- /* each release returns one credit */
- if (!ev_port->outstanding_releases) {
- DLB2_LOG_ERR("Unrecoverable application error. Outstanding releases underflowed.\n");
- rte_errno = -ENOTRECOVERABLE;
- return rte_errno;
- }
-
- ev_port->outstanding_releases -= cnt;
- ev_port->inflight_credits += cnt;
-
- /* Replenish s/w credits if enough releases are performed */
- dlb2_replenish_sw_credits(dlb2, ev_port);
- return 0;
-}
-
static inline int16_t
dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port,
if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
uint16_t out_rels = ev_port->outstanding_releases;
- if (dlb2_event_release(dlb2, ev_port->id, out_rels))
- return 0; /* rte_errno is set */
+ dlb2_event_release(dlb2, ev_port->id, out_rels);
DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
}
if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
uint16_t out_rels = ev_port->outstanding_releases;
- if (dlb2_event_release(dlb2, ev_port->id, out_rels))
- return 0; /* rte_errno is set */
+ dlb2_event_release(dlb2, ev_port->id, out_rels);
DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
}
}
}
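+/* Convert a dequeue timeout from nanoseconds to timer ticks. */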
+static int
+dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ RTE_SET_USED(dev);
+ uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
+
+ *timeout_ticks = ns * cycles_per_ns;
+
+ return 0;
+}
+
static void
dlb2_entry_points_init(struct rte_eventdev *dev)
{
.port_unlink = dlb2_eventdev_port_unlink,
.port_unlinks_in_progress =
dlb2_eventdev_port_unlinks_in_progress,
+ .timeout_ticks = dlb2_eventdev_timeout_ticks,
.dump = dlb2_eventdev_dump,
.xstats_get = dlb2_eventdev_xstats_get,
.xstats_get_names = dlb2_eventdev_xstats_get_names,
}
/* Initialize each port's token pop mode */
- for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
+ for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++)
dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
rte_spinlock_init(&dlb2->qm_instance.resource_lock);
int
dlb2_parse_params(const char *params,
const char *name,
- struct dlb2_devargs *dlb2_args)
+ struct dlb2_devargs *dlb2_args,
+ uint8_t version)
{
int ret = 0;
static const char * const args[] = { NUMA_NODE_ARG,
return ret;
}
- ret = rte_kvargs_process(kvlist,
+ if (version == DLB2_HW_V2) {
+ ret = rte_kvargs_process(kvlist,
DLB2_NUM_DIR_CREDITS,
set_num_dir_credits,
&dlb2_args->num_dir_credits_override);
- if (ret != 0) {
- DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
- name);
- rte_kvargs_free(kvlist);
- return ret;
+ if (ret != 0) {
+ DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
}
-
ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
set_dev_id,
&dlb2_args->dev_id);
return ret;
}
- ret = rte_kvargs_process(
+ if (version == DLB2_HW_V2) {
+ ret = rte_kvargs_process(
kvlist,
DLB2_QID_DEPTH_THRESH_ARG,
set_qid_depth_thresh,
&dlb2_args->qid_depth_thresholds);
+ } else {
+ ret = rte_kvargs_process(
+ kvlist,
+ DLB2_QID_DEPTH_THRESH_ARG,
+ set_qid_depth_thresh_v2_5,
+ &dlb2_args->qid_depth_thresholds);
+ }
if (ret != 0) {
DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
name);