*/
evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
- evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
-
+ if (dlb2->version == DLB2_HW_V2_5) {
+ evdev_dlb2_default_info.max_num_events +=
+ dlb2->max_credits;
+ } else {
+ evdev_dlb2_default_info.max_num_events +=
+ dlb2->max_ldb_credits;
+ }
evdev_dlb2_default_info.max_event_queues =
RTE_MIN(evdev_dlb2_default_info.max_event_queues,
RTE_EVENT_MAX_QUEUES_PER_DEV);
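For context, `max_num_events` is what applications ultimately read back through `rte_event_dev_info_get()`; on V2.5 it now reflects the single combined credit pool rather than the load-balanced pool alone. A minimal consumer sketch (the helper and its error handling are illustrative, not part of this patch):

```c
#include <rte_eventdev.h>

/* Read the advertised event limit; on DLB 2.5 this reflects the
 * single combined credit pool rather than the LDB pool alone.
 */
static int32_t
event_limit(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	if (rte_event_dev_info_get(dev_id, &info) < 0)
		return -1;
	return info.max_num_events;
}
```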
static int
dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
- const struct dlb2_hw_rsrcs *resources_asked)
+ const struct dlb2_hw_rsrcs *resources_asked,
+ uint8_t device_version)
{
int ret = 0;
struct dlb2_create_sched_domain_args *cfg;
/* DIR ports and queues */
cfg->num_dir_ports = resources_asked->num_dir_ports;
-
- cfg->num_dir_credits = resources_asked->num_dir_credits;
+ if (device_version == DLB2_HW_V2_5)
+ cfg->num_credits = resources_asked->num_credits;
+ else
+ cfg->num_dir_credits = resources_asked->num_dir_credits;
/* LDB queues */
break;
}
- cfg->num_ldb_credits =
- resources_asked->num_ldb_credits;
+ if (device_version == DLB2_HW_V2)
+ cfg->num_ldb_credits = resources_asked->num_ldb_credits;
cfg->num_atomic_inflights =
DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
- DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
- cfg->num_ldb_queues,
- resources_asked->num_ldb_ports,
- cfg->num_dir_ports,
- cfg->num_atomic_inflights,
- cfg->num_hist_list_entries,
- cfg->num_ldb_credits,
- cfg->num_dir_credits);
+ if (device_version == DLB2_HW_V2_5) {
+ DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n",
+ cfg->num_ldb_queues,
+ resources_asked->num_ldb_ports,
+ cfg->num_dir_ports,
+ cfg->num_atomic_inflights,
+ cfg->num_hist_list_entries,
+ cfg->num_credits);
+ } else {
+ DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
+ cfg->num_ldb_queues,
+ resources_asked->num_ldb_ports,
+ cfg->num_dir_ports,
+ cfg->num_atomic_inflights,
+ cfg->num_hist_list_entries,
+ cfg->num_ldb_credits,
+ cfg->num_dir_credits);
+ }
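The gating above assumes the create-args structure carries both the split V2.0 credit fields and the combined V2.5 field. Roughly something like the sketch below, limited to the fields this hunk touches (exact layout and ordering are assumptions):

```c
#include <stdint.h>

/* Sketch of the create-args credit fields implied by this hunk; the
 * real structure has more members and may arrange them differently.
 */
struct sched_domain_args_sketch {
	uint32_t num_ldb_queues;
	uint32_t num_dir_ports;
	uint32_t num_atomic_inflights;
	uint32_t num_hist_list_entries;
	uint32_t num_ldb_credits; /* DLB 2.0: load-balanced pool */
	uint32_t num_dir_credits; /* DLB 2.0: directed pool */
	uint32_t num_credits;     /* DLB 2.5: single combined pool */
};
```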
/* Configure the QM */
*/
if (dlb2->configured) {
dlb2_hw_reset_sched_domain(dev, true);
-
ret = dlb2_hw_query_resources(dlb2);
if (ret) {
DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
/* 1 dir queue per dir port */
rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
- /* Scale down nb_events_limit by 4 for directed credits, since there
- * are 4x as many load-balanced credits.
- */
- rsrcs->num_ldb_credits = 0;
- rsrcs->num_dir_credits = 0;
+ if (dlb2->version == DLB2_HW_V2_5) {
+ rsrcs->num_credits = 0;
+ if (rsrcs->num_ldb_queues || rsrcs->num_dir_ports)
+ rsrcs->num_credits = config->nb_events_limit;
+ } else {
+ /* Scale down nb_events_limit by 4 for directed credits,
+ * since there are 4x as many load-balanced credits.
+ */
+ rsrcs->num_ldb_credits = 0;
+ rsrcs->num_dir_credits = 0;
- if (rsrcs->num_ldb_queues)
- rsrcs->num_ldb_credits = config->nb_events_limit;
- if (rsrcs->num_dir_ports)
- rsrcs->num_dir_credits = config->nb_events_limit / 4;
- if (dlb2->num_dir_credits_override != -1)
- rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
+ if (rsrcs->num_ldb_queues)
+ rsrcs->num_ldb_credits = config->nb_events_limit;
+ if (rsrcs->num_dir_ports)
+ rsrcs->num_dir_credits = config->nb_events_limit / 4;
+ if (dlb2->num_dir_credits_override != -1)
+ rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
+ }
- if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
+ if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) {
DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
return -ENODEV;
}
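To make the retained 2.0 split concrete: with `nb_events_limit = 2048` and no override, the driver asks for 2048 load-balanced credits and 2048 / 4 = 512 directed credits, while on 2.5 the same configuration asks for one pool of 2048. A worked sketch with illustrative values:

```c
#include <stdint.h>

/* Worked example of the 2.0 vs. 2.5 resource ask above. */
static void
credit_split_example(void)
{
	uint32_t nb_events_limit = 2048;
	uint32_t v2_ldb_credits = nb_events_limit;     /* 2048 */
	uint32_t v2_dir_credits = nb_events_limit / 4; /*  512 */
	uint32_t v25_credits = nb_events_limit;        /* 2048, one pool */

	(void)v2_ldb_credits; (void)v2_dir_credits; (void)v25_credits;
}
```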
dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
dlb2->num_dir_queues = dlb2->num_dir_ports;
- dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
- dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
- dlb2->dir_credit_pool = rsrcs->num_dir_credits;
- dlb2->max_dir_credits = rsrcs->num_dir_credits;
+ if (dlb2->version == DLB2_HW_V2_5) {
+ dlb2->credit_pool = rsrcs->num_credits;
+ dlb2->max_credits = rsrcs->num_credits;
+ } else {
+ dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
+ dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
+ dlb2->dir_credit_pool = rsrcs->num_dir_credits;
+ dlb2->max_dir_credits = rsrcs->num_dir_credits;
+ }
dlb2->configured = true;
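For reference, the bookkeeping above implies the per-device structure now carries both accounting schemes side by side; a sketch of the members this hunk assigns (field names taken from the diff, layout assumed):

```c
#include <stdint.h>

/* Per-device credit fields implied by the assignments above. */
struct dlb2_eventdev_credits_sketch {
	uint8_t version;          /* DLB2_HW_V2 or DLB2_HW_V2_5 */
	/* DLB 2.0 split accounting */
	uint32_t ldb_credit_pool; /* shared pool ports draw from */
	uint32_t max_ldb_credits;
	uint32_t dir_credit_pool;
	uint32_t max_dir_credits;
	/* DLB 2.5 combined accounting */
	uint32_t credit_pool;
	uint32_t max_credits;
};
```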
struct dlb2_port *qm_port = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t qm_port_id;
- uint16_t ldb_credit_high_watermark;
- uint16_t dir_credit_high_watermark;
+ uint16_t ldb_credit_high_watermark = 0;
+ uint16_t dir_credit_high_watermark = 0;
+ uint16_t credit_high_watermark = 0;
if (handle == NULL)
return -EINVAL;
/* User controls the LDB high watermark via enqueue depth. The DIR high
* watermark is equal, unless the directed credit pool is too small.
*/
- ldb_credit_high_watermark = enqueue_depth;
-
- /* If there are no directed ports, the kernel driver will ignore this
- * port's directed credit settings. Don't use enqueue_depth if it would
- * require more directed credits than are available.
- */
- dir_credit_high_watermark =
- RTE_MIN(enqueue_depth,
- handle->cfg.num_dir_credits / dlb2->num_ports);
+ if (dlb2->version == DLB2_HW_V2) {
+ ldb_credit_high_watermark = enqueue_depth;
+ /* If there are no directed ports, the kernel driver will
+ * ignore this port's directed credit settings. Don't use
+ * enqueue_depth if it would require more directed credits
+ * than are available.
+ */
+ dir_credit_high_watermark =
+ RTE_MIN(enqueue_depth,
+ handle->cfg.num_dir_credits / dlb2->num_ports);
+ } else {
+ credit_high_watermark = enqueue_depth;
+ }
/* Per QM values */
qm_port->id = qm_port_id;
- qm_port->cached_ldb_credits = 0;
- qm_port->cached_dir_credits = 0;
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_ldb_credits = 0;
+ qm_port->cached_dir_credits = 0;
+ } else {
+ qm_port->cached_credits = 0;
+ }
+
/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
* the effective depth is smaller.
*/
qm_port->state = PORT_STARTED; /* enabled at create time */
qm_port->config_state = DLB2_CONFIGURED;
- qm_port->dir_credits = dir_credit_high_watermark;
- qm_port->ldb_credits = ldb_credit_high_watermark;
- qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
- qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
-
- DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
- qm_port_id,
- dequeue_depth,
- qm_port->ldb_credits,
- qm_port->dir_credits);
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->dir_credits = dir_credit_high_watermark;
+ qm_port->ldb_credits = ldb_credit_high_watermark;
+ qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
+ qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
+
+ DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
+ qm_port_id,
+ dequeue_depth,
+ qm_port->ldb_credits,
+ qm_port->dir_credits);
+ } else {
+ qm_port->credits = credit_high_watermark;
+ qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
+ DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d\n",
+ qm_port_id,
+ dequeue_depth,
+ qm_port->credits);
+ }
rte_spinlock_unlock(&handle->resource_lock);
return 0;
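The `credit_pool[DLB2_COMBINED_POOL]` indexing assumes the pool-type enum gains (or aliases) a combined entry alongside the existing LDB/DIR ones; one plausible shape, where the combined slot reuses the LDB index since the two are never populated together (the alias and values are assumptions, not from this patch):

```c
/* One plausible pool-type layout: on 2.5 only the combined slot is
 * populated, on 2.0 only the LDB/DIR slots are.
 */
enum queue_pool_sketch {
	LDB_QUEUE = 0,            /* stands in for DLB2_LDB_QUEUE */
	DIR_QUEUE = 1,            /* stands in for DLB2_DIR_QUEUE */
	COMBINED_POOL = LDB_QUEUE /* stands in for DLB2_COMBINED_POOL */
};
```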
struct dlb2_port *qm_port = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t qm_port_id;
- uint16_t ldb_credit_high_watermark;
- uint16_t dir_credit_high_watermark;
+ uint16_t ldb_credit_high_watermark = 0;
+ uint16_t dir_credit_high_watermark = 0;
+ uint16_t credit_high_watermark = 0;
if (dlb2 == NULL || handle == NULL)
return -EINVAL;
/* User controls the LDB high watermark via enqueue depth. The DIR high
* watermark is equal, unless the directed credit pool is too small.
*/
- ldb_credit_high_watermark = enqueue_depth;
-
- /* Don't use enqueue_depth if it would require more directed credits
- * than are available.
- */
- dir_credit_high_watermark =
- RTE_MIN(enqueue_depth,
- handle->cfg.num_dir_credits / dlb2->num_ports);
+ if (dlb2->version == DLB2_HW_V2) {
+ ldb_credit_high_watermark = enqueue_depth;
+ /* Don't use enqueue_depth if it would require more directed
+ * credits than are available.
+ */
+ dir_credit_high_watermark =
+ RTE_MIN(enqueue_depth,
+ handle->cfg.num_dir_credits / dlb2->num_ports);
+ } else {
+ credit_high_watermark = enqueue_depth;
+ }
/* Per QM values */
qm_port->id = qm_port_id;
- qm_port->cached_ldb_credits = 0;
- qm_port->cached_dir_credits = 0;
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_ldb_credits = 0;
+ qm_port->cached_dir_credits = 0;
+ } else {
+ qm_port->cached_credits = 0;
+ }
+
/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
* the effective depth is smaller.
*/
qm_port->state = PORT_STARTED; /* enabled at create time */
qm_port->config_state = DLB2_CONFIGURED;
- qm_port->dir_credits = dir_credit_high_watermark;
- qm_port->ldb_credits = ldb_credit_high_watermark;
- qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
- qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
-
- DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
- qm_port_id,
- dequeue_depth,
- dir_credit_high_watermark,
- ldb_credit_high_watermark);
+ if (dlb2->version == DLB2_HW_V2) {
+ qm_port->dir_credits = dir_credit_high_watermark;
+ qm_port->ldb_credits = ldb_credit_high_watermark;
+ qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
+ qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
+
+ DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
+ qm_port_id,
+ dequeue_depth,
+ dir_credit_high_watermark,
+ ldb_credit_high_watermark);
+ } else {
+ qm_port->credits = credit_high_watermark;
+ qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
+ DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d\n",
+ qm_port_id,
+ dequeue_depth,
+ credit_high_watermark);
+ }
rte_spinlock_unlock(&handle->resource_lock);
return 0;
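A worked view of the watermark clamp both port-create paths share: with `enqueue_depth = 64`, 512 directed credits, and 64 ports, each port can be backed by at most 512 / 64 = 8 directed credits, so the DIR watermark drops to 8 while the LDB watermark stays at 64; on 2.5 the single watermark is simply the enqueue depth. Illustrative numbers only:

```c
#include <stdint.h>
#include <rte_common.h>

/* Worked example of the 2.0 directed-credit watermark clamp. */
static uint16_t
dir_watermark_example(void)
{
	uint16_t enqueue_depth = 64;
	uint32_t num_dir_credits = 512, num_ports = 64;

	/* 512 / 64 = 8 credits per port; the clamp wins over depth 64 */
	return RTE_MIN(enqueue_depth, num_dir_credits / num_ports);
}
```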
return 0;
}
+/* DLB 2.5 only: check the port's combined-credit cache, refilling it
+ * from the shared pool when empty; returns 1 if the pool is exhausted
+ * and 0 otherwise.
+ */
+static inline int
+dlb2_check_enqueue_hw_credits(struct dlb2_port *qm_port)
+{
+ if (unlikely(qm_port->cached_credits == 0)) {
+ qm_port->cached_credits =
+ dlb2_port_credits_get(qm_port,
+ DLB2_COMBINED_POOL);
+ if (unlikely(qm_port->cached_credits == 0)) {
+ DLB2_INC_STAT(
+ qm_port->ev_port->stats.traffic.tx_nospc_hw_credits, 1);
+ DLB2_LOG_DBG("credits exhausted\n");
+ return 1; /* credits exhausted */
+ }
+ }
+
+ return 0;
+}
+
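The new helper mirrors the existing LDB/DIR variants: consume from a per-port cache, and only when it runs dry pull a fresh batch from the shared pool. A hedged sketch of the refill that `dlb2_port_credits_get()` is assumed to perform (the batch constant, the CAS loop, and all names here are assumptions based on the surrounding code, not this patch):

```c
#include <stdint.h>

#define BATCH_SZ 32 /* assumed software credit batch size */

/* Claim up to one batch from a shared pool with a CAS so concurrent
 * ports cannot over-subscribe it; returns the number claimed (0 if
 * the pool is empty or another port won the race).
 */
static inline uint16_t
pool_credits_get_sketch(uint32_t *pool)
{
	uint32_t avail = __atomic_load_n(pool, __ATOMIC_SEQ_CST);
	uint32_t take = avail < BATCH_SZ ? avail : BATCH_SZ;

	if (take == 0)
		return 0;
	if (__atomic_compare_exchange_n(pool, &avail, avail - take,
					false, __ATOMIC_SEQ_CST,
					__ATOMIC_SEQ_CST))
		return (uint16_t)take;
	return 0;
}
```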
static __rte_always_inline void
dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
struct process_local_port_data *port_data)
if (!qm_queue->is_directed) {
/* Load balanced destination queue */
- if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
- rte_errno = -ENOSPC;
- return 1;
+ if (dlb2->version == DLB2_HW_V2) {
+ if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_ldb_credits;
+ } else {
+ if (dlb2_check_enqueue_hw_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_credits;
}
- cached_credits = &qm_port->cached_ldb_credits;
-
switch (ev->sched_type) {
case RTE_SCHED_TYPE_ORDERED:
DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
} else {
/* Directed destination queue */
- if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
- rte_errno = -ENOSPC;
- return 1;
+ if (dlb2->version == DLB2_HW_V2) {
+ if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_dir_credits;
+ } else {
+ if (dlb2_check_enqueue_hw_credits(qm_port)) {
+ rte_errno = -ENOSPC;
+ return 1;
+ }
+ cached_credits = &qm_port->cached_credits;
}
- cached_credits = &qm_port->cached_dir_credits;
-
DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
*sched_type = DLB2_SCHED_DIRECTED;
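Note the shape of both branches: each version check only selects which cache `cached_credits` points at, so the remainder of the enqueue path can account against it without further version checks. Schematically (a simplification of the idea, not the function's exact tail):

```c
#include <stdint.h>

/* Schematic: the branch above only picks the cache pointer; one
 * decrement then serves LDB, DIR, and the 2.5 combined cache alike.
 */
static void
consume_one_credit(uint16_t *cached_credits)
{
	if (cached_credits != NULL)
		(*cached_credits)--;
}
```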
/* increment port credits, and return to pool if exceeds threshold */
if (!qm_port->is_directed) {
- qm_port->cached_ldb_credits += num;
- if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
- qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
- qm_port->cached_ldb_credits -= batch_size;
+ if (qm_port->dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_ldb_credits += num;
+ if (qm_port->cached_ldb_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_LDB_QUEUE],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_ldb_credits -= batch_size;
+ }
+ } else {
+ qm_port->cached_credits += num;
+ if (qm_port->cached_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_COMBINED_POOL],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_credits -= batch_size;
+ }
}
} else {
- qm_port->cached_dir_credits += num;
- if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
- qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
- qm_port->cached_dir_credits -= batch_size;
+ if (qm_port->dlb2->version == DLB2_HW_V2) {
+ qm_port->cached_dir_credits += num;
+ if (qm_port->cached_dir_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_DIR_QUEUE],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_dir_credits -= batch_size;
+ }
+ } else {
+ qm_port->cached_credits += num;
+ if (qm_port->cached_credits >= 2 * batch_size) {
+ __atomic_fetch_add(
+ qm_port->credit_pool[DLB2_COMBINED_POOL],
+ batch_size, __ATOMIC_SEQ_CST);
+ qm_port->cached_credits -= batch_size;
+ }
}
}
}
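The return path applies the same hysteresis to all three caches: returned credits accumulate locally, and only once the cache reaches twice the batch size does one batch flush back to the shared pool, leaving a full batch cached to avoid ping-ponging on the atomic. A worked example with a batch of 32 (illustrative values):

```c
#include <stdint.h>

/* Worked example of the 2 * batch_size return threshold. */
static void
credit_return_example(void)
{
	uint16_t cached = 60;
	const uint16_t batch = 32, num = 8;

	cached += num;             /* 68 */
	if (cached >= 2 * batch) { /* 68 >= 64: time to flush */
		/* __atomic_fetch_add(pool, batch, __ATOMIC_SEQ_CST); */
		cached -= batch;   /* 36 stay cached, 32 go back */
	}
	(void)cached;
}
```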