return 0;
}
+static int
+dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ int ret, i;
+
+ /* If an event queue or port was previously configured, but hasn't been
+ * reconfigured, reapply its original configuration.
+ */
+ for (i = 0; i < dlb->num_queues; i++) {
+ struct dlb_eventdev_queue *ev_queue;
+
+ ev_queue = &dlb->ev_queues[i];
+
+ if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
+ continue;
+
+ ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < dlb->num_ports; i++) {
+ struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+
+ if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
+ continue;
+
+ ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
+ i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int
set_dev_id(const char *key __rte_unused,
const char *value,
return 0;
}
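+/* Ask the hardware interface to create a directed queue. Returns the queue
+ * ID assigned by the hardware, or -EINVAL on failure.
+ */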
+static int32_t
+dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
+{
+ struct dlb_hw_dev *handle = &dlb->qm_instance;
+ struct dlb_create_dir_queue_args cfg;
+ struct dlb_cmd_response response;
+ int32_t ret;
+
+ cfg.response = (uintptr_t)&response;
+
+ /* The directed port is always configured before its queue */
+ cfg.port_id = qm_port_id;
+
+ ret = dlb_iface_dir_queue_create(handle, &cfg);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
+ ret, dlb_error_strings[response.status]);
+ return -EINVAL;
+ }
+
+ return response.id;
+}
+
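+/* Create a directed queue in hardware and record the mapping between the
+ * hardware queue ID and the eventdev queue ID.
+ */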
+static int
+dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
+ struct dlb_eventdev_queue *ev_queue,
+ struct dlb_eventdev_port *ev_port)
+{
+ int32_t qm_qid;
+
+ qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
+
+ if (qm_qid < 0) {
+ DLB_LOG_ERR("Failed to create the DIR queue\n");
+ return qm_qid;
+ }
+
+ dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+ ev_queue->qm_queue.id = qm_qid;
+
+ return 0;
+}
+
static int16_t
dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
uint32_t qm_port_id,
return ret;
}
-static int32_t
-dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
-{
- struct dlb_hw_dev *handle = &dlb->qm_instance;
- struct dlb_create_dir_queue_args cfg;
- struct dlb_cmd_response response;
- int32_t ret;
-
- cfg.response = (uintptr_t)&response;
-
- /* The directed port is always configured before its queue */
- cfg.port_id = qm_port_id;
-
- ret = dlb_iface_dir_queue_create(handle, &cfg);
- if (ret < 0) {
- DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
- ret, dlb_error_strings[response.status]);
- return -EINVAL;
- }
-
- return response.id;
-}
-
-static int
-dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
- struct dlb_eventdev_queue *ev_queue,
- struct dlb_eventdev_port *ev_port)
-{
- int32_t qm_qid;
-
- qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
-
- if (qm_qid < 0) {
- DLB_LOG_ERR("Failed to create the DIR queue\n");
- return qm_qid;
- }
-
- dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
-
- ev_queue->qm_queue.id = qm_qid;
-
- return 0;
-}
-
static int
dlb_do_port_link(struct rte_eventdev *dev,
struct dlb_eventdev_queue *ev_queue,
return 0;
}
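+/* Apply the port->queue links that were requested before the device was
+ * started. Each port's link[] array holds the deferred link requests.
+ */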
+static int
+dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ int i;
+
+ /* Perform requested port->queue links */
+ for (i = 0; i < dlb->num_ports; i++) {
+ struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+ int j;
+
+ for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+ struct dlb_eventdev_queue *ev_queue;
+ uint8_t prio, queue_id;
+
+ if (!ev_port->link[j].valid)
+ continue;
+
+ prio = ev_port->link[j].priority;
+ queue_id = ev_port->link[j].queue_id;
+
+ if (dlb_validate_port_link(ev_port, queue_id, true, j))
+ return -EINVAL;
+
+ ev_queue = &dlb->ev_queues[queue_id];
+
+ if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
const uint8_t queues[], const uint8_t priorities[],
return i;
}
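+/* Transition the device from stopped to started: reapply any port/queue
+ * configuration that needs to be reestablished after a reconfigure, apply
+ * the deferred port->queue links, verify that all ports are set up and all
+ * queues are linked, then start the scheduling domain in hardware.
+ */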
+static int
+dlb_eventdev_start(struct rte_eventdev *dev)
+{
+ struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+ struct dlb_hw_dev *handle = &dlb->qm_instance;
+ struct dlb_start_domain_args cfg;
+ struct dlb_cmd_response response;
+ int ret, i;
+
+ rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+ if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
+ DLB_LOG_ERR("bad state %d for dev_start\n",
+ (int)dlb->run_state);
+ rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+ return -EINVAL;
+ }
+ dlb->run_state = DLB_RUN_STATE_STARTING;
+ rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+ /* If the device was configured more than once, some event ports and/or
+ * queues may need to be reconfigured.
+ */
+ ret = dlb_eventdev_reapply_configuration(dev);
+ if (ret)
+ return ret;
+
+ /* The DLB PMD delays port links until the device is started. */
+ ret = dlb_eventdev_apply_port_links(dev);
+ if (ret)
+ return ret;
+
+ cfg.response = (uintptr_t)&response;
+
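+ /* All event ports must be set up before the device can be started */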
+ for (i = 0; i < dlb->num_ports; i++) {
+ if (!dlb->ev_ports[i].setup_done) {
+ DLB_LOG_ERR("dlb: port %d not setup", i);
+ return -ESTALE;
+ }
+ }
+
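+ /* Every event queue must be linked to at least one event port */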
+ for (i = 0; i < dlb->num_queues; i++) {
+ if (dlb->ev_queues[i].num_links == 0) {
+ DLB_LOG_ERR("dlb: queue %d is not linked", i);
+ return -ENOLINK;
+ }
+ }
+
+ ret = dlb_iface_sched_domain_start(handle, &cfg);
+ if (ret < 0) {
+ DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
+ ret, dlb_error_strings[response.status]);
+ return ret;
+ }
+
+ dlb->run_state = DLB_RUN_STATE_STARTED;
+ DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
+
+ return 0;
+}
+
void
dlb_entry_points_init(struct rte_eventdev *dev)
{
static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
.dev_infos_get = dlb_eventdev_info_get,
.dev_configure = dlb_eventdev_configure,
+ .dev_start = dlb_eventdev_start,
.queue_def_conf = dlb_eventdev_queue_default_conf_get,
.port_def_conf = dlb_eventdev_port_default_conf_get,
.queue_setup = dlb_eventdev_queue_setup,
return 0;
}
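+/* Verify that the domain exists, has been configured, and has not already
+ * been started.
+ */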
+static int dlb_verify_start_domain_args(struct dlb_hw *hw,
+ u32 domain_id,
+ struct dlb_cmd_response *resp)
+{
+ struct dlb_domain *domain;
+
+ domain = dlb_get_domain_from_id(hw, domain_id);
+
+ if (domain == NULL) {
+ resp->status = DLB_ST_INVALID_DOMAIN_ID;
+ return -1;
+ }
+
+ if (!domain->configured) {
+ resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+ return -1;
+ }
+
+ if (domain->started) {
+ resp->status = DLB_ST_DOMAIN_STARTED;
+ return -1;
+ }
+
+ return 0;
+}
+
static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue,
struct dlb_cmd_response *resp)
return 0;
}
+static void dlb_log_start_domain(struct dlb_hw *hw, u32 domain_id)
+{
+ DLB_HW_INFO(hw, "DLB start domain arguments:\n");
+ DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
+}
+
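+/* Propagate a load-balanced credit pool's available credit count to its
+ * hardware count register.
+ */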
+static void dlb_ldb_pool_write_credit_count_reg(struct dlb_hw *hw,
+ u32 pool_id)
+{
+ union dlb_chp_ldb_pool_crd_cnt r0 = { {0} };
+ struct dlb_credit_pool *pool;
+
+ pool = &hw->rsrcs.ldb_credit_pools[pool_id];
+
+ r0.field.count = pool->avail_credits;
+
+ DLB_CSR_WR(hw,
+ DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
+ r0.val);
+}
+
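+/* Propagate a directed credit pool's available credit count to its hardware
+ * count register.
+ */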
+static void dlb_dir_pool_write_credit_count_reg(struct dlb_hw *hw,
+ u32 pool_id)
+{
+ union dlb_chp_dir_pool_crd_cnt r0 = { {0} };
+ struct dlb_credit_pool *pool;
+
+ pool = &hw->rsrcs.dir_credit_pools[pool_id];
+
+ r0.field.count = pool->avail_credits;
+
+ DLB_CSR_WR(hw,
+ DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
+ r0.val);
+}
+
+/**
+ * dlb_hw_start_domain() - Lock the domain configuration
+ * @hw: Contains the current state of the DLB hardware.
+ * @domain_id: Domain ID.
+ * @arg: User-provided arguments.
+ * @resp: Response to user.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb_hw_start_domain(struct dlb_hw *hw,
+ u32 domain_id,
+ struct dlb_start_domain_args *arg,
+ struct dlb_cmd_response *resp)
+{
+ struct dlb_list_entry *iter;
+ struct dlb_dir_pq_pair *dir_queue;
+ struct dlb_ldb_queue *ldb_queue;
+ struct dlb_credit_pool *pool;
+ struct dlb_domain *domain;
+ RTE_SET_USED(arg);
+ RTE_SET_USED(iter);
+
+ dlb_log_start_domain(hw, domain_id);
+
+ if (dlb_verify_start_domain_args(hw, domain_id, resp))
+ return -EINVAL;
+
+ domain = dlb_get_domain_from_id(hw, domain_id);
+ if (domain == NULL) {
+ DLB_HW_ERR(hw,
+ "[%s():%d] Internal error: domain not found\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ /* Write the domain's pool credit counts, which have been updated
+ * during port configuration. The sum of the pool credit count plus
+ * each producer port's credit count must equal the pool's credit
+ * allocation *before* traffic is sent.
+ */
+ DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
+ dlb_ldb_pool_write_credit_count_reg(hw, pool->id);
+
+ DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
+ dlb_dir_pool_write_credit_count_reg(hw, pool->id);
+
+ /* Enable load-balanced and directed queue write permissions for the
+ * queues this domain owns. Without this, the DLB will drop all
+ * incoming traffic to those queues.
+ */
+ DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
+ union dlb_sys_ldb_vasqid_v r0 = { {0} };
+ unsigned int offs;
+
+ r0.field.vasqid_v = 1;
+
+ offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + ldb_queue->id;
+
+ DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
+ }
+
+ DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
+ union dlb_sys_dir_vasqid_v r0 = { {0} };
+ unsigned int offs;
+
+ r0.field.vasqid_v = 1;
+
+ offs = domain->id * DLB_MAX_NUM_DIR_PORTS + dir_queue->id;
+
+ DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
+ }
+
+ dlb_flush_csr(hw);
+
+ domain->started = true;
+
+ resp->status = 0;
+
+ return 0;
+}