return i;
}
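+/* Issue a QID unmap request for the given load-balanced port/queue pair
+ * through the hardware interface layer. Returns 0 on success or a negative
+ * errno, logging the driver status string on failure.
+ */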
+static int16_t
+dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
+ uint32_t qm_port_id,
+ uint16_t qm_qid)
+{
+ struct dlb2_unmap_qid_args cfg;
+ int32_t ret;
+
+ if (handle == NULL)
+ return -EINVAL;
+
+ cfg.port_id = qm_port_id;
+ cfg.qid = qm_qid;
+
+ ret = dlb2_iface_unmap_qid(handle, &cfg);
+ if (ret < 0)
+ DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
+ ret, dlb2_error_strings[cfg.response.status]);
+
+ return ret;
+}
+
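+/* Detach a load-balanced queue from an event port. This is a no-op before
+ * the device is started, and a request for a queue that is not currently
+ * linked to the port is silently ignored, matching eventdev unlink
+ * semantics.
+ */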
+static int
+dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
+ struct dlb2_eventdev_port *ev_port,
+ struct dlb2_eventdev_queue *ev_queue)
+{
+ int ret, i;
+
+ /* Don't unlink until start time. */
+ if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
+ return 0;
+
+ for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+ if (ev_port->link[i].valid &&
+ ev_port->link[i].queue_id == ev_queue->id)
+ break; /* found */
+ }
+
+ /* This is expected with the eventdev API, which blindly attempts
+ * to unmap all queues.
+ */
+ if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
+ ev_queue->qm_queue.id,
+ ev_port->qm_port.id);
+ return 0;
+ }
+
+ ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
+ ev_port->qm_port.id,
+ ev_queue->qm_queue.id);
+ if (!ret)
+ ev_port->link[i].mapped = false;
+
+ return ret;
+}
+
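+/* Eventdev port_unlink entry point. Unlink requests on directed ports are
+ * ignored but reported as successful; for load-balanced ports each queue is
+ * detached in turn. On failure, rte_errno is set and the index of the
+ * offending queue is returned.
+ */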
+static int
+dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dlb2_eventdev_port *ev_port = event_port;
+ struct dlb2_eventdev *dlb2;
+ int i;
+
+ RTE_SET_USED(dev);
+
+ if (!ev_port->setup_done) {
+ DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
+ ev_port->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (queues == NULL || nb_unlinks == 0) {
+ DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
+ return 0; /* Ignore and return success */
+ }
+
+ /* FIXME: How to handle unlink on a directed port? */
+ if (ev_port->qm_port.is_directed) {
+ DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
+ ev_port->id);
+ rte_errno = 0;
+ return nb_unlinks; /* as if success */
+ }
+
+ dlb2 = ev_port->dlb2;
+
+ for (i = 0; i < nb_unlinks; i++) {
+ struct dlb2_eventdev_queue *ev_queue;
+ int ret, j;
+
+ if (queues[i] >= dlb2->num_queues) {
+ DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
+ rte_errno = -EINVAL;
+ return i; /* return index of offending queue */
+ }
+
+ ev_queue = &dlb2->ev_queues[queues[i]];
+
+ /* Does a link exist? */
+ for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+ if (ev_port->link[j].queue_id == queues[i] &&
+ ev_port->link[j].valid)
+ break;
+
+ if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
+ continue;
+
+ ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
+ if (ret) {
+ DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
+ ret, ev_port->id, queues[i]);
+ rte_errno = -ENOENT;
+ return i; /* return index of offending queue */
+ }
+
+ ev_port->link[j].valid = false;
+ ev_port->num_links--;
+ ev_queue->num_links--;
+ }
+
+ return nb_unlinks;
+}
+
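+/* Eventdev port_unlinks_in_progress entry point: query the hardware for the
+ * number of unmap operations still pending on this port's CQ.
+ */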
+static int
+dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
+ void *event_port)
+{
+ struct dlb2_eventdev_port *ev_port = event_port;
+ struct dlb2_eventdev *dlb2;
+ struct dlb2_hw_dev *handle;
+ struct dlb2_pending_port_unmaps_args cfg;
+ int ret;
+
+ RTE_SET_USED(dev);
+
+ if (!ev_port->setup_done) {
+ DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
+ ev_port->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ cfg.port_id = ev_port->qm_port.id;
+ dlb2 = ev_port->dlb2;
+ handle = &dlb2->qm_instance;
+ ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
+
+ if (ret < 0) {
+ DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
+ ret, dlb2_error_strings[cfg.response.status]);
+ return ret;
+ }
+
+ return cfg.response.id;
+}
+
static void
dlb2_entry_points_init(struct rte_eventdev *dev)
{
.port_def_conf = dlb2_eventdev_port_default_conf_get,
.port_setup = dlb2_eventdev_port_setup,
.port_link = dlb2_eventdev_port_link,
+ .port_unlink = dlb2_eventdev_port_unlink,
+ .port_unlinks_in_progress =
+ dlb2_eventdev_port_unlinks_in_progress,
.dump = dlb2_eventdev_dump,
.xstats_get = dlb2_eventdev_xstats_get,
.xstats_get_names = dlb2_eventdev_xstats_get_names,
return 0;
}
+
+static void dlb2_log_unmap_qid(struct dlb2_hw *hw,
+ u32 domain_id,
+ struct dlb2_unmap_qid_args *args,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n");
+ if (vdev_req)
+ DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+ DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
+ domain_id);
+ DLB2_HW_DBG(hw, "\tPort ID: %d\n",
+ args->port_id);
+ DLB2_HW_DBG(hw, "\tQueue ID: %d\n",
+ args->qid);
+ if (args->qid < DLB2_MAX_NUM_LDB_QUEUES)
+ DLB2_HW_DBG(hw, "\tQueue's num mappings: %d\n",
+ hw->rsrcs.ldb_queues[args->qid].num_mappings);
+}
+
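+/* Validate an unmap request: the domain must be configured, the port must be
+ * configured and belong to that domain, the queue must be configured, and
+ * the queue must be mapped (or have a map in progress or pending) on the
+ * port.
+ */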
+static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw,
+ u32 domain_id,
+ struct dlb2_unmap_qid_args *args,
+ struct dlb2_cmd_response *resp,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ enum dlb2_qid_map_state state;
+ struct dlb2_hw_domain *domain;
+ struct dlb2_ldb_queue *queue;
+ struct dlb2_ldb_port *port;
+ int slot;
+ int id;
+
+ domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+ if (domain == NULL) {
+ resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+ return -EINVAL;
+ }
+
+ if (!domain->configured) {
+ resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
+ return -EINVAL;
+ }
+
+ id = args->port_id;
+
+ port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
+
+ if (port == NULL || !port->configured) {
+ resp->status = DLB2_ST_INVALID_PORT_ID;
+ return -EINVAL;
+ }
+
+ if (port->domain_id.phys_id != domain->id.phys_id) {
+ resp->status = DLB2_ST_INVALID_PORT_ID;
+ return -EINVAL;
+ }
+
+ queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
+
+ if (queue == NULL || !queue->configured) {
+ DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
+ __func__, args->qid);
+ resp->status = DLB2_ST_INVALID_QID;
+ return -EINVAL;
+ }
+
+ /*
+ * Verify that the port has the queue mapped. From the application's
+ * perspective, a queue is mapped if it is actually mapped, the map is
+ * in progress, or the map is blocked pending an unmap.
+ */
+ state = DLB2_QUEUE_MAPPED;
+ if (dlb2_port_find_slot_queue(port, state, queue, &slot))
+ return 0;
+
+ state = DLB2_QUEUE_MAP_IN_PROG;
+ if (dlb2_port_find_slot_queue(port, state, queue, &slot))
+ return 0;
+
+ if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot))
+ return 0;
+
+ resp->status = DLB2_ST_INVALID_QID;
+ return -EINVAL;
+}
+
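+/* Unmap a load-balanced queue from a port's CQ. Depending on the slot's
+ * current state, this either aborts an in-progress map, cancels a map that
+ * is pending behind an unmap, or begins the (possibly asynchronous) unmap of
+ * an established mapping.
+ */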
+int dlb2_hw_unmap_qid(struct dlb2_hw *hw,
+ u32 domain_id,
+ struct dlb2_unmap_qid_args *args,
+ struct dlb2_cmd_response *resp,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ struct dlb2_hw_domain *domain;
+ struct dlb2_ldb_queue *queue;
+ enum dlb2_qid_map_state st;
+ struct dlb2_ldb_port *port;
+ bool unmap_complete;
+ int i, ret, id;
+
+ dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id);
+
+ /*
+ * Verify that the request arguments are valid before attempting to
+ * satisfy the request. This simplifies the error unwinding code.
+ */
+ ret = dlb2_verify_unmap_qid_args(hw,
+ domain_id,
+ args,
+ resp,
+ vdev_req,
+ vdev_id);
+ if (ret)
+ return ret;
+
+ domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+ if (domain == NULL) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: domain not found\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ id = args->port_id;
+
+ port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
+ if (port == NULL) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port not found\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
+ if (queue == NULL) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: queue not found\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ /*
+ * If the queue hasn't been mapped yet, we need to update the slot's
+ * state and re-enable the queue's inflights.
+ */
+ st = DLB2_QUEUE_MAP_IN_PROG;
+ if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
+ if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port slot tracking failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ /*
+ * Since the in-progress map was aborted, re-enable the QID's
+ * inflights.
+ */
+ if (queue->num_pending_additions == 0)
+ dlb2_ldb_queue_set_inflight_limit(hw, queue);
+
+ st = DLB2_QUEUE_UNMAPPED;
+ ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
+ if (ret)
+ return ret;
+
+ goto unmap_qid_done;
+ }
+
+ /*
+ * If the queue mapping is on hold pending an unmap, we simply need to
+ * update the slot's state.
+ */
+ if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
+ if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port slot tracking failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ st = DLB2_QUEUE_UNMAP_IN_PROG;
+ ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
+ if (ret)
+ return ret;
+
+ goto unmap_qid_done;
+ }
+
+ st = DLB2_QUEUE_MAPPED;
+ if (!dlb2_port_find_slot_queue(port, st, queue, &i)) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: no available CQ slots\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port slot tracking failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ /*
+ * QID->CQ mapping removal is an asynchronous procedure. It requires
+ * stopping the DLB2 from scheduling this CQ, draining all inflights
+ * from the CQ, then unmapping the queue from the CQ. This function
+ * simply marks the port as needing the queue unmapped, and (if
+ * necessary) starts the unmapping worker thread.
+ */
+ dlb2_ldb_port_cq_disable(hw, port);
+
+ st = DLB2_QUEUE_UNMAP_IN_PROG;
+ ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
+ if (ret)
+ return ret;
+
+ /*
+ * Attempt to finish the unmapping now, in case the port has no
+ * outstanding inflights. If that's not the case, this will fail and
+ * the unmapping will be completed at a later time.
+ */
+ unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port);
+
+ /*
+ * If the unmapping couldn't complete immediately, launch the worker
+ * thread (if it isn't already launched) to finish it later.
+ */
+ if (!unmap_complete && !os_worker_active(hw))
+ os_schedule_work(hw);
+
+unmap_qid_done:
+ resp->status = 0;
+
+ return 0;
+}
+
+static void
+dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw,
+ struct dlb2_pending_port_unmaps_args *args,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n");
+ if (vdev_req)
+ DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id);
+ DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id);
+}
+
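+/* Report the number of QID unmap operations still in progress for the given
+ * port; the count is returned in resp->id.
+ */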
+int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
+ u32 domain_id,
+ struct dlb2_pending_port_unmaps_args *args,
+ struct dlb2_cmd_response *resp,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ struct dlb2_hw_domain *domain;
+ struct dlb2_ldb_port *port;
+
+ dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id);
+
+ domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+ if (domain == NULL) {
+ resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+ return -EINVAL;
+ }
+
+ port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain);
+ if (port == NULL || !port->configured) {
+ resp->status = DLB2_ST_INVALID_PORT_ID;
+ return -EINVAL;
+ }
+
+ resp->id = port->num_pending_removals;
+
+ return 0;
+}