return -rte_errno;
}
+/**
+ * Destroy DevX Queue Pair.
+ *
+ * Releases, in order, the QP DevX object, the registered umem and the
+ * umem buffer. Each resource is released only if present, so this is
+ * safe to call on a partially created QP (e.g. from an error path).
+ *
+ * @param[in] qp
+ *   DevX QP to destroy.
+ */
+void
+mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
+{
+	if (qp->qp)
+		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
+	if (qp->umem_obj)
+		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
+	if (qp->umem_buf)
+		mlx5_free((void *)(uintptr_t)qp->umem_buf);
+}
+
+/**
+ * Create Queue Pair using DevX API.
+ *
+ * Get a pointer to partially initialized attributes structure, and updates the
+ * following fields:
+ *   wq_umem_id
+ *   wq_umem_offset
+ *   dbr_umem_valid
+ *   dbr_umem_id
+ *   dbr_address
+ *   log_page_size
+ * All other fields are updated by caller.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in,out] qp_obj
+ *   Pointer to QP to create.
+ * @param[in] log_wqbb_n
+ *   Log of number of WQBBs in queue.
+ * @param[in] attr
+ *   Pointer to QP attributes structure.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint16_t log_wqbb_n,
+		    struct mlx5_devx_qp_attr *attr, int socket)
+{
+	struct mlx5_devx_obj *qp = NULL;
+	struct mlx5dv_devx_umem *umem_obj = NULL;
+	void *umem_buf = NULL;
+	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
+	uint32_t umem_size, umem_dbrec;
+	/* NOTE(review): log_wqbb_n >= 16 would truncate this shift to 0 in
+	 * uint16_t — confirm all callers bound the queue depth accordingly.
+	 */
+	uint16_t qp_size = 1 << log_wqbb_n;
+	int ret;
+
+	if (alignment == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	/* Allocate memory buffer for WQEs and doorbell record. */
+	umem_size = MLX5_WQE_SIZE * qp_size;
+	/* Doorbell record goes right after the WQE ring, DBR-aligned. */
+	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+	umem_size += MLX5_DBR_SIZE;
+	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+			       alignment, socket);
+	if (!umem_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for QP.");
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	/* Register allocated buffer in user space with DevX. */
+	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
+				    IBV_ACCESS_LOCAL_WRITE);
+	if (!umem_obj) {
+		DRV_LOG(ERR, "Failed to register umem for QP.");
+		rte_errno = errno;
+		goto error;
+	}
+	/* Fill attributes for QP object creation. */
+	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
+	attr->wq_umem_offset = 0;
+	attr->dbr_umem_valid = 1;
+	/* WQE ring and doorbell record share a single umem. */
+	attr->dbr_umem_id = attr->wq_umem_id;
+	attr->dbr_address = umem_dbrec;
+	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
+	/* Create QP object with DevX. */
+	qp = mlx5_devx_cmd_create_qp(ctx, attr);
+	if (!qp) {
+		DRV_LOG(ERR, "Can't create DevX QP object.");
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	qp_obj->umem_buf = umem_buf;
+	qp_obj->umem_obj = umem_obj;
+	qp_obj->qp = qp;
+	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
+	return 0;
+error:
+	/* Preserve the original failure code across the cleanup calls. */
+	ret = rte_errno;
+	if (umem_obj)
+		claim_zero(mlx5_os_umem_dereg(umem_obj));
+	if (umem_buf)
+		mlx5_free((void *)(uintptr_t)umem_buf);
+	rte_errno = ret;
+	return -rte_errno;
+}
+
/**
* Destroy DevX Receive Queue.
*
return -rte_errno;
}
+
+/**
+ * Change QP state to RTS.
+ *
+ * Walks the QP through the RST->INIT->RTR->RTS state machine using three
+ * DevX modify commands, stopping at the first failure.
+ *
+ * @param[in] qp
+ *   DevX QP to change.
+ * @param[in] remote_qp_id
+ *   The remote QP ID, passed to each state-modify command (consumed by
+ *   the MLX5_CMD_OP_INIT2RTR_QP transition).
+ *
+ * @return
+ *   0 on success, a negative value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
+{
+	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RST2INIT_QP,
+					  remote_qp_id)) {
+		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
+			rte_errno);
+		return -1;
+	}
+	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_INIT2RTR_QP,
+					  remote_qp_id)) {
+		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
+			rte_errno);
+		return -1;
+	}
+	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RTR2RTS_QP,
+					  remote_qp_id)) {
+		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
+			rte_errno);
+		return -1;
+	}
+	return 0;
+}
volatile uint32_t *db_rec; /* The SQ doorbell record. */
};
+/* DevX Queue Pair structure. */
+struct mlx5_devx_qp {
+	struct mlx5_devx_obj *qp; /* The QP DevX object. */
+	void *umem_obj; /* The QP umem object. */
+	union {
+		void *umem_buf; /* Raw WQE ring + doorbell buffer. */
+		struct mlx5_wqe *wqes; /* The QP ring buffer. */
+		struct mlx5_aso_wqe *aso_wqes; /* Same ring viewed as ASO WQEs. */
+	};
+	volatile uint32_t *db_rec; /* The QP doorbell record. */
+};
+
/* DevX Receive Queue structure. */
struct mlx5_devx_rq {
struct mlx5_devx_obj *rq; /* The RQ DevX object. */
uint16_t log_wqbb_n,
struct mlx5_devx_create_sq_attr *attr, int socket);
+__rte_internal
+void mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp);
+
+__rte_internal
+int mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj,
+ uint16_t log_wqbb_n,
+ struct mlx5_devx_qp_attr *attr, int socket);
+
__rte_internal
void mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq);
uint32_t wqe_size, uint16_t log_wqbb_n,
struct mlx5_devx_create_rq_attr *attr, int socket);
+__rte_internal
+int mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id);
+
#endif /* RTE_PMD_MLX5_COMMON_DEVX_H_ */
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pd, attr->pd);
MLX5_SET(qpc, qpc, ts_format, attr->ts_format);
+ MLX5_SET(qpc, qpc, user_index, attr->user_index);
if (attr->uar_index) {
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
uint64_t dbr_address;
uint32_t wq_umem_id;
uint64_t wq_umem_offset;
+ uint32_t user_index:24;
};
struct mlx5_devx_virtio_q_couners_attr {
mlx5_devx_get_out_command_status;
+ mlx5_devx_qp2rts;
+ mlx5_devx_qp_create;
+ mlx5_devx_qp_destroy;
mlx5_devx_rq_create;
mlx5_devx_rq_destroy;
mlx5_devx_sq_create;
{
if (qp == NULL)
return;
- if (qp->qp_obj != NULL)
- claim_zero(mlx5_devx_cmd_destroy(qp->qp_obj));
- if (qp->umem_obj != NULL)
- claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj));
- if (qp->umem_buf != NULL)
- rte_free(qp->umem_buf);
+ mlx5_devx_qp_destroy(&qp->qp_obj);
mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
mlx5_devx_cq_destroy(&qp->cq_obj);
rte_free(qp);
return 0;
}
-static int
-mlx5_crypto_qp2rts(struct mlx5_crypto_qp *qp)
-{
- /*
- * In Order to configure self loopback, when calling these functions the
- * remote QP id that is used is the id of the same QP.
- */
- if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RST2INIT_QP,
- qp->qp_obj->id)) {
- DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
- rte_errno);
- return -1;
- }
- if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_INIT2RTR_QP,
- qp->qp_obj->id)) {
- DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
- rte_errno);
- return -1;
- }
- if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RTR2RTS_QP,
- qp->qp_obj->id)) {
- DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
- rte_errno);
- return -1;
- }
- return 0;
-}
-
static __rte_noinline uint32_t
mlx5_crypto_get_block_size(struct rte_crypto_op *op)
{
memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
}
ds = 2 + klm_n;
- cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
+ cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
MLX5_OPCODE_RDMA_WRITE);
ds = RTE_ALIGN(ds, 4);
if (priv->max_rdmar_ds > ds) {
cseg += ds;
ds = priv->max_rdmar_ds - ds;
- cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
+ cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
MLX5_OPCODE_NOP);
qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
do {
idx = qp->pi & mask;
op = *ops++;
- umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * idx);
+ umr = RTE_PTR_ADD(qp->qp_obj.umem_buf,
+ priv->wqe_set_size * idx);
if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
qp->stats.enqueue_err_count++;
if (remain != nb_ops) {
} while (--remain);
qp->stats.enqueued_count += nb_ops;
rte_io_wmb();
- qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
+ qp->qp_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
rte_wmb();
mlx5_crypto_uar_write(*(volatile uint64_t *)qp->wqe, qp->priv);
rte_wmb();
uint32_t i;
for (i = 0 ; i < qp->entries_n; i++) {
- struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->umem_buf, i *
- priv->wqe_set_size);
+ struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf,
+ i * priv->wqe_set_size);
struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
(cseg + 1);
struct mlx5_wqe_umr_bsf_seg *bsf =
struct mlx5_wqe_rseg *rseg;
/* Init UMR WQE. */
- cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) |
+ cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |
(priv->umr_wqe_size / MLX5_WSEG_SIZE));
cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
MLX5_COMP_MODE_OFFSET);
.klm_num = RTE_ALIGN(priv->max_segs_num, 4),
};
- for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0;
+ for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;
i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
struct mlx5_devx_qp_attr attr = {0};
struct mlx5_crypto_qp *qp;
uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
- uint32_t umem_size = RTE_BIT32(log_nb_desc) *
- priv->wqe_set_size +
- sizeof(*qp->db_rec) * 2;
+ uint32_t ret;
uint32_t alloc_size = sizeof(*qp);
struct mlx5_devx_cq_attr cq_attr = {
.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
- qp->umem_buf = rte_zmalloc_socket(__func__, umem_size, 4096, socket_id);
- if (qp->umem_buf == NULL) {
- DRV_LOG(ERR, "Failed to allocate QP umem.");
- rte_errno = ENOMEM;
- goto error;
- }
- qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
- (void *)(uintptr_t)qp->umem_buf,
- umem_size,
- IBV_ACCESS_LOCAL_WRITE);
- if (qp->umem_obj == NULL) {
- DRV_LOG(ERR, "Failed to register QP umem.");
+ attr.pd = priv->pdn;
+ attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
+ attr.cqn = qp->cq_obj.cq->id;
+ attr.rq_size = 0;
+ attr.sq_size = RTE_BIT32(log_nb_desc);
+ attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
+ ret = mlx5_devx_qp_create(priv->ctx, &qp->qp_obj, log_nb_desc, &attr,
+ socket_id);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create QP.");
goto error;
}
if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
goto error;
}
qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
- attr.pd = priv->pdn;
- attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
- attr.cqn = qp->cq_obj.cq->id;
- attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
- attr.rq_size = 0;
- attr.sq_size = RTE_BIT32(log_nb_desc);
- attr.dbr_umem_valid = 1;
- attr.wq_umem_id = qp->umem_obj->umem_id;
- attr.wq_umem_offset = 0;
- attr.dbr_umem_id = qp->umem_obj->umem_id;
- attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
- attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size;
- qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
- if (qp->qp_obj == NULL) {
- DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno);
- goto error;
- }
- qp->db_rec = RTE_PTR_ADD(qp->umem_buf, (uintptr_t)attr.dbr_address);
- if (mlx5_crypto_qp2rts(qp))
+ /*
+ * In Order to configure self loopback, when calling devx qp2rts the
+ * remote QP id that is used is the id of the same QP.
+ */
+ if (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id))
goto error;
qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
RTE_CACHE_LINE_SIZE);
struct mlx5_crypto_qp {
struct mlx5_crypto_priv *priv;
struct mlx5_devx_cq cq_obj;
- struct mlx5_devx_obj *qp_obj;
+ struct mlx5_devx_qp qp_obj;
struct rte_cryptodev_stats stats;
- struct mlx5dv_devx_umem *umem_obj;
- void *umem_buf;
- volatile uint32_t *db_rec;
struct rte_crypto_op **ops;
struct mlx5_devx_obj **mkey; /* WQE's indirect mekys. */
struct mlx5_mr_ctrl mr_ctrl;
struct mlx5_vdpa_event_qp {
	struct mlx5_vdpa_cq cq;
	struct mlx5_devx_obj *fw_qp;
-	struct mlx5_devx_obj *sw_qp;
-	struct mlx5dv_devx_umem *umem_obj;
-	void *umem_buf;
-	volatile uint32_t *db_rec;
+	/* SW QP bundled with its umem and doorbell (see mlx5_devx_qp). */
+	struct mlx5_devx_qp sw_qp;
};
struct mlx5_vdpa_query_mr {
cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
rte_io_wmb();
/* Ring SW QP doorbell record. */
- eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
+ eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
}
return comp;
}
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
- if (eqp->sw_qp)
- claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
- if (eqp->umem_obj)
- claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
- if (eqp->umem_buf)
- rte_free(eqp->umem_buf);
+ mlx5_devx_qp_destroy(&eqp->sw_qp);
if (eqp->fw_qp)
claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
mlx5_vdpa_cq_destroy(&eqp->cq);
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
- eqp->sw_qp->id)) {
+ eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
- eqp->fw_qp->id)) {
+ if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
+ MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
- eqp->sw_qp->id)) {
+ eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
- eqp->fw_qp->id)) {
+ if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
+ MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
- eqp->sw_qp->id)) {
+ eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
+ if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
rte_errno);
{
struct mlx5_devx_qp_attr attr = {0};
uint16_t log_desc_n = rte_log2_u32(desc_n);
- uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
- sizeof(*eqp->db_rec) * 2;
+ uint32_t ret;
if (mlx5_vdpa_event_qp_global_prepare(priv))
return -1;
DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
goto error;
}
- eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
- if (!eqp->umem_buf) {
- DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
- rte_errno = ENOMEM;
- goto error;
- }
- eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
- (void *)(uintptr_t)eqp->umem_buf,
- umem_size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!eqp->umem_obj) {
- DRV_LOG(ERR, "Failed to register umem for SW QP.");
- goto error;
- }
attr.uar_index = priv->uar->page_id;
attr.cqn = eqp->cq.cq_obj.cq->id;
- attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
- attr.rq_size = 1 << log_desc_n;
+ attr.rq_size = RTE_BIT32(log_desc_n);
attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
attr.sq_size = 0; /* No need SQ. */
- attr.dbr_umem_valid = 1;
- attr.wq_umem_id = eqp->umem_obj->umem_id;
- attr.wq_umem_offset = 0;
- attr.dbr_umem_id = eqp->umem_obj->umem_id;
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
- attr.dbr_address = RTE_BIT64(log_desc_n) * MLX5_WSEG_SIZE;
- eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
- if (!eqp->sw_qp) {
+ ret = mlx5_devx_qp_create(priv->ctx, &(eqp->sw_qp), log_desc_n, &attr,
+ SOCKET_ID_ANY);
+ if (ret) {
DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
goto error;
}
- eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
if (mlx5_vdpa_qps2rts(eqp))
goto error;
/* First ringing. */
- rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
+ rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
+ &eqp->sw_qp.db_rec[0]);
return 0;
error:
mlx5_vdpa_event_qp_destroy(eqp);