char name[RTE_MEMPOOL_NAMESIZE];
uint32_t cache_size, nb_req;
unsigned int req_size;
+ uint32_t nb_desc_min;
+
+ /*
+ * Update CPT FC threshold. Decrement by hardware burst size to allow
+ * simultaneous enqueue from all available cores.
+ */
+ if (roc_model_is_cn10k())
+ nb_desc_min = rte_lcore_count() * 32;
+ else
+ nb_desc_min = rte_lcore_count() * 2;
+
+ if (qp->lmtline.fc_thresh < nb_desc_min) {
+ plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
+ rte_lcore_count());
+ return -ENOSPC;
+ }
+
+ qp->lmtline.fc_thresh -= nb_desc_min;
snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
cdev->data->dev_id, qp->lf.lf_id);
static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
+	int ret;
+
	rte_mempool_free(qp->ca.req_mp);
	qp->ca.enabled = false;
+
+	/*
+	 * Reinitialize the LMT line after releasing the adapter request
+	 * mempool. NOTE(review): presumably this restores the default
+	 * fc_thresh that qp setup lowered by nb_desc_min — confirm against
+	 * roc_cpt_lmtline_init(). On failure the qp is already partially
+	 * torn down (mempool freed, ca.enabled cleared) but an error is
+	 * still propagated to the caller.
+	 */
+	ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
+	if (ret < 0) {
+		plt_err("Could not reset lmtline for queue pair %d",
+			qp->lf.lf_id);
+		return ret;
+	}
+
	return 0;
}
plt_sso_dbg();
for (i = 0; i < dev->qos_queue_cnt; i++) {
- qos->hwgrp = dev->qos_parse_data[i].queue;
- qos->iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
- qos->taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
- qos->xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
+ qos[i].hwgrp = dev->qos_parse_data[i].queue;
+ qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
+ qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
+ qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
}
rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
dev->xae_cnt);