#include "roc_api.h"
#include "roc_priv.h"
+#define SSO_XAQ_CACHE_CNT (0x7)
+
/* Private functions. */
-static int
-sso_lf_alloc(struct roc_sso *roc_sso, enum sso_lf_type lf_type, uint16_t nb_lf,
+int
+sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
void **rsp)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
int rc = -ENOSPC;
switch (lf_type) {
}
rc = mbox_process_msg(dev->mbox, rsp);
- if (rc < 0)
- return rc;
+ if (rc)
+ return -EIO;
return 0;
}
-static int
-sso_lf_free(struct roc_sso *roc_sso, enum sso_lf_type lf_type, uint16_t nb_lf)
+int
+sso_lf_free(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
int rc = -ENOSPC;
switch (lf_type) {
}
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc)
+ return -EIO;
return 0;
}
}
req->modify = true;
- if (mbox_process(dev->mbox) < 0)
+ if (mbox_process(dev->mbox))
return -EIO;
return 0;
}
req->partial = true;
- if (mbox_process(dev->mbox) < 0)
+ if (mbox_process(dev->mbox))
return -EIO;
return 0;
mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
rc = mbox_process_msg(dev->mbox, (void **)&rsrc_cnt);
- if (rc < 0) {
+ if (rc) {
plt_err("Failed to get free resource count\n");
- return rc;
+ return -EIO;
}
roc_sso->max_hwgrp = rsrc_cnt->sso;
return 0;
}
-static void
+void
sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
uint16_t hwgrp[], uint16_t n, uint16_t enable)
{
k = k ? k : 4;
for (j = 0; j < k; j++) {
mask[j] = hwgrp[i + j] | enable << 14;
- enable ? plt_bitmap_set(bmp, hwgrp[i + j]) :
- plt_bitmap_clear(bmp, hwgrp[i + j]);
+ if (bmp) {
+ enable ? plt_bitmap_set(bmp, hwgrp[i + j]) :
+ plt_bitmap_clear(bmp, hwgrp[i + j]);
+ }
plt_sso_dbg("HWS %d Linked to HWGRP %d", hws,
hwgrp[i + j]);
}
mbox_alloc_msg_msix_offset(dev->mbox);
rc = mbox_process_msg(dev->mbox, (void **)&rsp);
- if (rc < 0)
- return rc;
+ if (rc)
+ return -EIO;
for (i = 0; i < nb_hws; i++)
sso->hws_msix_offset[i] = rsp->ssow_msixoff[i];
roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
struct roc_sso_hws_stats *stats)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
struct sso_hws_stats *req_rsp;
+ struct dev *dev = &sso->dev;
int rc;
+ plt_spinlock_lock(&sso->mbox_lock);
req_rsp = (struct sso_hws_stats *)mbox_alloc_msg_sso_hws_get_stats(
dev->mbox);
if (req_rsp == NULL) {
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
req_rsp = (struct sso_hws_stats *)
mbox_alloc_msg_sso_hws_get_stats(dev->mbox);
- if (req_rsp == NULL)
- return -ENOSPC;
+ if (req_rsp == NULL) {
+ rc = -ENOSPC;
+ goto fail;
+ }
}
req_rsp->hws = hws;
rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
- if (rc)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
stats->arbitration = req_rsp->arbitration;
- return 0;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
+/* Fetch HWGRP statistics via the sso_grp_get_stats mailbox message.
+ * The per-device mailbox lock is held across the whole request/response
+ * exchange. Returns 0 on success, -ENOSPC if no mailbox slot could be
+ * allocated, -EIO on a mailbox processing failure.
+ */
int
roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
			struct roc_sso_hwgrp_stats *stats)
{
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
	struct sso_grp_stats *req_rsp;
+	struct dev *dev = &sso->dev;
	int rc;
+	/* Serialize all mailbox traffic for this SSO device. */
+	plt_spinlock_lock(&sso->mbox_lock);
	req_rsp = (struct sso_grp_stats *)mbox_alloc_msg_sso_grp_get_stats(
		dev->mbox);
	if (req_rsp == NULL) {
+		/* Mailbox ring full: flush pending messages, retry the alloc once. */
		rc = mbox_process(dev->mbox);
-		if (rc < 0)
-			return rc;
+		if (rc) {
+			rc = -EIO;
+			goto fail;
+		}
		req_rsp = (struct sso_grp_stats *)
			mbox_alloc_msg_sso_grp_get_stats(dev->mbox);
-		if (req_rsp == NULL)
-			return -ENOSPC;
+		if (req_rsp == NULL) {
+			rc = -ENOSPC;
+			goto fail;
+		}
	}
	req_rsp->grp = hwgrp;
	rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
-	if (rc)
-		return rc;
+	if (rc) {
+		rc = -EIO;
+		goto fail;
+	}
+	/* Copy counters out of the response before dropping the lock. */
	stats->aw_status = req_rsp->aw_status;
	stats->dq_pc = req_rsp->dq_pc;
	stats->ts_pc = req_rsp->ts_pc;
	stats->wa_pc = req_rsp->wa_pc;
	stats->ws_pc = req_rsp->ws_pc;
-	return 0;
+
+/* Success also flows through here with rc == 0. */
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
}
int
roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
uint8_t nb_qos, uint32_t nb_xaq)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+ struct dev *dev = &sso->dev;
struct sso_grp_qos_cfg *req;
int i, rc;
+ plt_spinlock_lock(&sso->mbox_lock);
for (i = 0; i < nb_qos; i++) {
uint8_t xaq_prcnt = qos[i].xaq_prcnt;
uint8_t iaq_prcnt = qos[i].iaq_prcnt;
req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
if (req == NULL) {
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
+
req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
- if (req == NULL)
- return -ENOSPC;
+ if (req == NULL) {
+ rc = -ENOSPC;
+ goto fail;
+ }
}
req->grp = qos[i].hwgrp;
req->xaq_limit = (nb_xaq * (xaq_prcnt ? xaq_prcnt : 100)) / 100;
100;
}
- return mbox_process(dev->mbox);
+ rc = mbox_process(dev->mbox);
+ if (rc)
+ rc = -EIO;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
+/* Build the XAQ NPA aura/pool backing @nb_hwgrp HWGRPs and record it in
+ * @xaq. Any previously initialized XAQ state is released first. The pool
+ * is sized for @nb_xae work entries (@xae_waes entries per XAQ buffer of
+ * @xaq_buf_size bytes) plus a per-HWGRP cache headroom, then pre-filled.
+ * Returns 0 on success, negative errno on failure; @xaq is zeroed on
+ * failure.
+ */
int
-roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id,
-			uint16_t hwgrps)
+sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			uint32_t nb_xae, uint32_t xae_waes,
+			uint32_t xaq_buf_size, uint16_t nb_hwgrp)
+{
+	struct npa_pool_s pool;
+	struct npa_aura_s aura;
+	plt_iova_t iova;
+	uint32_t i;
+	int rc;
+
+	/* Re-init path: tear down the previous pool before sizing a new one. */
+	if (xaq->mem != NULL) {
+		rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+		if (rc < 0) {
+			plt_err("Failed to release XAQ %d", rc);
+			return rc;
+		}
+		roc_npa_pool_destroy(xaq->aura_handle);
+		plt_free(xaq->fc);
+		plt_free(xaq->mem);
+		memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+	}
+
+	/* Flow-control word the NPA aura updates with the free-buffer count
+	 * (fc_ena/fc_addr below).
+	 */
+	xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
+	if (xaq->fc == NULL) {
+		plt_err("Failed to allocate XAQ FC");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	xaq->nb_xae = nb_xae;
+
+	/* Taken from HRM 14.3.3(4) */
+	xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
+	xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
+
+	xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
+	if (xaq->mem == NULL) {
+		plt_err("Failed to allocate XAQ mem");
+		rc = -ENOMEM;
+		goto free_fc;
+	}
+
+	memset(&pool, 0, sizeof(struct npa_pool_s));
+	pool.nat_align = 1;
+
+	memset(&aura, 0, sizeof(aura));
+	aura.fc_ena = 1;
+	aura.fc_addr = (uint64_t)xaq->fc;
+	aura.fc_hyst_bits = 0; /* Store count on all updates */
+	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
+				 &aura, &pool);
+	if (rc) {
+		plt_err("Failed to create XAQ pool");
+		goto npa_fail;
+	}
+
+	/* Seed the aura: free every XAQ buffer pointer into the pool. */
+	iova = (uint64_t)xaq->mem;
+	for (i = 0; i < xaq->nb_xaq; i++) {
+		roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
+		iova += xaq_buf_size;
+	}
+	roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+
+	/* Verify all freed pointers became visible in the aura count. */
+	if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
+	    xaq->nb_xaq) {
+		plt_err("Failed to free all pointers to the pool");
+		rc = -ENOMEM;
+		goto npa_fill_fail;
+	}
+
+	/* When SW does addwork (enqueue) check if there is space in XAQ by
+	 * comparing fc_addr above against the xaq_lmt calculated below.
+	 * There should be a minimum headroom of 7 XAQs per HWGRP for SSO
+	 * to request XAQ to cache them even before enqueue is called.
+	 */
+	xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT);
+
+	return 0;
+npa_fill_fail:
+	roc_npa_pool_destroy(xaq->aura_handle);
+npa_fail:
+	plt_free(xaq->mem);
+free_fc:
+	plt_free(xaq->fc);
+fail:
+	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+	return rc;
+}
+
+/* Public wrapper: (re)build the XAQ aura for this SSO device from the
+ * parameters cached in @roc_sso, holding the device mailbox lock.
+ */
+int
+roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
+{
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
+
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
+				     roc_sso->xae_waes, roc_sso->xaq_buf_size,
+				     roc_sso->nb_hwgrp);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
+}
+
+/* Tear down the XAQ aura/pool recorded in @xaq. If @nb_hwgrp is non-zero,
+ * the HWGRPs' XAQs are released via mailbox first; if that release fails
+ * the pool is left intact and the error is returned. Otherwise the NPA
+ * pool and backing memory are freed and @xaq is zeroed.
+ */
+int
+sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			uint16_t nb_hwgrp)
+{
+	int rc;
+
+	if (xaq->mem != NULL) {
+		if (nb_hwgrp) {
+			rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+			if (rc < 0) {
+				plt_err("Failed to release XAQ %d", rc);
+				return rc;
+			}
+		}
+		roc_npa_pool_destroy(xaq->aura_handle);
+		plt_free(xaq->fc);
+		plt_free(xaq->mem);
+	}
+	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+
+	return 0;
+}
+
+/* Public wrapper: free this device's XAQ aura/pool with the device
+ * mailbox lock held.
+ */
+int
+roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
+{
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
+
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
+}
+
+int
+sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
struct sso_hw_setconfig *req;
int rc = -ENOSPC;
req->npa_aura_id = npa_aura_id;
req->hwgrps = hwgrps;
- return mbox_process(dev->mbox);
+ if (mbox_process(dev->mbox))
+ return -EIO;
+
+ return 0;
}
+/* Public wrapper: attach XAQ aura @npa_aura_id to @hwgrps HWGRPs via
+ * sso_hwgrp_alloc_xaq(), holding the device mailbox lock.
+ */
int
-roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps)
+roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id,
+			uint16_t hwgrps)
+{
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
+
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
+}
+
+int
+sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
struct sso_hw_xaq_release *req;
req = mbox_alloc_msg_sso_hw_release_xaq_aura(dev->mbox);
return -EINVAL;
req->hwgrps = hwgrps;
- return mbox_process(dev->mbox);
+ if (mbox_process(dev->mbox))
+ return -EIO;
+
+ return 0;
+}
+
+/* Public wrapper: release the HWGRPs' XAQs via sso_hwgrp_release_xaq(),
+ * holding the device mailbox lock.
+ */
+int
+roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps)
+{
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
+
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_release_xaq(dev, hwgrps);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
}
+/* Configure weight/affinity/priority for @hwgrp through the mailbox,
+ * holding the device mailbox lock. Returns 0 on success, -ENOSPC if no
+ * mailbox slot could be allocated, -EIO on a mailbox failure.
+ */
int
roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
			   uint8_t weight, uint8_t affinity, uint8_t priority)
{
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
	struct sso_grp_priority *req;
	int rc = -ENOSPC;
+	plt_spinlock_lock(&sso->mbox_lock);
	req = mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
	if (req == NULL)
-		return rc;
+		goto fail;
	req->grp = hwgrp;
	req->weight = weight;
	req->affinity = affinity;
	req->priority = priority;
	rc = mbox_process(dev->mbox);
-	if (rc < 0)
-		return rc;
+	if (rc) {
+		rc = -EIO;
+		goto fail;
+	}
+	/* Success: drop the lock before the (lock-free) debug trace. */
+	plt_spinlock_unlock(&sso->mbox_lock);
	plt_sso_dbg("HWGRP %d weight %d affinity %d priority %d", hwgrp, weight,
		    affinity, priority);
	return 0;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
}
int
if (roc_sso->max_hws < nb_hws)
return -ENOENT;
+ plt_spinlock_lock(&sso->mbox_lock);
rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
if (rc < 0) {
plt_err("Unable to attach SSO HWS LFs");
- return rc;
+ goto fail;
}
rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp);
goto hwgrp_atch_fail;
}
- rc = sso_lf_alloc(roc_sso, SSO_LF_TYPE_HWS, nb_hws, NULL);
+ rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWS, nb_hws, NULL);
if (rc < 0) {
plt_err("Unable to alloc SSO HWS LFs");
goto hws_alloc_fail;
}
- rc = sso_lf_alloc(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp,
+ rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp,
(void **)&rsp_hwgrp);
if (rc < 0) {
plt_err("Unable to alloc SSO HWGRP Lfs");
goto sso_msix_fail;
}
- rc = sso_register_irqs_priv(roc_sso, &sso->pci_dev->intr_handle, nb_hws,
+ rc = sso_register_irqs_priv(roc_sso, sso->pci_dev->intr_handle, nb_hws,
nb_hwgrp);
if (rc < 0) {
plt_err("Failed to register SSO LF IRQs");
goto sso_msix_fail;
}
+ plt_spinlock_unlock(&sso->mbox_lock);
roc_sso->nb_hwgrp = nb_hwgrp;
roc_sso->nb_hws = nb_hws;
return 0;
sso_msix_fail:
- sso_lf_free(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp);
+ sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp);
hwgrp_alloc_fail:
- sso_lf_free(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
+ sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, nb_hws);
hws_alloc_fail:
sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
hwgrp_atch_fail:
sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
if (!roc_sso->nb_hws && !roc_sso->nb_hwgrp)
return;
- sso_unregister_irqs_priv(roc_sso, &sso->pci_dev->intr_handle,
+ sso_unregister_irqs_priv(roc_sso, sso->pci_dev->intr_handle,
roc_sso->nb_hws, roc_sso->nb_hwgrp);
- sso_lf_free(roc_sso, SSO_LF_TYPE_HWS, roc_sso->nb_hws);
- sso_lf_free(roc_sso, SSO_LF_TYPE_HWGRP, roc_sso->nb_hwgrp);
+ sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, roc_sso->nb_hws);
+ sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, roc_sso->nb_hwgrp);
sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
roc_sso->nb_hwgrp = 0;
roc_sso->nb_hws = 0;
+ plt_spinlock_unlock(&sso->mbox_lock);
}
int
sso = roc_sso_to_sso_priv(roc_sso);
memset(sso, 0, sizeof(*sso));
pci_dev = roc_sso->pci_dev;
+ plt_spinlock_init(&sso->mbox_lock);
rc = dev_init(&sso->dev, pci_dev);
if (rc < 0) {
goto fail;
}
+ plt_spinlock_lock(&sso->mbox_lock);
rc = sso_rsrc_get(roc_sso);
if (rc < 0) {
plt_err("Failed to get SSO resources");
link_mem = PLT_PTR_ADD(link_mem, link_map_sz);
}
idev_sso_pffunc_set(sso->dev.pf_func);
+ idev_sso_set(roc_sso);
sso->pci_dev = pci_dev;
sso->dev.drv_inited = true;
roc_sso->lmt_base = sso->dev.lmt_base;
+ plt_spinlock_unlock(&sso->mbox_lock);
return 0;
link_mem_free:
rsrc_fail:
rc |= dev_fini(&sso->dev, pci_dev);
fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}