1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Number of XAQ buffers the SSO hardware may cache per HWGRP; used below to
 * add per-HWGRP headroom when sizing the XAQ pool and to derive xaq_lmt.
 */
8 #define SSO_XAQ_CACHE_CNT (0x7)
10 /* Private functions. */
/* Allocate 'nb_lf' SSO LFs of the given type through the AF mailbox.
 * HWS (work slots) and HWGRP (groups) use distinct mailbox messages; the
 * mailbox reply is handed back to the caller via 'rsp'.
 */
12 sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
18 	case SSO_LF_TYPE_HWS: {
19 		struct ssow_lf_alloc_req *req;
21 		req = mbox_alloc_msg_ssow_lf_alloc(dev->mbox);
26 	case SSO_LF_TYPE_HWGRP: {
27 		struct sso_lf_alloc_req *req;
29 		req = mbox_alloc_msg_sso_lf_alloc(dev->mbox);
38 	rc = mbox_process_msg(dev->mbox, rsp);
/* Free 'nb_lf' previously allocated SSO LFs of the given type. Mirrors
 * sso_lf_alloc(): a type-specific free request is queued, then the mailbox
 * is flushed with mbox_process() (no response payload needed).
 */
46 sso_lf_free(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf)
51 	case SSO_LF_TYPE_HWS: {
52 		struct ssow_lf_free_req *req;
54 		req = mbox_alloc_msg_ssow_lf_free(dev->mbox);
59 	case SSO_LF_TYPE_HWGRP: {
60 		struct sso_lf_free_req *req;
62 		req = mbox_alloc_msg_sso_lf_free(dev->mbox);
71 	rc = mbox_process(dev->mbox);
/* Attach SSO or SSOW resources to this PF/VF via the generic RSRC_ATTACH
 * mailbox message, so LFs can subsequently be allocated against them.
 */
79 sso_rsrc_attach(struct roc_sso *roc_sso, enum sso_lf_type lf_type,
82 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
83 	struct rsrc_attach_req *req;
86 	req = mbox_alloc_msg_attach_resources(dev->mbox);
93 	case SSO_LF_TYPE_HWGRP:
101 	if (mbox_process(dev->mbox) < 0)
/* Detach previously attached SSO/SSOW resources from this PF/VF.
 * Returns SSO_ERR_PARAM for an unknown lf_type.
 */
108 sso_rsrc_detach(struct roc_sso *roc_sso, enum sso_lf_type lf_type)
110 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
111 	struct rsrc_detach_req *req;
114 	req = mbox_alloc_msg_detach_resources(dev->mbox);
118 	case SSO_LF_TYPE_HWS:
121 	case SSO_LF_TYPE_HWGRP:
125 		return SSO_ERR_PARAM;
129 	if (mbox_process(dev->mbox) < 0)
/* Query the AF for free resource counts and record the SSO (HWGRP) and
 * SSOW (HWS) maxima on the roc_sso handle.
 * NOTE(review): the plt_err() message carries a trailing "\n" unlike the
 * other plt_err() calls in this file -- likely redundant; verify plt_err's
 * newline handling before changing.
 */
136 sso_rsrc_get(struct roc_sso *roc_sso)
138 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
139 	struct free_rsrcs_rsp *rsrc_cnt;
142 	mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
143 	rc = mbox_process_msg(dev->mbox, (void **)&rsrc_cnt);
145 		plt_err("Failed to get free resource count\n");
149 	roc_sso->max_hwgrp = rsrc_cnt->sso;
150 	roc_sso->max_hws = rsrc_cnt->ssow;
/* Link (enable=1) or unlink (enable=0) up to 'n' HWGRPs to/from one HWS by
 * writing SSOW_LF_GWS_GRPMSK_CHG. Each 64-bit write packs four 16-bit
 * entries; bit 14 of each entry selects set vs. clear. The per-HWS 'bmp'
 * bitmap is kept in sync so link status can be queried in software.
 */
156 sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
157 		    uint16_t hwgrp[], uint16_t n, uint16_t enable)
173 		for (j = 0; j < k; j++) {
174 			mask[j] = hwgrp[i + j] | enable << 14;
176 			enable ? plt_bitmap_set(bmp, hwgrp[i + j]) :
177 				 plt_bitmap_clear(bmp, hwgrp[i + j]);
179 			plt_sso_dbg("HWS %d Linked to HWGRP %d", hws,
185 		reg = mask[0] | mask[1] << 16 | mask[2] << 32 | mask[3] << 48;
186 		plt_write64(reg, base + SSOW_LF_GWS_GRPMSK_CHG);
/* Fetch MSIX vector offsets from the AF and cache the per-HWS and per-HWGRP
 * offsets in the private sso struct for later IRQ registration.
 */
191 sso_msix_fill(struct roc_sso *roc_sso, uint16_t nb_hws, uint16_t nb_hwgrp)
193 	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
194 	struct msix_offset_rsp *rsp;
195 	struct dev *dev = &sso->dev;
198 	mbox_alloc_msg_msix_offset(dev->mbox);
199 	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
203 	for (i = 0; i < nb_hws; i++)
204 		sso->hws_msix_offset[i] = rsp->ssow_msixoff[i];
205 	for (i = 0; i < nb_hwgrp; i++)
206 		sso->hwgrp_msix_offset[i] = rsp->sso_msixoff[i];
211 /* Public Functions. */
/* Return the BAR2 register base for work slot 'hws': SSOW block address in
 * bits [20+], slot index in bits [12..19] per the RVU address layout.
 */
213 roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws)
215 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
217 	return dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
/* Return the BAR2 register base for group 'hwgrp'; same RVU layout as
 * roc_sso_hws_base_get() but using the SSO block address.
 */
221 roc_sso_hwgrp_base_get(struct roc_sso *roc_sso, uint16_t hwgrp)
223 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
225 	return dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | hwgrp << 12);
/* Convert a GET_WORK timeout expressed in nanoseconds into TSC ticks,
 * subtracting the minimum timeout already programmed in
 * SSOW_LF_GWS_NW_TIM (read in microseconds, n-1 notation per the HRM).
 * A result of 0 is clamped to 1 so callers always get a non-zero wait.
 * Note: 1E3/1E9 are double literals, so the scaling is done in floating
 * point before the implicit conversion back to uint64_t.
 */
229 roc_sso_ns_to_gw(struct roc_sso *roc_sso, uint64_t ns)
231 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
232 	uint64_t current_us, current_ns, new_ns;
235 	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
236 	current_us = plt_read64(base + SSOW_LF_GWS_NW_TIM);
237 	/* From HRM, table 14-19:
238 	 * The SSOW_LF_GWS_NW_TIM[NW_TIM] period is specified in n-1 notation.
242 	/* From HRM, table 14-1:
243 	 * SSOW_LF_GWS_NW_TIM[NW_TIM] specifies the minimum timeout. The SSO
244 	 * hardware times out a GET_WORK request within 2 usec of the minimum
245 	 * timeout specified by SSOW_LF_GWS_NW_TIM[NW_TIM].
248 	current_ns = current_us * 1E3;
249 	new_ns = (ns - PLT_MIN(ns, current_ns));
250 	new_ns = !new_ns ? 1 : new_ns;
251 	return (new_ns * plt_tsc_hz()) / 1E9;
/* Public wrapper: link 'nb_hwgrp' groups to work slot 'hws' (enable=1),
 * updating the software link_map bitmap alongside the hardware mask.
 */
255 roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[],
258 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
262 	sso = roc_sso_to_sso_priv(roc_sso);
263 	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
264 	sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, 1);
/* Public wrapper: unlink 'nb_hwgrp' groups from work slot 'hws' (enable=0);
 * exact mirror of roc_sso_hws_link().
 */
270 roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[],
273 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
277 	sso = roc_sso_to_sso_priv(roc_sso);
278 	base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
279 	sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, 0);
/* Read per-HWS statistics from the AF. If the mailbox has no free message
 * slot, flush pending messages once with mbox_process() and retry the
 * allocation before giving up.
 * NOTE(review): no 'req_rsp->hws = hws' assignment is visible here, unlike
 * the hwgrp twin below which sets req_rsp->grp -- confirm the request field
 * is populated (it may simply not be shown in this view).
 */
285 roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
286 		      struct roc_sso_hws_stats *stats)
288 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
289 	struct sso_hws_stats *req_rsp;
292 	req_rsp = (struct sso_hws_stats *)mbox_alloc_msg_sso_hws_get_stats(
294 	if (req_rsp == NULL) {
295 		rc = mbox_process(dev->mbox);
298 		req_rsp = (struct sso_hws_stats *)
299 			mbox_alloc_msg_sso_hws_get_stats(dev->mbox);
304 	rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
308 	stats->arbitration = req_rsp->arbitration;
/* Read per-HWGRP statistics from the AF and copy each counter into the
 * caller's stats structure. Uses the same flush-and-retry pattern as
 * roc_sso_hws_stats_get() when the mailbox is full.
 */
313 roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
314 			struct roc_sso_hwgrp_stats *stats)
316 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
317 	struct sso_grp_stats *req_rsp;
320 	req_rsp = (struct sso_grp_stats *)mbox_alloc_msg_sso_grp_get_stats(
322 	if (req_rsp == NULL) {
323 		rc = mbox_process(dev->mbox);
326 		req_rsp = (struct sso_grp_stats *)
327 			mbox_alloc_msg_sso_grp_get_stats(dev->mbox);
331 	req_rsp->grp = hwgrp;
332 	rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
336 	stats->aw_status = req_rsp->aw_status;
337 	stats->dq_pc = req_rsp->dq_pc;
338 	stats->ds_pc = req_rsp->ds_pc;
339 	stats->ext_pc = req_rsp->ext_pc;
340 	stats->page_cnt = req_rsp->page_cnt;
341 	stats->ts_pc = req_rsp->ts_pc;
342 	stats->wa_pc = req_rsp->wa_pc;
343 	stats->ws_pc = req_rsp->ws_pc;
/* Return non-zero if 'hwgrp' is currently linked to 'hws', from the
 * software link_map bitmap maintained by sso_hws_link_modify().
 */
348 roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso, uint8_t hws,
353 	sso = roc_sso_to_sso_priv(roc_sso);
354 	return plt_bitmap_get(sso->link_map[hws], hwgrp);
/* Program per-HWGRP QoS thresholds (XAQ limit, IAQ/TAQ thresholds) via
 * mailbox, one request per entry. A percentage of 0 means "use 100%".
 * All queued requests are flushed by the final mbox_process().
 * NOTE(review): taq_thr is computed from SSO_HWGRP_IAQ_MAX_THR_MASK and
 * iaq_prcnt while iaq_thr uses the TAQ mask and taq_prcnt -- the two
 * assignments look swapped; verify against the HRM / upstream driver.
 */
358 roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
359 			 uint8_t nb_qos, uint32_t nb_xaq)
361 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
362 	struct sso_grp_qos_cfg *req;
365 	for (i = 0; i < nb_qos; i++) {
366 		uint8_t xaq_prcnt = qos[i].xaq_prcnt;
367 		uint8_t iaq_prcnt = qos[i].iaq_prcnt;
368 		uint8_t taq_prcnt = qos[i].taq_prcnt;
370 		req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
372 			rc = mbox_process(dev->mbox);
375 			req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
379 		req->grp = qos[i].hwgrp;
380 		req->xaq_limit = (nb_xaq * (xaq_prcnt ? xaq_prcnt : 100)) / 100;
381 		req->taq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
382 				(iaq_prcnt ? iaq_prcnt : 100)) /
384 		req->iaq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
385 				(taq_prcnt ? taq_prcnt : 100)) /
389 	return mbox_process(dev->mbox);
/* (Re)create the XAQ backing pool for 'nb_hwgrp' groups:
 *  - if a pool already exists, release the hardware XAQs and destroy it;
 *  - allocate an aligned flow-control word (xaq->fc) the NPA updates with
 *    the aura count;
 *  - size the pool for nb_xae work entries plus SSO_XAQ_CACHE_CNT headroom
 *    per HWGRP, allocate the buffer memory, and create the NPA pool;
 *  - free every buffer into the aura and set the op range;
 *  - compute xaq_lmt, the enqueue-time limit compared against *fc so the
 *    SSO always has cached XAQs available.
 * On failure the xaq descriptor is zeroed.
 */
393 sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
394 			uint32_t nb_xae, uint32_t xae_waes,
395 			uint32_t xaq_buf_size, uint16_t nb_hwgrp)
397 	struct npa_pool_s pool;
398 	struct npa_aura_s aura;
403 	if (xaq->mem != NULL) {
404 		rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
406 			plt_err("Failed to release XAQ %d", rc);
409 		roc_npa_pool_destroy(xaq->aura_handle);
412 	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
415 	xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
416 	if (xaq->fc == NULL) {
417 		plt_err("Failed to allocate XAQ FC");
422 	xaq->nb_xae = nb_xae;
424 	/* Taken from HRM 14.3.3(4) */
425 	xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
426 	xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
428 	xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
429 	if (xaq->mem == NULL) {
430 		plt_err("Failed to allocate XAQ mem");
435 	memset(&pool, 0, sizeof(struct npa_pool_s));
438 	memset(&aura, 0, sizeof(aura));
440 	aura.fc_addr = (uint64_t)xaq->fc;
441 	aura.fc_hyst_bits = 0; /* Store count on all updates */
442 	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
445 		plt_err("Failed to create XAQ pool");
449 	iova = (uint64_t)xaq->mem;
450 	for (i = 0; i < xaq->nb_xaq; i++) {
451 		roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
452 		iova += xaq_buf_size;
454 	roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
456 	/* When SW does addwork (enqueue) check if there is space in XAQ by
457 	 * comparing fc_addr above against the xaq_lmt calculated below.
458 	 * There should be a minimum headroom of 7 XAQs per HWGRP for SSO
459 	 * to request XAQ to cache them even before enqueue is called.
461 	xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT);
469 	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
/* Public wrapper: initialize the XAQ aura using the XAE-per-WAE count and
 * buffer size discovered at roc_sso_rsrc_init() time.
 */
474 roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
476 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
478 	return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
479 				       roc_sso->xae_waes, roc_sso->xaq_buf_size,
/* Tear down the XAQ pool: release hardware XAQs back from the groups,
 * destroy the NPA pool, and zero the descriptor.
 */
484 sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
489 	if (xaq->mem != NULL) {
491 			rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
493 				plt_err("Failed to release XAQ %d", rc);
497 		roc_npa_pool_destroy(xaq->aura_handle);
501 	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
/* Public wrapper around sso_hwgrp_free_xaq_aura() for this device's XAQ. */
507 roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
509 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
511 	return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
/* Point 'hwgrps' groups at the NPA aura 'npa_aura_id' via the
 * SSO_HW_SETCONFIG mailbox message so hardware can allocate XAQ buffers.
 */
515 sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
517 	struct sso_hw_setconfig *req;
520 	req = mbox_alloc_msg_sso_hw_setconfig(dev->mbox);
523 	req->npa_pf_func = idev_npa_pffunc_get();
524 	req->npa_aura_id = npa_aura_id;
525 	req->hwgrps = hwgrps;
527 	return mbox_process(dev->mbox);
/* Public wrapper around sso_hwgrp_alloc_xaq(). */
531 roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id,
534 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
536 	return sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
/* Ask hardware to return all XAQ buffers held by 'hwgrps' groups to the
 * aura; must precede destroying the backing NPA pool.
 */
540 sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps)
542 	struct sso_hw_xaq_release *req;
544 	req = mbox_alloc_msg_sso_hw_release_xaq_aura(dev->mbox);
547 	req->hwgrps = hwgrps;
549 	return mbox_process(dev->mbox);
/* Public wrapper around sso_hwgrp_release_xaq(). */
553 roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps)
555 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
557 	return sso_hwgrp_release_xaq(dev, hwgrps);
/* Set scheduling weight, core affinity and priority for one HWGRP via
 * mailbox, logging the programmed values on success.
 */
561 roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
562 			   uint8_t weight, uint8_t affinity, uint8_t priority)
564 	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
565 	struct sso_grp_priority *req;
568 	req = mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
572 	req->weight = weight;
573 	req->affinity = affinity;
574 	req->priority = priority;
576 	rc = mbox_process(dev->mbox);
579 	plt_sso_dbg("HWGRP %d weight %d affinity %d priority %d", hwgrp, weight,
/* Bring up 'nb_hws' work slots and 'nb_hwgrp' groups:
 * bounds-check against the maxima from sso_rsrc_get(), attach resources,
 * allocate LFs (the HWGRP alloc response yields XAQ geometry cached on
 * roc_sso), fill MSIX offsets, then register IRQs. Failures unwind in
 * reverse order through the goto labels at the bottom (labels themselves
 * not visible in this view).
 */
586 roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
588 	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
589 	struct sso_lf_alloc_rsp *rsp_hwgrp;
592 	if (roc_sso->max_hwgrp < nb_hwgrp)
594 	if (roc_sso->max_hws < nb_hws)
597 	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
599 		plt_err("Unable to attach SSO HWS LFs");
603 	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp);
605 		plt_err("Unable to attach SSO HWGRP LFs");
606 		goto hwgrp_atch_fail;
609 	rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWS, nb_hws, NULL);
611 		plt_err("Unable to alloc SSO HWS LFs");
615 	rc = sso_lf_alloc(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp,
616 			  (void **)&rsp_hwgrp);
618 		plt_err("Unable to alloc SSO HWGRP Lfs");
619 		goto hwgrp_alloc_fail;
622 	roc_sso->xaq_buf_size = rsp_hwgrp->xaq_buf_size;
623 	roc_sso->xae_waes = rsp_hwgrp->xaq_wq_entries;
624 	roc_sso->iue = rsp_hwgrp->in_unit_entries;
626 	rc = sso_msix_fill(roc_sso, nb_hws, nb_hwgrp);
628 		plt_err("Unable to get MSIX offsets for SSO LFs");
632 	rc = sso_register_irqs_priv(roc_sso, sso->pci_dev->intr_handle, nb_hws,
635 		plt_err("Failed to register SSO LF IRQs");
639 	roc_sso->nb_hwgrp = nb_hwgrp;
640 	roc_sso->nb_hws = nb_hws;
644 	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, nb_hwgrp);
646 	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, nb_hws);
648 	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
650 	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
/* Release everything roc_sso_rsrc_init() set up, in reverse order:
 * IRQs, LFs, then resource detach. No-op when nothing was initialized.
 */
655 roc_sso_rsrc_fini(struct roc_sso *roc_sso)
657 	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
659 	if (!roc_sso->nb_hws && !roc_sso->nb_hwgrp)
662 	sso_unregister_irqs_priv(roc_sso, sso->pci_dev->intr_handle,
663 				 roc_sso->nb_hws, roc_sso->nb_hwgrp);
664 	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWS, roc_sso->nb_hws);
665 	sso_lf_free(&sso->dev, SSO_LF_TYPE_HWGRP, roc_sso->nb_hwgrp);
667 	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
668 	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
670 	roc_sso->nb_hwgrp = 0;
/* Device-level init: validate arguments, initialize the common RVU device,
 * query resource maxima, then build the per-HWS link bitmaps (one bitmap
 * of max_hwgrp bits per work slot, all carved from a single allocation).
 * Finally publish the SSO pf_func to idev and record the LMT base.
 * Error paths free the bitmap memory and undo dev_init() (unwind labels
 * not fully visible in this view).
 */
675 roc_sso_dev_init(struct roc_sso *roc_sso)
677 	struct plt_pci_device *pci_dev;
678 	uint32_t link_map_sz;
683 	if (roc_sso == NULL || roc_sso->pci_dev == NULL)
684 		return SSO_ERR_PARAM;
686 	PLT_STATIC_ASSERT(sizeof(struct sso) <= ROC_SSO_MEM_SZ);
687 	sso = roc_sso_to_sso_priv(roc_sso);
688 	memset(sso, 0, sizeof(*sso));
689 	pci_dev = roc_sso->pci_dev;
691 	rc = dev_init(&sso->dev, pci_dev);
693 		plt_err("Failed to init roc device");
697 	rc = sso_rsrc_get(roc_sso);
699 		plt_err("Failed to get SSO resources");
705 		plt_zmalloc(sizeof(struct plt_bitmap *) * roc_sso->max_hws, 0);
706 	if (sso->link_map == NULL) {
707 		plt_err("Failed to allocate memory for link_map array");
711 	link_map_sz = plt_bitmap_get_memory_footprint(roc_sso->max_hwgrp);
712 	sso->link_map_mem = plt_zmalloc(link_map_sz * roc_sso->max_hws, 0);
713 	if (sso->link_map_mem == NULL) {
714 		plt_err("Failed to get link_map memory");
718 	link_mem = sso->link_map_mem;
719 	for (i = 0; i < roc_sso->max_hws; i++) {
720 		sso->link_map[i] = plt_bitmap_init(roc_sso->max_hwgrp, link_mem,
722 		if (sso->link_map[i] == NULL) {
723 			plt_err("Failed to allocate link map");
726 		link_mem = PLT_PTR_ADD(link_mem, link_map_sz);
728 	idev_sso_pffunc_set(sso->dev.pf_func);
729 	sso->pci_dev = pci_dev;
730 	sso->dev.drv_inited = true;
731 	roc_sso->lmt_base = sso->dev.lmt_base;
735 	plt_free(sso->link_map_mem);
737 	rc |= dev_fini(&sso->dev, pci_dev);
/* Device-level teardown: clear drv_inited and finalize the common RVU
 * device, returning dev_fini()'s status.
 */
743 roc_sso_dev_fini(struct roc_sso *roc_sso)
747 	sso = roc_sso_to_sso_priv(roc_sso);
748 	sso->dev.drv_inited = false;
750 	return dev_fini(&sso->dev, sso->pci_dev);