bool spb_drop_ena;
/* End of Input parameters */
struct roc_nix *roc_nix;
- bool inl_dev_ref;
+ uint16_t inl_dev_refs;
};
struct roc_nix_cq {
nix_dump(" vwqe_wait_tmo = %ld", rq->vwqe_wait_tmo);
nix_dump(" vwqe_aura_handle = %ld", rq->vwqe_aura_handle);
nix_dump(" roc_nix = %p", rq->roc_nix);
- nix_dump(" inl_dev_ref = %d", rq->inl_dev_ref);
+ nix_dump(" inl_dev_refs = %d", rq->inl_dev_refs);
}
void
struct nix_inl_dev *inl_dev =
(struct nix_inl_dev *)&roc_inl_dev->reserved;
struct dev *dev = &inl_dev->dev;
+ int i;
nix_dump("nix_inl_dev@%p", inl_dev);
nix_dump(" pf = %d", dev_get_pf(dev->pf_func));
nix_dump(" \tssow_msixoff = %d", inl_dev->ssow_msixoff);
nix_dump(" \tnix_cints = %d", inl_dev->cints);
nix_dump(" \tnix_qints = %d", inl_dev->qints);
- nix_dump(" \trq_refs = %d", inl_dev->rq_refs);
nix_dump(" \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
nix_dump(" \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
nix_dump(" \txaq_buf_size = %u", inl_dev->xaq_buf_size);
nix_dump(" \txaq_mem = 0x%p", inl_dev->xaq.mem);
nix_dump(" \tinl_dev_rq:");
- roc_nix_rq_dump(&inl_dev->rq);
+ for (i = 0; i < inl_dev->nb_rqs; i++)
+ roc_nix_rq_dump(&inl_dev->rqs[i]);
}
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
{
struct idev_cfg *idev = idev_get_cfg();
+ int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
+ uint16_t inl_rq_id;
struct dev *dev;
int rc;
if (!inl_dev)
return 0;
- /* Just take reference if already inited */
- if (inl_dev->rq_refs) {
- inl_dev->rq_refs++;
- rq->inl_dev_ref = true;
+ /* Check if this RQ is already holding reference */
+ if (rq->inl_dev_refs)
return 0;
- }
+ inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
dev = &inl_dev->dev;
- inl_rq = &inl_dev->rq;
+ inl_rq = &inl_dev->rqs[inl_rq_id];
+
+ /* Just take reference if already inited */
+ if (inl_rq->inl_dev_refs) {
+ inl_rq->inl_dev_refs++;
+ rq->inl_dev_refs = 1;
+ return 0;
+ }
memset(inl_rq, 0, sizeof(struct roc_nix_rq));
/* Take RQ pool attributes from the first ethdev RQ */
- inl_rq->qid = 0;
+ inl_rq->qid = inl_rq_id;
inl_rq->aura_handle = rq->aura_handle;
inl_rq->first_skip = rq->first_skip;
inl_rq->later_skip = rq->later_skip;
return rc;
}
- inl_dev->rq_refs++;
- rq->inl_dev_ref = true;
+ inl_rq->inl_dev_refs++;
+ rq->inl_dev_refs = 1;
return 0;
}
roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
{
struct idev_cfg *idev = idev_get_cfg();
+ int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
struct roc_nix_rq *inl_rq;
+ uint16_t inl_rq_id;
struct dev *dev;
int rc;
if (idev == NULL)
return 0;
- if (!rq->inl_dev_ref)
+ if (!rq->inl_dev_refs)
return 0;
inl_dev = idev->nix_inl_dev;
return -EFAULT;
}
- rq->inl_dev_ref = false;
- inl_dev->rq_refs--;
- if (inl_dev->rq_refs)
+ dev = &inl_dev->dev;
+ inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
+ inl_rq = &inl_dev->rqs[inl_rq_id];
+
+ rq->inl_dev_refs = 0;
+ inl_rq->inl_dev_refs--;
+ if (inl_rq->inl_dev_refs)
return 0;
- dev = &inl_dev->dev;
- inl_rq = &inl_dev->rq;
/* There are no more references, disable RQ */
rc = nix_rq_ena_dis(dev, inl_rq, false);
if (rc)
return rc;
}
-uint64_t
-roc_nix_inl_dev_rq_limit_get(void)
-{
- struct idev_cfg *idev = idev_get_cfg();
- struct nix_inl_dev *inl_dev;
- struct roc_nix_rq *inl_rq;
-
- if (!idev || !idev->nix_inl_dev)
- return 0;
-
- inl_dev = idev->nix_inl_dev;
- if (!inl_dev->rq_refs)
- return 0;
-
- inl_rq = &inl_dev->rq;
-
- return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
-}
-
void
roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
{
}
struct roc_nix_rq *
-roc_nix_inl_dev_rq(void)
+roc_nix_inl_dev_rq(struct roc_nix *roc_nix)
{
struct idev_cfg *idev = idev_get_cfg();
+ int port_id = roc_nix->port_id;
struct nix_inl_dev *inl_dev;
+ struct roc_nix_rq *inl_rq;
+ uint16_t inl_rq_id;
if (idev != NULL) {
inl_dev = idev->nix_inl_dev;
- if (inl_dev != NULL && inl_dev->rq_refs)
- return &inl_dev->rq;
+ if (inl_dev != NULL) {
+ inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
+ inl_rq = &inl_dev->rqs[inl_rq_id];
+ if (inl_rq->inl_dev_refs)
+ return inl_rq;
+ }
}
return NULL;
void *sa, *sa_base = NULL;
struct nix *nix = NULL;
uint16_t max_spi = 0;
+ uint32_t rq_refs = 0;
uint8_t pkind = 0;
int i;
}
if (inl_dev) {
- if (inl_dev->rq_refs == 0) {
+ for (i = 0; i < inl_dev->nb_rqs; i++)
+ rq_refs += inl_dev->rqs[i].inl_dev_refs;
+
+ if (rq_refs == 0) {
inl_dev->ts_ena = ts_ena;
max_spi = inl_dev->ipsec_in_max_spi;
sa_base = inl_dev->inb_sa_base;
int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq);
int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
-struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(void);
+struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
int __roc_api roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix,
uint32_t tag_const, uint8_t tt);
-uint64_t __roc_api roc_nix_inl_dev_rq_limit_get(void);
int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
- uint16_t max_frags);
+ uint16_t max_frags);
int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
bool inb_inl_dev);
struct nix_lf_alloc_rsp *rsp;
struct nix_lf_alloc_req *req;
struct nix_hw_info *hw_info;
+ struct roc_nix_rq *rqs;
uint64_t max_sa, i;
size_t inb_sa_sz;
int rc = -ENOSPC;
req = mbox_alloc_msg_nix_lf_alloc(mbox);
if (req == NULL)
return rc;
- req->rq_cnt = 1;
+ /* We will have per-port RQ if it is not with channel masking */
+ req->rq_cnt = inl_dev->nb_rqs;
req->sq_cnt = 1;
req->cq_cnt = 1;
/* XQESZ is W16 */
goto free_mem;
}
+ /* Allocate memory for RQ's */
+ rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
+ if (!rqs) {
+ plt_err("Failed to allocate memory for RQ's");
+ goto free_mem;
+ }
+ inl_dev->rqs = rqs;
+
return 0;
free_mem:
plt_free(inl_dev->inb_sa_base);
if (req == NULL)
return -ENOSPC;
- return mbox_process(mbox);
+ rc = mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ plt_free(inl_dev->rqs);
+ plt_free(inl_dev->inb_sa_base);
+ inl_dev->rqs = NULL;
+ inl_dev->inb_sa_base = NULL;
+ return 0;
}
static int
no_pool:
/* Disable RQ if enabled */
- if (inl_dev->rq_refs) {
- rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, false);
+ for (i = 0; i < inl_dev->nb_rqs; i++) {
+ if (!inl_dev->rqs[i].inl_dev_refs)
+ continue;
+ rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
if (rc) {
- plt_err("Failed to disable inline dev RQ, rc=%d", rc);
+ plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
+ rc);
return rc;
}
}
exit:
/* Renable RQ */
- if (inl_dev->rq_refs) {
- rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, true);
+ for (i = 0; i < inl_dev->nb_rqs; i++) {
+ if (!inl_dev->rqs[i].inl_dev_refs)
+ continue;
+
+ rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
if (rc)
- plt_err("Failed to enable inline dev RQ, rc=%d", rc);
+ plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
+ rc);
}
return rc;
inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
+ inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
if (roc_inl_dev->spb_drop_pc)
inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
static void
nix_inl_nix_q_irq(void *param)
{
- struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
+ struct nix_inl_qint *qints_mem = (struct nix_inl_qint *)param;
+ struct nix_inl_dev *inl_dev = qints_mem->inl_dev;
uintptr_t nix_base = inl_dev->nix_base;
struct dev *dev = &inl_dev->dev;
+ uint16_t qint = qints_mem->qint;
volatile void *ctx;
uint64_t reg, intr;
+ uint64_t wdata;
uint8_t irq;
- int rc;
+ int rc, q;
- intr = plt_read64(nix_base + NIX_LF_QINTX_INT(0));
+ intr = plt_read64(nix_base + NIX_LF_QINTX_INT(qint));
if (intr == 0)
return;
-	plt_err("Queue_intr=0x%" PRIx64 " qintx 0 pf=%d, vf=%d", intr, dev->pf,
-		dev->vf);
+	/* Handler is now per-QINT vector; report the actual qint line */
+	plt_err("Queue_intr=0x%" PRIx64 " qintx %d pf=%d, vf=%d", intr, qint,
+		dev->pf, dev->vf);
- /* Get and clear RQ0 interrupt */
- reg = roc_atomic64_add_nosync(0,
- (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
- if (reg & BIT_ULL(42) /* OP_ERR */) {
- plt_err("Failed to get rq_int");
- return;
+ /* Handle RQ interrupts */
+ for (q = 0; q < inl_dev->nb_rqs; q++) {
+ /* Get and clear RQ interrupts */
+ wdata = (uint64_t)q << 44;
+ reg = roc_atomic64_add_nosync(wdata,
+ (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
+ if (reg & BIT_ULL(42) /* OP_ERR */) {
+ plt_err("Failed to get rq_int");
+ return;
+ }
+ irq = reg & 0xff;
+ plt_write64(wdata | irq, nix_base + NIX_LF_RQ_OP_INT);
+
+		/* Report the RQ index being serviced, not a hardcoded RQ 0 */
+		if (irq & BIT_ULL(NIX_RQINT_DROP))
+			plt_err("RQ=%d NIX_RQINT_DROP", q);
+
+		if (irq & BIT_ULL(NIX_RQINT_RED))
+			plt_err("RQ=%d NIX_RQINT_RED", q);
}
- irq = reg & 0xff;
- plt_write64(0 | irq, nix_base + NIX_LF_RQ_OP_INT);
-
- if (irq & BIT_ULL(NIX_RQINT_DROP))
- plt_err("RQ=0 NIX_RQINT_DROP");
-
- if (irq & BIT_ULL(NIX_RQINT_RED))
- plt_err("RQ=0 NIX_RQINT_RED");
/* Clear interrupt */
- plt_write64(intr, nix_base + NIX_LF_QINTX_INT(0));
+ plt_write64(intr, nix_base + NIX_LF_QINTX_INT(qint));
/* Dump registers to std out */
nix_inl_nix_reg_dump(inl_dev);
- /* Dump RQ 0 */
- rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
- if (rc) {
- plt_err("Failed to get rq context");
- return;
+ /* Dump RQs */
+ for (q = 0; q < inl_dev->nb_rqs; q++) {
+ rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
+ if (rc) {
+ plt_err("Failed to get rq %d context, rc=%d", q, rc);
+ continue;
+ }
+ nix_lf_rq_dump(ctx);
}
- nix_lf_rq_dump(ctx);
}
static void
struct dev *dev = &inl_dev->dev;
volatile void *ctx;
uint64_t intr;
- int rc;
+ int rc, q;
intr = plt_read64(nix_base + NIX_LF_RAS);
if (intr == 0)
/* Dump registers to std out */
nix_inl_nix_reg_dump(inl_dev);
- /* Dump RQ 0 */
- rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
- if (rc) {
- plt_err("Failed to get rq context");
- return;
+ /* Dump RQs */
+ for (q = 0; q < inl_dev->nb_rqs; q++) {
+ rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
+ if (rc) {
+ plt_err("Failed to get rq %d context, rc=%d", q, rc);
+ continue;
+ }
+ nix_lf_rq_dump(ctx);
}
- nix_lf_rq_dump(ctx);
}
static void
struct dev *dev = &inl_dev->dev;
volatile void *ctx;
uint64_t intr;
- int rc;
+ int rc, q;
intr = plt_read64(nix_base + NIX_LF_ERR_INT);
if (intr == 0)
/* Dump registers to std out */
nix_inl_nix_reg_dump(inl_dev);
- /* Dump RQ 0 */
- rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
- if (rc) {
- plt_err("Failed to get rq context");
- return;
+ /* Dump RQs */
+ for (q = 0; q < inl_dev->nb_rqs; q++) {
+ rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
+ if (rc) {
+ plt_err("Failed to get rq %d context, rc=%d", q, rc);
+ continue;
+ }
+ nix_lf_rq_dump(ctx);
}
- nix_lf_rq_dump(ctx);
}
int
{
struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
uintptr_t nix_base = inl_dev->nix_base;
+ struct nix_inl_qint *qints_mem;
+ int rc, q, ret = 0;
uint16_t msixoff;
- int rc;
+ int qints;
msixoff = inl_dev->nix_msixoff;
if (msixoff == MSIX_VECTOR_INVALID) {
/* Enable RAS interrupts */
plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1S);
- /* Setup queue irq for RQ 0 */
+ /* Setup queue irq for RQ's */
+ qints = PLT_MIN(inl_dev->nb_rqs, inl_dev->qints);
+ qints_mem = plt_zmalloc(sizeof(struct nix_inl_qint) * qints, 0);
+ if (!qints_mem) {
+ plt_err("Failed to allocate memory for %u qints", qints);
+ return -ENOMEM;
+ }
+
+ inl_dev->configured_qints = qints;
+ inl_dev->qints_mem = qints_mem;
- /* Clear QINT CNT, interrupt */
- plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
- plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));
+	for (q = 0; q < qints; q++) {
+		/* Fill the per-vector handler context BEFORE registering or
+		 * enabling the vector, so nix_inl_nix_q_irq() can never run
+		 * against a zeroed inl_dev pointer / qint index.
+		 */
+		qints_mem[q].inl_dev = inl_dev;
+		qints_mem[q].qint = q;
+
+		/* Clear QINT CNT, interrupt */
+		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
+		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
-	/* Register queue irq vector */
-	rc |= dev_irq_register(handle, nix_inl_nix_q_irq, inl_dev,
-			       msixoff + NIX_LF_INT_VEC_QINT_START);
+		/* Register queue irq vector */
+		ret = dev_irq_register(handle, nix_inl_nix_q_irq, &qints_mem[q],
+				       msixoff + NIX_LF_INT_VEC_QINT_START + q);
+		if (ret)
+			break;
-	plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
-	plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));
-	/* Enable QINT interrupt */
-	plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(0));
+		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
+		plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
+		/* Enable QINT interrupt */
+		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(q));
+	}
+
+	rc |= ret;
return rc;
}
nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
{
struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
+ struct nix_inl_qint *qints_mem = inl_dev->qints_mem;
uintptr_t nix_base = inl_dev->nix_base;
uint16_t msixoff;
+ int q;
msixoff = inl_dev->nix_msixoff;
/* Disable err interrupts */
dev_irq_unregister(handle, nix_inl_nix_ras_irq, inl_dev,
msixoff + NIX_LF_INT_VEC_POISON);
- /* Clear QINT CNT */
- plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
- plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));
+ for (q = 0; q < inl_dev->configured_qints; q++) {
+ /* Clear QINT CNT */
+ plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
+ plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
- /* Disable QINT interrupt */
- plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));
+ /* Disable QINT interrupt */
+ plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
+
+ /* Unregister queue irq vector */
+ dev_irq_unregister(handle, nix_inl_nix_q_irq, &qints_mem[q],
+ msixoff + NIX_LF_INT_VEC_QINT_START + q);
+ }
- /* Unregister queue irq vector */
- dev_irq_unregister(handle, nix_inl_nix_q_irq, inl_dev,
- msixoff + NIX_LF_INT_VEC_QINT_START);
+ plt_free(inl_dev->qints_mem);
+ inl_dev->qints_mem = NULL;
}
#include <pthread.h>
#include <sys/types.h>
+struct nix_inl_dev;
+
+/* Per-QINT vector context passed as the callback argument to
+ * nix_inl_nix_q_irq(): identifies the owning inline device and which
+ * queue-interrupt line (qint) fired.
+ */
+struct nix_inl_qint {
+	struct nix_inl_dev *inl_dev;
+	uint16_t qint;
+};
+
struct nix_inl_dev {
/* Base device object */
struct dev dev;
uint16_t vwqe_interval;
uint16_t cints;
uint16_t qints;
- struct roc_nix_rq rq;
- uint16_t rq_refs;
+ uint16_t configured_qints;
+ struct roc_nix_rq *rqs;
+ struct nix_inl_qint *qints_mem;
+ uint16_t nb_rqs;
bool is_nix1;
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;
uint8_t has_msns_act = 0;
int sel_act, req_act = 0;
uint16_t pf_func, vf_id;
+ struct roc_nix *roc_nix;
int errcode = 0;
int mark = 0;
int rq = 0;
*/
req_act |= ROC_NPC_ACTION_TYPE_SEC;
rq = 0;
+ roc_nix = roc_npc->roc_nix;
/* Special processing when with inline device */
- if (roc_nix_inb_is_with_inl_dev(roc_npc->roc_nix) &&
+ if (roc_nix_inb_is_with_inl_dev(roc_nix) &&
roc_nix_inl_dev_is_probed()) {
- rq = 0;
+ struct roc_nix_rq *inl_rq;
+
+ inl_rq = roc_nix_inl_dev_rq(roc_nix);
+ if (!inl_rq) {
+ errcode = NPC_ERR_INTERNAL;
+ goto err_exit;
+ }
+ rq = inl_rq->qid;
pf_func = nix_inl_dev_pffunc_get();
}
rc = npc_parse_msns_action(roc_npc, actions, flow,
roc_nix_inl_dev_rq;
roc_nix_inl_dev_rq_get;
roc_nix_inl_dev_rq_put;
- roc_nix_inl_dev_rq_limit_get;
roc_nix_inl_dev_unlock;
roc_nix_inl_dev_xaq_realloc;
roc_nix_inl_inb_is_enabled;
eth_dev->data->rx_queues[qid] = NULL;
}
- /* Clam up cq limit to size of packet pool aura for LBK
- * to avoid meta packet drop as LBK does not currently support
- * backpressure.
- */
- if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
- uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
-
- /* Use current RQ's aura limit if inl rq is not available */
- if (!pkt_pool_limit)
- pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
- nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
- }
-
/* Its a no-op when inline device is not used */
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
/* Initialize base roc nix */
nix->pci_dev = pci_dev;
nix->hw_vlan_ins = true;
+ nix->port_id = eth_dev->data->port_id;
rc = roc_nix_dev_init(nix);
if (rc) {
plt_err("Failed to initialize roc nix rc=%d", rc);