diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c b/drivers/event/octeontx2/otx2_evdev_adptr.c
index d69f269df6..a91f784b1e 100644
--- a/drivers/event/octeontx2/otx2_evdev_adptr.c
+++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
@@ -1,9 +1,11 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(C) 2019 Marvell International Ltd.
+ * Copyright(C) 2019-2021 Marvell.
  */
 
 #include "otx2_evdev.h"
 
+#define NIX_RQ_AURA_THRESH(x) (((x)*95) / 100)
+
 int
 otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
@@ -306,6 +308,87 @@ sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 	}
 }
 
+static inline void
+sso_cfg_nix_mp_bpid(struct otx2_sso_evdev *dev,
+		    struct otx2_eth_dev *otx2_eth_dev, struct otx2_eth_rxq *rxq,
+		    uint8_t ena)
+{
+	struct otx2_fc_info *fc = &otx2_eth_dev->fc_info;
+	struct npa_aq_enq_req *req;
+	struct npa_aq_enq_rsp *rsp;
+	struct otx2_npa_lf *lf;
+	struct otx2_mbox *mbox;
+	uint32_t limit;
+	int rc;
+
+	if (otx2_dev_is_sdp(otx2_eth_dev))
+		return;
+
+	lf = otx2_npa_lf_obj_get();
+	if (!lf)
+		return;
+	mbox = lf->mbox;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+	if (req == NULL)
+		return;
+
+	req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_READ;
+
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return;
+
+	limit = rsp->aura.limit;
+	/* BP is already enabled. */
+	if (rsp->aura.bp_ena) {
+		/* If BP ids don't match, disable BP. */
+		if ((rsp->aura.nix0_bpid != fc->bpid[0]) && !dev->force_rx_bp) {
+			req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+			if (req == NULL)
+				return;
+
+			req->aura_id =
+				npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
+			req->ctype = NPA_AQ_CTYPE_AURA;
+			req->op = NPA_AQ_INSTOP_WRITE;
+
+			req->aura.bp_ena = 0;
+			req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+			otx2_mbox_process(mbox);
+		}
+		return;
+	}
+
+	/* BP was previously enabled but is now disabled, skip. */
+	if (rsp->aura.bp)
+		return;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+	if (req == NULL)
+		return;
+
+	req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_WRITE;
+
+	if (ena) {
+		req->aura.nix0_bpid = fc->bpid[0];
+		req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
+		req->aura.bp = NIX_RQ_AURA_THRESH(
+			limit > 128 ? 256 : limit); /* 95% of size */
+		req->aura_mask.bp = ~(req->aura_mask.bp);
+	}
+
+	req->aura.bp_ena = !!ena;
+	req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
+
+	otx2_mbox_process(mbox);
+}
+
 int
 otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
 			      const struct rte_eth_dev *eth_dev,
@@ -326,8 +409,9 @@ otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
 		for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
 			rxq = eth_dev->data->rx_queues[i];
 			sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
-			rc = sso_xae_reconfigure((struct rte_eventdev *)
-						 (uintptr_t)event_dev);
+			sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
+			rc = sso_xae_reconfigure(
+				(struct rte_eventdev *)(uintptr_t)event_dev);
 			rc |= sso_rxq_enable(otx2_eth_dev, i,
 					     queue_conf->ev.sched_type,
 					     queue_conf->ev.queue_id, port);
@@ -337,6 +421,7 @@ otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
 	} else {
 		rxq = eth_dev->data->rx_queues[rx_queue_id];
 		sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
+		sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
 		rc = sso_xae_reconfigure((struct rte_eventdev *)
 					 (uintptr_t)event_dev);
 		rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
@@ -363,19 +448,25 @@ otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 			      const struct rte_eth_dev *eth_dev,
 			      int32_t rx_queue_id)
 {
-	struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+	struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
 	int i, rc;
 
-	RTE_SET_USED(event_dev);
 	rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
 	if (rc)
 		return -EINVAL;
 
 	if (rx_queue_id < 0) {
-		for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++)
-			rc = sso_rxq_disable(dev, i);
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+			rc = sso_rxq_disable(otx2_eth_dev, i);
+			sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
+					    eth_dev->data->rx_queues[i], false);
+		}
 	} else {
-		rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
+		rc = sso_rxq_disable(otx2_eth_dev, (uint16_t)rx_queue_id);
+		sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
+				    eth_dev->data->rx_queues[rx_queue_id],
+				    false);
 	}
 
 	if (rc < 0)
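
Note on the new threshold logic (explanatory context, not part of the patch):
the NIX_RQ_AURA_THRESH() macro added above arms aura backpressure at 95% of a
buffer count, and the write path first replaces any aura limit above 128 with
a base of 256 before applying the macro. A minimal standalone sketch of that
arithmetic follows; main() and the sample limits are illustrative only, not
taken from the driver:

	#include <stdio.h>

	/* Same macro the patch adds to otx2_evdev_adptr.c. */
	#define NIX_RQ_AURA_THRESH(x) (((x)*95) / 100)

	int
	main(void)
	{
		/* Hypothetical aura limits, chosen only for illustration. */
		unsigned int limits[] = {64, 128, 200, 1024};
		unsigned int i;

		for (i = 0; i < sizeof(limits) / sizeof(limits[0]); i++) {
			unsigned int limit = limits[i];
			/* Same expression the patch writes into req->aura.bp. */
			unsigned int bp =
				NIX_RQ_AURA_THRESH(limit > 128 ? 256 : limit);

			printf("limit=%u -> aura.bp=%u\n", limit, bp);
		}
		return 0;
	}

This prints 60, 121, 243 and 243: a limit of 128 or less takes 95% of the
limit itself, while anything larger uses 95% of the fixed base of 256.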