* Copyright(C) 2019 Marvell International Ltd.
*/
+#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
static struct rte_event_timer_adapter_ops otx2_tim_ops;
+static inline int
+tim_get_msix_offsets(void)
+{
+ struct otx2_tim_evdev *dev = tim_priv_get();
+ struct otx2_mbox *mbox = dev->mbox;
+ struct msix_offset_rsp *msix_rsp;
+ int i, rc;
+
+ /* Get TIM MSIX vector offsets */
+ otx2_mbox_alloc_msg_msix_offset(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < dev->nb_rings; i++)
+ dev->tim_msixoff[i] = msix_rsp->timlf_msixoff[i];
+
+ return rc;
+}
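+
+/*
+ * These offsets are assigned per TIM LF by the RVU AF; they are consumed
+ * when ring interrupts are registered (see tim_register_irq() below).
+ */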
+
static void
tim_optimze_bkt_param(struct otx2_tim_ring *tim_ring)
{
if (cache_sz > RTE_MEMPOOL_CACHE_MAX_SIZE)
cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;
- /* NPA need not have cache as free is not visible to SW */
- tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
- tim_ring->nb_chunks,
- tim_ring->chunk_sz,
- 0, 0, rte_socket_id(),
- mp_flags);
+ if (!tim_ring->disable_npa) {
+ /* Skip the mempool cache for NPA: frees done by HW are not visible to SW */
+ tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
+ tim_ring->nb_chunks, tim_ring->chunk_sz,
+ 0, 0, rte_socket_id(), mp_flags);
- if (tim_ring->chunk_pool == NULL) {
- otx2_err("Unable to create chunkpool.");
- return -ENOMEM;
- }
+ if (tim_ring->chunk_pool == NULL) {
+ otx2_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
- rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
- rte_mbuf_platform_mempool_ops(), NULL);
- if (rc < 0) {
- otx2_err("Unable to set chunkpool ops");
- goto free;
- }
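+ /* rte_mbuf_platform_mempool_ops() is expected to resolve to the otx2
+ * NPA mempool ops here, backing the chunk pool with a HW aura.
+ */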
+ rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
+ rte_mbuf_platform_mempool_ops(),
+ NULL);
+ if (rc < 0) {
+ otx2_err("Unable to set chunkpool ops");
+ goto free;
+ }
- rc = rte_mempool_populate_default(tim_ring->chunk_pool);
- if (rc < 0) {
- otx2_err("Unable to set populate chunkpool.");
- goto free;
+ rc = rte_mempool_populate_default(tim_ring->chunk_pool);
+ if (rc < 0) {
+ otx2_err("Unable to set populate chunkpool.");
+ goto free;
+ }
+ tim_ring->aura = npa_lf_aura_handle_to_aura(
+ tim_ring->chunk_pool->pool_id);
+ tim_ring->ena_dfb = 0;
+ } else {
+ tim_ring->chunk_pool = rte_mempool_create(pool_name,
+ tim_ring->nb_chunks, tim_ring->chunk_sz,
+ cache_sz, 0, NULL, NULL, NULL, NULL,
+ rte_socket_id(),
+ mp_flags);
+ if (tim_ring->chunk_pool == NULL) {
+ otx2_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
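+ /* ena_dfb is presumably "enable don't-free-buffer": without NPA,
+ * TIM must not free chunks to an aura, so SW recycles them.
+ */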
+ tim_ring->ena_dfb = 1;
}
- tim_ring->aura = npa_lf_aura_handle_to_aura(
- tim_ring->chunk_pool->pool_id);
- tim_ring->ena_dfb = 0;
return 0;
tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
tim_ring->max_tout = rcfg->max_tmo_ns;
tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
- tim_ring->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
+ tim_ring->chunk_sz = dev->chunk_sz;
nb_timers = rcfg->nb_timers;
+ tim_ring->disable_npa = dev->disable_npa;
+
tim_ring->nb_chunks = nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
tim_ring->chunk_sz);
tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
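+ /* RVU BAR2 exposes each block's LF CSRs at (block << 20 | slot << 12);
+ * the TIM LF slot for this ring is its ring_id.
+ */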
tim_ring->base = dev->bar2 +
(RVU_BLOCK_ADDR_TIM << 20 | tim_ring->ring_id << 12);
+ rc = tim_register_irq(tim_ring->ring_id);
+ if (rc < 0)
+ goto chnk_mem_err;
+
otx2_write64((uint64_t)tim_ring->bkt,
tim_ring->base + TIM_LF_RING_BASE);
otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
+ /* Timer expiries are delivered to the SSO as RTE_EVENT_TYPE_TIMER
+ * events; grow the SSO XAE count to cover nb_timers, then reconfigure.
+ */
+ sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)&nb_timers,
+ RTE_EVENT_TYPE_TIMER);
+ sso_xae_reconfigure(dev->event_dev);
+
return rc;
chnk_mem_err:
if (dev == NULL)
return -ENODEV;
+ tim_unregister_irq(tim_ring->ring_id);
+
req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
req->ring = tim_ring->ring_id;
return 0;
}
+#define OTX2_TIM_DISABLE_NPA "tim_disable_npa"
+#define OTX2_TIM_CHNK_SLOTS "tim_chnk_slots"
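+
+/*
+ * Example usage (the PCI BDF below is illustrative):
+ *   -w 0002:0e:00.0,tim_disable_npa=1,tim_chnk_slots=1023
+ */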
+
+static void
+tim_parse_devargs(struct rte_devargs *devargs, struct otx2_tim_evdev *dev)
+{
+ struct rte_kvargs *kvlist;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_TIM_DISABLE_NPA,
+ &parse_kvargs_flag, &dev->disable_npa);
+ rte_kvargs_process(kvlist, OTX2_TIM_CHNK_SLOTS,
+ &parse_kvargs_value, &dev->chunk_slots);
+
+ rte_kvargs_free(kvlist);
+}
+
void
otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev)
{
struct rsrc_attach_req *atch_req;
+ struct rsrc_detach_req *dtch_req;
struct free_rsrcs_rsp *rsrc_cnt;
const struct rte_memzone *mz;
struct otx2_tim_evdev *dev;
dev->mbox = cmn_dev->mbox;
dev->bar2 = cmn_dev->bar2;
+ tim_parse_devargs(pci_dev->device.devargs, dev);
+
otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
if (rc < 0) {
goto mz_free;
}
+ rc = tim_get_msix_offsets();
+ if (rc < 0) {
+ otx2_err("Unable to get MSIX offsets for TIM.");
+ goto detach;
+ }
+
+ if (dev->chunk_slots &&
+ dev->chunk_slots <= OTX2_TIM_MAX_CHUNK_SLOTS &&
+ dev->chunk_slots >= OTX2_TIM_MIN_CHUNK_SLOTS) {
+ dev->chunk_sz = (dev->chunk_slots + 1) *
+ OTX2_TIM_CHUNK_ALIGNMENT;
+ } else {
+ dev->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
+ }
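+ /* One slot per chunk is reserved for the next-chunk link, hence the
+ * (slots + 1) * OTX2_TIM_CHUNK_ALIGNMENT sizing; assuming 16 B
+ * alignment, tim_chnk_slots=255 yields the default 4 KB chunk.
+ */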
+
return;
+detach:
+ dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
+ dtch_req->partial = true;
+ dtch_req->timlfs = true;
+
+ otx2_mbox_process(dev->mbox);
mz_free:
rte_memzone_free(mz);
}