1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/*
 * tim_fill_msix() - cache the MSI-X vector offsets of @nb_ring TIM LFs.
 *
 * Issues an MSIX_OFFSET mailbox request to the AF and copies the per-LF
 * TIM vector offsets out of the response into tim->tim_msix_offsets[],
 * for later use when (un)registering ring IRQs.
 *
 * NOTE(review): this listing has elided lines (the embedded line numbers
 * jump); the return type, braces, local declarations of `i`/`rc` and the
 * error check on mbox_process_msg() are presumably among them — confirm
 * against the full file.
 */
9 tim_fill_msix(struct roc_tim *roc_tim, uint16_t nb_ring)
11 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
12 struct tim *tim = roc_tim_to_tim_priv(roc_tim);
13 struct dev *dev = &sso->dev;
14 struct msix_offset_rsp *rsp;
/* Ask the AF for the MSI-X offsets of every LF attached to this PF/VF. */
17 mbox_alloc_msg_msix_offset(dev->mbox);
18 rc = mbox_process_msg(dev->mbox, (void **)&rsp);
/* Record one vector offset per requested TIM ring. */
22 for (i = 0; i < nb_ring; i++)
23 tim->tim_msix_offsets[i] = rsp->timlf_msixoff[i];
/*
 * Interior of the TIM AF error decoder: maps TIM_AF_* mailbox error
 * codes returned by the admin function to human-readable log messages.
 * (The enclosing function header and the per-case break/return lines
 * are elided from this listing.)
 */
32 case TIM_AF_NO_RINGS_LEFT:
33 plt_err("Unable to allocate new TIM ring.");
35 case TIM_AF_INVALID_NPA_PF_FUNC:
36 plt_err("Invalid NPA pf func.");
38 case TIM_AF_INVALID_SSO_PF_FUNC:
39 plt_err("Invalid SSO pf func.");
41 case TIM_AF_RING_STILL_RUNNING:
42 plt_err("Ring busy.");
44 case TIM_AF_LF_INVALID:
45 plt_err("Invalid Ring id.");
47 case TIM_AF_CSIZE_NOT_ALIGNED:
48 plt_err("Chunk size specified needs to be multiple of 16.");
50 case TIM_AF_CSIZE_TOO_SMALL:
51 plt_err("Chunk size too small.");
53 case TIM_AF_CSIZE_TOO_BIG:
54 plt_err("Chunk size too big.");
56 case TIM_AF_INTERVAL_TOO_SMALL:
57 plt_err("Bucket traversal interval too small.");
59 case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
60 plt_err("Invalid Big endian value.");
62 case TIM_AF_INVALID_CLOCK_SOURCE:
63 plt_err("Invalid Clock source specified.");
65 case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
66 plt_err("GPIO clock source not enabled.");
68 case TIM_AF_INVALID_BSIZE:
69 plt_err("Invalid bucket size.");
71 case TIM_AF_INVALID_ENABLE_PERIODIC:
/* NOTE(review): same message as TIM_AF_INVALID_BSIZE above — looks like a
 * copy/paste slip; this case should presumably say something like
 * "Invalid enable periodic value." — confirm and fix upstream. */
72 plt_err("Invalid bucket size.");
74 case TIM_AF_INVALID_ENABLE_DONTFREE:
75 plt_err("Invalid Don't free value.");
77 case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
78 plt_err("Don't free bit not set when periodic is enabled.");
80 case TIM_AF_RING_ALREADY_DISABLED:
81 plt_err("Ring already stopped");
/* Default/fall-through: unrecognized AF error code. */
84 plt_err("Unknown Error.");
/*
 * roc_tim_lf_enable() - start a TIM ring (LF) via the TIM_ENABLE_RING
 * mailbox message.
 *
 * On success the AF response carries the ring's start timestamp and the
 * bucket it begins traversal from; these are copied out through
 * @start_tsc and @cur_bkt (the NULL checks guarding those stores are in
 * lines elided from this listing — confirm against the full file).
 * All mailbox traffic is serialized under sso->mbox_lock.
 */
89 roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc,
92 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
93 struct dev *dev = &sso->dev;
94 struct tim_enable_rsp *rsp;
95 struct tim_ring_req *req;
/* Serialize the request/response pair against concurrent mailbox users. */
98 plt_spinlock_lock(&sso->mbox_lock);
99 req = mbox_alloc_msg_tim_enable_ring(dev->mbox);
104 rc = mbox_process_msg(dev->mbox, (void **)&rsp);
/* Report where the ring started: current bucket and start timestamp. */
112 *cur_bkt = rsp->currentbucket;
114 *start_tsc = rsp->timestarted;
117 plt_spinlock_unlock(&sso->mbox_lock);
/*
 * roc_tim_lf_disable() - stop a TIM ring (LF) via the TIM_DISABLE_RING
 * mailbox message.
 *
 * Mirrors roc_tim_lf_enable(): takes the shared mailbox lock, sends the
 * request for @ring_id and waits for the AF acknowledgement. The
 * allocation-failure check on `req` and the error decode on `rc` are in
 * lines elided from this listing.
 */
122 roc_tim_lf_disable(struct roc_tim *roc_tim, uint8_t ring_id)
124 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
125 struct dev *dev = &sso->dev;
126 struct tim_ring_req *req;
129 plt_spinlock_lock(&sso->mbox_lock);
130 req = mbox_alloc_msg_tim_disable_ring(dev->mbox);
/* Fire the request; mbox_process() waits for the AF to complete it. */
135 rc = mbox_process(dev->mbox);
142 plt_spinlock_unlock(&sso->mbox_lock);
/*
 * roc_tim_lf_base_get() - compute the BAR2 register base of a TIM LF.
 *
 * RVU BAR2 layout: the hardware block address occupies bits [23:20] and
 * the LF slot occupies bits [15:12] of the offset, so the TIM ring's
 * register window is bar2 + (RVU_BLOCK_ADDR_TIM << 20 | ring_id << 12).
 * Pure address arithmetic — no mailbox traffic, no locking needed.
 */
147 roc_tim_lf_base_get(struct roc_tim *roc_tim, uint8_t ring_id)
149 struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
151 return dev->bar2 + (RVU_BLOCK_ADDR_TIM << 20 | ring_id << 12);
/*
 * roc_tim_lf_config() - program a TIM ring's operating parameters via
 * the TIM_CONFIG_RING mailbox message.
 *
 * Fills one request with the caller-supplied geometry and clocking:
 * bucket count, chunk size, clock source, periodic mode, don't-free
 * behaviour, bucket traversal interval (cycles and ns) and clock
 * frequency. Endianness is fixed little-endian and the GPIO edge is
 * fixed to low-to-high transitions. Serialized under sso->mbox_lock.
 * (The `req == NULL` check and the error decode on `rc` are in lines
 * elided from this listing.)
 */
155 roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id,
156 enum roc_tim_clk_src clk_src, uint8_t ena_periodic,
157 uint8_t ena_dfb, uint32_t bucket_sz, uint32_t chunk_sz,
158 uint32_t interval, uint64_t intervalns, uint64_t clockfreq)
160 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
161 struct dev *dev = &sso->dev;
162 struct tim_config_req *req;
165 plt_spinlock_lock(&sso->mbox_lock);
166 req = mbox_alloc_msg_tim_config_ring(dev->mbox);
/* Ring geometry and behaviour, copied verbatim from the caller. */
170 req->bigendian = false;
171 req->bucketsize = bucket_sz;
172 req->chunksize = chunk_sz;
173 req->clocksource = clk_src;
174 req->enableperiodic = ena_periodic;
175 req->enabledontfreebuffer = ena_dfb;
176 req->interval = interval;
177 req->intervalns = intervalns;
178 req->clockfreq = clockfreq;
/* Only relevant when the clock source is GPIO: count low-to-high edges. */
179 req->gpioedge = TIM_GPIO_LTOH_TRANS;
181 rc = mbox_process(dev->mbox);
188 plt_spinlock_unlock(&sso->mbox_lock);
/*
 * roc_tim_lf_interval() - query the minimum supported bucket traversal
 * interval for a given clock source and frequency.
 *
 * Sends TIM_GET_MIN_INTVL to the AF and returns the minimum interval in
 * both nanoseconds (*intervalns) and clock cycles (*interval, declared
 * in an elided parameter line). Serialized under sso->mbox_lock.
 */
193 roc_tim_lf_interval(struct roc_tim *roc_tim, enum roc_tim_clk_src clk_src,
194 uint64_t clockfreq, uint64_t *intervalns,
197 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
198 struct dev *dev = &sso->dev;
199 struct tim_intvl_req *req;
200 struct tim_intvl_rsp *rsp;
203 plt_spinlock_lock(&sso->mbox_lock);
204 req = mbox_alloc_msg_tim_get_min_intvl(dev->mbox);
/* The AF computes the minimum interval from these two inputs. */
208 req->clockfreq = clockfreq;
209 req->clocksource = clk_src;
210 rc = mbox_process_msg(dev->mbox, (void **)&rsp);
/* Copy both representations of the minimum interval back to the caller. */
217 *intervalns = rsp->intvl_ns;
218 *interval = rsp->intvl_cyc;
221 plt_spinlock_unlock(&sso->mbox_lock);
/*
 * roc_tim_lf_alloc() - allocate one TIM LF (ring) and hook up its IRQ.
 *
 * Sends TIM_LF_ALLOC bound to the in-device NPA and SSO PF functions,
 * optionally reports the 10ns tick clock rate through @clk, then
 * registers the ring's interrupt using the MSI-X offset cached by
 * tim_fill_msix(). If IRQ registration fails, the LF is rolled back
 * with a TIM_LF_FREE request so no half-initialized ring is left
 * allocated. Serialized under sso->mbox_lock.
 * (NULL/rc checks and the `*clk` guard are in lines elided from this
 * listing — confirm against the full file.)
 */
226 roc_tim_lf_alloc(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *clk)
228 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
229 struct tim *tim = roc_tim_to_tim_priv(roc_tim);
230 struct tim_ring_req *free_req;
231 struct tim_lf_alloc_req *req;
232 struct tim_lf_alloc_rsp *rsp;
233 struct dev *dev = &sso->dev;
236 plt_spinlock_lock(&sso->mbox_lock);
237 req = mbox_alloc_msg_tim_lf_alloc(dev->mbox);
/* Bind the ring to the NPA (chunk pool) and SSO (event delivery) PFs. */
240 req->npa_pf_func = idev_npa_pffunc_get();
241 req->sso_pf_func = idev_sso_pffunc_get();
244 rc = mbox_process_msg(dev->mbox, (void **)&rsp);
/* Report the TIM 10ns-tick clock rate to the caller. */
252 *clk = rsp->tenns_clk;
/* Wire up the ring IRQ using the MSI-X offset cached at init time. */
254 rc = tim_register_irq_priv(roc_tim, sso->pci_dev->intr_handle, ring_id,
255 tim->tim_msix_offsets[ring_id]);
257 plt_tim_dbg("Failed to register Ring[%d] IRQ", ring_id);
/* Rollback: free the just-allocated LF so the ring is not leaked. */
258 free_req = mbox_alloc_msg_tim_lf_free(dev->mbox);
259 if (free_req == NULL) {
263 free_req->ring = ring_id;
264 rc = mbox_process(dev->mbox);
270 plt_spinlock_unlock(&sso->mbox_lock);
/*
 * roc_tim_lf_free() - release one TIM LF (ring).
 *
 * Reverse of roc_tim_lf_alloc(): first unregisters the ring IRQ (done
 * outside the mailbox lock), then sends TIM_LF_FREE for @ring_id under
 * sso->mbox_lock. The `req == NULL` check and error decode on `rc` are
 * in lines elided from this listing.
 */
275 roc_tim_lf_free(struct roc_tim *roc_tim, uint8_t ring_id)
277 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
278 struct tim *tim = roc_tim_to_tim_priv(roc_tim);
279 struct dev *dev = &sso->dev;
280 struct tim_ring_req *req;
/* Tear down the IRQ first so no interrupt fires for a freed ring. */
283 tim_unregister_irq_priv(roc_tim, sso->pci_dev->intr_handle, ring_id,
284 tim->tim_msix_offsets[ring_id]);
286 plt_spinlock_lock(&sso->mbox_lock);
287 req = mbox_alloc_msg_tim_lf_free(dev->mbox);
292 rc = mbox_process(dev->mbox);
299 plt_spinlock_unlock(&sso->mbox_lock);
304 roc_tim_init(struct roc_tim *roc_tim)
306 struct rsrc_attach_req *attach_req;
307 struct rsrc_detach_req *detach_req;
308 struct free_rsrcs_rsp *free_rsrc;
314 if (roc_tim == NULL || roc_tim->roc_sso == NULL)
315 return TIM_ERR_PARAM;
317 sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
319 PLT_STATIC_ASSERT(sizeof(struct tim) <= TIM_MEM_SZ);
320 nb_lfs = roc_tim->nb_lfs;
321 plt_spinlock_lock(&sso->mbox_lock);
322 mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
323 rc = mbox_process_msg(dev->mbox, (void *)&free_rsrc);
325 plt_err("Unable to get free rsrc count.");
330 if (nb_lfs && (free_rsrc->tim < nb_lfs)) {
331 plt_tim_dbg("Requested LFs : %d Available LFs : %d", nb_lfs,
337 attach_req = mbox_alloc_msg_attach_resources(dev->mbox);
338 if (attach_req == NULL) {
342 attach_req->modify = true;
343 attach_req->timlfs = nb_lfs ? nb_lfs : free_rsrc->tim;
344 nb_lfs = attach_req->timlfs;
346 rc = mbox_process(dev->mbox);
348 plt_err("Unable to attach TIM LFs.");
353 rc = tim_fill_msix(roc_tim, nb_lfs);
355 plt_err("Unable to get TIM MSIX vectors");
357 detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
358 if (detach_req == NULL) {
362 detach_req->partial = true;
363 detach_req->timlfs = true;
364 mbox_process(dev->mbox);
369 plt_spinlock_unlock(&sso->mbox_lock);
374 roc_tim_fini(struct roc_tim *roc_tim)
376 struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
377 struct rsrc_detach_req *detach_req;
378 struct dev *dev = &sso->dev;
380 plt_spinlock_lock(&sso->mbox_lock);
381 detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
382 PLT_ASSERT(detach_req);
383 detach_req->partial = true;
384 detach_req->timlfs = true;
386 mbox_process(dev->mbox);
387 plt_spinlock_unlock(&sso->mbox_lock);