/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "timvf_evdev.h"

int otx_logtype_timvf;

RTE_INIT(otx_timvf_init_log);
static void
otx_timvf_init_log(void)
{
        otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
        if (otx_logtype_timvf >= 0)
                rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
}

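/*
 * Device info returned by the TIM PF over the octeontx mbox: per-ring
 * active state plus the coprocessor clock frequency, which
 * timvf_ring_start() uses to convert the tick period (tck_nsec) into
 * clock cycles.
 */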
struct __rte_packed timvf_mbox_dev_info {
        uint64_t ring_active[4];
        uint64_t clk_freq;
};

/* Response messages */
enum {
        MBOX_RET_SUCCESS,
        MBOX_RET_INVALID,
        MBOX_RET_INTERNAL_ERR,
};

static int
timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
{
        struct octeontx_mbox_hdr hdr = {0};
        uint16_t len = sizeof(struct timvf_mbox_dev_info);

        hdr.coproc = TIM_COPROC;
        hdr.msg = TIM_GET_DEV_INFO;
        hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */

        memset(info, 0, len);
        return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

static void
timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer_adapter_info *adptr_info)
{
        struct timvf_ring *timr = adptr->data->adapter_priv;
        adptr_info->max_tmo_ns = timr->max_tout;
        adptr_info->min_resolution_ns = timr->tck_nsec;
        rte_memcpy(&adptr_info->conf, &adptr->data->conf,
                        sizeof(struct rte_event_timer_adapter_conf));
}

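/*
 * Program a ring's control registers through the PF. The mbox call
 * returns the PF's verdict in hdr.res_code; anything other than
 * MBOX_RET_SUCCESS is reported as -EACCES.
 */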
static int
timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
{
        struct octeontx_mbox_hdr hdr = {0};
        uint16_t len = sizeof(struct timvf_ctrl_reg);
        int ret;

        hdr.coproc = TIM_COPROC;
        hdr.msg = TIM_SET_RING_INFO;
        hdr.vfid = ring_id;

        ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
        if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
                return -EACCES;
        return 0;
}

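/*
 * Fetch the cycle count at which the PF started this ring. The value
 * lands in timr->ring_start_cyc and is used by timvf_stats_get() to
 * derive the number of ticks elapsed since start.
 */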
static int
timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
{
        struct octeontx_mbox_hdr hdr = {0};

        hdr.coproc = TIM_COPROC;
        hdr.msg = TIM_RING_START_CYC_GET;
        hdr.vfid = ring_id;
        *now = 0;
        return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
}

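/*
 * Round the bucket count to the nearest power of two (up to hbkts, down
 * to lbkts) so the target bucket can be found with a mask (bkt_and)
 * instead of a modulo (bkt_mod), then stretch the tick period so that
 * max_tout is still covered. A candidate is discarded if it would push
 * the tick below 1000 ns or exceed TIM_MAX_BUCKETS; if both survive,
 * the one closer to the requested bucket count wins.
 *
 * Worked example (illustrative values, not from the source): for
 * max_tout = 10 ms and tck_nsec = 100 us, nb_bkts = 100; hbkts = 128
 * and lbkts = 64, and since 128 - 100 < 100 - 64, nb_bkts becomes 128
 * with tck_nsec recomputed as 10 ms / 127 rounded up to a multiple of
 * 10 ns, i.e. 78740 ns.
 */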
static int
optimize_bucket_parameters(struct timvf_ring *timr)
{
        uint32_t hbkts;
        uint32_t lbkts;
        uint64_t tck_nsec;

        hbkts = rte_align32pow2(timr->nb_bkts);
        tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);

        if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
                hbkts = 0;

        lbkts = rte_align32prevpow2(timr->nb_bkts);
        tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);

        if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
                lbkts = 0;

        if (!hbkts && !lbkts)
                return 0;

        if (!hbkts) {
                timr->nb_bkts = lbkts;
                goto end;
        } else if (!lbkts) {
                timr->nb_bkts = hbkts;
                goto end;
        }

        timr->nb_bkts = (hbkts - timr->nb_bkts) <
                (timr->nb_bkts - lbkts) ? hbkts : lbkts;
end:
        timr->get_target_bkt = bkt_and;
        timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
                                (timr->nb_bkts - 1)), 10);
        return 1;
}

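/*
 * Bring the ring up: query the PF for the coprocessor clock frequency,
 * convert the tick period into clock cycles for the configured clock
 * source, build the three control registers (interval, enable bits and
 * bucket count, chunk size) and hand them to the PF, then latch the
 * ring's start cycle for later tick accounting. Chunks come from the
 * FPA aura when the octeontx_fpavf mempool ops are in use, otherwise
 * the don't-free-buffer (ENA_DFB) mode is enabled.
 */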
static int
timvf_ring_start(const struct rte_event_timer_adapter *adptr)
{
        int ret;
        uint8_t use_fpa = 0;
        uint64_t interval;
        uintptr_t pool;
        struct timvf_ctrl_reg rctrl;
        struct timvf_mbox_dev_info dinfo;
        struct timvf_ring *timr = adptr->data->adapter_priv;

        ret = timvf_mbox_dev_info_get(&dinfo);
        if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
                return -EINVAL;

        /* Calculate the interval cycles according to clock source. */
        switch (timr->clk_src) {
        case TIM_CLK_SRC_SCLK:
                interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
                break;
        case TIM_CLK_SRC_GPIO:
                /* GPIO doesn't work on tck_nsec. */
                interval = 0;
                break;
        case TIM_CLK_SRC_GTI:
                interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
                break;
        case TIM_CLK_SRC_PTP:
                interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
                break;
        default:
                timvf_log_err("Unsupported clock source configured %d",
                                timr->clk_src);
                return -EINVAL;
        }

        if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
                use_fpa = 1;

        /* CTRL0 register. */
        rctrl.rctrl0 = interval;

        /* CTRL1 register. */
        rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
                1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
                1ull << 47 /* ENA */ |
                1ull << 44 /* ENA_LDWB */ |
                (timr->nb_bkts - 1);

        rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

        if (use_fpa) {
                pool = (uintptr_t)((struct rte_mempool *)
                                timr->chunk_pool)->pool_id;
                ret = octeontx_fpa_bufpool_gpool(pool);
                if (ret < 0) {
                        timvf_log_dbg("Unable to get gaura id");
                        ret = -ENOMEM;
                        goto error;
                }
                timvf_write64((uint64_t)ret,
                                (uint8_t *)timr->vbar0 + TIM_VRING_AURA);
        } else {
                rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
        }

        timvf_write64((uintptr_t)timr->bkt,
                        (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
        timvf_set_chunk_refill(timr, use_fpa);
        if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
                ret = -EACCES;
                goto error;
        }

        if (timvf_get_start_cyc(&timr->ring_start_cyc,
                                timr->tim_ring_id) < 0) {
                ret = -EACCES;
                goto error;
        }
        timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
        timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
        timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64""
                        " maxtmo %"PRIu64"\n",
                        timr->nb_bkts, timr->tck_nsec, interval,
                        timr->max_tout);

        return 0;
error:
        rte_free(timr->bkt);
        rte_mempool_free(timr->chunk_pool);
        return ret;
}

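/*
 * Stop is a read-modify-write of the live control registers: only the
 * ENA bit (bit 47 of CTRL1) is cleared, everything else is written
 * back unchanged.
 */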
static int
timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
{
        struct timvf_ring *timr = adptr->data->adapter_priv;
        struct timvf_ctrl_reg rctrl = {0};
        rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
        rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
        rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
        rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);

        if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
                return -EACCES;
        return 0;
}

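/*
 * Adapter init: validate the requested tick/timeout, size the ring as
 * nb_bkts = max_tout / tck_nsec (optionally rounded to a power of two
 * when RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES is set), allocate the
 * bucket array and the chunk mempool, and reset the ring's NRSPERR
 * error-interrupt registers before first use.
 */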
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
        char pool_name[25];
        int ret;
        uint64_t nb_timers;
        struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
        struct timvf_ring *timr;
        struct timvf_info tinfo;
        const char *mempool_ops;
        unsigned int mp_flags = 0;

        if (timvf_info(&tinfo) < 0)
                return -ENODEV;

        if (adptr->data->id >= tinfo.total_timvfs)
                return -ENODEV;

        timr = rte_zmalloc("octeontx_timvf_priv",
                        sizeof(struct timvf_ring), 0);
        if (timr == NULL)
                return -ENOMEM;

        adptr->data->adapter_priv = timr;
        /* Check config parameters. */
        if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
                        (!rcfg->timer_tick_ns ||
                         rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
                timvf_log_err("Timer tick interval too low");
                goto cfg_err;
        }

        timr->clk_src = (int) rcfg->clk_src;
        timr->tim_ring_id = adptr->data->id;
        timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
        timr->max_tout = rcfg->max_tmo_ns;
        timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
        timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
        timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
        nb_timers = rcfg->nb_timers;
        timr->get_target_bkt = bkt_mod;

        timr->nb_chunks = nb_timers / nb_chunk_slots;

        /* Try to optimize the bucket parameters. */
        if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
                        && !rte_is_power_of_2(timr->nb_bkts)) {
                if (optimize_bucket_parameters(timr)) {
                        timvf_log_info("Optimized configured values");
                        timvf_log_dbg("nb_bkts  : %"PRIu32"", timr->nb_bkts);
                        timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
                } else
                        timvf_log_info("Failed to optimize configured values");
        }

        if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
                mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
                timvf_log_info("Using single producer mode");
        }

        timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
                        (timr->nb_bkts) * sizeof(struct tim_mem_bucket),
                        0);
        if (timr->bkt == NULL)
                goto mem_err;

        snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
                        timr->tim_ring_id);
        timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
                        timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
                        mp_flags);

        if (!timr->chunk_pool) {
                timvf_log_err("Unable to create chunkpool.");
                goto mem_err;
        }

        mempool_ops = rte_mbuf_best_mempool_ops();
        ret = rte_mempool_set_ops_byname(timr->chunk_pool,
                        mempool_ops, NULL);

        if (ret != 0) {
                timvf_log_err("Unable to set chunkpool ops.");
                goto mem_err;
        }

        ret = rte_mempool_populate_default(timr->chunk_pool);
        if (ret < 0) {
                timvf_log_err("Unable to populate chunkpool.");
                goto mem_err;
        }
        timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
        timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
        timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
        timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
        timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);

        return 0;
mem_err:
        /* Both free routines tolerate NULL, so this covers every path:
         * timr is zeroed at allocation, so bkt/chunk_pool start NULL.
         */
        rte_mempool_free(timr->chunk_pool);
        rte_free(timr->bkt);
        rte_free(timr);
        return -ENOMEM;
cfg_err:
        rte_free(timr);
        return -EINVAL;
}

static int
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
        struct timvf_ring *timr = adptr->data->adapter_priv;
        rte_mempool_free(timr->chunk_pool);
        rte_free(timr->bkt);
        rte_free(adptr->data->adapter_priv);
        return 0;
}

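/*
 * adapter_tick_count is derived from the TSC delta since ring start,
 * divided by the cycles-per-tick (tck_int) via the precomputed
 * reciprocal (fast_div), avoiding a hardware division on every call.
 */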
static int
timvf_stats_get(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer_adapter_stats *stats)
{
        struct timvf_ring *timr = adapter->data->adapter_priv;
        uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;

        stats->evtim_exp_count = timr->tim_arm_cnt;
        stats->ev_enq_count = timr->tim_arm_cnt;
        stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
                                &timr->fast_div);
        return 0;
}

static int
timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
{
        struct timvf_ring *timr = adapter->data->adapter_priv;

        timr->tim_arm_cnt = 0;
        return 0;
}

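/*
 * Base ops table. The arm/cancel burst handlers and the optional stats
 * hooks are filled in at caps-get time below, once the application's
 * flags (single-producer, stats) are known.
 */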
static struct rte_event_timer_adapter_ops timvf_ops = {
        .init           = timvf_ring_create,
        .uninit         = timvf_ring_free,
        .start          = timvf_ring_start,
        .stop           = timvf_ring_stop,
        .get_info       = timvf_ring_info_get,
};

int
timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
                uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
                uint8_t enable_stats)
{
        RTE_SET_USED(dev);

        if (enable_stats) {
                timvf_ops.stats_get   = timvf_stats_get;
                timvf_ops.stats_reset = timvf_stats_reset;
        }

        if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
                timvf_ops.arm_burst = enable_stats ?
                        timvf_timer_arm_burst_sp_stats :
                        timvf_timer_arm_burst_sp;
        else
                timvf_ops.arm_burst = enable_stats ?
                        timvf_timer_arm_burst_mp_stats :
                        timvf_timer_arm_burst_mp;

        timvf_ops.arm_tmo_tick_burst = enable_stats ?
                timvf_timer_arm_tmo_brst_stats :
                timvf_timer_arm_tmo_brst;
        timvf_ops.cancel_burst = timvf_timer_cancel_burst;
        *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
        *ops = &timvf_ops;
        return 0;
}