a106bdffde4bea4a0ca6da4b3a85584662e3a34a
[dpdk.git] / drivers / net / bnxt / tf_core / tf_em_host.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Broadcom
 * All rights reserved.
 */

#include <string.h>
#include <math.h>
#include <sys/param.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>

#include "tf_core.h"
#include "tf_util.h"
#include "tf_common.h"
#include "tf_em.h"
#include "tf_em_common.h"
#include "tf_msg.h"
#include "tfp.h"
#include "lookup3.h"
#include "tf_ext_flow_handle.h"

#include "bnxt.h"

#define PTU_PTE_VALID          0x1UL
#define PTU_PTE_LAST           0x2UL
#define PTU_PTE_NEXT_TO_LAST   0x4UL

/* Number of pointers per page_size */
#define MAX_PAGE_PTRS(page_size)  ((page_size) / sizeof(void *))

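/*
 * Worked example (assuming a 4 KiB TF_EM_PAGE_SIZE and 8-byte pointers):
 * MAX_PAGE_PTRS(4096) = 4096 / 8 = 512, so one directory page holds 512
 * PTEs and a two-level table can reference up to 512 * 512 data pages.
 * The actual page size is build-time configurable via TF_EM_PAGE_SIZE.
 */
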
/**
 * EM DBs.
 */
extern void *eem_db[TF_DIR_MAX];

extern struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];

/**
 * Function to free a page table
 *
 * [in] tp
 *   Pointer to the page table to free
 */
static void
tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
{
        uint32_t i;

        for (i = 0; i < tp->pg_count; i++) {
                if (!tp->pg_va_tbl[i]) {
                        TFP_DRV_LOG(WARNING,
                                    "No mapping for page: %u table: %016" PRIx64 "\n",
                                    i,
                                    (uint64_t)(uintptr_t)tp);
                        continue;
                }

                tfp_free(tp->pg_va_tbl[i]);
                tp->pg_va_tbl[i] = NULL;
        }

        tp->pg_count = 0;
        tfp_free(tp->pg_va_tbl);
        tp->pg_va_tbl = NULL;
        tfp_free(tp->pg_pa_tbl);
        tp->pg_pa_tbl = NULL;
}

/**
 * Function to free an EM table
 *
 * [in] tbl
 *   Pointer to the EM table to free
 */
static void
tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
{
        struct hcapi_cfa_em_page_tbl *tp;
        int i;

        for (i = 0; i < tbl->num_lvl; i++) {
                tp = &tbl->pg_tbl[i];
                TFP_DRV_LOG(INFO,
                            "EEM: Freeing page table: size %u lvl %d cnt %u\n",
                            TF_EM_PAGE_SIZE,
                            i,
                            tp->pg_count);

                tf_em_free_pg_tbl(tp);
        }

        tbl->l0_addr = NULL;
        tbl->l0_dma_addr = 0;
        tbl->num_lvl = 0;
        tbl->num_data_pages = 0;
}

/**
 * Allocation of page tables
 *
 * [in] tp
 *   Pointer to the page table to populate
 *
 * [in] pg_count
 *   Page count to allocate
 *
 * [in] pg_size
 *   Size of each page
 *
 * Returns:
 *   0       - Success
 *   -ENOMEM - Out of memory
 */
static int
tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
                   uint32_t pg_count,
                   uint32_t pg_size)
{
        uint32_t i;
        struct tfp_calloc_parms parms;

        parms.nitems = pg_count;
        parms.size = sizeof(void *);
        parms.alignment = 0;

        if (tfp_calloc(&parms) != 0)
                return -ENOMEM;

        tp->pg_va_tbl = parms.mem_va;

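        /*
         * Reuse parms as-is for the PA table: nitems/size still describe
         * pg_count pointer-sized slots, which matches the uint64_t DMA
         * address entries of pg_pa_tbl on 64-bit hosts.
         */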
        if (tfp_calloc(&parms) != 0) {
                tfp_free(tp->pg_va_tbl);
                return -ENOMEM;
        }

        tp->pg_pa_tbl = parms.mem_va;

        tp->pg_count = 0;
        tp->pg_size = pg_size;

        for (i = 0; i < pg_count; i++) {
                parms.nitems = 1;
                parms.size = pg_size;
                parms.alignment = TF_EM_PAGE_ALIGNMENT;

                if (tfp_calloc(&parms) != 0)
                        goto cleanup;

                tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
                tp->pg_va_tbl[i] = parms.mem_va;

                memset(tp->pg_va_tbl[i], 0, pg_size);
                tp->pg_count++;
        }

        return 0;

cleanup:
        tf_em_free_pg_tbl(tp);
        return -ENOMEM;
}

/**
 * Allocates EM page tables
 *
 * [in] tbl
 *   Table to allocate pages for
 *
 * Returns:
 *   0       - Success
 *   -ENOMEM - Out of memory
 */
static int
tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
{
        struct hcapi_cfa_em_page_tbl *tp;
        int rc = 0;
        int i;
        uint32_t j;

        for (i = 0; i < tbl->num_lvl; i++) {
                tp = &tbl->pg_tbl[i];

                rc = tf_em_alloc_pg_tbl(tp,
                                        tbl->page_cnt[i],
                                        TF_EM_PAGE_SIZE);
                if (rc) {
                        TFP_DRV_LOG(WARNING,
                                "Failed to allocate page table: lvl: %d, rc:%s\n",
                                i,
                                strerror(-rc));
                        goto cleanup;
                }

                for (j = 0; j < tp->pg_count; j++) {
                        TFP_DRV_LOG(INFO,
                                "EEM: Allocated page table: size %u lvl %d cnt"
                                " %u VA:%p PA:%p\n",
                                TF_EM_PAGE_SIZE,
                                i,
                                tp->pg_count,
                                (void *)(uintptr_t)tp->pg_va_tbl[j],
                                (void *)(uintptr_t)tp->pg_pa_tbl[j]);
                }
        }
        return rc;

cleanup:
        tf_em_free_page_table(tbl);
        return rc;
}

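/*
 * Note: tbl->num_lvl and tbl->page_cnt[] are expected to have been filled in
 * by tf_em_size_table(), which tf_em_ctx_reg() calls before
 * tf_em_alloc_page_table().
 */
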
/**
 * Links EM page tables
 *
 * [in] tp
 *   Pointer to page table
 *
 * [in] tp_next
 *   Pointer to the next page table
 *
 * [in] set_pte_last
 *   Flag controlling if the page table is last
 */
static void
tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
                      struct hcapi_cfa_em_page_tbl *tp_next,
                      bool set_pte_last)
{
        uint64_t *pg_pa = tp_next->pg_pa_tbl;
        uint64_t *pg_va;
        uint64_t valid;
        uint32_t k = 0;
        uint32_t i;
        uint32_t j;

        for (i = 0; i < tp->pg_count; i++) {
                pg_va = tp->pg_va_tbl[i];

                for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
                        if (k == tp_next->pg_count - 2 && set_pte_last)
                                valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
                        else if (k == tp_next->pg_count - 1 && set_pte_last)
                                valid = PTU_PTE_LAST | PTU_PTE_VALID;
                        else
                                valid = PTU_PTE_VALID;

                        pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
                        if (++k >= tp_next->pg_count)
                                return;
                }
        }
}

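/*
 * Example of the resulting PTE layout: if tp_next holds four data pages and
 * set_pte_last is true, the level above is written as
 *   pa[0] | VALID, pa[1] | VALID, pa[2] | NEXT_TO_LAST | VALID,
 *   pa[3] | LAST | VALID
 * with each entry stored little-endian via tfp_cpu_to_le_64().
 */
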
/**
 * Set up an EM page table
 *
 * [in] tbl
 *   Pointer to EM page table
 */
static void
tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
{
        struct hcapi_cfa_em_page_tbl *tp_next;
        struct hcapi_cfa_em_page_tbl *tp;
        bool set_pte_last = 0;
        int i;

        for (i = 0; i < tbl->num_lvl - 1; i++) {
                tp = &tbl->pg_tbl[i];
                tp_next = &tbl->pg_tbl[i + 1];
                if (i == tbl->num_lvl - 2)
                        set_pte_last = 1;
                tf_em_link_page_table(tp, tp_next, set_pte_last);
        }

        tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
        tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
}

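/*
 * After tf_em_setup_page_table() the directory levels form a chain
 * (level 0 -> level 1 -> ... -> data pages) and only the deepest link is
 * terminated with the NEXT_TO_LAST/LAST markers. l0_addr/l0_dma_addr expose
 * the root page that is handed to firmware via tf_msg_em_mem_rgtr().
 */
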
/**
 * Unregisters EM Ctx in Firmware
 *
 * [in] tfp
 *   Pointer to a TruFlow handle
 *
 * [in] tbl_scope_cb
 *   Pointer to a table scope control block
 *
 * [in] dir
 *   Receive or transmit direction
 */
static void
tf_em_ctx_unreg(struct tf *tfp,
                struct tf_tbl_scope_cb *tbl_scope_cb,
                int dir)
{
        struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
        struct hcapi_cfa_em_table *tbl;
        int i;

        for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
                tbl = &ctxp->em_tables[i];

                if (tbl->num_entries != 0 && tbl->entry_size != 0) {
                        tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
                        tf_em_free_page_table(tbl);
                }
        }
}

/**
 * Registers EM Ctx in Firmware
 *
 * [in] tfp
 *   Pointer to a TruFlow handle
 *
 * [in] tbl_scope_cb
 *   Pointer to a table scope control block
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * Returns:
 *   0       - Success
 *   -ENOMEM - Out of Memory
 */
static int
tf_em_ctx_reg(struct tf *tfp,
              struct tf_tbl_scope_cb *tbl_scope_cb,
              int dir)
{
        struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
        struct hcapi_cfa_em_table *tbl;
        int rc = 0;
        int i;

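        /*
         * For every EM table that is actually in use (non-zero entry count
         * and entry size): size the page-table hierarchy, allocate and link
         * the pages, then register the backing memory with firmware, which
         * returns the ctx_id used later in tf_msg_em_cfg().
         */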
        for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
                tbl = &ctxp->em_tables[i];

                if (tbl->num_entries && tbl->entry_size) {
                        rc = tf_em_size_table(tbl, TF_EM_PAGE_SIZE);

                        if (rc)
                                goto cleanup;

                        rc = tf_em_alloc_page_table(tbl);
                        if (rc)
                                goto cleanup;

                        tf_em_setup_page_table(tbl);
                        rc = tf_msg_em_mem_rgtr(tfp,
                                                tbl->num_lvl - 1,
                                                TF_EM_PAGE_SIZE_ENUM,
                                                tbl->l0_dma_addr,
                                                &tbl->ctx_id);
                        if (rc)
                                goto cleanup;
                }
        }
        return rc;

cleanup:
        tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
        return rc;
}

int
tf_em_ext_alloc(struct tf *tfp, struct tf_alloc_tbl_scope_parms *parms)
{
        int rc;
        enum tf_dir dir;
        struct tf_tbl_scope_cb *tbl_scope_cb;
        struct hcapi_cfa_em_table *em_tables;
        struct tf_free_tbl_scope_parms free_parms;
        struct tf_rm_allocate_parms aparms = { 0 };
        struct tf_rm_free_parms fparms = { 0 };

        /* Get Table Scope control block from the session pool */
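        /* Table scope ids are allocated from the RX RM database only; the
         * same id is then used for both directions.
         */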
        aparms.rm_db = eem_db[TF_DIR_RX];
        aparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
        aparms.index = (uint32_t *)&parms->tbl_scope_id;
        rc = tf_rm_allocate(&aparms);
        if (rc) {
                TFP_DRV_LOG(ERR,
                            "Failed to allocate table scope\n");
                return rc;
        }

        tbl_scope_cb = &tbl_scopes[parms->tbl_scope_id];
        tbl_scope_cb->index = parms->tbl_scope_id;
        tbl_scope_cb->tbl_scope_id = parms->tbl_scope_id;

        rc = tfp_get_pf(tfp, &tbl_scope_cb->pf);
        if (rc) {
                TFP_DRV_LOG(ERR,
                            "EEM: PF query error rc:%s\n",
                            strerror(-rc));
                goto cleanup;
        }

        for (dir = 0; dir < TF_DIR_MAX; dir++) {
                rc = tf_msg_em_qcaps(tfp,
                                     dir,
                                     &tbl_scope_cb->em_caps[dir]);
                if (rc) {
                        TFP_DRV_LOG(ERR,
                                    "EEM: Unable to query for EEM capability,"
                                    " rc:%s\n",
                                    strerror(-rc));
                        goto cleanup;
                }
        }

        /*
         * Validate and setup table sizes
         */
        if (tf_em_validate_num_entries(tbl_scope_cb, parms))
                goto cleanup;

        for (dir = 0; dir < TF_DIR_MAX; dir++) {
                /*
                 * Allocate tables and signal configuration to FW
                 */
                rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
                if (rc) {
                        TFP_DRV_LOG(ERR,
                                    "EEM: Unable to register for EEM ctx,"
                                    " rc:%s\n",
                                    strerror(-rc));
                        goto cleanup;
                }

                em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
                rc = tf_msg_em_cfg(tfp,
                                   em_tables[TF_KEY0_TABLE].num_entries,
                                   em_tables[TF_KEY0_TABLE].ctx_id,
                                   em_tables[TF_KEY1_TABLE].ctx_id,
                                   em_tables[TF_RECORD_TABLE].ctx_id,
                                   em_tables[TF_EFC_TABLE].ctx_id,
                                   parms->hw_flow_cache_flush_timer,
                                   dir);
                if (rc) {
                        TFP_DRV_LOG(ERR,
                                    "TBL: Unable to configure EEM in firmware"
                                    " rc:%s\n",
                                    strerror(-rc));
                        goto cleanup_full;
                }

                rc = tf_msg_em_op(tfp,
                                  dir,
                                  HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);

                if (rc) {
                        TFP_DRV_LOG(ERR,
                                    "EEM: Unable to enable EEM in firmware"
                                    " rc:%s\n",
                                    strerror(-rc));
                        goto cleanup_full;
                }

                /* Allocate the pool of offsets of the external memory.
                 * Initially, this is a single fixed size pool for all external
                 * actions related to a single table scope.
                 */
                rc = tf_create_tbl_pool_external(dir,
                                            tbl_scope_cb,
                                            em_tables[TF_RECORD_TABLE].num_entries,
                                            em_tables[TF_RECORD_TABLE].entry_size);
                if (rc) {
                        TFP_DRV_LOG(ERR,
                                    "%s TBL: Unable to allocate idx pools %s\n",
                                    tf_dir_2_str(dir),
                                    strerror(-rc));
                        goto cleanup_full;
                }
        }

        return 0;

cleanup_full:
        free_parms.tbl_scope_id = parms->tbl_scope_id;
        tf_em_ext_free(tfp, &free_parms);
        return -EINVAL;

cleanup:
        /* Free Table control block */
        fparms.rm_db = eem_db[TF_DIR_RX];
        fparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
        fparms.index = parms->tbl_scope_id;
        tf_rm_free(&fparms);
        return -EINVAL;
}
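
/*
 * Usage sketch (illustrative, not part of the driver): this function is
 * normally reached via the tf_alloc_tbl_scope() API, roughly:
 *
 *   struct tf_alloc_tbl_scope_parms aparms = { 0 };
 *   (fill in the per-direction flow count and key/record sizing fields)
 *   rc = tf_alloc_tbl_scope(tfp, &aparms);
 *   on success, aparms.tbl_scope_id identifies the new table scope and is
 *   later passed to tf_free_tbl_scope().
 */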

int
tf_em_ext_free(struct tf *tfp,
               struct tf_free_tbl_scope_parms *parms)
{
        int rc = 0;
        enum tf_dir dir;
        struct tf_tbl_scope_cb *tbl_scope_cb;
        struct tf_rm_free_parms aparms = { 0 };

        tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);

        if (tbl_scope_cb == NULL) {
                TFP_DRV_LOG(ERR, "Table scope error\n");
                return -EINVAL;
        }

        /* Free Table control block */
        aparms.rm_db = eem_db[TF_DIR_RX];
        aparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
        aparms.index = parms->tbl_scope_id;
        rc = tf_rm_free(&aparms);
        if (rc) {
                TFP_DRV_LOG(ERR,
                            "Failed to free table scope\n");
        }

        /* Free the per-direction table scope resources */
        for (dir = 0; dir < TF_DIR_MAX; dir++) {
                /* Free associated external pools */
                tf_destroy_tbl_pool_external(dir,
                                             tbl_scope_cb);
                tf_msg_em_op(tfp,
                             dir,
                             HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);

                /* Unregister context memory and free the page tables */
                tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
        }

        tbl_scopes[parms->tbl_scope_id].tbl_scope_id = TF_TBL_SCOPE_INVALID;
        return rc;
}