e594f024852be9f5ee6b581901d5f18333bfdd81
[dpdk.git] / drivers / net / bnxt / tf_core / tf_tbl.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Broadcom
3  * All rights reserved.
4  */
5
6 /* Truflow Table APIs and supporting code */
7
8 #include <stdio.h>
9 #include <string.h>
10 #include <stdbool.h>
11 #include <math.h>
12 #include <sys/param.h>
13 #include <rte_common.h>
14 #include <rte_errno.h>
15 #include "hsi_struct_def_dpdk.h"
16
17 #include "tf_core.h"
18 #include "tf_util.h"
19 #include "tf_em.h"
20 #include "tf_msg.h"
21 #include "tfp.h"
22 #include "hwrm_tf.h"
23 #include "bnxt.h"
24 #include "tf_resources.h"
25 #include "tf_rm.h"
26 #include "stack.h"
27 #include "tf_common.h"
28
29 #define PTU_PTE_VALID          0x1UL
30 #define PTU_PTE_LAST           0x2UL
31 #define PTU_PTE_NEXT_TO_LAST   0x4UL
32
33 /* Number of pointers per page_size */
34 #define MAX_PAGE_PTRS(page_size)  ((page_size) / sizeof(void *))
35
36 #define TF_EM_PG_SZ_4K        (1 << 12)
37 #define TF_EM_PG_SZ_8K        (1 << 13)
38 #define TF_EM_PG_SZ_64K       (1 << 16)
39 #define TF_EM_PG_SZ_256K      (1 << 18)
40 #define TF_EM_PG_SZ_1M        (1 << 20)
41 #define TF_EM_PG_SZ_2M        (1 << 21)
42 #define TF_EM_PG_SZ_4M        (1 << 22)
43 #define TF_EM_PG_SZ_1G        (1 << 30)
44
45 #define TF_EM_CTX_ID_INVALID   0xFFFF
46
47 #define TF_EM_MIN_ENTRIES     (1 << 15) /* 32K */
48 #define TF_EM_MAX_ENTRIES     (1 << 27) /* 128M */
49
50 /**
51  * Function to free a page table
52  *
53  * [in] tp
54  *   Pointer to the page table to free
55  */
56 static void
57 tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
58 {
59         uint32_t i;
60
61         for (i = 0; i < tp->pg_count; i++) {
62                 if (!tp->pg_va_tbl[i]) {
63                         TFP_DRV_LOG(WARNING,
64                                     "No mapping for page: %d table: %016" PRIu64 "\n",
65                                     i,
66                                     (uint64_t)(uintptr_t)tp);
67                         continue;
68                 }
69
70                 tfp_free(tp->pg_va_tbl[i]);
71                 tp->pg_va_tbl[i] = NULL;
72         }
73
74         tp->pg_count = 0;
75         tfp_free(tp->pg_va_tbl);
76         tp->pg_va_tbl = NULL;
77         tfp_free(tp->pg_pa_tbl);
78         tp->pg_pa_tbl = NULL;
79 }
80
81 /**
82  * Function to free an EM table
83  *
84  * [in] tbl
85  *   Pointer to the EM table to free
86  */
87 static void
88 tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
89 {
90         struct hcapi_cfa_em_page_tbl *tp;
91         int i;
92
93         for (i = 0; i < tbl->num_lvl; i++) {
94                 tp = &tbl->pg_tbl[i];
95                 TFP_DRV_LOG(INFO,
96                            "EEM: Freeing page table: size %u lvl %d cnt %u\n",
97                            TF_EM_PAGE_SIZE,
98                             i,
99                             tp->pg_count);
100
101                 tf_em_free_pg_tbl(tp);
102         }
103
104         tbl->l0_addr = NULL;
105         tbl->l0_dma_addr = 0;
106         tbl->num_lvl = 0;
107         tbl->num_data_pages = 0;
108 }
109
110 /**
111  * Allocation of page tables
112  *
113  * [in] tfp
114  *   Pointer to a TruFlow handle
115  *
116  * [in] pg_count
117  *   Page count to allocate
118  *
119  * [in] pg_size
120  *   Size of each page
121  *
122  * Returns:
123  *   0       - Success
124  *   -ENOMEM - Out of memory
125  */
126 static int
127 tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
128                    uint32_t pg_count,
129                    uint32_t pg_size)
130 {
131         uint32_t i;
132         struct tfp_calloc_parms parms;
133
134         parms.nitems = pg_count;
135         parms.size = sizeof(void *);
136         parms.alignment = 0;
137
138         if (tfp_calloc(&parms) != 0)
139                 return -ENOMEM;
140
141         tp->pg_va_tbl = parms.mem_va;
142
143         if (tfp_calloc(&parms) != 0) {
144                 tfp_free(tp->pg_va_tbl);
145                 return -ENOMEM;
146         }
147
148         tp->pg_pa_tbl = parms.mem_va;
149
150         tp->pg_count = 0;
151         tp->pg_size = pg_size;
152
153         for (i = 0; i < pg_count; i++) {
154                 parms.nitems = 1;
155                 parms.size = pg_size;
156                 parms.alignment = TF_EM_PAGE_ALIGNMENT;
157
158                 if (tfp_calloc(&parms) != 0)
159                         goto cleanup;
160
161                 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
162                 tp->pg_va_tbl[i] = parms.mem_va;
163
164                 memset(tp->pg_va_tbl[i], 0, pg_size);
165                 tp->pg_count++;
166         }
167
168         return 0;
169
170 cleanup:
171         tf_em_free_pg_tbl(tp);
172         return -ENOMEM;
173 }
174
175 /**
176  * Allocates EM page tables
177  *
178  * [in] tbl
179  *   Table to allocate pages for
180  *
181  * Returns:
182  *   0       - Success
183  *   -ENOMEM - Out of memory
184  */
185 static int
186 tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
187 {
188         struct hcapi_cfa_em_page_tbl *tp;
189         int rc = 0;
190         int i;
191         uint32_t j;
192
193         for (i = 0; i < tbl->num_lvl; i++) {
194                 tp = &tbl->pg_tbl[i];
195
196                 rc = tf_em_alloc_pg_tbl(tp,
197                                         tbl->page_cnt[i],
198                                         TF_EM_PAGE_SIZE);
199                 if (rc) {
200                         TFP_DRV_LOG(WARNING,
201                                 "Failed to allocate page table: lvl: %d, rc:%s\n",
202                                 i,
203                                 strerror(-rc));
204                         goto cleanup;
205                 }
206
207                 for (j = 0; j < tp->pg_count; j++) {
208                         TFP_DRV_LOG(INFO,
209                                 "EEM: Allocated page table: size %u lvl %d cnt"
210                                 " %u VA:%p PA:%p\n",
211                                 TF_EM_PAGE_SIZE,
212                                 i,
213                                 tp->pg_count,
214                                 (uint32_t *)tp->pg_va_tbl[j],
215                                 (uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);
216                 }
217         }
218         return rc;
219
220 cleanup:
221         tf_em_free_page_table(tbl);
222         return rc;
223 }
224
/**
 * Links EM page tables
 *
 * Writes one PTE per next-level page into the pages of this level's
 * table, in order. When set_pte_last is set, the final two PTEs are
 * additionally tagged NEXT_TO_LAST / LAST.
 *
 * [in] tp
 *   Pointer to page table
 *
 * [in] tp_next
 *   Pointer to the next page table
 *
 * [in] set_pte_last
 *   Flag controlling if the page table is last
 */
static void
tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
		      struct hcapi_cfa_em_page_tbl *tp_next,
		      bool set_pte_last)
{
	uint64_t *pg_pa = tp_next->pg_pa_tbl;
	uint64_t *pg_va;
	uint64_t valid;
	uint32_t k = 0;	/* index of the next-level page being linked */
	uint32_t i;
	uint32_t j;

	for (i = 0; i < tp->pg_count; i++) {
		pg_va = tp->pg_va_tbl[i];

		for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
			/* Pick flag bits. Note pg_count - 2 wraps when
			 * pg_count == 1 (uint32_t), so a single-page
			 * level gets only the LAST tag.
			 */
			if (k == tp_next->pg_count - 2 && set_pte_last)
				valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
			else if (k == tp_next->pg_count - 1 && set_pte_last)
				valid = PTU_PTE_LAST | PTU_PTE_VALID;
			else
				valid = PTU_PTE_VALID;

			/* PTEs are stored little-endian. */
			pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
			if (++k >= tp_next->pg_count)
				return;	/* all next-level pages linked */
		}
	}
}
266
267 /**
268  * Setup a EM page table
269  *
270  * [in] tbl
271  *   Pointer to EM page table
272  */
273 static void
274 tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
275 {
276         struct hcapi_cfa_em_page_tbl *tp_next;
277         struct hcapi_cfa_em_page_tbl *tp;
278         bool set_pte_last = 0;
279         int i;
280
281         for (i = 0; i < tbl->num_lvl - 1; i++) {
282                 tp = &tbl->pg_tbl[i];
283                 tp_next = &tbl->pg_tbl[i + 1];
284                 if (i == tbl->num_lvl - 2)
285                         set_pte_last = 1;
286                 tf_em_link_page_table(tp, tp_next, set_pte_last);
287         }
288
289         tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
290         tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
291 }
292
293 /**
294  * Given the page size, size of each data item (entry size),
295  * and the total number of entries needed, determine the number
296  * of page table levels and the number of data pages required.
297  *
298  * [in] page_size
299  *   Page size
300  *
301  * [in] entry_size
302  *   Entry size
303  *
304  * [in] num_entries
305  *   Number of entries needed
306  *
307  * [out] num_data_pages
308  *   Number of pages required
309  *
310  * Returns:
311  *   Success  - Number of EM page levels required
312  *   -ENOMEM  - Out of memory
313  */
314 static int
315 tf_em_size_page_tbl_lvl(uint32_t page_size,
316                         uint32_t entry_size,
317                         uint32_t num_entries,
318                         uint64_t *num_data_pages)
319 {
320         uint64_t lvl_data_size = page_size;
321         int lvl = TF_PT_LVL_0;
322         uint64_t data_size;
323
324         *num_data_pages = 0;
325         data_size = (uint64_t)num_entries * entry_size;
326
327         while (lvl_data_size < data_size) {
328                 lvl++;
329
330                 if (lvl == TF_PT_LVL_1)
331                         lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
332                                 page_size;
333                 else if (lvl == TF_PT_LVL_2)
334                         lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
335                                 MAX_PAGE_PTRS(page_size) * page_size;
336                 else
337                         return -ENOMEM;
338         }
339
340         *num_data_pages = roundup(data_size, page_size) / page_size;
341
342         return lvl;
343 }
344
/**
 * Return the number of page table pages needed to
 * reference the given number of next level pages.
 *
 * [in] num_pages
 *   Number of EM pages
 *
 * [in] page_size
 *   Size of each EM page
 *
 * Returns:
 *   Number of EM page table pages
 */
static uint32_t
tf_em_page_tbl_pgcnt(uint32_t num_pages,
		     uint32_t page_size)
{
	/* Each page-table page holds MAX_PAGE_PTRS(page_size) pointers;
	 * round up so a partially filled page still counts.
	 * (Removed an unreachable "return 0;" that followed this return.)
	 */
	return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
		       MAX_PAGE_PTRS(page_size);
}
366
367 /**
368  * Given the number of data pages, page_size and the maximum
369  * number of page table levels (already determined), size
370  * the number of page table pages required at each level.
371  *
372  * [in] max_lvl
373  *   Max number of levels
374  *
375  * [in] num_data_pages
376  *   Number of EM data pages
377  *
378  * [in] page_size
379  *   Size of an EM page
380  *
381  * [out] *page_cnt
382  *   EM page count
383  */
384 static void
385 tf_em_size_page_tbls(int max_lvl,
386                      uint64_t num_data_pages,
387                      uint32_t page_size,
388                      uint32_t *page_cnt)
389 {
390         if (max_lvl == TF_PT_LVL_0) {
391                 page_cnt[TF_PT_LVL_0] = num_data_pages;
392         } else if (max_lvl == TF_PT_LVL_1) {
393                 page_cnt[TF_PT_LVL_1] = num_data_pages;
394                 page_cnt[TF_PT_LVL_0] =
395                 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
396         } else if (max_lvl == TF_PT_LVL_2) {
397                 page_cnt[TF_PT_LVL_2] = num_data_pages;
398                 page_cnt[TF_PT_LVL_1] =
399                 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
400                 page_cnt[TF_PT_LVL_0] =
401                 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
402         } else {
403                 return;
404         }
405 }
406
407 /**
408  * Size the EM table based on capabilities
409  *
410  * [in] tbl
411  *   EM table to size
412  *
413  * Returns:
414  *   0        - Success
415  *   - EINVAL - Parameter error
416  *   - ENOMEM - Out of memory
417  */
418 static int
419 tf_em_size_table(struct hcapi_cfa_em_table *tbl)
420 {
421         uint64_t num_data_pages;
422         uint32_t *page_cnt;
423         int max_lvl;
424         uint32_t num_entries;
425         uint32_t cnt = TF_EM_MIN_ENTRIES;
426
427         /* Ignore entry if both size and number are zero */
428         if (!tbl->entry_size && !tbl->num_entries)
429                 return 0;
430
431         /* If only one is set then error */
432         if (!tbl->entry_size || !tbl->num_entries)
433                 return -EINVAL;
434
435         /* Determine number of page table levels and the number
436          * of data pages needed to process the given eem table.
437          */
438         if (tbl->type == TF_RECORD_TABLE) {
439                 /*
440                  * For action records just a memory size is provided. Work
441                  * backwards to resolve to number of entries
442                  */
443                 num_entries = tbl->num_entries / tbl->entry_size;
444                 if (num_entries < TF_EM_MIN_ENTRIES) {
445                         num_entries = TF_EM_MIN_ENTRIES;
446                 } else {
447                         while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
448                                 cnt *= 2;
449                         num_entries = cnt;
450                 }
451         } else {
452                 num_entries = tbl->num_entries;
453         }
454
455         max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
456                                           tbl->entry_size,
457                                           tbl->num_entries,
458                                           &num_data_pages);
459         if (max_lvl < 0) {
460                 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
461                 TFP_DRV_LOG(WARNING,
462                             "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
463                             tbl->type, (uint64_t)num_entries * tbl->entry_size,
464                             TF_EM_PAGE_SIZE);
465                 return -ENOMEM;
466         }
467
468         tbl->num_lvl = max_lvl + 1;
469         tbl->num_data_pages = num_data_pages;
470
471         /* Determine the number of pages needed at each level */
472         page_cnt = tbl->page_cnt;
473         memset(page_cnt, 0, sizeof(tbl->page_cnt));
474         tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
475                                 page_cnt);
476
477         TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
478         TFP_DRV_LOG(INFO,
479                     "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
480                     max_lvl + 1,
481                     (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
482                     num_data_pages,
483                     page_cnt[TF_PT_LVL_0],
484                     page_cnt[TF_PT_LVL_1],
485                     page_cnt[TF_PT_LVL_2]);
486
487         return 0;
488 }
489
490 /**
491  * Unregisters EM Ctx in Firmware
492  *
493  * [in] tfp
494  *   Pointer to a TruFlow handle
495  *
496  * [in] tbl_scope_cb
497  *   Pointer to a table scope control block
498  *
499  * [in] dir
500  *   Receive or transmit direction
501  */
502 static void
503 tf_em_ctx_unreg(struct tf *tfp,
504                 struct tf_tbl_scope_cb *tbl_scope_cb,
505                 int dir)
506 {
507         struct hcapi_cfa_em_ctx_mem_info *ctxp =
508                 &tbl_scope_cb->em_ctx_info[dir];
509         struct hcapi_cfa_em_table *tbl;
510         int i;
511
512         for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
513                 tbl = &ctxp->em_tables[i];
514
515                 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
516                         tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
517                         tf_em_free_page_table(tbl);
518                 }
519         }
520 }
521
522 /**
523  * Registers EM Ctx in Firmware
524  *
525  * [in] tfp
526  *   Pointer to a TruFlow handle
527  *
528  * [in] tbl_scope_cb
529  *   Pointer to a table scope control block
530  *
531  * [in] dir
532  *   Receive or transmit direction
533  *
534  * Returns:
535  *   0       - Success
536  *   -ENOMEM - Out of Memory
537  */
538 static int
539 tf_em_ctx_reg(struct tf *tfp,
540               struct tf_tbl_scope_cb *tbl_scope_cb,
541               int dir)
542 {
543         struct hcapi_cfa_em_ctx_mem_info *ctxp =
544                 &tbl_scope_cb->em_ctx_info[dir];
545         struct hcapi_cfa_em_table *tbl;
546         int rc = 0;
547         int i;
548
549         for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
550                 tbl = &ctxp->em_tables[i];
551
552                 if (tbl->num_entries && tbl->entry_size) {
553                         rc = tf_em_size_table(tbl);
554
555                         if (rc)
556                                 goto cleanup;
557
558                         rc = tf_em_alloc_page_table(tbl);
559                         if (rc)
560                                 goto cleanup;
561
562                         tf_em_setup_page_table(tbl);
563                         rc = tf_msg_em_mem_rgtr(tfp,
564                                                 tbl->num_lvl - 1,
565                                                 TF_EM_PAGE_SIZE_ENUM,
566                                                 tbl->l0_dma_addr,
567                                                 &tbl->ctx_id);
568                         if (rc)
569                                 goto cleanup;
570                 }
571         }
572         return rc;
573
574 cleanup:
575         tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
576         return rc;
577 }
578
579 /**
580  * Validates EM number of entries requested
581  *
582  * [in] tbl_scope_cb
583  *   Pointer to table scope control block to be populated
584  *
585  * [in] parms
586  *   Pointer to input parameters
587  *
588  * Returns:
589  *   0       - Success
590  *   -EINVAL - Parameter error
591  */
592 static int
593 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
594                            struct tf_alloc_tbl_scope_parms *parms)
595 {
596         uint32_t cnt;
597
598         if (parms->rx_mem_size_in_mb != 0) {
599                 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
600                 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
601                                      + 1);
602                 uint32_t num_entries = (parms->rx_mem_size_in_mb *
603                                         TF_MEGABYTE) / (key_b + action_b);
604
605                 if (num_entries < TF_EM_MIN_ENTRIES) {
606                         TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
607                                     "%uMB\n",
608                                     parms->rx_mem_size_in_mb);
609                         return -EINVAL;
610                 }
611
612                 cnt = TF_EM_MIN_ENTRIES;
613                 while (num_entries > cnt &&
614                        cnt <= TF_EM_MAX_ENTRIES)
615                         cnt *= 2;
616
617                 if (cnt > TF_EM_MAX_ENTRIES) {
618                         TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
619                                     "%u\n",
620                        (parms->tx_num_flows_in_k * TF_KILOBYTE));
621                         return -EINVAL;
622                 }
623
624                 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
625         } else {
626                 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
627                     TF_EM_MIN_ENTRIES ||
628                     (parms->rx_num_flows_in_k * TF_KILOBYTE) >
629                     tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
630                         TFP_DRV_LOG(ERR,
631                                     "EEM: Invalid number of Rx flows "
632                                     "requested:%u max:%u\n",
633                                     parms->rx_num_flows_in_k * TF_KILOBYTE,
634                         tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
635                         return -EINVAL;
636                 }
637
638                 /* must be a power-of-2 supported value
639                  * in the range 32K - 128M
640                  */
641                 cnt = TF_EM_MIN_ENTRIES;
642                 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
643                        cnt <= TF_EM_MAX_ENTRIES)
644                         cnt *= 2;
645
646                 if (cnt > TF_EM_MAX_ENTRIES) {
647                         TFP_DRV_LOG(ERR,
648                                     "EEM: Invalid number of Rx requested: %u\n",
649                                     (parms->rx_num_flows_in_k * TF_KILOBYTE));
650                         return -EINVAL;
651                 }
652         }
653
654         if (parms->tx_mem_size_in_mb != 0) {
655                 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
656                 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
657                                      + 1);
658                 uint32_t num_entries = (parms->tx_mem_size_in_mb *
659                                         (TF_KILOBYTE * TF_KILOBYTE)) /
660                         (key_b + action_b);
661
662                 if (num_entries < TF_EM_MIN_ENTRIES) {
663                         TFP_DRV_LOG(ERR,
664                                     "EEM: Insufficient memory requested:%uMB\n",
665                                     parms->rx_mem_size_in_mb);
666                         return -EINVAL;
667                 }
668
669                 cnt = TF_EM_MIN_ENTRIES;
670                 while (num_entries > cnt &&
671                        cnt <= TF_EM_MAX_ENTRIES)
672                         cnt *= 2;
673
674                 if (cnt > TF_EM_MAX_ENTRIES) {
675                         TFP_DRV_LOG(ERR,
676                                     "EEM: Invalid number of Tx requested: %u\n",
677                        (parms->tx_num_flows_in_k * TF_KILOBYTE));
678                         return -EINVAL;
679                 }
680
681                 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
682         } else {
683                 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
684                     TF_EM_MIN_ENTRIES ||
685                     (parms->tx_num_flows_in_k * TF_KILOBYTE) >
686                     tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
687                         TFP_DRV_LOG(ERR,
688                                     "EEM: Invalid number of Tx flows "
689                                     "requested:%u max:%u\n",
690                                     (parms->tx_num_flows_in_k * TF_KILOBYTE),
691                         tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
692                         return -EINVAL;
693                 }
694
695                 cnt = TF_EM_MIN_ENTRIES;
696                 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
697                        cnt <= TF_EM_MAX_ENTRIES)
698                         cnt *= 2;
699
700                 if (cnt > TF_EM_MAX_ENTRIES) {
701                         TFP_DRV_LOG(ERR,
702                                     "EEM: Invalid number of Tx requested: %u\n",
703                        (parms->tx_num_flows_in_k * TF_KILOBYTE));
704                         return -EINVAL;
705                 }
706         }
707
708         if (parms->rx_num_flows_in_k != 0 &&
709             (parms->rx_max_key_sz_in_bits / 8 == 0)) {
710                 TFP_DRV_LOG(ERR,
711                             "EEM: Rx key size required: %u\n",
712                             (parms->rx_max_key_sz_in_bits));
713                 return -EINVAL;
714         }
715
716         if (parms->tx_num_flows_in_k != 0 &&
717             (parms->tx_max_key_sz_in_bits / 8 == 0)) {
718                 TFP_DRV_LOG(ERR,
719                             "EEM: Tx key size required: %u\n",
720                             (parms->tx_max_key_sz_in_bits));
721                 return -EINVAL;
722         }
723         /* Rx */
724         tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
725                 parms->rx_num_flows_in_k * TF_KILOBYTE;
726         tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
727                 parms->rx_max_key_sz_in_bits / 8;
728
729         tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
730                 parms->rx_num_flows_in_k * TF_KILOBYTE;
731         tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
732                 parms->rx_max_key_sz_in_bits / 8;
733
734         tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
735                 parms->rx_num_flows_in_k * TF_KILOBYTE;
736         tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
737                 parms->rx_max_action_entry_sz_in_bits / 8;
738
739         tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
740                 0;
741
742         /* Tx */
743         tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
744                 parms->tx_num_flows_in_k * TF_KILOBYTE;
745         tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
746                 parms->tx_max_key_sz_in_bits / 8;
747
748         tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
749                 parms->tx_num_flows_in_k * TF_KILOBYTE;
750         tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
751                 parms->tx_max_key_sz_in_bits / 8;
752
753         tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
754                 parms->tx_num_flows_in_k * TF_KILOBYTE;
755         tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
756                 parms->tx_max_action_entry_sz_in_bits / 8;
757
758         tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
759                 0;
760
761         return 0;
762 }
763
764 /**
765  * Internal function to get a Table Entry. Supports all Table Types
766  * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
767  *
768  * [in] tfp
769  *   Pointer to TruFlow handle
770  *
771  * [in] parms
772  *   Pointer to input parameters
773  *
774  * Returns:
775  *   0       - Success
776  *   -EINVAL - Parameter error
777  */
778 static int
779 tf_bulk_get_tbl_entry_internal(struct tf *tfp,
780                           struct tf_bulk_get_tbl_entry_parms *parms)
781 {
782         int rc;
783         int id;
784         uint32_t index;
785         struct bitalloc *session_pool;
786         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
787
788         /* Lookup the pool using the table type of the element */
789         rc = tf_rm_lookup_tbl_type_pool(tfs,
790                                         parms->dir,
791                                         parms->type,
792                                         &session_pool);
793         /* Error logging handled by tf_rm_lookup_tbl_type_pool */
794         if (rc)
795                 return rc;
796
797         index = parms->starting_idx;
798
799         /*
800          * Adjust the returned index/offset as there is no guarantee
801          * that the start is 0 at time of RM allocation
802          */
803         tf_rm_convert_index(tfs,
804                             parms->dir,
805                             parms->type,
806                             TF_RM_CONVERT_RM_BASE,
807                             parms->starting_idx,
808                             &index);
809
810         /* Verify that the entry has been previously allocated */
811         id = ba_inuse(session_pool, index);
812         if (id != 1) {
813                 TFP_DRV_LOG(ERR,
814                    "%s, Invalid or not allocated index, type:%d, starting_idx:%d\n",
815                    tf_dir_2_str(parms->dir),
816                    parms->type,
817                    index);
818                 return -EINVAL;
819         }
820
821         /* Get the entry */
822         rc = tf_msg_bulk_get_tbl_entry(tfp, parms);
823         if (rc) {
824                 TFP_DRV_LOG(ERR,
825                             "%s, Bulk get failed, type:%d, rc:%s\n",
826                             tf_dir_2_str(parms->dir),
827                             parms->type,
828                             strerror(-rc));
829         }
830
831         return rc;
832 }
833
834 #if (TF_SHADOW == 1)
835 /**
836  * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
837  * the requested entry. If found the ref count is incremente and
838  * returned.
839  *
840  * [in] tfs
841  *   Pointer to session
842  * [in] parms
843  *   Allocation parameters
844  *
845  * Return:
846  *  0       - Success, entry found and ref count incremented
847  *  -ENOENT - Failure, entry not found
848  */
849 static int
850 tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
851                           struct tf_alloc_tbl_entry_parms *parms __rte_unused)
852 {
853         TFP_DRV_LOG(ERR,
854                     "%s, Entry Alloc with search not supported\n",
855                     tf_dir_2_str(parms->dir));
856
857         return -EOPNOTSUPP;
858 }
859
860 /**
861  * Free Tbl entry from the Shadow DB. Shadow DB is searched for
862  * the requested entry. If found the ref count is decremente and
863  * new ref_count returned.
864  *
865  * [in] tfs
866  *   Pointer to session
867  * [in] parms
868  *   Allocation parameters
869  *
870  * Return:
871  *  0       - Success, entry found and ref count decremented
872  *  -ENOENT - Failure, entry not found
873  */
874 static int
875 tf_free_tbl_entry_shadow(struct tf_session *tfs,
876                          struct tf_free_tbl_entry_parms *parms)
877 {
878         TFP_DRV_LOG(ERR,
879                     "%s, Entry Free with search not supported\n",
880                     tf_dir_2_str(parms->dir));
881
882         return -EOPNOTSUPP;
883 }
884 #endif /* TF_SHADOW */
885
886 /**
887  * Create External Tbl pool of memory indexes.
888  *
889  * [in] dir
890  *   direction
891  * [in] tbl_scope_cb
892  *   pointer to the table scope
893  * [in] num_entries
894  *   number of entries to write
895  * [in] entry_sz_bytes
896  *   size of each entry
897  *
898  * Return:
899  *  0       - Success, entry allocated - no search support
900  *  -ENOMEM -EINVAL -EOPNOTSUPP
901  *          - Failure, entry not allocated, out of resources
902  */
903 static int
904 tf_create_tbl_pool_external(enum tf_dir dir,
905                             struct tf_tbl_scope_cb *tbl_scope_cb,
906                             uint32_t num_entries,
907                             uint32_t entry_sz_bytes)
908 {
909         struct tfp_calloc_parms parms;
910         uint32_t i;
911         int32_t j;
912         int rc = 0;
913         struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
914
915         parms.nitems = num_entries;
916         parms.size = sizeof(uint32_t);
917         parms.alignment = 0;
918
919         if (tfp_calloc(&parms) != 0) {
920                 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
921                             tf_dir_2_str(dir), strerror(ENOMEM));
922                 return -ENOMEM;
923         }
924
925         /* Create empty stack
926          */
927         rc = stack_init(num_entries, parms.mem_va, pool);
928
929         if (rc != 0) {
930                 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
931                             tf_dir_2_str(dir), strerror(-rc));
932                 goto cleanup;
933         }
934
935         /* Save the  malloced memory address so that it can
936          * be freed when the table scope is freed.
937          */
938         tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
939
940         /* Fill pool with indexes in reverse
941          */
942         j = (num_entries - 1) * entry_sz_bytes;
943
944         for (i = 0; i < num_entries; i++) {
945                 rc = stack_push(pool, j);
946                 if (rc != 0) {
947                         TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
948                                     tf_dir_2_str(dir), strerror(-rc));
949                         goto cleanup;
950                 }
951
952                 if (j < 0) {
953                         TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
954                                     dir, j);
955                         goto cleanup;
956                 }
957                 j -= entry_sz_bytes;
958         }
959
960         if (!stack_is_full(pool)) {
961                 rc = -EINVAL;
962                 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
963                             tf_dir_2_str(dir), strerror(-rc));
964                 goto cleanup;
965         }
966         return 0;
967 cleanup:
968         tfp_free((void *)parms.mem_va);
969         return rc;
970 }
971
972 /**
973  * Destroy External Tbl pool of memory indexes.
974  *
975  * [in] dir
976  *   direction
977  * [in] tbl_scope_cb
978  *   pointer to the table scope
979  *
980  */
981 static void
982 tf_destroy_tbl_pool_external(enum tf_dir dir,
983                              struct tf_tbl_scope_cb *tbl_scope_cb)
984 {
985         uint32_t *ext_act_pool_mem =
986                 tbl_scope_cb->ext_act_pool_mem[dir];
987
988         tfp_free(ext_act_pool_mem);
989 }
990
991 /* API defined in tf_em.h */
992 struct tf_tbl_scope_cb *
993 tbl_scope_cb_find(struct tf_session *session,
994                   uint32_t tbl_scope_id)
995 {
996         int i;
997
998         /* Check that id is valid */
999         i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);
1000         if (i < 0)
1001                 return NULL;
1002
1003         for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
1004                 if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
1005                         return &session->tbl_scopes[i];
1006         }
1007
1008         return NULL;
1009 }
1010
1011 /* API defined in tf_core.h */
1012 int
1013 tf_free_eem_tbl_scope_cb(struct tf *tfp,
1014                          struct tf_free_tbl_scope_parms *parms)
1015 {
1016         int rc = 0;
1017         enum tf_dir  dir;
1018         struct tf_tbl_scope_cb *tbl_scope_cb;
1019         struct tf_session *session;
1020
1021         session = (struct tf_session *)(tfp->session->core_data);
1022
1023         tbl_scope_cb = tbl_scope_cb_find(session,
1024                                          parms->tbl_scope_id);
1025
1026         if (tbl_scope_cb == NULL) {
1027                 TFP_DRV_LOG(ERR, "Table scope error\n");
1028                 return -EINVAL;
1029         }
1030
1031         /* Free Table control block */
1032         ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1033
1034         /* free table scope locks */
1035         for (dir = 0; dir < TF_DIR_MAX; dir++) {
1036                 /* Free associated external pools
1037                  */
1038                 tf_destroy_tbl_pool_external(dir,
1039                                              tbl_scope_cb);
1040                 tf_msg_em_op(tfp,
1041                              dir,
1042                              HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1043
1044                 /* free table scope and all associated resources */
1045                 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
1046         }
1047
1048         return rc;
1049 }
1050
/* API defined in tf_em.h */
/**
 * Allocate an EEM table scope.
 *
 * Reserves a control block from the session pool, queries the
 * per-direction EM capabilities, validates/derives table sizes, then
 * per direction: registers the EM contexts, configures and enables
 * EEM in firmware and builds the external action index pool.
 *
 * [in] tfp
 *   Pointer to TruFlow handle
 * [in/out] parms
 *   Allocation parameters; parms->tbl_scope_id is written on success
 *
 * Return:
 *   0 on success, -ENOMEM when no control block is available,
 *   -EINVAL on any capability/size/firmware failure.
 */
int
tf_alloc_eem_tbl_scope(struct tf *tfp,
		       struct tf_alloc_tbl_scope_parms *parms)
{
	int rc;
	enum tf_dir dir;
	struct tf_tbl_scope_cb *tbl_scope_cb;
	struct hcapi_cfa_em_table *em_tables;
	int index;
	struct tf_session *session;
	struct tf_free_tbl_scope_parms free_parms;

	session = (struct tf_session *)tfp->session->core_data;

	/* Get Table Scope control block from the session pool */
	index = ba_alloc(session->tbl_scope_pool_rx);
	if (index == -1) {
		TFP_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
			    "Control Block\n");
		return -ENOMEM;
	}

	/* The pool index doubles as the externally visible scope id */
	tbl_scope_cb = &session->tbl_scopes[index];
	tbl_scope_cb->index = index;
	tbl_scope_cb->tbl_scope_id = index;
	parms->tbl_scope_id = index;

	for (dir = 0; dir < TF_DIR_MAX; dir++) {
		rc = tf_msg_em_qcaps(tfp,
				     dir,
				     &tbl_scope_cb->em_caps[dir]);
		if (rc) {
			TFP_DRV_LOG(ERR,
				    "EEM: Unable to query for EEM capability,"
				    " rc:%s\n",
				    strerror(-rc));
			goto cleanup;
		}
	}

	/*
	 * Validate and setup table sizes
	 */
	if (tf_em_validate_num_entries(tbl_scope_cb, parms))
		goto cleanup;

	for (dir = 0; dir < TF_DIR_MAX; dir++) {
		/*
		 * Allocate tables and signal configuration to FW
		 */
		rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
		if (rc) {
			TFP_DRV_LOG(ERR,
				    "EEM: Unable to register for EEM ctx,"
				    " rc:%s\n",
				    strerror(-rc));
			/* NOTE(review): if this fails for the second
			 * direction, the 'cleanup' label below does not
			 * unregister the contexts already registered for
			 * the first direction — confirm whether
			 * cleanup_full should be used here instead.
			 */
			goto cleanup;
		}

		/* Push the negotiated table layout to firmware */
		em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
		rc = tf_msg_em_cfg(tfp,
				   em_tables[TF_KEY0_TABLE].num_entries,
				   em_tables[TF_KEY0_TABLE].ctx_id,
				   em_tables[TF_KEY1_TABLE].ctx_id,
				   em_tables[TF_RECORD_TABLE].ctx_id,
				   em_tables[TF_EFC_TABLE].ctx_id,
				   parms->hw_flow_cache_flush_timer,
				   dir);
		if (rc) {
			TFP_DRV_LOG(ERR,
				    "TBL: Unable to configure EEM in firmware"
				    " rc:%s\n",
				    strerror(-rc));
			goto cleanup_full;
		}

		rc = tf_msg_em_op(tfp,
				  dir,
				  HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);

		if (rc) {
			TFP_DRV_LOG(ERR,
				    "EEM: Unable to enable EEM in firmware"
				    " rc:%s\n",
				    strerror(-rc));
			goto cleanup_full;
		}

		/* Allocate the pool of offsets of the external memory.
		 * Initially, this is a single fixed size pool for all external
		 * actions related to a single table scope.
		 */
		rc = tf_create_tbl_pool_external(dir,
				    tbl_scope_cb,
				    em_tables[TF_RECORD_TABLE].num_entries,
				    em_tables[TF_RECORD_TABLE].entry_size);
		if (rc) {
			TFP_DRV_LOG(ERR,
				    "%s TBL: Unable to allocate idx pools %s\n",
				    tf_dir_2_str(dir),
				    strerror(-rc));
			goto cleanup_full;
		}
	}

	return 0;

	/* Full teardown: firmware was already (partially) configured, so
	 * release everything through the regular free path. Only
	 * tbl_scope_id of free_parms is consumed by the callee.
	 */
cleanup_full:
	free_parms.tbl_scope_id = index;
	tf_free_eem_tbl_scope_cb(tfp, &free_parms);
	return -EINVAL;

	/* Early-failure teardown: nothing was configured in firmware yet,
	 * so only the control block reservation is returned to the pool.
	 */
cleanup:
	/* Free Table control block */
	ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
	return -EINVAL;
}
1169
1170  /* API defined in tf_core.h */
1171 int
1172 tf_bulk_get_tbl_entry(struct tf *tfp,
1173                  struct tf_bulk_get_tbl_entry_parms *parms)
1174 {
1175         int rc = 0;
1176
1177         TF_CHECK_PARMS_SESSION(tfp, parms);
1178
1179         if (parms->type == TF_TBL_TYPE_EXT) {
1180                 /* Not supported, yet */
1181                 TFP_DRV_LOG(ERR,
1182                             "%s, External table type not supported\n",
1183                             tf_dir_2_str(parms->dir));
1184
1185                 rc = -EOPNOTSUPP;
1186         } else {
1187                 /* Internal table type processing */
1188                 rc = tf_bulk_get_tbl_entry_internal(tfp, parms);
1189                 if (rc)
1190                         TFP_DRV_LOG(ERR,
1191                                     "%s, Bulk get failed, type:%d, rc:%s\n",
1192                                     tf_dir_2_str(parms->dir),
1193                                     parms->type,
1194                                     strerror(-rc));
1195         }
1196
1197         return rc;
1198 }
1199
/* API defined in tf_core.h */
/* Validate the session handle and delegate table scope allocation to
 * the EEM implementation.
 */
int
tf_alloc_tbl_scope(struct tf *tfp,
		   struct tf_alloc_tbl_scope_parms *parms)
{
	TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);

	return tf_alloc_eem_tbl_scope(tfp, parms);
}
1213
/* API defined in tf_core.h */
/* Validate the session handle, then free the table scope and all of
 * its associated resources via the EEM implementation.
 */
int
tf_free_tbl_scope(struct tf *tfp,
		  struct tf_free_tbl_scope_parms *parms)
{
	TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);

	/* free table scope and all associated resources */
	return tf_free_eem_tbl_scope_cb(tfp, parms);
}
1228
1229 static void
1230 tf_dump_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
1231                         struct hcapi_cfa_em_page_tbl *tp_next)
1232 {
1233         uint64_t *pg_va;
1234         uint32_t i;
1235         uint32_t j;
1236         uint32_t k = 0;
1237
1238         printf("pg_count:%d pg_size:0x%x\n",
1239                tp->pg_count,
1240                tp->pg_size);
1241         for (i = 0; i < tp->pg_count; i++) {
1242                 pg_va = tp->pg_va_tbl[i];
1243                 printf("\t%p\n", (void *)pg_va);
1244                 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
1245                         printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
1246                         if (((pg_va[j] & 0x7) ==
1247                              tfp_cpu_to_le_64(PTU_PTE_LAST |
1248                                               PTU_PTE_VALID)))
1249                                 return;
1250
1251                         if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
1252                                 printf("** Invalid entry **\n");
1253                                 return;
1254                         }
1255
1256                         if (++k >= tp_next->pg_count) {
1257                                 printf("** Shouldn't get here **\n");
1258                                 return;
1259                         }
1260                 }
1261         }
1262 }
1263
1264 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
1265
1266 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
1267 {
1268         struct tf_session      *session;
1269         struct tf_tbl_scope_cb *tbl_scope_cb;
1270         struct hcapi_cfa_em_page_tbl *tp;
1271         struct hcapi_cfa_em_page_tbl *tp_next;
1272         struct hcapi_cfa_em_table *tbl;
1273         int i;
1274         int j;
1275         int dir;
1276
1277         printf("called %s\n", __func__);
1278
1279         /* find session struct */
1280         session = (struct tf_session *)tfp->session->core_data;
1281
1282         /* find control block for table scope */
1283         tbl_scope_cb = tbl_scope_cb_find(session,
1284                                          tbl_scope_id);
1285         if (tbl_scope_cb == NULL)
1286                 PMD_DRV_LOG(ERR, "No table scope\n");
1287
1288         for (dir = 0; dir < TF_DIR_MAX; dir++) {
1289                 printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
1290
1291                 for (j = TF_KEY0_TABLE; j < TF_MAX_TABLE; j++) {
1292                         tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
1293                         printf
1294         ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
1295                                j,
1296                                tbl->type,
1297                                tbl->num_entries,
1298                                tbl->entry_size,
1299                                tbl->num_lvl);
1300                         if (tbl->pg_tbl[0].pg_va_tbl &&
1301                             tbl->pg_tbl[0].pg_pa_tbl)
1302                                 printf("%p %p\n",
1303                                tbl->pg_tbl[0].pg_va_tbl[0],
1304                                (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
1305                         for (i = 0; i < tbl->num_lvl - 1; i++) {
1306                                 printf("Level:%d\n", i);
1307                                 tp = &tbl->pg_tbl[i];
1308                                 tp_next = &tbl->pg_tbl[i + 1];
1309                                 tf_dump_link_page_table(tp, tp_next);
1310                         }
1311                         printf("\n");
1312                 }
1313         }
1314 }