/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Broadcom
 * All rights reserved.
 */

#include <string.h>

#include <rte_common.h>

#include "tf_rm.h"
#include "tf_core.h"
#include "tf_session.h"
#include "tf_resources.h"
#include "tf_msg.h"
#include "bnxt.h"

/**
 * Internal macro to perform HW resource allocation check between what
 * firmware reports vs what was statically requested.
 *
 * Parameters:
 *   struct tf_rm_hw_query    *hquery      - Pointer to the hw query result
 *   enum tf_dir               dir         - Direction to process
 *   enum tf_resource_type_hw  hcapi_type  - HCAPI type, the index element
 *                                           in the hw query structure
 *   define                    def_value   - Define value to check against
 *   uint32_t                 *eflag       - Result of the check
 */
#define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do {  \
	if ((dir) == TF_DIR_RX) {                                             \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
			*(eflag) |= 1 << (hcapi_type);                        \
	} else {                                                              \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
			*(eflag) |= 1 << (hcapi_type);                        \
	}                                                                     \
} while (0)
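
/*
 * Example (illustrative only, not part of the driver flow): checking
 * the RX range-entry type against its reserved define expands roughly
 * to
 *
 *   if (query->hw_query[TF_RESC_TYPE_HW_RANGE_ENTRY].max !=
 *       TF_RSVD_RANGE_ENTRY_RX)
 *           *error_flag |= 1 << TF_RESC_TYPE_HW_RANGE_ENTRY;
 */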

/**
 * Internal macro to perform SRAM resource allocation check between
 * what firmware reports vs what was statically requested.
 *
 * Parameters:
 *   struct tf_rm_sram_query   *squery      - Pointer to the sram query result
 *   enum tf_dir                dir         - Direction to process
 *   enum tf_resource_type_sram hcapi_type  - HCAPI type, the index element
 *                                            in the sram query structure
 *   define                     def_value   - Define value to check against
 *   uint32_t                  *eflag       - Result of the check
 */
#define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
	if ((dir) == TF_DIR_RX) {                                              \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
			*(eflag) |= 1 << (hcapi_type);                         \
	} else {                                                               \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
			*(eflag) |= 1 << (hcapi_type);                         \
	}                                                                      \
} while (0)

/**
 * Internal macro to convert a reserved resource define name to be
 * direction specific.
 *
 * Parameters:
 *   enum tf_dir    dir         - Direction to process
 *   string         type        - Type name to append RX or TX to
 *   string         dtype       - Direction specific type
 */
#define TF_RESC_RSVD(dir, type, dtype) do {     \
		if ((dir) == TF_DIR_RX)         \
			(dtype) = type ## _RX;  \
		else                            \
			(dtype) = type ## _TX;  \
	} while (0)
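
/*
 * Example (illustrative only): TF_RESC_RSVD(TF_DIR_TX,
 * TF_RSVD_SRAM_MCG, value) pastes the direction suffix onto the
 * define name and is equivalent to
 *
 *   value = TF_RSVD_SRAM_MCG_TX;
 */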

const char
*tf_dir_2_str(enum tf_dir dir)
{
	switch (dir) {
	case TF_DIR_RX:
		return "RX";
	case TF_DIR_TX:
		return "TX";
	default:
		return "Invalid direction";
	}
}

const char
*tf_ident_2_str(enum tf_identifier_type id_type)
{
	switch (id_type) {
	case TF_IDENT_TYPE_L2_CTXT:
		return "l2_ctxt_remap";
	case TF_IDENT_TYPE_PROF_FUNC:
		return "prof_func";
	case TF_IDENT_TYPE_WC_PROF:
		return "wc_prof";
	case TF_IDENT_TYPE_EM_PROF:
		return "em_prof";
	case TF_IDENT_TYPE_L2_FUNC:
		return "l2_func";
	default:
		break;
	}
	return "Invalid identifier";
}

const char
*tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
{
	switch (sram_type) {
	case TF_RESC_TYPE_SRAM_FULL_ACTION:
		return "Full action";
	case TF_RESC_TYPE_SRAM_MCG:
		return "MCG";
	case TF_RESC_TYPE_SRAM_ENCAP_8B:
		return "Encap 8B";
	case TF_RESC_TYPE_SRAM_ENCAP_16B:
		return "Encap 16B";
	case TF_RESC_TYPE_SRAM_ENCAP_64B:
		return "Encap 64B";
	case TF_RESC_TYPE_SRAM_SP_SMAC:
		return "Source properties SMAC";
	case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
		return "Source properties SMAC IPv4";
	case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
		return "Source properties SMAC IPv6";
	case TF_RESC_TYPE_SRAM_COUNTER_64B:
		return "Counter 64B";
	case TF_RESC_TYPE_SRAM_NAT_SPORT:
		return "NAT source port";
	case TF_RESC_TYPE_SRAM_NAT_DPORT:
		return "NAT destination port";
	case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
		return "NAT source IPv4";
	case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
		return "NAT destination IPv4";
	default:
		return "Invalid sram type";
	}
}

/**
 * Helper function to perform a SRAM HCAPI resource type lookup
 * against the reserved value of the same static type.
 *
 * Returns:
 *   -EOPNOTSUPP - Reserved resource type not supported
 *   Value       - Integer value of the reserved value for the requested type
 */
static int
tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
{
	int32_t value = -EOPNOTSUPP;

	switch (index) {
	case TF_RESC_TYPE_SRAM_FULL_ACTION:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
		break;
	case TF_RESC_TYPE_SRAM_MCG:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
		break;
	case TF_RESC_TYPE_SRAM_ENCAP_8B:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
		break;
	case TF_RESC_TYPE_SRAM_ENCAP_16B:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
		break;
	case TF_RESC_TYPE_SRAM_ENCAP_64B:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
		break;
	case TF_RESC_TYPE_SRAM_SP_SMAC:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
		break;
	case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
		break;
	case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
		break;
	case TF_RESC_TYPE_SRAM_COUNTER_64B:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
		break;
	case TF_RESC_TYPE_SRAM_NAT_SPORT:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
		break;
	case TF_RESC_TYPE_SRAM_NAT_DPORT:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
		break;
	case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
		break;
	case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
		TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
		break;
	default:
		break;
	}

	return value;
}
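
/*
 * Example (illustrative only): tf_rm_rsvd_sram_value(TF_DIR_RX,
 * TF_RESC_TYPE_SRAM_MCG) returns TF_RSVD_SRAM_MCG_RX, while an index
 * outside the switch falls through and returns -EOPNOTSUPP.
 */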

/**
 * Helper function to print all the SRAM resource qcaps errors
 * reported in the error_flag.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] sram_query
 *   Pointer to the sram query result holding the available counts
 *
 * [in] error_flag
 *   Pointer to the sram error flags created at time of the query check
 */
static void
tf_rm_print_sram_qcaps_error(enum tf_dir dir,
			     struct tf_rm_sram_query *sram_query,
			     uint32_t *error_flag)
{
	int i;

	PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n");
	PMD_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
	PMD_DRV_LOG(ERR, "  Elements:\n");

	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
		if (*error_flag & 1 << i)
			PMD_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
				    tf_hcapi_sram_2_str(i),
				    sram_query->sram_query[i].max,
				    tf_rm_rsvd_sram_value(dir, i));
	}
}

/**
 * Performs a HW resource check between what firmware capability
 * reports and what the core expects is available.
 *
 * Firmware performs the resource carving at AFM init time and the
 * resource capability is reported in the TruFlow qcaps msg.
 *
 * [in] query
 *   Pointer to HW Query data structure. Query holds what the firmware
 *   offers of the HW resources.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in/out] error_flag
 *   Pointer to a bit array indicating the error of a single HCAPI
 *   resource type. When a bit is set to 1, the HCAPI resource type
 *   failed static allocation.
 *
 * Returns:
 *  0       - Success
 *  -ENOMEM - Failure on one of the allocated resources. Check the
 *            error_flag for what types are flagged errored.
 */
static int
tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
			    enum tf_dir dir,
			    uint32_t *error_flag)
{
	*error_flag = 0;
	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_RANGE_ENTRY,
			     TF_RSVD_RANGE_ENTRY,
			     error_flag);

	if (*error_flag != 0)
		return -ENOMEM;

	return 0;
}
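
/*
 * Example (illustrative only): should firmware report a different
 * number of RX range entries than TF_RSVD_RANGE_ENTRY_RX, the
 * returned error_flag has bit TF_RESC_TYPE_HW_RANGE_ENTRY set, which
 * a caller can test with
 *
 *   if (error_flag & (1 << TF_RESC_TYPE_HW_RANGE_ENTRY))
 *           ...;
 */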

/**
 * Performs a SRAM resource check between what firmware capability
 * reports and what the core expects is available.
 *
 * Firmware performs the resource carving at AFM init time and the
 * resource capability is reported in the TruFlow qcaps msg.
 *
 * [in] query
 *   Pointer to SRAM Query data structure. Query holds what the
 *   firmware offers of the SRAM resources.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in/out] error_flag
 *   Pointer to a bit array indicating the error of a single HCAPI
 *   resource type. When a bit is set to 1, the HCAPI resource type
 *   failed static allocation.
 *
 * Returns:
 *  0       - Success
 *  -ENOMEM - Failure on one of the allocated resources. Check the
 *            error_flag for what types are flagged errored.
 */
static int
tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
			      enum tf_dir dir,
			      uint32_t *error_flag)
{
	*error_flag = 0;

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_FULL_ACTION,
			       TF_RSVD_SRAM_FULL_ACTION,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_MCG,
			       TF_RSVD_SRAM_MCG,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_8B,
			       TF_RSVD_SRAM_ENCAP_8B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_16B,
			       TF_RSVD_SRAM_ENCAP_16B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_64B,
			       TF_RSVD_SRAM_ENCAP_64B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC,
			       TF_RSVD_SRAM_SP_SMAC,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
			       TF_RSVD_SRAM_SP_SMAC_IPV4,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
			       TF_RSVD_SRAM_SP_SMAC_IPV6,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_COUNTER_64B,
			       TF_RSVD_SRAM_COUNTER_64B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_SPORT,
			       TF_RSVD_SRAM_NAT_SPORT,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_DPORT,
			       TF_RSVD_SRAM_NAT_DPORT,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_S_IPV4,
			       TF_RSVD_SRAM_NAT_S_IPV4,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_D_IPV4,
			       TF_RSVD_SRAM_NAT_D_IPV4,
			       error_flag);

	if (*error_flag != 0)
		return -ENOMEM;

	return 0;
}

/**
 * Internal function to mark pool entries used.
 */
static void
tf_rm_reserve_range(uint32_t count,
		    uint32_t rsv_begin,
		    uint32_t rsv_end,
		    uint32_t max,
		    struct bitalloc *pool)
{
	uint32_t i;

	/* If no resources have been requested we mark everything
	 * 'used'
	 */
	if (count == 0) {
		for (i = 0; i < max; i++)
			ba_alloc_index(pool, i);
	} else {
		/* Support 2 main modes
		 * Reserved range starts from bottom up (with
		 * pre-reserved value or not)
		 * - begin = 0 to end xx
		 * - begin = 1 to end xx
		 *
		 * Reserved range starts from top down
		 * - begin = yy to end max
		 */

		/* Bottom up check, start from 0 */
		if (rsv_begin == 0) {
			for (i = rsv_end + 1; i < max; i++)
				ba_alloc_index(pool, i);
		}

		/* Bottom up check, start from 1 or higher OR
		 * Top Down
		 */
		if (rsv_begin >= 1) {
			/* Allocate from 0 until start */
			for (i = 0; i < rsv_begin; i++)
				ba_alloc_index(pool, i);

			/* Skip the reserved range, rsv_end is the
			 * last reserved index (inclusive), then mark
			 * the remaining entries used
			 */
			if (rsv_end < max - 1) {
				for (i = rsv_end + 1; i < max; i++)
					ba_alloc_index(pool, i);
			}
		}
	}
}
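
/*
 * Worked example (illustrative only): with count = 16, rsv_begin = 0,
 * rsv_end = 15 and max = 64, indices 16..63 are marked used and
 * 0..15 remain free for TruFlow. With rsv_begin = 48, rsv_end = 63
 * and max = 64 (top down), indices 0..47 are marked used and the
 * second loop never runs.
 */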

/**
 * Internal function to mark all the l2 ctxt allocated that Truflow
 * does not own.
 */
static void
tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
	uint32_t end = 0;

	/* l2 ctxt rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
			tfs->resc.rx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    end,
			    TF_NUM_L2_CTXT_TCAM,
			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);

	/* l2 ctxt tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
			tfs->resc.tx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    end,
			    TF_NUM_L2_CTXT_TCAM,
			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
}

/**
 * Internal function to mark all the l2 func resources allocated that
 * Truflow does not own.
 */
static void
tf_rm_rsvd_l2_func(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
	uint32_t end = 0;

	/* l2 func rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
			tfs->resc.rx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    end,
			    TF_NUM_L2_FUNC,
			    tfs->TF_L2_FUNC_POOL_NAME_RX);

	/* l2 func tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
			tfs->resc.tx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    end,
			    TF_NUM_L2_FUNC,
			    tfs->TF_L2_FUNC_POOL_NAME_TX);
}

/**
 * Internal function to mark all the full action resources allocated
 * that Truflow does not own.
 */
static void
tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
	uint16_t end = 0;

	/* full action rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_FULL_ACTION_RX,
			    tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);

	/* full action tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_FULL_ACTION_TX,
			    tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
}

/**
 * Internal function to mark all the multicast group resources
 * allocated that Truflow does not own.
 */
static void
tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_SRAM_MCG;
	uint16_t end = 0;

	/* multicast group rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_MCG_RX,
			    tfs->TF_SRAM_MCG_POOL_NAME_RX);

	/* Multicast Group on TX is not supported */
}

/**
 * Internal function to mark all the encap resources allocated that
 * Truflow does not own.
 */
static void
tf_rm_rsvd_sram_encap(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
	uint16_t end = 0;

	/* encap 8b rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_ENCAP_8B_RX,
			    tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);

	/* encap 8b tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_ENCAP_8B_TX,
			    tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_ENCAP_16B;

	/* encap 16b rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_ENCAP_16B_RX,
			    tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);

	/* encap 16b tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_ENCAP_16B_TX,
			    tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_ENCAP_64B;

	/* Encap 64B not supported on RX */

	/* encap 64b tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_ENCAP_64B_TX,
			    tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
}

/**
 * Internal function to mark all the sp resources allocated that
 * Truflow does not own.
 */
static void
tf_rm_rsvd_sram_sp(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
	uint16_t end = 0;

	/* sp smac rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_SP_SMAC_RX,
			    tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);

	/* sp smac tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_SP_SMAC_TX,
			    tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;

	/* SP SMAC IPv4 not supported on RX */

	/* sp smac ipv4 tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
			    tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;

	/* SP SMAC IPv6 not supported on RX */

	/* sp smac ipv6 tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
			    tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
}

/**
 * Internal function to mark all the stat resources allocated that
 * Truflow does not own.
 */
static void
tf_rm_rsvd_sram_stats(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
	uint16_t end = 0;

	/* counter 64b rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_COUNTER_64B_RX,
			    tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);

	/* counter 64b tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_COUNTER_64B_TX,
			    tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
}

/**
 * Internal function to mark all the nat resources allocated that
 * Truflow does not own.
 */
static void
tf_rm_rsvd_sram_nat(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
	uint16_t end = 0;

	/* nat source port rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_SPORT_RX,
			    tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);

	/* nat source port tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_SPORT_TX,
			    tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_NAT_DPORT;

	/* nat destination port rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_DPORT_RX,
			    tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);

	/* nat destination port tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_DPORT_TX,
			    tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;

	/* nat source ipv4 rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_S_IPV4_RX,
			    tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);

	/* nat source ipv4 tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_S_IPV4_TX,
			    tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;

	/* nat destination ipv4 rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_D_IPV4_RX,
			    tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);

	/* nat destination ipv4 tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_D_IPV4_TX,
			    tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
}

/**
 * Internal function used to validate the HW allocated resources
 * against the requested values.
 */
static int
tf_rm_hw_alloc_validate(enum tf_dir dir,
			struct tf_rm_hw_alloc *hw_alloc,
			struct tf_rm_entry *hw_entry)
{
	int error = 0;
	int i;

	for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
		if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
			PMD_DRV_LOG(ERR,
				"%s, Alloc failed id:%d expect:%d got:%d\n",
				tf_dir_2_str(dir),
				i,
				hw_alloc->hw_num[i],
				hw_entry[i].stride);
			error = -1;
		}
	}

	return error;
}

/**
 * Internal function used to validate the SRAM allocated resources
 * against the requested values.
 */
static int
tf_rm_sram_alloc_validate(enum tf_dir dir,
			  struct tf_rm_sram_alloc *sram_alloc,
			  struct tf_rm_entry *sram_entry)
{
	int error = 0;
	int i;

	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
		if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
			PMD_DRV_LOG(ERR,
				"%s, Alloc failed idx:%d expect:%d got:%d\n",
				tf_dir_2_str(dir),
				i,
				sram_alloc->sram_num[i],
				sram_entry[i].stride);
			error = -1;
		}
	}

	return error;
}

/**
 * Internal function used to mark all the HW resources allocated that
 * Truflow does not own.
 */
static void
tf_rm_reserve_hw(struct tf *tfp)
{
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* TBD
	 * There is no direct AFM resource allocation as it is carved
	 * statically at AFM boot time. Thus the bit allocators work
	 * on the full HW resource amount and we just mark everything
	 * used except the resources that Truflow took ownership of.
	 */
	tf_rm_rsvd_l2_ctxt(tfs);
	tf_rm_rsvd_l2_func(tfs);
}

/**
 * Internal function used to mark all the SRAM resources allocated
 * that Truflow does not own.
 */
static void
tf_rm_reserve_sram(struct tf *tfp)
{
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);

	/* TBD
	 * There is no direct AFM resource allocation as it is carved
	 * statically at AFM boot time. Thus the bit allocators work
	 * on the full HW resource amount and we just mark everything
	 * used except the resources that Truflow took ownership of.
	 */
	tf_rm_rsvd_sram_full_action(tfs);
	tf_rm_rsvd_sram_mcg(tfs);
	tf_rm_rsvd_sram_encap(tfs);
	tf_rm_rsvd_sram_sp(tfs);
	tf_rm_rsvd_sram_stats(tfs);
	tf_rm_rsvd_sram_nat(tfs);
}

/**
 * Internal function used to allocate and validate all HW resources.
 *
 * [in] tfp
 *   Pointer to TF handle
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * Returns:
 *   0  - Success
 *   -1 - Internal error
 */
static int
tf_rm_allocate_validate_hw(struct tf *tfp,
			   enum tf_dir dir)
{
	int rc;
	int i;
	struct tf_rm_hw_query hw_query;
	struct tf_rm_hw_alloc hw_alloc;
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
	struct tf_rm_entry *hw_entries;
	uint32_t error_flag;

	if (dir == TF_DIR_RX)
		hw_entries = tfs->resc.rx.hw_entry;
	else
		hw_entries = tfs->resc.tx.hw_entry;

	/* Query for Session HW Resources */
	rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			    "%s, HW qcaps message send failed\n",
			    tf_dir_2_str(dir));
		goto cleanup;
	}

	rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			"%s, HW QCAPS validation failed, error_flag:0x%x\n",
			tf_dir_2_str(dir),
			error_flag);
		goto cleanup;
	}

	/* Post process HW capability */
	for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
		hw_alloc.hw_num[i] = hw_query.hw_query[i].max;

	/* Allocate Session HW Resources */
	rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			    "%s, HW alloc message send failed\n",
			    tf_dir_2_str(dir));
		goto cleanup;
	}

	/* Perform HW allocation validation as it's possible the
	 * resource availability changed between qcaps and alloc
	 */
	rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			    "%s, HW Resource validation failed\n",
			    tf_dir_2_str(dir));
		goto cleanup;
	}

	return 0;

 cleanup:
	return -1;
}

/**
 * Internal function used to allocate and validate all SRAM resources.
 *
 * [in] tfp
 *   Pointer to TF handle
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * Returns:
 *   0  - Success
 *   -1 - Internal error
 */
static int
tf_rm_allocate_validate_sram(struct tf *tfp,
			     enum tf_dir dir)
{
	int rc;
	int i;
	struct tf_rm_sram_query sram_query;
	struct tf_rm_sram_alloc sram_alloc;
	struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
	struct tf_rm_entry *sram_entries;
	uint32_t error_flag;

	if (dir == TF_DIR_RX)
		sram_entries = tfs->resc.rx.sram_entry;
	else
		sram_entries = tfs->resc.tx.sram_entry;

	/* Query for Session SRAM Resources */
	rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			    "%s, SRAM qcaps message send failed\n",
			    tf_dir_2_str(dir));
		goto cleanup;
	}

	rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			"%s, SRAM QCAPS validation failed, error_flag:%x\n",
			tf_dir_2_str(dir),
			error_flag);
		tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
		goto cleanup;
	}

	/* Post process SRAM capability */
	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
		sram_alloc.sram_num[i] = sram_query.sram_query[i].max;

	/* Allocate Session SRAM Resources */
	rc = tf_msg_session_sram_resc_alloc(tfp,
					    dir,
					    &sram_alloc,
					    sram_entries);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			    "%s, SRAM alloc message send failed\n",
			    tf_dir_2_str(dir));
		goto cleanup;
	}

	/* Perform SRAM allocation validation as it's possible the
	 * resource availability changed between qcaps and alloc
	 */
	rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
	if (rc) {
		/* Log error */
		PMD_DRV_LOG(ERR,
			    "%s, SRAM Resource allocation validation failed\n",
			    tf_dir_2_str(dir));
		goto cleanup;
	}

	return 0;

 cleanup:
	return -1;
}

/**
 * Helper function used to prune a SRAM resource array to only hold
 * elements that need to be flushed.
 *
 * [in] tfs
 *   Session handle
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] sram_entries
 *   Master SRAM Resource database
 *
 * [in/out] flush_entries
 *   Pruned SRAM Resource database of entries to be flushed. This
 *   array should be passed in as a complete copy of the master SRAM
 *   Resource database. The outgoing result will be a pruned version
 *   based on the result of the requested checking
 *
 * Returns:
 *    0 - Success, no flush required
 *    1 - Success, flush required
 *   -1 - Internal error
 */
static int
tf_rm_sram_to_flush(struct tf_session *tfs,
		    enum tf_dir dir,
		    struct tf_rm_entry *sram_entries,
		    struct tf_rm_entry *flush_entries)
{
	int rc;
	int flush_rc = 0;
	int free_cnt;
	struct bitalloc *pool;

	/* Check all the sram resource pools for leftover elements.
	 * Any leftovers cause the complete pool of that type to be
	 * invalidated (flushed).
	 */

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_FULL_ACTION_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Only pools for RX direction */
	if (dir == TF_DIR_RX) {
		TF_RM_GET_POOLS_RX(tfs, &pool,
				   TF_SRAM_MCG_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune TX direction */
		flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_ENCAP_8B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_ENCAP_16B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_ENCAP_64B_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_SP_SMAC_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
				0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
	}

	/* Only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
				0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_STATS_64B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_SPORT_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_DPORT_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_S_IPV4_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_D_IPV4_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
	} else {
		flush_rc = 1;
	}

	return flush_rc;
}
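
/*
 * Illustrative sketch of the pruning above (not driver code): if the
 * full action pool was allocated with stride 512 and ba_free_count()
 * also reports 512, every element is back in the pool, so the type is
 * pruned from flush_entries (start/stride zeroed) and needs no flush;
 * any element still held keeps the type in flush_entries and forces
 * the function to return 1.
 */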

/**
 * Helper function used to generate an error log for the SRAM types
 * that need to be flushed. The types should have been cleaned up
 * ahead of invoking tf_close_session.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] sram_entries
 *   SRAM Resource database holding elements to be flushed
 */
static void
tf_rm_log_sram_flush(enum tf_dir dir,
		     struct tf_rm_entry *sram_entries)
{
	int i;

	/* Walk the sram flush array and log the types that weren't
	 * cleaned up.
	 */
	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
		if (sram_entries[i].stride != 0)
			PMD_DRV_LOG(ERR,
				    "%s: %s was not cleaned up\n",
				    tf_dir_2_str(dir),
				    tf_hcapi_sram_2_str(i));
	}
}

void
tf_rm_init(struct tf *tfp)
{
	struct tf_session *tfs =
		(struct tf_session *)(tfp->session->core_data);

	/* This version is host specific and should be checked against
	 * when attaching as there is no guarantee that a secondary
	 * would run from the same image version.
	 */
	tfs->ver.major = TF_SESSION_VER_MAJOR;
	tfs->ver.minor = TF_SESSION_VER_MINOR;
	tfs->ver.update = TF_SESSION_VER_UPDATE;

	tfs->session_id.id = 0;
	tfs->ref_count = 0;

	/* Initialization of Table Scopes */
	/* ll_init(&tfs->tbl_scope_ll); */

	/* Initialization of HW and SRAM resource DB */
	memset(&tfs->resc, 0, sizeof(struct tf_rm_db));

	/* Initialization of HW Resource Pools */
	ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
	ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);

	/* Initialization of SRAM Resource Pools
	 * These pools are set to the TFLIB defined MAX sizes, not
	 * AFM's HW max, so as to limit the memory consumption.
	 */
	ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
		TF_RSVD_SRAM_FULL_ACTION_RX);
	ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
		TF_RSVD_SRAM_FULL_ACTION_TX);
	/* Only Multicast Group on RX is supported */
	ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
		TF_RSVD_SRAM_MCG_RX);
	ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
		TF_RSVD_SRAM_ENCAP_8B_RX);
	ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
		TF_RSVD_SRAM_ENCAP_8B_TX);
	ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
		TF_RSVD_SRAM_ENCAP_16B_RX);
	ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
		TF_RSVD_SRAM_ENCAP_16B_TX);
	/* Only Encap 64B on TX is supported */
	ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
		TF_RSVD_SRAM_ENCAP_64B_TX);
	ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
		TF_RSVD_SRAM_SP_SMAC_RX);
	ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
		TF_RSVD_SRAM_SP_SMAC_TX);
	/* Only SP SMAC IPv4 on TX is supported */
	ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
		TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
	/* Only SP SMAC IPv6 on TX is supported */
	ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
		TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
	ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
		TF_RSVD_SRAM_COUNTER_64B_RX);
	ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
		TF_RSVD_SRAM_COUNTER_64B_TX);
	ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
		TF_RSVD_SRAM_NAT_SPORT_RX);
	ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
		TF_RSVD_SRAM_NAT_SPORT_TX);
	ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
		TF_RSVD_SRAM_NAT_DPORT_RX);
	ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
		TF_RSVD_SRAM_NAT_DPORT_TX);
	ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
		TF_RSVD_SRAM_NAT_S_IPV4_RX);
	ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
		TF_RSVD_SRAM_NAT_S_IPV4_TX);
	ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
		TF_RSVD_SRAM_NAT_D_IPV4_RX);
	ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
		TF_RSVD_SRAM_NAT_D_IPV4_TX);

	/* Initialization of pools local to TF Core */
	ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
	ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
}
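
/*
 * Illustrative sketch (not part of the driver): right after tf_rm_init()
 * every pool is fully free, so its free count equals the reserved size
 * passed to ba_init(). The check below demonstrates that invariant for
 * one pool. Hypothetical code, guarded by the made-up TF_RM_EXAMPLES
 * flag; it reuses only the pool members and bitalloc calls seen above.
 */
#ifdef TF_RM_EXAMPLES
static void
tf_rm_example_verify_init(struct tf *tfp)
{
	struct tf_session *tfs =
		(struct tf_session *)(tfp->session->core_data);

	/* ba_init() marks every entry free, so the free count matches
	 * the reserved pool size until the first allocation.
	 */
	if (ba_free_count(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX) !=
	    TF_RSVD_SRAM_FULL_ACTION_RX)
		PMD_DRV_LOG(ERR, "RX full action pool not fully free\n");
}
#endif /* TF_RM_EXAMPLES */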

int
tf_rm_allocate_validate(struct tf *tfp)
{
	int rc;
	int i;

	for (i = 0; i < TF_DIR_MAX; i++) {
		rc = tf_rm_allocate_validate_hw(tfp, i);
		if (rc)
			return rc;
		rc = tf_rm_allocate_validate_sram(tfp, i);
		if (rc)
			return rc;
	}

	/* With both HW and SRAM allocated and validated we can
	 * 'scrub' the reservation on the pools.
	 */
	tf_rm_reserve_hw(tfp);
	tf_rm_reserve_sram(tfp);

	return rc;
}
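
/*
 * Illustrative sketch (not part of the driver): a minimal view of the
 * expected call order during session open. tf_rm_init() seeds the
 * session-local pools, then tf_rm_allocate_validate() negotiates the HW
 * and SRAM reservations with firmware for both directions. Hypothetical
 * code, guarded by the made-up TF_RM_EXAMPLES flag; the log string is
 * illustrative only.
 */
#ifdef TF_RM_EXAMPLES
static int
tf_rm_example_open_flow(struct tf *tfp)
{
	int rc;

	/* Seed versions, ids and bitalloc pools before talking to FW */
	tf_rm_init(tfp);

	/* Reserve and validate resources for RX and TX */
	rc = tf_rm_allocate_validate(tfp);
	if (rc)
		PMD_DRV_LOG(ERR, "RM allocate/validate failed: %d\n", rc);

	return rc;
}
#endif /* TF_RM_EXAMPLES */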

int
tf_rm_close(struct tf *tfp)
{
	int rc;
	int rc_close = 0;
	int i;
	struct tf_rm_entry *hw_entries;
	struct tf_rm_entry *sram_entries;
	struct tf_rm_entry *sram_flush_entries;
	struct tf_session *tfs =
		(struct tf_session *)(tfp->session->core_data);

	struct tf_rm_db flush_resc = tfs->resc;

	/* On close it is assumed that the session has already cleaned
	 * up all its resources, individually, while destroying its
	 * flows. No checking is performed thus the behavior is as
	 * follows.
	 *
	 * Session RM will signal FW to release session resources. FW
	 * will perform invalidation of all the allocated entries
	 * (this assures any outstanding resources have been cleared),
	 * then free the FW RM instance.
	 *
	 * Session will then be freed by tf_close_session() thus there
	 * is no need to clean each resource pool as the whole session
	 * is going away.
	 */

	for (i = 0; i < TF_DIR_MAX; i++) {
		if (i == TF_DIR_RX) {
			hw_entries = tfs->resc.rx.hw_entry;
			sram_entries = tfs->resc.rx.sram_entry;
			sram_flush_entries = flush_resc.rx.sram_entry;
		} else {
			hw_entries = tfs->resc.tx.hw_entry;
			sram_entries = tfs->resc.tx.sram_entry;
			sram_flush_entries = flush_resc.tx.sram_entry;
		}

		/* Check for any SRAM resources that were not previously
		 * freed and flush if required.
		 */
		rc = tf_rm_sram_to_flush(tfs,
					 i,
					 sram_entries,
					 sram_flush_entries);
		if (rc) {
			rc_close = -ENOTEMPTY;
			/* Log error */
			PMD_DRV_LOG(ERR,
				    "%s, lingering SRAM resources\n",
				    tf_dir_2_str(i));

			/* Log the entries to be flushed */
			tf_rm_log_sram_flush(i, sram_flush_entries);

			rc = tf_msg_session_sram_resc_flush(tfp,
							    i,
							    sram_flush_entries);
			if (rc) {
				rc_close = rc;
				/* Log error */
				PMD_DRV_LOG(ERR,
					    "%s, SRAM flush failed\n",
					    tf_dir_2_str(i));
			}
		}

		rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
		if (rc) {
			rc_close = rc;
			/* Log error */
			PMD_DRV_LOG(ERR,
				    "%s, HW free failed\n",
				    tf_dir_2_str(i));
		}

		rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
		if (rc) {
			rc_close = rc;
			/* Log error */
			PMD_DRV_LOG(ERR,
				    "%s, SRAM free failed\n",
				    tf_dir_2_str(i));
		}
	}

	return rc_close;
}
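
/*
 * Illustrative sketch (not part of the driver): how a caller such as
 * tf_close_session() might interpret the tf_rm_close() return value.
 * Per the logic above, -ENOTEMPTY reports lingering resources that had
 * to be flushed, while any other non-zero value is a firmware flush or
 * free failure. Hypothetical code, guarded by the made-up
 * TF_RM_EXAMPLES flag.
 */
#ifdef TF_RM_EXAMPLES
static void
tf_rm_example_close(struct tf *tfp)
{
	int rc = tf_rm_close(tfp);

	if (rc == -ENOTEMPTY)
		PMD_DRV_LOG(ERR,
			    "Session closed with lingering resources\n");
	else if (rc)
		PMD_DRV_LOG(ERR, "RM close failed: %d\n", rc);
}
#endif /* TF_RM_EXAMPLES */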

int
tf_rm_convert_tbl_type(enum tf_tbl_type type,
		       uint32_t *hcapi_type)
{
	int rc = 0;

	switch (type) {
	case TF_TBL_TYPE_FULL_ACT_RECORD:
		*hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
		break;
	case TF_TBL_TYPE_MCAST_GROUPS:
		*hcapi_type = TF_RESC_TYPE_SRAM_MCG;
		break;
	case TF_TBL_TYPE_ACT_ENCAP_8B:
		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
		break;
	case TF_TBL_TYPE_ACT_ENCAP_16B:
		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
		break;
	case TF_TBL_TYPE_ACT_ENCAP_64B:
		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC:
		*hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
		*hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
		*hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
		break;
	case TF_TBL_TYPE_ACT_STATS_64:
		*hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
		break;
	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
		break;
	case TF_TBL_TYPE_ACT_MODIFY_DPORT:
		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
		break;
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
		break;
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
		break;
	case TF_TBL_TYPE_METER_PROF:
		*hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
		break;
	case TF_TBL_TYPE_METER_INST:
		*hcapi_type = TF_RESC_TYPE_HW_METER_INST;
		break;
	case TF_TBL_TYPE_MIRROR_CONFIG:
		*hcapi_type = TF_RESC_TYPE_HW_MIRROR;
		break;
	case TF_TBL_TYPE_UPAR:
		*hcapi_type = TF_RESC_TYPE_HW_UPAR;
		break;
	case TF_TBL_TYPE_EPOCH0:
		*hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
		break;
	case TF_TBL_TYPE_EPOCH1:
		*hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
		break;
	case TF_TBL_TYPE_METADATA:
		*hcapi_type = TF_RESC_TYPE_HW_METADATA;
		break;
	case TF_TBL_TYPE_CT_STATE:
		*hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
		break;
	case TF_TBL_TYPE_RANGE_PROF:
		*hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
		break;
	case TF_TBL_TYPE_RANGE_ENTRY:
		*hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
		break;
	case TF_TBL_TYPE_LAG:
		*hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
		break;
	/* Not yet supported */
	case TF_TBL_TYPE_ACT_ENCAP_32B:
	case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
	case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
	case TF_TBL_TYPE_VNIC_SVIF:
	case TF_TBL_TYPE_EXT:	/* No pools for this type */
	case TF_TBL_TYPE_EXT_0:	/* No pools for this type */
	default:
		*hcapi_type = -1;
		rc = -EOPNOTSUPP;
	}

	return rc;
}
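
/*
 * Illustrative sketch (not part of the driver): mapping a TruFlow table
 * type onto its HCAPI resource type, and how an unsupported type is
 * rejected. Hypothetical code, guarded by the made-up TF_RM_EXAMPLES
 * flag; it calls only tf_rm_convert_tbl_type() as defined above.
 */
#ifdef TF_RM_EXAMPLES
static int
tf_rm_example_lookup(void)
{
	uint32_t hcapi_type;
	int rc;

	/* Supported: resolves to TF_RESC_TYPE_SRAM_FULL_ACTION */
	rc = tf_rm_convert_tbl_type(TF_TBL_TYPE_FULL_ACT_RECORD,
				    &hcapi_type);
	if (rc)
		return rc;

	/* Unsupported: returns -EOPNOTSUPP and sets hcapi_type to -1 */
	rc = tf_rm_convert_tbl_type(TF_TBL_TYPE_ACT_ENCAP_32B,
				    &hcapi_type);
	return rc == -EOPNOTSUPP ? 0 : -1;
}
#endif /* TF_RM_EXAMPLES */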

int
tf_rm_convert_index(struct tf_session *tfs,
		    enum tf_dir dir,
		    enum tf_tbl_type type,
		    enum tf_rm_convert_type c_type,
		    uint32_t index,
		    uint32_t *convert_index)
{
	int rc;
	struct tf_rm_resc *resc;
	uint32_t hcapi_type;
	uint32_t base_index;

	if (dir == TF_DIR_RX)
		resc = &tfs->resc.rx;
	else if (dir == TF_DIR_TX)
		resc = &tfs->resc.tx;
	else
		return -EOPNOTSUPP;

	rc = tf_rm_convert_tbl_type(type, &hcapi_type);
	if (rc)
		return rc;

	switch (type) {
	case TF_TBL_TYPE_FULL_ACT_RECORD:
	case TF_TBL_TYPE_MCAST_GROUPS:
	case TF_TBL_TYPE_ACT_ENCAP_8B:
	case TF_TBL_TYPE_ACT_ENCAP_16B:
	case TF_TBL_TYPE_ACT_ENCAP_32B:
	case TF_TBL_TYPE_ACT_ENCAP_64B:
	case TF_TBL_TYPE_ACT_SP_SMAC:
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
	case TF_TBL_TYPE_ACT_STATS_64:
	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
	case TF_TBL_TYPE_ACT_MODIFY_DPORT:
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
		base_index = resc->sram_entry[hcapi_type].start;
		break;
	case TF_TBL_TYPE_MIRROR_CONFIG:
	case TF_TBL_TYPE_METER_PROF:
	case TF_TBL_TYPE_METER_INST:
	case TF_TBL_TYPE_UPAR:
	case TF_TBL_TYPE_EPOCH0:
	case TF_TBL_TYPE_EPOCH1:
	case TF_TBL_TYPE_METADATA:
	case TF_TBL_TYPE_CT_STATE:
	case TF_TBL_TYPE_RANGE_PROF:
	case TF_TBL_TYPE_RANGE_ENTRY:
	case TF_TBL_TYPE_LAG:
		base_index = resc->hw_entry[hcapi_type].start;
		break;
	/* Not yet supported */
	case TF_TBL_TYPE_VNIC_SVIF:
	case TF_TBL_TYPE_EXT:	/* No pools for this type */
	case TF_TBL_TYPE_EXT_0:	/* No pools for this type */
	default:
		return -EOPNOTSUPP;
	}

	switch (c_type) {
	case TF_RM_CONVERT_RM_BASE:
		*convert_index = index - base_index;
		break;
	case TF_RM_CONVERT_ADD_BASE:
		*convert_index = index + base_index;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
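
/*
 * Illustrative sketch (not part of the driver): the two conversion types
 * are inverses. TF_RM_CONVERT_ADD_BASE turns a pool-relative index into
 * a device-absolute one by adding the reserved range's start;
 * TF_RM_CONVERT_RM_BASE subtracts it again. Hypothetical code, guarded
 * by the made-up TF_RM_EXAMPLES flag; index 5 is an arbitrary example.
 */
#ifdef TF_RM_EXAMPLES
static int
tf_rm_example_round_trip(struct tf_session *tfs)
{
	uint32_t abs_index, pool_index;
	int rc;

	/* Pool-relative index 5 -> device-absolute index */
	rc = tf_rm_convert_index(tfs, TF_DIR_TX,
				 TF_TBL_TYPE_FULL_ACT_RECORD,
				 TF_RM_CONVERT_ADD_BASE,
				 5, &abs_index);
	if (rc)
		return rc;

	/* ...and back: pool_index ends up as 5 again */
	rc = tf_rm_convert_index(tfs, TF_DIR_TX,
				 TF_TBL_TYPE_FULL_ACT_RECORD,
				 TF_RM_CONVERT_RM_BASE,
				 abs_index, &pool_index);
	return rc;
}
#endif /* TF_RM_EXAMPLES */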