net/bnxt: add resource manager
[dpdk.git] / drivers / net / bnxt / tf_core / tf_rm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include <string.h>
7
8 #include <rte_common.h>
9
10 #include "tf_rm.h"
11 #include "tf_core.h"
12 #include "tf_session.h"
13 #include "tf_resources.h"
14 #include "tf_msg.h"
15 #include "bnxt.h"
16
/**
 * Internal macro to verify that the HW resource count reported by
 * firmware for one HCAPI type matches the statically reserved count
 * for the requested direction. On mismatch, the type's bit is set in
 * the caller supplied error flag.
 *
 * Parameters:
 *   struct tf_rm_hw_query    *hquery      - Pointer to the hw query result
 *   enum tf_dir               dir         - Direction to process
 *   enum tf_resource_type_hw  hcapi_type  - HCAPI type, the index element
 *                                           in the hw query structure
 *   define                    def_value   - Define value to check against
 *   uint32_t                 *eflag       - Result of the check
 */
#define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do {  \
	uint32_t _rsvd = ((dir) == TF_DIR_RX) ?                               \
		(def_value ## _RX) : (def_value ## _TX);                      \
	if ((hquery)->hw_query[(hcapi_type)].max != _rsvd)                    \
		*(eflag) |= 1 << (hcapi_type);                                \
} while (0)
38
/**
 * Internal macro to verify that the SRAM resource count reported by
 * firmware for one HCAPI type matches the statically reserved count
 * for the requested direction. On mismatch, the type's bit is set in
 * the caller supplied error flag.
 *
 * Parameters:
 *   struct tf_rm_sram_query   *squery      - Pointer to the sram query result
 *   enum tf_dir                dir         - Direction to process
 *   enum tf_resource_type_sram hcapi_type  - HCAPI type, the index element
 *                                            in the hw query structure
 *   define                     def_value   - Define value to check against
 *   uint32_t                  *eflag       - Result of the check
 */
#define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
	uint32_t _rsvd = ((dir) == TF_DIR_RX) ?                                \
		(def_value ## _RX) : (def_value ## _TX);                       \
	if ((squery)->sram_query[(hcapi_type)].max != _rsvd)                   \
		*(eflag) |= 1 << (hcapi_type);                                 \
} while (0)
60
/**
 * Internal macro to resolve a reserved resource define name to its
 * direction specific (RX or TX) variant and assign it to the given
 * l-value.
 *
 * Parameters:
 *   enum tf_dir    dir         - Direction to process
 *   string         type        - Type name to append RX or TX to
 *   string         dtype       - Direction specific type
 */
#define TF_RESC_RSVD(dir, type, dtype) do {             \
		(dtype) = ((dir) == TF_DIR_RX) ?        \
			(type ## _RX) : (type ## _TX);  \
	} while (0)
78
79 const char
80 *tf_dir_2_str(enum tf_dir dir)
81 {
82         switch (dir) {
83         case TF_DIR_RX:
84                 return "RX";
85         case TF_DIR_TX:
86                 return "TX";
87         default:
88                 return "Invalid direction";
89         }
90 }
91
92 const char
93 *tf_ident_2_str(enum tf_identifier_type id_type)
94 {
95         switch (id_type) {
96         case TF_IDENT_TYPE_L2_CTXT:
97                 return "l2_ctxt_remap";
98         case TF_IDENT_TYPE_PROF_FUNC:
99                 return "prof_func";
100         case TF_IDENT_TYPE_WC_PROF:
101                 return "wc_prof";
102         case TF_IDENT_TYPE_EM_PROF:
103                 return "em_prof";
104         case TF_IDENT_TYPE_L2_FUNC:
105                 return "l2_func";
106         default:
107                 return "Invalid identifier";
108         }
109 }
110
111 const char
112 *tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type)
113 {
114         switch (tcam_type) {
115         case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
116                 return "l2_ctxt_tcam";
117         case TF_TCAM_TBL_TYPE_PROF_TCAM:
118                 return "prof_tcam";
119         case TF_TCAM_TBL_TYPE_WC_TCAM:
120                 return "wc_tcam";
121         case TF_TCAM_TBL_TYPE_VEB_TCAM:
122                 return "veb_tcam";
123         case TF_TCAM_TBL_TYPE_SP_TCAM:
124                 return "sp_tcam";
125         case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
126                 return "ct_rule_tcam";
127         default:
128                 return "Invalid tcam table type";
129         }
130 }
131
132 const char
133 *tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)
134 {
135         switch (hw_type) {
136         case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
137                 return "L2 ctxt tcam";
138         case TF_RESC_TYPE_HW_PROF_FUNC:
139                 return "Profile Func";
140         case TF_RESC_TYPE_HW_PROF_TCAM:
141                 return "Profile tcam";
142         case TF_RESC_TYPE_HW_EM_PROF_ID:
143                 return "EM profile id";
144         case TF_RESC_TYPE_HW_EM_REC:
145                 return "EM record";
146         case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
147                 return "WC tcam profile id";
148         case TF_RESC_TYPE_HW_WC_TCAM:
149                 return "WC tcam";
150         case TF_RESC_TYPE_HW_METER_PROF:
151                 return "Meter profile";
152         case TF_RESC_TYPE_HW_METER_INST:
153                 return "Meter instance";
154         case TF_RESC_TYPE_HW_MIRROR:
155                 return "Mirror";
156         case TF_RESC_TYPE_HW_UPAR:
157                 return "UPAR";
158         case TF_RESC_TYPE_HW_SP_TCAM:
159                 return "Source properties tcam";
160         case TF_RESC_TYPE_HW_L2_FUNC:
161                 return "L2 Function";
162         case TF_RESC_TYPE_HW_FKB:
163                 return "FKB";
164         case TF_RESC_TYPE_HW_TBL_SCOPE:
165                 return "Table scope";
166         case TF_RESC_TYPE_HW_EPOCH0:
167                 return "EPOCH0";
168         case TF_RESC_TYPE_HW_EPOCH1:
169                 return "EPOCH1";
170         case TF_RESC_TYPE_HW_METADATA:
171                 return "Metadata";
172         case TF_RESC_TYPE_HW_CT_STATE:
173                 return "Connection tracking state";
174         case TF_RESC_TYPE_HW_RANGE_PROF:
175                 return "Range profile";
176         case TF_RESC_TYPE_HW_RANGE_ENTRY:
177                 return "Range entry";
178         case TF_RESC_TYPE_HW_LAG_ENTRY:
179                 return "LAG";
180         default:
181                 return "Invalid identifier";
182         }
183 }
184
185 const char
186 *tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
187 {
188         switch (sram_type) {
189         case TF_RESC_TYPE_SRAM_FULL_ACTION:
190                 return "Full action";
191         case TF_RESC_TYPE_SRAM_MCG:
192                 return "MCG";
193         case TF_RESC_TYPE_SRAM_ENCAP_8B:
194                 return "Encap 8B";
195         case TF_RESC_TYPE_SRAM_ENCAP_16B:
196                 return "Encap 16B";
197         case TF_RESC_TYPE_SRAM_ENCAP_64B:
198                 return "Encap 64B";
199         case TF_RESC_TYPE_SRAM_SP_SMAC:
200                 return "Source properties SMAC";
201         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
202                 return "Source properties SMAC IPv4";
203         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
204                 return "Source properties IPv6";
205         case TF_RESC_TYPE_SRAM_COUNTER_64B:
206                 return "Counter 64B";
207         case TF_RESC_TYPE_SRAM_NAT_SPORT:
208                 return "NAT source port";
209         case TF_RESC_TYPE_SRAM_NAT_DPORT:
210                 return "NAT destination port";
211         case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
212                 return "NAT source IPv4";
213         case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
214                 return "NAT destination IPv4";
215         default:
216                 return "Invalid identifier";
217         }
218 }
219
220 /**
221  * Helper function to perform a HW HCAPI resource type lookup against
222  * the reserved value of the same static type.
223  *
224  * Returns:
225  *   -EOPNOTSUPP - Reserved resource type not supported
226  *   Value       - Integer value of the reserved value for the requested type
227  */
228 static int
229 tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)
230 {
231         uint32_t value = -EOPNOTSUPP;
232
233         switch (index) {
234         case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
235                 TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);
236                 break;
237         case TF_RESC_TYPE_HW_PROF_FUNC:
238                 TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);
239                 break;
240         case TF_RESC_TYPE_HW_PROF_TCAM:
241                 TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);
242                 break;
243         case TF_RESC_TYPE_HW_EM_PROF_ID:
244                 TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value);
245                 break;
246         case TF_RESC_TYPE_HW_EM_REC:
247                 TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);
248                 break;
249         case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
250                 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);
251                 break;
252         case TF_RESC_TYPE_HW_WC_TCAM:
253                 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);
254                 break;
255         case TF_RESC_TYPE_HW_METER_PROF:
256                 TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);
257                 break;
258         case TF_RESC_TYPE_HW_METER_INST:
259                 TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);
260                 break;
261         case TF_RESC_TYPE_HW_MIRROR:
262                 TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);
263                 break;
264         case TF_RESC_TYPE_HW_UPAR:
265                 TF_RESC_RSVD(dir, TF_RSVD_UPAR, value);
266                 break;
267         case TF_RESC_TYPE_HW_SP_TCAM:
268                 TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);
269                 break;
270         case TF_RESC_TYPE_HW_L2_FUNC:
271                 TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);
272                 break;
273         case TF_RESC_TYPE_HW_FKB:
274                 TF_RESC_RSVD(dir, TF_RSVD_FKB, value);
275                 break;
276         case TF_RESC_TYPE_HW_TBL_SCOPE:
277                 TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);
278                 break;
279         case TF_RESC_TYPE_HW_EPOCH0:
280                 TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);
281                 break;
282         case TF_RESC_TYPE_HW_EPOCH1:
283                 TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);
284                 break;
285         case TF_RESC_TYPE_HW_METADATA:
286                 TF_RESC_RSVD(dir, TF_RSVD_METADATA, value);
287                 break;
288         case TF_RESC_TYPE_HW_CT_STATE:
289                 TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);
290                 break;
291         case TF_RESC_TYPE_HW_RANGE_PROF:
292                 TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);
293                 break;
294         case TF_RESC_TYPE_HW_RANGE_ENTRY:
295                 TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);
296                 break;
297         case TF_RESC_TYPE_HW_LAG_ENTRY:
298                 TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);
299                 break;
300         default:
301                 break;
302         }
303
304         return value;
305 }
306
307 /**
308  * Helper function to perform a SRAM HCAPI resource type lookup
309  * against the reserved value of the same static type.
310  *
311  * Returns:
312  *   -EOPNOTSUPP - Reserved resource type not supported
313  *   Value       - Integer value of the reserved value for the requested type
314  */
315 static int
316 tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
317 {
318         uint32_t value = -EOPNOTSUPP;
319
320         switch (index) {
321         case TF_RESC_TYPE_SRAM_FULL_ACTION:
322                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
323                 break;
324         case TF_RESC_TYPE_SRAM_MCG:
325                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
326                 break;
327         case TF_RESC_TYPE_SRAM_ENCAP_8B:
328                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
329                 break;
330         case TF_RESC_TYPE_SRAM_ENCAP_16B:
331                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
332                 break;
333         case TF_RESC_TYPE_SRAM_ENCAP_64B:
334                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
335                 break;
336         case TF_RESC_TYPE_SRAM_SP_SMAC:
337                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
338                 break;
339         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
340                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
341                 break;
342         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
343                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
344                 break;
345         case TF_RESC_TYPE_SRAM_COUNTER_64B:
346                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
347                 break;
348         case TF_RESC_TYPE_SRAM_NAT_SPORT:
349                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
350                 break;
351         case TF_RESC_TYPE_SRAM_NAT_DPORT:
352                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
353                 break;
354         case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
355                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
356                 break;
357         case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
358                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
359                 break;
360         default:
361                 break;
362         }
363
364         return value;
365 }
366
367 /**
368  * Helper function to print all the HW resource qcaps errors reported
369  * in the error_flag.
370  *
371  * [in] dir
372  *   Receive or transmit direction
373  *
374  * [in] error_flag
375  *   Pointer to the hw error flags created at time of the query check
376  */
377 static void
378 tf_rm_print_hw_qcaps_error(enum tf_dir dir,
379                            struct tf_rm_hw_query *hw_query,
380                            uint32_t *error_flag)
381 {
382         int i;
383
384         PMD_DRV_LOG(ERR, "QCAPS errors HW\n");
385         PMD_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
386         PMD_DRV_LOG(ERR, "  Elements:\n");
387
388         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
389                 if (*error_flag & 1 << i)
390                         PMD_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
391                                     tf_hcapi_hw_2_str(i),
392                                     hw_query->hw_query[i].max,
393                                     tf_rm_rsvd_hw_value(dir, i));
394         }
395 }
396
397 /**
398  * Helper function to print all the SRAM resource qcaps errors
399  * reported in the error_flag.
400  *
401  * [in] dir
402  *   Receive or transmit direction
403  *
404  * [in] error_flag
405  *   Pointer to the sram error flags created at time of the query check
406  */
407 static void
408 tf_rm_print_sram_qcaps_error(enum tf_dir dir,
409                              struct tf_rm_sram_query *sram_query,
410                              uint32_t *error_flag)
411 {
412         int i;
413
414         PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n");
415         PMD_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
416         PMD_DRV_LOG(ERR, "  Elements:\n");
417
418         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
419                 if (*error_flag & 1 << i)
420                         PMD_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
421                                     tf_hcapi_sram_2_str(i),
422                                     sram_query->sram_query[i].max,
423                                     tf_rm_rsvd_sram_value(dir, i));
424         }
425 }
426
427 /**
428  * Performs a HW resource check between what firmware capability
429  * reports and what the core expects is available.
430  *
431  * Firmware performs the resource carving at AFM init time and the
432  * resource capability is reported in the TruFlow qcaps msg.
433  *
434  * [in] query
435  *   Pointer to HW Query data structure. Query holds what the firmware
436  *   offers of the HW resources.
437  *
438  * [in] dir
439  *   Receive or transmit direction
440  *
441  * [in/out] error_flag
442  *   Pointer to a bit array indicating the error of a single HCAPI
443  *   resource type. When a bit is set to 1, the HCAPI resource type
444  *   failed static allocation.
445  *
446  * Returns:
447  *  0       - Success
448  *  -ENOMEM - Failure on one of the allocated resources. Check the
449  *            error_flag for what types are flagged errored.
450  */
451 static int
452 tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
453                             enum tf_dir dir,
454                             uint32_t *error_flag)
455 {
456         *error_flag = 0;
457
458         TF_RM_CHECK_HW_ALLOC(query,
459                              dir,
460                              TF_RESC_TYPE_HW_L2_CTXT_TCAM,
461                              TF_RSVD_L2_CTXT_TCAM,
462                              error_flag);
463
464         TF_RM_CHECK_HW_ALLOC(query,
465                              dir,
466                              TF_RESC_TYPE_HW_PROF_FUNC,
467                              TF_RSVD_PROF_FUNC,
468                              error_flag);
469
470         TF_RM_CHECK_HW_ALLOC(query,
471                              dir,
472                              TF_RESC_TYPE_HW_PROF_TCAM,
473                              TF_RSVD_PROF_TCAM,
474                              error_flag);
475
476         TF_RM_CHECK_HW_ALLOC(query,
477                              dir,
478                              TF_RESC_TYPE_HW_EM_PROF_ID,
479                              TF_RSVD_EM_PROF_ID,
480                              error_flag);
481
482         TF_RM_CHECK_HW_ALLOC(query,
483                              dir,
484                              TF_RESC_TYPE_HW_EM_REC,
485                              TF_RSVD_EM_REC,
486                              error_flag);
487
488         TF_RM_CHECK_HW_ALLOC(query,
489                              dir,
490                              TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,
491                              TF_RSVD_WC_TCAM_PROF_ID,
492                              error_flag);
493
494         TF_RM_CHECK_HW_ALLOC(query,
495                              dir,
496                              TF_RESC_TYPE_HW_WC_TCAM,
497                              TF_RSVD_WC_TCAM,
498                              error_flag);
499
500         TF_RM_CHECK_HW_ALLOC(query,
501                              dir,
502                              TF_RESC_TYPE_HW_METER_PROF,
503                              TF_RSVD_METER_PROF,
504                              error_flag);
505
506         TF_RM_CHECK_HW_ALLOC(query,
507                              dir,
508                              TF_RESC_TYPE_HW_METER_INST,
509                              TF_RSVD_METER_INST,
510                              error_flag);
511
512         TF_RM_CHECK_HW_ALLOC(query,
513                              dir,
514                              TF_RESC_TYPE_HW_MIRROR,
515                              TF_RSVD_MIRROR,
516                              error_flag);
517
518         TF_RM_CHECK_HW_ALLOC(query,
519                              dir,
520                              TF_RESC_TYPE_HW_UPAR,
521                              TF_RSVD_UPAR,
522                              error_flag);
523
524         TF_RM_CHECK_HW_ALLOC(query,
525                              dir,
526                              TF_RESC_TYPE_HW_SP_TCAM,
527                              TF_RSVD_SP_TCAM,
528                              error_flag);
529
530         TF_RM_CHECK_HW_ALLOC(query,
531                              dir,
532                              TF_RESC_TYPE_HW_L2_FUNC,
533                              TF_RSVD_L2_FUNC,
534                              error_flag);
535
536         TF_RM_CHECK_HW_ALLOC(query,
537                              dir,
538                              TF_RESC_TYPE_HW_FKB,
539                              TF_RSVD_FKB,
540                              error_flag);
541
542         TF_RM_CHECK_HW_ALLOC(query,
543                              dir,
544                              TF_RESC_TYPE_HW_TBL_SCOPE,
545                              TF_RSVD_TBL_SCOPE,
546                              error_flag);
547
548         TF_RM_CHECK_HW_ALLOC(query,
549                              dir,
550                              TF_RESC_TYPE_HW_EPOCH0,
551                              TF_RSVD_EPOCH0,
552                              error_flag);
553
554         TF_RM_CHECK_HW_ALLOC(query,
555                              dir,
556                              TF_RESC_TYPE_HW_EPOCH1,
557                              TF_RSVD_EPOCH1,
558                              error_flag);
559
560         TF_RM_CHECK_HW_ALLOC(query,
561                              dir,
562                              TF_RESC_TYPE_HW_METADATA,
563                              TF_RSVD_METADATA,
564                              error_flag);
565
566         TF_RM_CHECK_HW_ALLOC(query,
567                              dir,
568                              TF_RESC_TYPE_HW_CT_STATE,
569                              TF_RSVD_CT_STATE,
570                              error_flag);
571
572         TF_RM_CHECK_HW_ALLOC(query,
573                              dir,
574                              TF_RESC_TYPE_HW_RANGE_PROF,
575                              TF_RSVD_RANGE_PROF,
576                              error_flag);
577
578         TF_RM_CHECK_HW_ALLOC(query,
579                              dir,
580                              TF_RESC_TYPE_HW_RANGE_ENTRY,
581                              TF_RSVD_RANGE_ENTRY,
582                              error_flag);
583
584         TF_RM_CHECK_HW_ALLOC(query,
585                              dir,
586                              TF_RESC_TYPE_HW_LAG_ENTRY,
587                              TF_RSVD_LAG_ENTRY,
588                              error_flag);
589
590         if (*error_flag != 0)
591                 return -ENOMEM;
592
593         return 0;
594 }
595
596 /**
597  * Performs a SRAM resource check between what firmware capability
598  * reports and what the core expects is available.
599  *
600  * Firmware performs the resource carving at AFM init time and the
601  * resource capability is reported in the TruFlow qcaps msg.
602  *
603  * [in] query
604  *   Pointer to SRAM Query data structure. Query holds what the
605  *   firmware offers of the SRAM resources.
606  *
607  * [in] dir
608  *   Receive or transmit direction
609  *
610  * [in/out] error_flag
611  *   Pointer to a bit array indicating the error of a single HCAPI
612  *   resource type. When a bit is set to 1, the HCAPI resource type
613  *   failed static allocation.
614  *
615  * Returns:
616  *  0       - Success
617  *  -ENOMEM - Failure on one of the allocated resources. Check the
618  *            error_flag for what types are flagged errored.
619  */
620 static int
621 tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
622                               enum tf_dir dir,
623                               uint32_t *error_flag)
624 {
625         *error_flag = 0;
626
627         TF_RM_CHECK_SRAM_ALLOC(query,
628                                dir,
629                                TF_RESC_TYPE_SRAM_FULL_ACTION,
630                                TF_RSVD_SRAM_FULL_ACTION,
631                                error_flag);
632
633         TF_RM_CHECK_SRAM_ALLOC(query,
634                                dir,
635                                TF_RESC_TYPE_SRAM_MCG,
636                                TF_RSVD_SRAM_MCG,
637                                error_flag);
638
639         TF_RM_CHECK_SRAM_ALLOC(query,
640                                dir,
641                                TF_RESC_TYPE_SRAM_ENCAP_8B,
642                                TF_RSVD_SRAM_ENCAP_8B,
643                                error_flag);
644
645         TF_RM_CHECK_SRAM_ALLOC(query,
646                                dir,
647                                TF_RESC_TYPE_SRAM_ENCAP_16B,
648                                TF_RSVD_SRAM_ENCAP_16B,
649                                error_flag);
650
651         TF_RM_CHECK_SRAM_ALLOC(query,
652                                dir,
653                                TF_RESC_TYPE_SRAM_ENCAP_64B,
654                                TF_RSVD_SRAM_ENCAP_64B,
655                                error_flag);
656
657         TF_RM_CHECK_SRAM_ALLOC(query,
658                                dir,
659                                TF_RESC_TYPE_SRAM_SP_SMAC,
660                                TF_RSVD_SRAM_SP_SMAC,
661                                error_flag);
662
663         TF_RM_CHECK_SRAM_ALLOC(query,
664                                dir,
665                                TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
666                                TF_RSVD_SRAM_SP_SMAC_IPV4,
667                                error_flag);
668
669         TF_RM_CHECK_SRAM_ALLOC(query,
670                                dir,
671                                TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
672                                TF_RSVD_SRAM_SP_SMAC_IPV6,
673                                error_flag);
674
675         TF_RM_CHECK_SRAM_ALLOC(query,
676                                dir,
677                                TF_RESC_TYPE_SRAM_COUNTER_64B,
678                                TF_RSVD_SRAM_COUNTER_64B,
679                                error_flag);
680
681         TF_RM_CHECK_SRAM_ALLOC(query,
682                                dir,
683                                TF_RESC_TYPE_SRAM_NAT_SPORT,
684                                TF_RSVD_SRAM_NAT_SPORT,
685                                error_flag);
686
687         TF_RM_CHECK_SRAM_ALLOC(query,
688                                dir,
689                                TF_RESC_TYPE_SRAM_NAT_DPORT,
690                                TF_RSVD_SRAM_NAT_DPORT,
691                                error_flag);
692
693         TF_RM_CHECK_SRAM_ALLOC(query,
694                                dir,
695                                TF_RESC_TYPE_SRAM_NAT_S_IPV4,
696                                TF_RSVD_SRAM_NAT_S_IPV4,
697                                error_flag);
698
699         TF_RM_CHECK_SRAM_ALLOC(query,
700                                dir,
701                                TF_RESC_TYPE_SRAM_NAT_D_IPV4,
702                                TF_RSVD_SRAM_NAT_D_IPV4,
703                                error_flag);
704
705         if (*error_flag != 0)
706                 return -ENOMEM;
707
708         return 0;
709 }
710
/**
 * Internal function to mark pool entries used.
 *
 * Entries in [rsv_begin, rsv_end] (inclusive; callers pass
 * end = start + stride - 1) are left free for TruFlow, and every
 * other index in [0, max) is marked allocated so it is never handed
 * out. A count of zero means nothing was reserved, in which case the
 * whole pool is marked used.
 *
 * [in] count
 *   Number of reserved entries (stride); 0 means no reservation
 * [in] rsv_begin
 *   First index of the reserved range
 * [in] rsv_end
 *   Last index of the reserved range (inclusive)
 * [in] max
 *   Total number of entries in the pool
 * [in] pool
 *   Pointer to the bit allocator pool to update
 */
static void
tf_rm_reserve_range(uint32_t count,
		    uint32_t rsv_begin,
		    uint32_t rsv_end,
		    uint32_t max,
		    struct bitalloc *pool)
{
	uint32_t i;

	/* If no resources has been requested we mark everything
	 * 'used'
	 */
	if (count == 0) {
		for (i = 0; i < max; i++)
			ba_alloc_index(pool, i);
	} else {
		/* Support 2 main modes
		 * Reserved range starts from bottom up (with
		 * pre-reserved value or not)
		 * - begin = 0 to end xx
		 * - begin = 1 to end xx
		 *
		 * Reserved range starts from top down
		 * - begin = yy to end max
		 */

		/* Bottom up check, start from 0 */
		if (rsv_begin == 0) {
			for (i = rsv_end + 1; i < max; i++)
				ba_alloc_index(pool, i);
		}

		/* Bottom up check, start from 1 or higher OR
		 * Top Down
		 */
		if (rsv_begin >= 1) {
			/* Allocate from 0 until start */
			for (i = 0; i < rsv_begin; i++)
				ba_alloc_index(pool, i);

			/* Skip the reserved range, then mark the
			 * remainder used. Start at rsv_end + 1 so the
			 * last reserved entry stays free, consistent
			 * with the rsv_begin == 0 branch above (the
			 * previous code started at rsv_end, marking
			 * one reserved entry used - off by one).
			 */
			for (i = rsv_end + 1; i < max; i++)
				ba_alloc_index(pool, i);
		}
	}
}
762
763 /**
764  * Internal function to mark all the l2 ctxt allocated that Truflow
765  * does not own.
766  */
767 static void
768 tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
769 {
770         uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
771         uint32_t end = 0;
772
773         /* l2 ctxt rx direction */
774         if (tfs->resc.rx.hw_entry[index].stride > 0)
775                 end = tfs->resc.rx.hw_entry[index].start +
776                         tfs->resc.rx.hw_entry[index].stride - 1;
777
778         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
779                             tfs->resc.rx.hw_entry[index].start,
780                             end,
781                             TF_NUM_L2_CTXT_TCAM,
782                             tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
783
784         /* l2 ctxt tx direction */
785         if (tfs->resc.tx.hw_entry[index].stride > 0)
786                 end = tfs->resc.tx.hw_entry[index].start +
787                         tfs->resc.tx.hw_entry[index].stride - 1;
788
789         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
790                             tfs->resc.tx.hw_entry[index].start,
791                             end,
792                             TF_NUM_L2_CTXT_TCAM,
793                             tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
794 }
795
796 /**
797  * Internal function to mark all the profile tcam and profile func
798  * resources that Truflow does not own.
799  */
800 static void
801 tf_rm_rsvd_prof(struct tf_session *tfs)
802 {
803         uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;
804         uint32_t end = 0;
805
806         /* profile func rx direction */
807         if (tfs->resc.rx.hw_entry[index].stride > 0)
808                 end = tfs->resc.rx.hw_entry[index].start +
809                         tfs->resc.rx.hw_entry[index].stride - 1;
810
811         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
812                             tfs->resc.rx.hw_entry[index].start,
813                             end,
814                             TF_NUM_PROF_FUNC,
815                             tfs->TF_PROF_FUNC_POOL_NAME_RX);
816
817         /* profile func tx direction */
818         if (tfs->resc.tx.hw_entry[index].stride > 0)
819                 end = tfs->resc.tx.hw_entry[index].start +
820                         tfs->resc.tx.hw_entry[index].stride - 1;
821
822         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
823                             tfs->resc.tx.hw_entry[index].start,
824                             end,
825                             TF_NUM_PROF_FUNC,
826                             tfs->TF_PROF_FUNC_POOL_NAME_TX);
827
828         index = TF_RESC_TYPE_HW_PROF_TCAM;
829
830         /* profile tcam rx direction */
831         if (tfs->resc.rx.hw_entry[index].stride > 0)
832                 end = tfs->resc.rx.hw_entry[index].start +
833                         tfs->resc.rx.hw_entry[index].stride - 1;
834
835         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
836                             tfs->resc.rx.hw_entry[index].start,
837                             end,
838                             TF_NUM_PROF_TCAM,
839                             tfs->TF_PROF_TCAM_POOL_NAME_RX);
840
841         /* profile tcam tx direction */
842         if (tfs->resc.tx.hw_entry[index].stride > 0)
843                 end = tfs->resc.tx.hw_entry[index].start +
844                         tfs->resc.tx.hw_entry[index].stride - 1;
845
846         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
847                             tfs->resc.tx.hw_entry[index].start,
848                             end,
849                             TF_NUM_PROF_TCAM,
850                             tfs->TF_PROF_TCAM_POOL_NAME_TX);
851 }
852
853 /**
854  * Internal function to mark all the em profile id allocated that
855  * Truflow does not own.
856  */
857 static void
858 tf_rm_rsvd_em_prof(struct tf_session *tfs)
859 {
860         uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;
861         uint32_t end = 0;
862
863         /* em prof id rx direction */
864         if (tfs->resc.rx.hw_entry[index].stride > 0)
865                 end = tfs->resc.rx.hw_entry[index].start +
866                         tfs->resc.rx.hw_entry[index].stride - 1;
867
868         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
869                             tfs->resc.rx.hw_entry[index].start,
870                             end,
871                             TF_NUM_EM_PROF_ID,
872                             tfs->TF_EM_PROF_ID_POOL_NAME_RX);
873
874         /* em prof id tx direction */
875         if (tfs->resc.tx.hw_entry[index].stride > 0)
876                 end = tfs->resc.tx.hw_entry[index].start +
877                         tfs->resc.tx.hw_entry[index].stride - 1;
878
879         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
880                             tfs->resc.tx.hw_entry[index].start,
881                             end,
882                             TF_NUM_EM_PROF_ID,
883                             tfs->TF_EM_PROF_ID_POOL_NAME_TX);
884 }
885
886 /**
887  * Internal function to mark all the wildcard tcam and profile id
888  * resources that Truflow does not own.
889  */
890 static void
891 tf_rm_rsvd_wc(struct tf_session *tfs)
892 {
893         uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;
894         uint32_t end = 0;
895
896         /* wc profile id rx direction */
897         if (tfs->resc.rx.hw_entry[index].stride > 0)
898                 end = tfs->resc.rx.hw_entry[index].start +
899                         tfs->resc.rx.hw_entry[index].stride - 1;
900
901         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
902                             tfs->resc.rx.hw_entry[index].start,
903                             end,
904                             TF_NUM_WC_PROF_ID,
905                             tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);
906
907         /* wc profile id tx direction */
908         if (tfs->resc.tx.hw_entry[index].stride > 0)
909                 end = tfs->resc.tx.hw_entry[index].start +
910                         tfs->resc.tx.hw_entry[index].stride - 1;
911
912         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
913                             tfs->resc.tx.hw_entry[index].start,
914                             end,
915                             TF_NUM_WC_PROF_ID,
916                             tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);
917
918         index = TF_RESC_TYPE_HW_WC_TCAM;
919
920         /* wc tcam rx direction */
921         if (tfs->resc.rx.hw_entry[index].stride > 0)
922                 end = tfs->resc.rx.hw_entry[index].start +
923                         tfs->resc.rx.hw_entry[index].stride - 1;
924
925         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
926                             tfs->resc.rx.hw_entry[index].start,
927                             end,
928                             TF_NUM_WC_TCAM_ROW,
929                             tfs->TF_WC_TCAM_POOL_NAME_RX);
930
931         /* wc tcam tx direction */
932         if (tfs->resc.tx.hw_entry[index].stride > 0)
933                 end = tfs->resc.tx.hw_entry[index].start +
934                         tfs->resc.tx.hw_entry[index].stride - 1;
935
936         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
937                             tfs->resc.tx.hw_entry[index].start,
938                             end,
939                             TF_NUM_WC_TCAM_ROW,
940                             tfs->TF_WC_TCAM_POOL_NAME_TX);
941 }
942
943 /**
944  * Internal function to mark all the meter resources allocated that
945  * Truflow does not own.
946  */
947 static void
948 tf_rm_rsvd_meter(struct tf_session *tfs)
949 {
950         uint32_t index = TF_RESC_TYPE_HW_METER_PROF;
951         uint32_t end = 0;
952
953         /* meter profiles rx direction */
954         if (tfs->resc.rx.hw_entry[index].stride > 0)
955                 end = tfs->resc.rx.hw_entry[index].start +
956                         tfs->resc.rx.hw_entry[index].stride - 1;
957
958         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
959                             tfs->resc.rx.hw_entry[index].start,
960                             end,
961                             TF_NUM_METER_PROF,
962                             tfs->TF_METER_PROF_POOL_NAME_RX);
963
964         /* meter profiles tx direction */
965         if (tfs->resc.tx.hw_entry[index].stride > 0)
966                 end = tfs->resc.tx.hw_entry[index].start +
967                         tfs->resc.tx.hw_entry[index].stride - 1;
968
969         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
970                             tfs->resc.tx.hw_entry[index].start,
971                             end,
972                             TF_NUM_METER_PROF,
973                             tfs->TF_METER_PROF_POOL_NAME_TX);
974
975         index = TF_RESC_TYPE_HW_METER_INST;
976
977         /* meter rx direction */
978         if (tfs->resc.rx.hw_entry[index].stride > 0)
979                 end = tfs->resc.rx.hw_entry[index].start +
980                         tfs->resc.rx.hw_entry[index].stride - 1;
981
982         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
983                             tfs->resc.rx.hw_entry[index].start,
984                             end,
985                             TF_NUM_METER,
986                             tfs->TF_METER_INST_POOL_NAME_RX);
987
988         /* meter tx direction */
989         if (tfs->resc.tx.hw_entry[index].stride > 0)
990                 end = tfs->resc.tx.hw_entry[index].start +
991                         tfs->resc.tx.hw_entry[index].stride - 1;
992
993         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
994                             tfs->resc.tx.hw_entry[index].start,
995                             end,
996                             TF_NUM_METER,
997                             tfs->TF_METER_INST_POOL_NAME_TX);
998 }
999
1000 /**
1001  * Internal function to mark all the mirror resources allocated that
1002  * Truflow does not own.
1003  */
1004 static void
1005 tf_rm_rsvd_mirror(struct tf_session *tfs)
1006 {
1007         uint32_t index = TF_RESC_TYPE_HW_MIRROR;
1008         uint32_t end = 0;
1009
1010         /* mirror rx direction */
1011         if (tfs->resc.rx.hw_entry[index].stride > 0)
1012                 end = tfs->resc.rx.hw_entry[index].start +
1013                         tfs->resc.rx.hw_entry[index].stride - 1;
1014
1015         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1016                             tfs->resc.rx.hw_entry[index].start,
1017                             end,
1018                             TF_NUM_MIRROR,
1019                             tfs->TF_MIRROR_POOL_NAME_RX);
1020
1021         /* mirror tx direction */
1022         if (tfs->resc.tx.hw_entry[index].stride > 0)
1023                 end = tfs->resc.tx.hw_entry[index].start +
1024                         tfs->resc.tx.hw_entry[index].stride - 1;
1025
1026         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1027                             tfs->resc.tx.hw_entry[index].start,
1028                             end,
1029                             TF_NUM_MIRROR,
1030                             tfs->TF_MIRROR_POOL_NAME_TX);
1031 }
1032
1033 /**
1034  * Internal function to mark all the upar resources allocated that
1035  * Truflow does not own.
1036  */
1037 static void
1038 tf_rm_rsvd_upar(struct tf_session *tfs)
1039 {
1040         uint32_t index = TF_RESC_TYPE_HW_UPAR;
1041         uint32_t end = 0;
1042
1043         /* upar rx direction */
1044         if (tfs->resc.rx.hw_entry[index].stride > 0)
1045                 end = tfs->resc.rx.hw_entry[index].start +
1046                         tfs->resc.rx.hw_entry[index].stride - 1;
1047
1048         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1049                             tfs->resc.rx.hw_entry[index].start,
1050                             end,
1051                             TF_NUM_UPAR,
1052                             tfs->TF_UPAR_POOL_NAME_RX);
1053
1054         /* upar tx direction */
1055         if (tfs->resc.tx.hw_entry[index].stride > 0)
1056                 end = tfs->resc.tx.hw_entry[index].start +
1057                         tfs->resc.tx.hw_entry[index].stride - 1;
1058
1059         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1060                             tfs->resc.tx.hw_entry[index].start,
1061                             end,
1062                             TF_NUM_UPAR,
1063                             tfs->TF_UPAR_POOL_NAME_TX);
1064 }
1065
1066 /**
1067  * Internal function to mark all the sp tcam resources allocated that
1068  * Truflow does not own.
1069  */
1070 static void
1071 tf_rm_rsvd_sp_tcam(struct tf_session *tfs)
1072 {
1073         uint32_t index = TF_RESC_TYPE_HW_SP_TCAM;
1074         uint32_t end = 0;
1075
1076         /* sp tcam rx direction */
1077         if (tfs->resc.rx.hw_entry[index].stride > 0)
1078                 end = tfs->resc.rx.hw_entry[index].start +
1079                         tfs->resc.rx.hw_entry[index].stride - 1;
1080
1081         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1082                             tfs->resc.rx.hw_entry[index].start,
1083                             end,
1084                             TF_NUM_SP_TCAM,
1085                             tfs->TF_SP_TCAM_POOL_NAME_RX);
1086
1087         /* sp tcam tx direction */
1088         if (tfs->resc.tx.hw_entry[index].stride > 0)
1089                 end = tfs->resc.tx.hw_entry[index].start +
1090                         tfs->resc.tx.hw_entry[index].stride - 1;
1091
1092         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1093                             tfs->resc.tx.hw_entry[index].start,
1094                             end,
1095                             TF_NUM_SP_TCAM,
1096                             tfs->TF_SP_TCAM_POOL_NAME_TX);
1097 }
1098
1099 /**
1100  * Internal function to mark all the l2 func resources allocated that
1101  * Truflow does not own.
1102  */
1103 static void
1104 tf_rm_rsvd_l2_func(struct tf_session *tfs)
1105 {
1106         uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
1107         uint32_t end = 0;
1108
1109         /* l2 func rx direction */
1110         if (tfs->resc.rx.hw_entry[index].stride > 0)
1111                 end = tfs->resc.rx.hw_entry[index].start +
1112                         tfs->resc.rx.hw_entry[index].stride - 1;
1113
1114         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1115                             tfs->resc.rx.hw_entry[index].start,
1116                             end,
1117                             TF_NUM_L2_FUNC,
1118                             tfs->TF_L2_FUNC_POOL_NAME_RX);
1119
1120         /* l2 func tx direction */
1121         if (tfs->resc.tx.hw_entry[index].stride > 0)
1122                 end = tfs->resc.tx.hw_entry[index].start +
1123                         tfs->resc.tx.hw_entry[index].stride - 1;
1124
1125         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1126                             tfs->resc.tx.hw_entry[index].start,
1127                             end,
1128                             TF_NUM_L2_FUNC,
1129                             tfs->TF_L2_FUNC_POOL_NAME_TX);
1130 }
1131
1132 /**
1133  * Internal function to mark all the fkb resources allocated that
1134  * Truflow does not own.
1135  */
1136 static void
1137 tf_rm_rsvd_fkb(struct tf_session *tfs)
1138 {
1139         uint32_t index = TF_RESC_TYPE_HW_FKB;
1140         uint32_t end = 0;
1141
1142         /* fkb rx direction */
1143         if (tfs->resc.rx.hw_entry[index].stride > 0)
1144                 end = tfs->resc.rx.hw_entry[index].start +
1145                         tfs->resc.rx.hw_entry[index].stride - 1;
1146
1147         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1148                             tfs->resc.rx.hw_entry[index].start,
1149                             end,
1150                             TF_NUM_FKB,
1151                             tfs->TF_FKB_POOL_NAME_RX);
1152
1153         /* fkb tx direction */
1154         if (tfs->resc.tx.hw_entry[index].stride > 0)
1155                 end = tfs->resc.tx.hw_entry[index].start +
1156                         tfs->resc.tx.hw_entry[index].stride - 1;
1157
1158         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1159                             tfs->resc.tx.hw_entry[index].start,
1160                             end,
1161                             TF_NUM_FKB,
1162                             tfs->TF_FKB_POOL_NAME_TX);
1163 }
1164
1165 /**
1166  * Internal function to mark all the tbld scope resources allocated
1167  * that Truflow does not own.
1168  */
1169 static void
1170 tf_rm_rsvd_tbl_scope(struct tf_session *tfs)
1171 {
1172         uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;
1173         uint32_t end = 0;
1174
1175         /* tbl scope rx direction */
1176         if (tfs->resc.rx.hw_entry[index].stride > 0)
1177                 end = tfs->resc.rx.hw_entry[index].start +
1178                         tfs->resc.rx.hw_entry[index].stride - 1;
1179
1180         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1181                             tfs->resc.rx.hw_entry[index].start,
1182                             end,
1183                             TF_NUM_TBL_SCOPE,
1184                             tfs->TF_TBL_SCOPE_POOL_NAME_RX);
1185
1186         /* tbl scope tx direction */
1187         if (tfs->resc.tx.hw_entry[index].stride > 0)
1188                 end = tfs->resc.tx.hw_entry[index].start +
1189                         tfs->resc.tx.hw_entry[index].stride - 1;
1190
1191         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1192                             tfs->resc.tx.hw_entry[index].start,
1193                             end,
1194                             TF_NUM_TBL_SCOPE,
1195                             tfs->TF_TBL_SCOPE_POOL_NAME_TX);
1196 }
1197
1198 /**
1199  * Internal function to mark all the l2 epoch resources allocated that
1200  * Truflow does not own.
1201  */
1202 static void
1203 tf_rm_rsvd_epoch(struct tf_session *tfs)
1204 {
1205         uint32_t index = TF_RESC_TYPE_HW_EPOCH0;
1206         uint32_t end = 0;
1207
1208         /* epoch0 rx direction */
1209         if (tfs->resc.rx.hw_entry[index].stride > 0)
1210                 end = tfs->resc.rx.hw_entry[index].start +
1211                         tfs->resc.rx.hw_entry[index].stride - 1;
1212
1213         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1214                             tfs->resc.rx.hw_entry[index].start,
1215                             end,
1216                             TF_NUM_EPOCH0,
1217                             tfs->TF_EPOCH0_POOL_NAME_RX);
1218
1219         /* epoch0 tx direction */
1220         if (tfs->resc.tx.hw_entry[index].stride > 0)
1221                 end = tfs->resc.tx.hw_entry[index].start +
1222                         tfs->resc.tx.hw_entry[index].stride - 1;
1223
1224         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1225                             tfs->resc.tx.hw_entry[index].start,
1226                             end,
1227                             TF_NUM_EPOCH0,
1228                             tfs->TF_EPOCH0_POOL_NAME_TX);
1229
1230         index = TF_RESC_TYPE_HW_EPOCH1;
1231
1232         /* epoch1 rx direction */
1233         if (tfs->resc.rx.hw_entry[index].stride > 0)
1234                 end = tfs->resc.rx.hw_entry[index].start +
1235                         tfs->resc.rx.hw_entry[index].stride - 1;
1236
1237         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1238                             tfs->resc.rx.hw_entry[index].start,
1239                             end,
1240                             TF_NUM_EPOCH1,
1241                             tfs->TF_EPOCH1_POOL_NAME_RX);
1242
1243         /* epoch1 tx direction */
1244         if (tfs->resc.tx.hw_entry[index].stride > 0)
1245                 end = tfs->resc.tx.hw_entry[index].start +
1246                         tfs->resc.tx.hw_entry[index].stride - 1;
1247
1248         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1249                             tfs->resc.tx.hw_entry[index].start,
1250                             end,
1251                             TF_NUM_EPOCH1,
1252                             tfs->TF_EPOCH1_POOL_NAME_TX);
1253 }
1254
1255 /**
1256  * Internal function to mark all the metadata resources allocated that
1257  * Truflow does not own.
1258  */
1259 static void
1260 tf_rm_rsvd_metadata(struct tf_session *tfs)
1261 {
1262         uint32_t index = TF_RESC_TYPE_HW_METADATA;
1263         uint32_t end = 0;
1264
1265         /* metadata rx direction */
1266         if (tfs->resc.rx.hw_entry[index].stride > 0)
1267                 end = tfs->resc.rx.hw_entry[index].start +
1268                         tfs->resc.rx.hw_entry[index].stride - 1;
1269
1270         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1271                             tfs->resc.rx.hw_entry[index].start,
1272                             end,
1273                             TF_NUM_METADATA,
1274                             tfs->TF_METADATA_POOL_NAME_RX);
1275
1276         /* metadata tx direction */
1277         if (tfs->resc.tx.hw_entry[index].stride > 0)
1278                 end = tfs->resc.tx.hw_entry[index].start +
1279                         tfs->resc.tx.hw_entry[index].stride - 1;
1280
1281         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1282                             tfs->resc.tx.hw_entry[index].start,
1283                             end,
1284                             TF_NUM_METADATA,
1285                             tfs->TF_METADATA_POOL_NAME_TX);
1286 }
1287
1288 /**
1289  * Internal function to mark all the ct state resources allocated that
1290  * Truflow does not own.
1291  */
1292 static void
1293 tf_rm_rsvd_ct_state(struct tf_session *tfs)
1294 {
1295         uint32_t index = TF_RESC_TYPE_HW_CT_STATE;
1296         uint32_t end = 0;
1297
1298         /* ct state rx direction */
1299         if (tfs->resc.rx.hw_entry[index].stride > 0)
1300                 end = tfs->resc.rx.hw_entry[index].start +
1301                         tfs->resc.rx.hw_entry[index].stride - 1;
1302
1303         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1304                             tfs->resc.rx.hw_entry[index].start,
1305                             end,
1306                             TF_NUM_CT_STATE,
1307                             tfs->TF_CT_STATE_POOL_NAME_RX);
1308
1309         /* ct state tx direction */
1310         if (tfs->resc.tx.hw_entry[index].stride > 0)
1311                 end = tfs->resc.tx.hw_entry[index].start +
1312                         tfs->resc.tx.hw_entry[index].stride - 1;
1313
1314         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1315                             tfs->resc.tx.hw_entry[index].start,
1316                             end,
1317                             TF_NUM_CT_STATE,
1318                             tfs->TF_CT_STATE_POOL_NAME_TX);
1319 }
1320
1321 /**
1322  * Internal function to mark all the range resources allocated that
1323  * Truflow does not own.
1324  */
1325 static void
1326 tf_rm_rsvd_range(struct tf_session *tfs)
1327 {
1328         uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;
1329         uint32_t end = 0;
1330
1331         /* range profile rx direction */
1332         if (tfs->resc.rx.hw_entry[index].stride > 0)
1333                 end = tfs->resc.rx.hw_entry[index].start +
1334                         tfs->resc.rx.hw_entry[index].stride - 1;
1335
1336         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1337                             tfs->resc.rx.hw_entry[index].start,
1338                             end,
1339                             TF_NUM_RANGE_PROF,
1340                             tfs->TF_RANGE_PROF_POOL_NAME_RX);
1341
1342         /* range profile tx direction */
1343         if (tfs->resc.tx.hw_entry[index].stride > 0)
1344                 end = tfs->resc.tx.hw_entry[index].start +
1345                         tfs->resc.tx.hw_entry[index].stride - 1;
1346
1347         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1348                             tfs->resc.tx.hw_entry[index].start,
1349                             end,
1350                             TF_NUM_RANGE_PROF,
1351                             tfs->TF_RANGE_PROF_POOL_NAME_TX);
1352
1353         index = TF_RESC_TYPE_HW_RANGE_ENTRY;
1354
1355         /* range entry rx direction */
1356         if (tfs->resc.rx.hw_entry[index].stride > 0)
1357                 end = tfs->resc.rx.hw_entry[index].start +
1358                         tfs->resc.rx.hw_entry[index].stride - 1;
1359
1360         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1361                             tfs->resc.rx.hw_entry[index].start,
1362                             end,
1363                             TF_NUM_RANGE_ENTRY,
1364                             tfs->TF_RANGE_ENTRY_POOL_NAME_RX);
1365
1366         /* range entry tx direction */
1367         if (tfs->resc.tx.hw_entry[index].stride > 0)
1368                 end = tfs->resc.tx.hw_entry[index].start +
1369                         tfs->resc.tx.hw_entry[index].stride - 1;
1370
1371         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1372                             tfs->resc.tx.hw_entry[index].start,
1373                             end,
1374                             TF_NUM_RANGE_ENTRY,
1375                             tfs->TF_RANGE_ENTRY_POOL_NAME_TX);
1376 }
1377
1378 /**
1379  * Internal function to mark all the lag resources allocated that
1380  * Truflow does not own.
1381  */
1382 static void
1383 tf_rm_rsvd_lag_entry(struct tf_session *tfs)
1384 {
1385         uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;
1386         uint32_t end = 0;
1387
1388         /* lag entry rx direction */
1389         if (tfs->resc.rx.hw_entry[index].stride > 0)
1390                 end = tfs->resc.rx.hw_entry[index].start +
1391                         tfs->resc.rx.hw_entry[index].stride - 1;
1392
1393         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1394                             tfs->resc.rx.hw_entry[index].start,
1395                             end,
1396                             TF_NUM_LAG_ENTRY,
1397                             tfs->TF_LAG_ENTRY_POOL_NAME_RX);
1398
1399         /* lag entry tx direction */
1400         if (tfs->resc.tx.hw_entry[index].stride > 0)
1401                 end = tfs->resc.tx.hw_entry[index].start +
1402                         tfs->resc.tx.hw_entry[index].stride - 1;
1403
1404         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1405                             tfs->resc.tx.hw_entry[index].start,
1406                             end,
1407                             TF_NUM_LAG_ENTRY,
1408                             tfs->TF_LAG_ENTRY_POOL_NAME_TX);
1409 }
1410
1411 /**
1412  * Internal function to mark all the full action resources allocated
1413  * that Truflow does not own.
1414  */
1415 static void
1416 tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
1417 {
1418         uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
1419         uint16_t end = 0;
1420
1421         /* full action rx direction */
1422         if (tfs->resc.rx.sram_entry[index].stride > 0)
1423                 end = tfs->resc.rx.sram_entry[index].start +
1424                         tfs->resc.rx.sram_entry[index].stride - 1;
1425
1426         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1427                             TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
1428                             end,
1429                             TF_RSVD_SRAM_FULL_ACTION_RX,
1430                             tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);
1431
1432         /* full action tx direction */
1433         if (tfs->resc.tx.sram_entry[index].stride > 0)
1434                 end = tfs->resc.tx.sram_entry[index].start +
1435                         tfs->resc.tx.sram_entry[index].stride - 1;
1436
1437         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1438                             TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
1439                             end,
1440                             TF_RSVD_SRAM_FULL_ACTION_TX,
1441                             tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
1442 }
1443
1444 /**
1445  * Internal function to mark all the multicast group resources
1446  * allocated that Truflow does not own.
1447  */
1448 static void
1449 tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
1450 {
1451         uint32_t index = TF_RESC_TYPE_SRAM_MCG;
1452         uint16_t end = 0;
1453
1454         /* multicast group rx direction */
1455         if (tfs->resc.rx.sram_entry[index].stride > 0)
1456                 end = tfs->resc.rx.sram_entry[index].start +
1457                         tfs->resc.rx.sram_entry[index].stride - 1;
1458
1459         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1460                             TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
1461                             end,
1462                             TF_RSVD_SRAM_MCG_RX,
1463                             tfs->TF_SRAM_MCG_POOL_NAME_RX);
1464
1465         /* Multicast Group on TX is not supported */
1466 }
1467
1468 /**
1469  * Internal function to mark all the encap resources allocated that
1470  * Truflow does not own.
1471  */
1472 static void
1473 tf_rm_rsvd_sram_encap(struct tf_session *tfs)
1474 {
1475         uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
1476         uint16_t end = 0;
1477
1478         /* encap 8b rx direction */
1479         if (tfs->resc.rx.sram_entry[index].stride > 0)
1480                 end = tfs->resc.rx.sram_entry[index].start +
1481                         tfs->resc.rx.sram_entry[index].stride - 1;
1482
1483         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1484                             TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
1485                             end,
1486                             TF_RSVD_SRAM_ENCAP_8B_RX,
1487                             tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);
1488
1489         /* encap 8b tx direction */
1490         if (tfs->resc.tx.sram_entry[index].stride > 0)
1491                 end = tfs->resc.tx.sram_entry[index].start +
1492                         tfs->resc.tx.sram_entry[index].stride - 1;
1493
1494         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1495                             TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
1496                             end,
1497                             TF_RSVD_SRAM_ENCAP_8B_TX,
1498                             tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);
1499
1500         index = TF_RESC_TYPE_SRAM_ENCAP_16B;
1501
1502         /* encap 16b rx direction */
1503         if (tfs->resc.rx.sram_entry[index].stride > 0)
1504                 end = tfs->resc.rx.sram_entry[index].start +
1505                         tfs->resc.rx.sram_entry[index].stride - 1;
1506
1507         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1508                             TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
1509                             end,
1510                             TF_RSVD_SRAM_ENCAP_16B_RX,
1511                             tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);
1512
1513         /* encap 16b tx direction */
1514         if (tfs->resc.tx.sram_entry[index].stride > 0)
1515                 end = tfs->resc.tx.sram_entry[index].start +
1516                         tfs->resc.tx.sram_entry[index].stride - 1;
1517
1518         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1519                             TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
1520                             end,
1521                             TF_RSVD_SRAM_ENCAP_16B_TX,
1522                             tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);
1523
1524         index = TF_RESC_TYPE_SRAM_ENCAP_64B;
1525
1526         /* Encap 64B not supported on RX */
1527
1528         /* Encap 64b tx direction */
1529         if (tfs->resc.tx.sram_entry[index].stride > 0)
1530                 end = tfs->resc.tx.sram_entry[index].start +
1531                         tfs->resc.tx.sram_entry[index].stride - 1;
1532
1533         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1534                             TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
1535                             end,
1536                             TF_RSVD_SRAM_ENCAP_64B_TX,
1537                             tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
1538 }
1539
1540 /**
1541  * Internal function to mark all the sp resources allocated that
1542  * Truflow does not own.
1543  */
1544 static void
1545 tf_rm_rsvd_sram_sp(struct tf_session *tfs)
1546 {
1547         uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
1548         uint16_t end = 0;
1549
1550         /* sp smac rx direction */
1551         if (tfs->resc.rx.sram_entry[index].stride > 0)
1552                 end = tfs->resc.rx.sram_entry[index].start +
1553                         tfs->resc.rx.sram_entry[index].stride - 1;
1554
1555         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1556                             TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
1557                             end,
1558                             TF_RSVD_SRAM_SP_SMAC_RX,
1559                             tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);
1560
1561         /* sp smac tx direction */
1562         if (tfs->resc.tx.sram_entry[index].stride > 0)
1563                 end = tfs->resc.tx.sram_entry[index].start +
1564                         tfs->resc.tx.sram_entry[index].stride - 1;
1565
1566         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1567                             TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
1568                             end,
1569                             TF_RSVD_SRAM_SP_SMAC_TX,
1570                             tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);
1571
1572         index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
1573
1574         /* SP SMAC IPv4 not supported on RX */
1575
1576         /* sp smac ipv4 tx direction */
1577         if (tfs->resc.tx.sram_entry[index].stride > 0)
1578                 end = tfs->resc.tx.sram_entry[index].start +
1579                         tfs->resc.tx.sram_entry[index].stride - 1;
1580
1581         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1582                             TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
1583                             end,
1584                             TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
1585                             tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);
1586
1587         index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
1588
1589         /* SP SMAC IPv6 not supported on RX */
1590
1591         /* sp smac ipv6 tx direction */
1592         if (tfs->resc.tx.sram_entry[index].stride > 0)
1593                 end = tfs->resc.tx.sram_entry[index].start +
1594                         tfs->resc.tx.sram_entry[index].stride - 1;
1595
1596         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1597                             TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
1598                             end,
1599                             TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
1600                             tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
1601 }
1602
1603 /**
1604  * Internal function to mark all the stat resources allocated that
1605  * Truflow does not own.
1606  */
1607 static void
1608 tf_rm_rsvd_sram_stats(struct tf_session *tfs)
1609 {
1610         uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
1611         uint16_t end = 0;
1612
1613         /* counter 64b rx direction */
1614         if (tfs->resc.rx.sram_entry[index].stride > 0)
1615                 end = tfs->resc.rx.sram_entry[index].start +
1616                         tfs->resc.rx.sram_entry[index].stride - 1;
1617
1618         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1619                             TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
1620                             end,
1621                             TF_RSVD_SRAM_COUNTER_64B_RX,
1622                             tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);
1623
1624         /* counter 64b tx direction */
1625         if (tfs->resc.tx.sram_entry[index].stride > 0)
1626                 end = tfs->resc.tx.sram_entry[index].start +
1627                         tfs->resc.tx.sram_entry[index].stride - 1;
1628
1629         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1630                             TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
1631                             end,
1632                             TF_RSVD_SRAM_COUNTER_64B_TX,
1633                             tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
1634 }
1635
1636 /**
1637  * Internal function to mark all the nat resources allocated that
1638  * Truflow does not own.
1639  */
1640 static void
1641 tf_rm_rsvd_sram_nat(struct tf_session *tfs)
1642 {
1643         uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
1644         uint16_t end = 0;
1645
1646         /* nat source port rx direction */
1647         if (tfs->resc.rx.sram_entry[index].stride > 0)
1648                 end = tfs->resc.rx.sram_entry[index].start +
1649                         tfs->resc.rx.sram_entry[index].stride - 1;
1650
1651         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1652                             TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
1653                             end,
1654                             TF_RSVD_SRAM_NAT_SPORT_RX,
1655                             tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);
1656
1657         /* nat source port tx direction */
1658         if (tfs->resc.tx.sram_entry[index].stride > 0)
1659                 end = tfs->resc.tx.sram_entry[index].start +
1660                         tfs->resc.tx.sram_entry[index].stride - 1;
1661
1662         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1663                             TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
1664                             end,
1665                             TF_RSVD_SRAM_NAT_SPORT_TX,
1666                             tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);
1667
1668         index = TF_RESC_TYPE_SRAM_NAT_DPORT;
1669
1670         /* nat destination port rx direction */
1671         if (tfs->resc.rx.sram_entry[index].stride > 0)
1672                 end = tfs->resc.rx.sram_entry[index].start +
1673                         tfs->resc.rx.sram_entry[index].stride - 1;
1674
1675         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1676                             TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
1677                             end,
1678                             TF_RSVD_SRAM_NAT_DPORT_RX,
1679                             tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);
1680
1681         /* nat destination port tx direction */
1682         if (tfs->resc.tx.sram_entry[index].stride > 0)
1683                 end = tfs->resc.tx.sram_entry[index].start +
1684                         tfs->resc.tx.sram_entry[index].stride - 1;
1685
1686         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1687                             TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
1688                             end,
1689                             TF_RSVD_SRAM_NAT_DPORT_TX,
1690                             tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);
1691
1692         index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
1693
1694         /* nat source port ipv4 rx direction */
1695         if (tfs->resc.rx.sram_entry[index].stride > 0)
1696                 end = tfs->resc.rx.sram_entry[index].start +
1697                         tfs->resc.rx.sram_entry[index].stride - 1;
1698
1699         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1700                             TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
1701                             end,
1702                             TF_RSVD_SRAM_NAT_S_IPV4_RX,
1703                             tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);
1704
1705         /* nat source ipv4 port tx direction */
1706         if (tfs->resc.tx.sram_entry[index].stride > 0)
1707                 end = tfs->resc.tx.sram_entry[index].start +
1708                         tfs->resc.tx.sram_entry[index].stride - 1;
1709
1710         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1711                             TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
1712                             end,
1713                             TF_RSVD_SRAM_NAT_S_IPV4_TX,
1714                             tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);
1715
1716         index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
1717
1718         /* nat destination port ipv4 rx direction */
1719         if (tfs->resc.rx.sram_entry[index].stride > 0)
1720                 end = tfs->resc.rx.sram_entry[index].start +
1721                         tfs->resc.rx.sram_entry[index].stride - 1;
1722
1723         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1724                             TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
1725                             end,
1726                             TF_RSVD_SRAM_NAT_D_IPV4_RX,
1727                             tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);
1728
1729         /* nat destination ipv4 port tx direction */
1730         if (tfs->resc.tx.sram_entry[index].stride > 0)
1731                 end = tfs->resc.tx.sram_entry[index].start +
1732                         tfs->resc.tx.sram_entry[index].stride - 1;
1733
1734         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1735                             TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
1736                             end,
1737                             TF_RSVD_SRAM_NAT_D_IPV4_TX,
1738                             tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
1739 }
1740
1741 /**
1742  * Internal function used to validate the HW allocated resources
1743  * against the requested values.
1744  */
1745 static int
1746 tf_rm_hw_alloc_validate(enum tf_dir dir,
1747                         struct tf_rm_hw_alloc *hw_alloc,
1748                         struct tf_rm_entry *hw_entry)
1749 {
1750         int error = 0;
1751         int i;
1752
1753         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
1754                 if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
1755                         PMD_DRV_LOG(ERR,
1756                                 "%s, Alloc failed id:%d expect:%d got:%d\n",
1757                                 tf_dir_2_str(dir),
1758                                 i,
1759                                 hw_alloc->hw_num[i],
1760                                 hw_entry[i].stride);
1761                         error = -1;
1762                 }
1763         }
1764
1765         return error;
1766 }
1767
1768 /**
1769  * Internal function used to validate the SRAM allocated resources
1770  * against the requested values.
1771  */
1772 static int
1773 tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused,
1774                           struct tf_rm_sram_alloc *sram_alloc,
1775                           struct tf_rm_entry *sram_entry)
1776 {
1777         int error = 0;
1778         int i;
1779
1780         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
1781                 if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
1782                         PMD_DRV_LOG(ERR,
1783                                 "%s, Alloc failed idx:%d expect:%d got:%d\n",
1784                                 tf_dir_2_str(dir),
1785                                 i,
1786                                 sram_alloc->sram_num[i],
1787                                 sram_entry[i].stride);
1788                         error = -1;
1789                 }
1790         }
1791
1792         return error;
1793 }
1794
1795 /**
1796  * Internal function used to mark all the HW resources allocated that
1797  * Truflow does not own.
1798  */
1799 static void
1800 tf_rm_reserve_hw(struct tf *tfp)
1801 {
1802         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1803
1804         /* TBD
1805          * There is no direct AFM resource allocation as it is carved
1806          * statically at AFM boot time. Thus the bit allocators work
1807          * on the full HW resource amount and we just mark everything
1808          * used except the resources that Truflow took ownership off.
1809          */
1810         tf_rm_rsvd_l2_ctxt(tfs);
1811         tf_rm_rsvd_prof(tfs);
1812         tf_rm_rsvd_em_prof(tfs);
1813         tf_rm_rsvd_wc(tfs);
1814         tf_rm_rsvd_mirror(tfs);
1815         tf_rm_rsvd_meter(tfs);
1816         tf_rm_rsvd_upar(tfs);
1817         tf_rm_rsvd_sp_tcam(tfs);
1818         tf_rm_rsvd_l2_func(tfs);
1819         tf_rm_rsvd_fkb(tfs);
1820         tf_rm_rsvd_tbl_scope(tfs);
1821         tf_rm_rsvd_epoch(tfs);
1822         tf_rm_rsvd_metadata(tfs);
1823         tf_rm_rsvd_ct_state(tfs);
1824         tf_rm_rsvd_range(tfs);
1825         tf_rm_rsvd_lag_entry(tfs);
1826 }
1827
1828 /**
1829  * Internal function used to mark all the SRAM resources allocated
1830  * that Truflow does not own.
1831  */
1832 static void
1833 tf_rm_reserve_sram(struct tf *tfp)
1834 {
1835         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1836
1837         /* TBD
1838          * There is no direct AFM resource allocation as it is carved
1839          * statically at AFM boot time. Thus the bit allocators work
1840          * on the full HW resource amount and we just mark everything
1841          * used except the resources that Truflow took ownership off.
1842          */
1843         tf_rm_rsvd_sram_full_action(tfs);
1844         tf_rm_rsvd_sram_mcg(tfs);
1845         tf_rm_rsvd_sram_encap(tfs);
1846         tf_rm_rsvd_sram_sp(tfs);
1847         tf_rm_rsvd_sram_stats(tfs);
1848         tf_rm_rsvd_sram_nat(tfs);
1849 }
1850
1851 /**
1852  * Internal function used to allocate and validate all HW resources.
1853  */
1854 static int
1855 tf_rm_allocate_validate_hw(struct tf *tfp,
1856                            enum tf_dir dir)
1857 {
1858         int rc;
1859         int i;
1860         struct tf_rm_hw_query hw_query;
1861         struct tf_rm_hw_alloc hw_alloc;
1862         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1863         struct tf_rm_entry *hw_entries;
1864         uint32_t error_flag;
1865
1866         if (dir == TF_DIR_RX)
1867                 hw_entries = tfs->resc.rx.hw_entry;
1868         else
1869                 hw_entries = tfs->resc.tx.hw_entry;
1870
1871         /* Query for Session HW Resources */
1872         rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
1873         if (rc) {
1874                 /* Log error */
1875                 PMD_DRV_LOG(ERR,
1876                             "%s, HW qcaps message send failed\n",
1877                             tf_dir_2_str(dir));
1878                 goto cleanup;
1879         }
1880
1881         rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
1882         if (rc) {
1883                 /* Log error */
1884                 PMD_DRV_LOG(ERR,
1885                         "%s, HW QCAPS validation failed, error_flag:0x%x\n",
1886                         tf_dir_2_str(dir),
1887                         error_flag);
1888                 tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
1889                 goto cleanup;
1890         }
1891
1892         /* Post process HW capability */
1893         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
1894                 hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
1895
1896         /* Allocate Session HW Resources */
1897         rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
1898         if (rc) {
1899                 /* Log error */
1900                 PMD_DRV_LOG(ERR,
1901                             "%s, HW alloc message send failed\n",
1902                             tf_dir_2_str(dir));
1903                 goto cleanup;
1904         }
1905
1906         /* Perform HW allocation validation as its possible the
1907          * resource availability changed between qcaps and alloc
1908          */
1909         rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
1910         if (rc) {
1911                 /* Log error */
1912                 PMD_DRV_LOG(ERR,
1913                             "%s, HW Resource validation failed\n",
1914                             tf_dir_2_str(dir));
1915                 goto cleanup;
1916         }
1917
1918         return 0;
1919
1920  cleanup:
1921         return -1;
1922 }
1923
1924 /**
1925  * Internal function used to allocate and validate all SRAM resources.
1926  *
1927  * [in] tfp
1928  *   Pointer to TF handle
1929  *
1930  * [in] dir
1931  *   Receive or transmit direction
1932  *
1933  * Returns:
1934  *   0  - Success
1935  *   -1 - Internal error
1936  */
1937 static int
1938 tf_rm_allocate_validate_sram(struct tf *tfp,
1939                              enum tf_dir dir)
1940 {
1941         int rc;
1942         int i;
1943         struct tf_rm_sram_query sram_query;
1944         struct tf_rm_sram_alloc sram_alloc;
1945         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1946         struct tf_rm_entry *sram_entries;
1947         uint32_t error_flag;
1948
1949         if (dir == TF_DIR_RX)
1950                 sram_entries = tfs->resc.rx.sram_entry;
1951         else
1952                 sram_entries = tfs->resc.tx.sram_entry;
1953
1954         /* Query for Session SRAM Resources */
1955         rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
1956         if (rc) {
1957                 /* Log error */
1958                 PMD_DRV_LOG(ERR,
1959                             "%s, SRAM qcaps message send failed\n",
1960                             tf_dir_2_str(dir));
1961                 goto cleanup;
1962         }
1963
1964         rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
1965         if (rc) {
1966                 /* Log error */
1967                 PMD_DRV_LOG(ERR,
1968                         "%s, SRAM QCAPS validation failed, error_flag:%x\n",
1969                         tf_dir_2_str(dir),
1970                         error_flag);
1971                 tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
1972                 goto cleanup;
1973         }
1974
1975         /* Post process SRAM capability */
1976         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
1977                 sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
1978
1979         /* Allocate Session SRAM Resources */
1980         rc = tf_msg_session_sram_resc_alloc(tfp,
1981                                             dir,
1982                                             &sram_alloc,
1983                                             sram_entries);
1984         if (rc) {
1985                 /* Log error */
1986                 PMD_DRV_LOG(ERR,
1987                             "%s, SRAM alloc message send failed\n",
1988                             tf_dir_2_str(dir));
1989                 goto cleanup;
1990         }
1991
1992         /* Perform SRAM allocation validation as its possible the
1993          * resource availability changed between qcaps and alloc
1994          */
1995         rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
1996         if (rc) {
1997                 /* Log error */
1998                 PMD_DRV_LOG(ERR,
1999                             "%s, SRAM Resource allocation validation failed\n",
2000                             tf_dir_2_str(dir));
2001                 goto cleanup;
2002         }
2003
2004         return 0;
2005
2006  cleanup:
2007         return -1;
2008 }
2009
/**
 * Helper function used to prune a HW resource array to only hold
 * elements that need to be flushed.
 *
 * A pool whose free count equals its allocated stride has had every
 * element returned by the session, so nothing needs flushing for
 * that type and its flush entry is zeroed. Any pool with outstanding
 * elements keeps its full entry in the flush set.
 *
 * [in] tfs
 *   Session handle
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] hw_entries
 *   Master HW Resource database
 *
 * [in/out] flush_entries
 *   Pruned HW Resource database of entries to be flushed. This
 *   array should be passed in as a complete copy of the master HW
 *   Resource database. The outgoing result will be a pruned version
 *   based on the result of the requested checking
 *
 * Returns:
 *    0 - Success, no flush required
 *    1 - Success, flush required
 *   -1 - Internal error
 */
static int
tf_rm_hw_to_flush(struct tf_session *tfs,
		  enum tf_dir dir,
		  struct tf_rm_entry *hw_entries,
		  struct tf_rm_entry *flush_entries)
{
	int rc;
	int flush_rc = 0;
	int free_cnt;
	struct bitalloc *pool;

	/* Check all the hw resource pools and check for left over
	 * elements. Any found will result in the complete pool of a
	 * type to get invalidated.
	 */

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_L2_CTXT_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_PROF_FUNC_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {
		flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;
		flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_PROF_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_EM_PROF_ID_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) {
		flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;
		flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* EM records are not tracked by a local bitalloc pool here, so
	 * they are unconditionally pruned from the flush set.
	 */
	flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;
	flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_WC_TCAM_PROF_ID_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_WC_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_METER_PROF_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {
		flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;
		flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_METER_INST_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {
		flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;
		flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_MIRROR_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) {
		flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;
		flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_UPAR_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {
		flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;
		flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SP_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_L2_FUNC_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {
		flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;
		flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_FKB_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {
		flush_entries[TF_RESC_TYPE_HW_FKB].start = 0;
		flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_TBL_SCOPE_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {
		flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
		flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
	} else {
		/* Unlike the other types, a leaked table scope is logged
		 * explicitly to aid debugging before the flush.
		 */
		PMD_DRV_LOG(ERR, "%s: TBL_SCOPE free_cnt:%d, entries:%d\n",
			    tf_dir_2_str(dir),
			    free_cnt,
			    hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_EPOCH0_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {
		flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;
		flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_EPOCH1_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {
		flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;
		flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_METADATA_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {
		flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;
		flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_CT_STATE_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {
		flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;
		flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_RANGE_PROF_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {
		flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;
		flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_RANGE_ENTRY_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {
		flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;
		flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_LAG_ENTRY_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {
		flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;
		flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;
	} else {
		flush_rc = 1;
	}

	return flush_rc;
}
2332
/**
 * Helper function used to prune a SRAM resource array to only hold
 * elements that needs to be flushed.
 *
 * [in] tfs
 *   Session handle
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] sram_entries
 *   Master SRAM Resource database
 *
 * [in/out] flush_entries
 *   Pruned SRAM Resource database of entries to be flushed. This
 *   array should be passed in as a complete copy of the master SRAM
 *   Resource database. The outgoing result will be a pruned version
 *   based on the result of the requested checking
 *
 * Returns:
 *    0 - Success, no flush required
 *    1 - Success, flush required
 *   -1 - Internal error
 */
static int
tf_rm_sram_to_flush(struct tf_session *tfs,
		    enum tf_dir dir,
		    struct tf_rm_entry *sram_entries,
		    struct tf_rm_entry *flush_entries)
{
	int rc;
	int flush_rc = 0;
	int free_cnt;
	struct bitalloc *pool;

	/* Check all the sram resource pools and check for left over
	 * elements. Any found will result in the complete pool of a
	 * type to get invalidated.
	 *
	 * For each type: if the pool's free count equals the reserved
	 * stride, every element was returned and the type is pruned
	 * from the flush set (start/stride zeroed); otherwise the
	 * whole type remains marked for flushing.
	 */

	/* Full Action records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_FULL_ACTION_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Multicast Groups; only pools for RX direction */
	if (dir == TF_DIR_RX) {
		/* NOTE(review): TF_RM_GET_POOLS_RX does not take rc as
		 * an argument; presumably the macro expansion assigns
		 * rc in the caller's scope - confirm against the macro
		 * definition before relying on the check below.
		 */
		TF_RM_GET_POOLS_RX(tfs, &pool,
				   TF_SRAM_MCG_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune TX direction */
		flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
	}

	/* Encap 8B records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_ENCAP_8B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Encap 16B records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_ENCAP_16B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Encap 64B records; only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_ENCAP_64B_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
	}

	/* Source Property SMAC records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_SP_SMAC_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* SP SMAC IPv4 records; only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
				0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
	}

	/* SP SMAC IPv6 records; only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
				0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
	}

	/* 64B Counter records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_STATS_64B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* NAT source port records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_SPORT_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* NAT destination port records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_DPORT_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* NAT source IPv4 records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_S_IPV4_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* NAT destination IPv4 records */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_D_IPV4_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
	} else {
		flush_rc = 1;
	}

	return flush_rc;
}
2573
2574 /**
2575  * Helper function used to generate an error log for the HW types that
2576  * needs to be flushed. The types should have been cleaned up ahead of
2577  * invoking tf_close_session.
2578  *
2579  * [in] hw_entries
2580  *   HW Resource database holding elements to be flushed
2581  */
2582 static void
2583 tf_rm_log_hw_flush(enum tf_dir dir,
2584                    struct tf_rm_entry *hw_entries)
2585 {
2586         int i;
2587
2588         /* Walk the hw flush array and log the types that wasn't
2589          * cleaned up.
2590          */
2591         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
2592                 if (hw_entries[i].stride != 0)
2593                         PMD_DRV_LOG(ERR,
2594                                     "%s: %s was not cleaned up\n",
2595                                     tf_dir_2_str(dir),
2596                                     tf_hcapi_hw_2_str(i));
2597         }
2598 }
2599
2600 /**
2601  * Helper function used to generate an error log for the SRAM types
2602  * that needs to be flushed. The types should have been cleaned up
2603  * ahead of invoking tf_close_session.
2604  *
2605  * [in] sram_entries
2606  *   SRAM Resource database holding elements to be flushed
2607  */
2608 static void
2609 tf_rm_log_sram_flush(enum tf_dir dir,
2610                      struct tf_rm_entry *sram_entries)
2611 {
2612         int i;
2613
2614         /* Walk the sram flush array and log the types that wasn't
2615          * cleaned up.
2616          */
2617         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
2618                 if (sram_entries[i].stride != 0)
2619                         PMD_DRV_LOG(ERR,
2620                                     "%s: %s was not cleaned up\n",
2621                                     tf_dir_2_str(dir),
2622                                     tf_hcapi_sram_2_str(i));
2623         }
2624 }
2625
2626 void
2627 tf_rm_init(struct tf *tfp __rte_unused)
2628 {
2629         struct tf_session *tfs =
2630                 (struct tf_session *)(tfp->session->core_data);
2631
2632         /* This version is host specific and should be checked against
2633          * when attaching as there is no guarantee that a secondary
2634          * would run from same image version.
2635          */
2636         tfs->ver.major = TF_SESSION_VER_MAJOR;
2637         tfs->ver.minor = TF_SESSION_VER_MINOR;
2638         tfs->ver.update = TF_SESSION_VER_UPDATE;
2639
2640         tfs->session_id.id = 0;
2641         tfs->ref_count = 0;
2642
2643         /* Initialization of Table Scopes */
2644         /* ll_init(&tfs->tbl_scope_ll); */
2645
2646         /* Initialization of HW and SRAM resource DB */
2647         memset(&tfs->resc, 0, sizeof(struct tf_rm_db));
2648
2649         /* Initialization of HW Resource Pools */
2650         ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2651         ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2652         ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
2653         ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
2654         ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
2655         ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
2656         ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
2657         ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
2658
2659         /* TBD, how do we want to handle EM records ?*/
2660         /* EM Records should not be controlled by way of a pool */
2661
2662         ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
2663         ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
2664         ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
2665         ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
2666         ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
2667         ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
2668         ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
2669         ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
2670         ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
2671         ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
2672         ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
2673         ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
2674
2675         ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
2676         ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
2677
2678         ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
2679         ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
2680
2681         ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
2682         ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
2683         ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
2684         ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
2685         ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
2686         ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
2687         ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
2688         ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
2689         ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
2690         ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
2691         ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
2692         ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
2693         ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
2694         ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);
2695         ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);
2696         ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);
2697         ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);
2698         ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);
2699
2700         /* Initialization of SRAM Resource Pools
2701          * These pools are set to the TFLIB defined MAX sizes not
2702          * AFM's HW max as to limit the memory consumption
2703          */
2704         ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
2705                 TF_RSVD_SRAM_FULL_ACTION_RX);
2706         ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
2707                 TF_RSVD_SRAM_FULL_ACTION_TX);
2708         /* Only Multicast Group on RX is supported */
2709         ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
2710                 TF_RSVD_SRAM_MCG_RX);
2711         ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
2712                 TF_RSVD_SRAM_ENCAP_8B_RX);
2713         ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
2714                 TF_RSVD_SRAM_ENCAP_8B_TX);
2715         ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
2716                 TF_RSVD_SRAM_ENCAP_16B_RX);
2717         ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
2718                 TF_RSVD_SRAM_ENCAP_16B_TX);
2719         /* Only Encap 64B on TX is supported */
2720         ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
2721                 TF_RSVD_SRAM_ENCAP_64B_TX);
2722         ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
2723                 TF_RSVD_SRAM_SP_SMAC_RX);
2724         ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
2725                 TF_RSVD_SRAM_SP_SMAC_TX);
2726         /* Only SP SMAC IPv4 on TX is supported */
2727         ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
2728                 TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
2729         /* Only SP SMAC IPv6 on TX is supported */
2730         ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
2731                 TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
2732         ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
2733                 TF_RSVD_SRAM_COUNTER_64B_RX);
2734         ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
2735                 TF_RSVD_SRAM_COUNTER_64B_TX);
2736         ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
2737                 TF_RSVD_SRAM_NAT_SPORT_RX);
2738         ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
2739                 TF_RSVD_SRAM_NAT_SPORT_TX);
2740         ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
2741                 TF_RSVD_SRAM_NAT_DPORT_RX);
2742         ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
2743                 TF_RSVD_SRAM_NAT_DPORT_TX);
2744         ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
2745                 TF_RSVD_SRAM_NAT_S_IPV4_RX);
2746         ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
2747                 TF_RSVD_SRAM_NAT_S_IPV4_TX);
2748         ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
2749                 TF_RSVD_SRAM_NAT_D_IPV4_RX);
2750         ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
2751                 TF_RSVD_SRAM_NAT_D_IPV4_TX);
2752
2753         /* Initialization of pools local to TF Core */
2754         ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2755         ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2756 }
2757
2758 int
2759 tf_rm_allocate_validate(struct tf *tfp)
2760 {
2761         int rc;
2762         int i;
2763
2764         for (i = 0; i < TF_DIR_MAX; i++) {
2765                 rc = tf_rm_allocate_validate_hw(tfp, i);
2766                 if (rc)
2767                         return rc;
2768                 rc = tf_rm_allocate_validate_sram(tfp, i);
2769                 if (rc)
2770                         return rc;
2771         }
2772
2773         /* With both HW and SRAM allocated and validated we can
2774          * 'scrub' the reservation on the pools.
2775          */
2776         tf_rm_reserve_hw(tfp);
2777         tf_rm_reserve_sram(tfp);
2778
2779         return rc;
2780 }
2781
2782 int
2783 tf_rm_close(struct tf *tfp)
2784 {
2785         int rc;
2786         int rc_close = 0;
2787         int i;
2788         struct tf_rm_entry *hw_entries;
2789         struct tf_rm_entry *hw_flush_entries;
2790         struct tf_rm_entry *sram_entries;
2791         struct tf_rm_entry *sram_flush_entries;
2792         struct tf_session *tfs __rte_unused =
2793                 (struct tf_session *)(tfp->session->core_data);
2794
2795         struct tf_rm_db flush_resc = tfs->resc;
2796
2797         /* On close it is assumed that the session has already cleaned
2798          * up all its resources, individually, while destroying its
2799          * flows. No checking is performed thus the behavior is as
2800          * follows.
2801          *
2802          * Session RM will signal FW to release session resources. FW
2803          * will perform invalidation of all the allocated entries
2804          * (assures any outstanding resources has been cleared, then
2805          * free the FW RM instance.
2806          *
2807          * Session will then be freed by tf_close_session() thus there
2808          * is no need to clean each resource pool as the whole session
2809          * is going away.
2810          */
2811
2812         for (i = 0; i < TF_DIR_MAX; i++) {
2813                 if (i == TF_DIR_RX) {
2814                         hw_entries = tfs->resc.rx.hw_entry;
2815                         hw_flush_entries = flush_resc.rx.hw_entry;
2816                         sram_entries = tfs->resc.rx.sram_entry;
2817                         sram_flush_entries = flush_resc.rx.sram_entry;
2818                 } else {
2819                         hw_entries = tfs->resc.tx.hw_entry;
2820                         hw_flush_entries = flush_resc.tx.hw_entry;
2821                         sram_entries = tfs->resc.tx.sram_entry;
2822                         sram_flush_entries = flush_resc.tx.sram_entry;
2823                 }
2824
2825                 /* Check for any not previously freed HW resources and
2826                  * flush if required.
2827                  */
2828                 rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);
2829                 if (rc) {
2830                         rc_close = -ENOTEMPTY;
2831                         /* Log error */
2832                         PMD_DRV_LOG(ERR,
2833                                     "%s, lingering HW resources\n",
2834                                     tf_dir_2_str(i));
2835
2836                         /* Log the entries to be flushed */
2837                         tf_rm_log_hw_flush(i, hw_flush_entries);
2838                         rc = tf_msg_session_hw_resc_flush(tfp,
2839                                                           i,
2840                                                           hw_flush_entries);
2841                         if (rc) {
2842                                 rc_close = rc;
2843                                 /* Log error */
2844                                 PMD_DRV_LOG(ERR,
2845                                             "%s, HW flush failed\n",
2846                                             tf_dir_2_str(i));
2847                         }
2848                 }
2849
2850                 /* Check for any not previously freed SRAM resources
2851                  * and flush if required.
2852                  */
2853                 rc = tf_rm_sram_to_flush(tfs,
2854                                          i,
2855                                          sram_entries,
2856                                          sram_flush_entries);
2857                 if (rc) {
2858                         rc_close = -ENOTEMPTY;
2859                         /* Log error */
2860                         PMD_DRV_LOG(ERR,
2861                                     "%s, lingering SRAM resources\n",
2862                                     tf_dir_2_str(i));
2863
2864                         /* Log the entries to be flushed */
2865                         tf_rm_log_sram_flush(i, sram_flush_entries);
2866
2867                         rc = tf_msg_session_sram_resc_flush(tfp,
2868                                                             i,
2869                                                             sram_flush_entries);
2870                         if (rc) {
2871                                 rc_close = rc;
2872                                 /* Log error */
2873                                 PMD_DRV_LOG(ERR,
2874                                             "%s, HW flush failed\n",
2875                                             tf_dir_2_str(i));
2876                         }
2877                 }
2878
2879                 rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
2880                 if (rc) {
2881                         rc_close = rc;
2882                         /* Log error */
2883                         PMD_DRV_LOG(ERR,
2884                                     "%s, HW free failed\n",
2885                                     tf_dir_2_str(i));
2886                 }
2887
2888                 rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
2889                 if (rc) {
2890                         rc_close = rc;
2891                         /* Log error */
2892                         PMD_DRV_LOG(ERR,
2893                                     "%s, SRAM free failed\n",
2894                                     tf_dir_2_str(i));
2895                 }
2896         }
2897
2898         return rc_close;
2899 }
2900
#if (TF_SHADOW == 1)
/**
 * Initializes the shadow DB for the session. Currently a stub.
 *
 * [in] tfs
 *   Session handle (unused until the shadow DB is implemented)
 *
 * Returns:
 *   1 - presumably "not implemented"; confirm intended convention
 *       when the shadow DB lands.
 */
int
tf_rm_shadow_db_init(struct tf_session *tfs __rte_unused)
{
	/* Bug fix: rc was assigned without a declaration, which
	 * failed to compile whenever TF_SHADOW was enabled.
	 */
	int rc = 1;

	return rc;
}
#endif /* TF_SHADOW */
2910
2911 int
2912 tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
2913                             enum tf_dir dir,
2914                             enum tf_tcam_tbl_type type,
2915                             struct bitalloc **pool)
2916 {
2917         int rc = -EOPNOTSUPP;
2918
2919         *pool = NULL;
2920
2921         switch (type) {
2922         case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
2923                 TF_RM_GET_POOLS(tfs, dir, pool,
2924                                 TF_L2_CTXT_TCAM_POOL_NAME,
2925                                 rc);
2926                 break;
2927         case TF_TCAM_TBL_TYPE_PROF_TCAM:
2928                 TF_RM_GET_POOLS(tfs, dir, pool,
2929                                 TF_PROF_TCAM_POOL_NAME,
2930                                 rc);
2931                 break;
2932         case TF_TCAM_TBL_TYPE_WC_TCAM:
2933                 TF_RM_GET_POOLS(tfs, dir, pool,
2934                                 TF_WC_TCAM_POOL_NAME,
2935                                 rc);
2936                 break;
2937         case TF_TCAM_TBL_TYPE_VEB_TCAM:
2938         case TF_TCAM_TBL_TYPE_SP_TCAM:
2939         case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
2940         default:
2941                 break;
2942         }
2943
2944         if (rc == -EOPNOTSUPP) {
2945                 PMD_DRV_LOG(ERR,
2946                             "dir:%d, Tcam type not supported, type:%d\n",
2947                             dir,
2948                             type);
2949                 return rc;
2950         } else if (rc == -1) {
2951                 PMD_DRV_LOG(ERR,
2952                             "%s:, Tcam type lookup failed, type:%d\n",
2953                             tf_dir_2_str(dir),
2954                             type);
2955                 return rc;
2956         }
2957
2958         return 0;
2959 }
2960
/**
 * Maps a table type to its direction-specific bit allocator pool.
 *
 * [in] tfs
 *   Session handle
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] type
 *   Table type to look up
 *
 * [out] pool
 *   Resulting pool; NULL when the lookup does not succeed
 *
 * Returns:
 *   0           - Success, pool is valid
 *   -EOPNOTSUPP - Table type has no pool (for this direction)
 *   -1          - Pool lookup failed
 */
int
tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
			   enum tf_dir dir,
			   enum tf_tbl_type type,
			   struct bitalloc **pool)
{
	int rc = -EOPNOTSUPP;

	*pool = NULL;

	switch (type) {
	case TF_TBL_TYPE_FULL_ACT_RECORD:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_FULL_ACTION_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_MCAST_GROUPS:
		/* No pools for TX direction, so bail out */
		if (dir == TF_DIR_TX)
			break;
		TF_RM_GET_POOLS_RX(tfs, pool,
				   TF_SRAM_MCG_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_ENCAP_8B:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_ENCAP_8B_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_ENCAP_16B:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_ENCAP_16B_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_ENCAP_64B:
		/* No pools for RX direction, so bail out */
		if (dir == TF_DIR_RX)
			break;
		TF_RM_GET_POOLS_TX(tfs, pool,
				   TF_SRAM_ENCAP_64B_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_SP_SMAC_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
		/* No pools for RX direction, so bail out
		 * (comment previously said TX, contradicting the
		 * check below)
		 */
		if (dir == TF_DIR_RX)
			break;
		TF_RM_GET_POOLS_TX(tfs, pool,
				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
		/* No pools for RX direction, so bail out
		 * (comment previously said TX, contradicting the
		 * check below)
		 */
		if (dir == TF_DIR_RX)
			break;
		TF_RM_GET_POOLS_TX(tfs, pool,
				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_STATS_64:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_STATS_64B_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_NAT_SPORT_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_NAT_S_IPV4_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_NAT_D_IPV4_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_METER_PROF:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_METER_PROF_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_METER_INST:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_METER_INST_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_MIRROR_CONFIG:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_MIRROR_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_UPAR:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_UPAR_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_EPOCH0:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_EPOCH0_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_EPOCH1:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_EPOCH1_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_METADATA:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_METADATA_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_CT_STATE:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_CT_STATE_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_RANGE_PROF:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_RANGE_PROF_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_RANGE_ENTRY:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_RANGE_ENTRY_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_LAG:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_LAG_ENTRY_POOL_NAME,
				rc);
		break;
	/* Not yet supported */
	case TF_TBL_TYPE_ACT_ENCAP_32B:
	case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
	case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
	case TF_TBL_TYPE_VNIC_SVIF:
		break;
	/* No bitalloc pools for these types */
	case TF_TBL_TYPE_EXT:
	case TF_TBL_TYPE_EXT_0:
	default:
		break;
	}

	if (rc == -EOPNOTSUPP) {
		PMD_DRV_LOG(ERR,
			    "dir:%d, Table type not supported, type:%d\n",
			    dir,
			    type);
		return rc;
	} else if (rc == -1) {
		PMD_DRV_LOG(ERR,
			    "dir:%d, Table type lookup failed, type:%d\n",
			    dir,
			    type);
		return rc;
	}

	return 0;
}
3128
3129 int
3130 tf_rm_convert_tbl_type(enum tf_tbl_type type,
3131                        uint32_t *hcapi_type)
3132 {
3133         int rc = 0;
3134
3135         switch (type) {
3136         case TF_TBL_TYPE_FULL_ACT_RECORD:
3137                 *hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
3138                 break;
3139         case TF_TBL_TYPE_MCAST_GROUPS:
3140                 *hcapi_type = TF_RESC_TYPE_SRAM_MCG;
3141                 break;
3142         case TF_TBL_TYPE_ACT_ENCAP_8B:
3143                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
3144                 break;
3145         case TF_TBL_TYPE_ACT_ENCAP_16B:
3146                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
3147                 break;
3148         case TF_TBL_TYPE_ACT_ENCAP_64B:
3149                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
3150                 break;
3151         case TF_TBL_TYPE_ACT_SP_SMAC:
3152                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
3153                 break;
3154         case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3155                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
3156                 break;
3157         case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3158                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
3159                 break;
3160         case TF_TBL_TYPE_ACT_STATS_64:
3161                 *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
3162                 break;
3163         case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3164                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
3165                 break;
3166         case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3167                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
3168                 break;
3169         case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3170                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
3171                 break;
3172         case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3173                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
3174                 break;
3175         case TF_TBL_TYPE_METER_PROF:
3176                 *hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
3177                 break;
3178         case TF_TBL_TYPE_METER_INST:
3179                 *hcapi_type = TF_RESC_TYPE_HW_METER_INST;
3180                 break;
3181         case TF_TBL_TYPE_MIRROR_CONFIG:
3182                 *hcapi_type = TF_RESC_TYPE_HW_MIRROR;
3183                 break;
3184         case TF_TBL_TYPE_UPAR:
3185                 *hcapi_type = TF_RESC_TYPE_HW_UPAR;
3186                 break;
3187         case TF_TBL_TYPE_EPOCH0:
3188                 *hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
3189                 break;
3190         case TF_TBL_TYPE_EPOCH1:
3191                 *hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
3192                 break;
3193         case TF_TBL_TYPE_METADATA:
3194                 *hcapi_type = TF_RESC_TYPE_HW_METADATA;
3195                 break;
3196         case TF_TBL_TYPE_CT_STATE:
3197                 *hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
3198                 break;
3199         case TF_TBL_TYPE_RANGE_PROF:
3200                 *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
3201                 break;
3202         case TF_TBL_TYPE_RANGE_ENTRY:
3203                 *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
3204                 break;
3205         case TF_TBL_TYPE_LAG:
3206                 *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
3207                 break;
3208         /* Not yet supported */
3209         case TF_TBL_TYPE_ACT_ENCAP_32B:
3210         case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3211         case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3212         case TF_TBL_TYPE_VNIC_SVIF:
3213         case TF_TBL_TYPE_EXT:   /* No pools for this type */
3214         case TF_TBL_TYPE_EXT_0: /* No pools for this type */
3215         default:
3216                 *hcapi_type = -1;
3217                 rc = -EOPNOTSUPP;
3218         }
3219
3220         return rc;
3221 }
3222
3223 int
3224 tf_rm_convert_index(struct tf_session *tfs,
3225                     enum tf_dir dir,
3226                     enum tf_tbl_type type,
3227                     enum tf_rm_convert_type c_type,
3228                     uint32_t index,
3229                     uint32_t *convert_index)
3230 {
3231         int rc;
3232         struct tf_rm_resc *resc;
3233         uint32_t hcapi_type;
3234         uint32_t base_index;
3235
3236         if (dir == TF_DIR_RX)
3237                 resc = &tfs->resc.rx;
3238         else if (dir == TF_DIR_TX)
3239                 resc = &tfs->resc.tx;
3240         else
3241                 return -EOPNOTSUPP;
3242
3243         rc = tf_rm_convert_tbl_type(type, &hcapi_type);
3244         if (rc)
3245                 return -1;
3246
3247         switch (type) {
3248         case TF_TBL_TYPE_FULL_ACT_RECORD:
3249         case TF_TBL_TYPE_MCAST_GROUPS:
3250         case TF_TBL_TYPE_ACT_ENCAP_8B:
3251         case TF_TBL_TYPE_ACT_ENCAP_16B:
3252         case TF_TBL_TYPE_ACT_ENCAP_32B:
3253         case TF_TBL_TYPE_ACT_ENCAP_64B:
3254         case TF_TBL_TYPE_ACT_SP_SMAC:
3255         case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3256         case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3257         case TF_TBL_TYPE_ACT_STATS_64:
3258         case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3259         case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3260         case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3261         case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3262                 base_index = resc->sram_entry[hcapi_type].start;
3263                 break;
3264         case TF_TBL_TYPE_MIRROR_CONFIG:
3265         case TF_TBL_TYPE_METER_PROF:
3266         case TF_TBL_TYPE_METER_INST:
3267         case TF_TBL_TYPE_UPAR:
3268         case TF_TBL_TYPE_EPOCH0:
3269         case TF_TBL_TYPE_EPOCH1:
3270         case TF_TBL_TYPE_METADATA:
3271         case TF_TBL_TYPE_CT_STATE:
3272         case TF_TBL_TYPE_RANGE_PROF:
3273         case TF_TBL_TYPE_RANGE_ENTRY:
3274         case TF_TBL_TYPE_LAG:
3275                 base_index = resc->hw_entry[hcapi_type].start;
3276                 break;
3277         /* Not yet supported */
3278         case TF_TBL_TYPE_VNIC_SVIF:
3279         case TF_TBL_TYPE_EXT:   /* No pools for this type */
3280         case TF_TBL_TYPE_EXT_0: /* No pools for this type */
3281         default:
3282                 return -EOPNOTSUPP;
3283         }
3284
3285         switch (c_type) {
3286         case TF_RM_CONVERT_RM_BASE:
3287                 *convert_index = index - base_index;
3288                 break;
3289         case TF_RM_CONVERT_ADD_BASE:
3290                 *convert_index = index + base_index;
3291                 break;
3292         default:
3293                 return -EOPNOTSUPP;
3294         }
3295
3296         return 0;
3297 }