2264704d2b29a6b12da80cdf976011768aa524d0
[dpdk.git] / drivers / net / bnxt / tf_core / tf_rm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include <string.h>
7
8 #include <rte_common.h>
9
10 #include "tf_rm.h"
11 #include "tf_core.h"
12 #include "tf_util.h"
13 #include "tf_session.h"
14 #include "tf_resources.h"
15 #include "tf_msg.h"
16 #include "bnxt.h"
17
/**
 * Internal macro to perform a HW resource allocation check between what
 * firmware reports vs what was statically requested.
 *
 * The def_value define name is token-pasted with _RX or _TX based on
 * the direction. On a mismatch between the firmware reported max and
 * the direction-specific reserved define, the bit corresponding to
 * hcapi_type is set in the error flag.
 *
 * Parameters:
 *   struct tf_rm_hw_query    *hquery      - Pointer to the hw query result
 *   enum tf_dir               dir         - Direction to process
 *   enum tf_resource_type_hw  hcapi_type  - HCAPI type, the index element
 *                                           in the hw query structure
 *   define                    def_value   - Define value to check against
 *   uint32_t                 *eflag       - Result of the check
 */
#define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do {  \
	if ((dir) == TF_DIR_RX) {                                             \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
			*(eflag) |= 1 << (hcapi_type);                        \
	} else {                                                              \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
			*(eflag) |= 1 << (hcapi_type);                        \
	}                                                                     \
} while (0)
39
/**
 * Internal macro to perform a SRAM resource allocation check between
 * what firmware reports vs what was statically requested.
 *
 * The def_value define name is token-pasted with _RX or _TX based on
 * the direction. On a mismatch between the firmware reported max and
 * the direction-specific reserved define, the bit corresponding to
 * hcapi_type is set in the error flag.
 *
 * Parameters:
 *   struct tf_rm_sram_query   *squery      - Pointer to the sram query result
 *   enum tf_dir                dir         - Direction to process
 *   enum tf_resource_type_sram hcapi_type  - HCAPI type, the index element
 *                                            in the sram query structure
 *   define                     def_value   - Define value to check against
 *   uint32_t                  *eflag       - Result of the check
 */
#define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
	if ((dir) == TF_DIR_RX) {                                              \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
			*(eflag) |= 1 << (hcapi_type);                         \
	} else {                                                               \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
			*(eflag) |= 1 << (hcapi_type);                         \
	}                                                                      \
} while (0)
61
/**
 * Internal macro to convert a reserved resource define name to be
 * direction specific.
 *
 * The type define name is token-pasted with _RX or _TX based on the
 * direction and the resulting value is assigned to dtype.
 *
 * Parameters:
 *   enum tf_dir    dir         - Direction to process
 *   string         type        - Type name to append RX or TX to
 *   string         dtype       - Direction specific type
 */
#define TF_RESC_RSVD(dir, type, dtype) do {     \
		if ((dir) == TF_DIR_RX)         \
			(dtype) = type ## _RX;  \
		else                            \
			(dtype) = type ## _TX;  \
	} while (0)
79
80 const char
81 *tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)
82 {
83         switch (hw_type) {
84         case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
85                 return "L2 ctxt tcam";
86         case TF_RESC_TYPE_HW_PROF_FUNC:
87                 return "Profile Func";
88         case TF_RESC_TYPE_HW_PROF_TCAM:
89                 return "Profile tcam";
90         case TF_RESC_TYPE_HW_EM_PROF_ID:
91                 return "EM profile id";
92         case TF_RESC_TYPE_HW_EM_REC:
93                 return "EM record";
94         case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
95                 return "WC tcam profile id";
96         case TF_RESC_TYPE_HW_WC_TCAM:
97                 return "WC tcam";
98         case TF_RESC_TYPE_HW_METER_PROF:
99                 return "Meter profile";
100         case TF_RESC_TYPE_HW_METER_INST:
101                 return "Meter instance";
102         case TF_RESC_TYPE_HW_MIRROR:
103                 return "Mirror";
104         case TF_RESC_TYPE_HW_UPAR:
105                 return "UPAR";
106         case TF_RESC_TYPE_HW_SP_TCAM:
107                 return "Source properties tcam";
108         case TF_RESC_TYPE_HW_L2_FUNC:
109                 return "L2 Function";
110         case TF_RESC_TYPE_HW_FKB:
111                 return "FKB";
112         case TF_RESC_TYPE_HW_TBL_SCOPE:
113                 return "Table scope";
114         case TF_RESC_TYPE_HW_EPOCH0:
115                 return "EPOCH0";
116         case TF_RESC_TYPE_HW_EPOCH1:
117                 return "EPOCH1";
118         case TF_RESC_TYPE_HW_METADATA:
119                 return "Metadata";
120         case TF_RESC_TYPE_HW_CT_STATE:
121                 return "Connection tracking state";
122         case TF_RESC_TYPE_HW_RANGE_PROF:
123                 return "Range profile";
124         case TF_RESC_TYPE_HW_RANGE_ENTRY:
125                 return "Range entry";
126         case TF_RESC_TYPE_HW_LAG_ENTRY:
127                 return "LAG";
128         default:
129                 return "Invalid identifier";
130         }
131 }
132
133 const char
134 *tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
135 {
136         switch (sram_type) {
137         case TF_RESC_TYPE_SRAM_FULL_ACTION:
138                 return "Full action";
139         case TF_RESC_TYPE_SRAM_MCG:
140                 return "MCG";
141         case TF_RESC_TYPE_SRAM_ENCAP_8B:
142                 return "Encap 8B";
143         case TF_RESC_TYPE_SRAM_ENCAP_16B:
144                 return "Encap 16B";
145         case TF_RESC_TYPE_SRAM_ENCAP_64B:
146                 return "Encap 64B";
147         case TF_RESC_TYPE_SRAM_SP_SMAC:
148                 return "Source properties SMAC";
149         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
150                 return "Source properties SMAC IPv4";
151         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
152                 return "Source properties IPv6";
153         case TF_RESC_TYPE_SRAM_COUNTER_64B:
154                 return "Counter 64B";
155         case TF_RESC_TYPE_SRAM_NAT_SPORT:
156                 return "NAT source port";
157         case TF_RESC_TYPE_SRAM_NAT_DPORT:
158                 return "NAT destination port";
159         case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
160                 return "NAT source IPv4";
161         case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
162                 return "NAT destination IPv4";
163         default:
164                 return "Invalid identifier";
165         }
166 }
167
168 /**
169  * Helper function to perform a HW HCAPI resource type lookup against
170  * the reserved value of the same static type.
171  *
172  * Returns:
173  *   -EOPNOTSUPP - Reserved resource type not supported
174  *   Value       - Integer value of the reserved value for the requested type
175  */
176 static int
177 tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)
178 {
179         uint32_t value = -EOPNOTSUPP;
180
181         switch (index) {
182         case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
183                 TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);
184                 break;
185         case TF_RESC_TYPE_HW_PROF_FUNC:
186                 TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);
187                 break;
188         case TF_RESC_TYPE_HW_PROF_TCAM:
189                 TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);
190                 break;
191         case TF_RESC_TYPE_HW_EM_PROF_ID:
192                 TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value);
193                 break;
194         case TF_RESC_TYPE_HW_EM_REC:
195                 TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);
196                 break;
197         case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
198                 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);
199                 break;
200         case TF_RESC_TYPE_HW_WC_TCAM:
201                 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);
202                 break;
203         case TF_RESC_TYPE_HW_METER_PROF:
204                 TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);
205                 break;
206         case TF_RESC_TYPE_HW_METER_INST:
207                 TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);
208                 break;
209         case TF_RESC_TYPE_HW_MIRROR:
210                 TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);
211                 break;
212         case TF_RESC_TYPE_HW_UPAR:
213                 TF_RESC_RSVD(dir, TF_RSVD_UPAR, value);
214                 break;
215         case TF_RESC_TYPE_HW_SP_TCAM:
216                 TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);
217                 break;
218         case TF_RESC_TYPE_HW_L2_FUNC:
219                 TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);
220                 break;
221         case TF_RESC_TYPE_HW_FKB:
222                 TF_RESC_RSVD(dir, TF_RSVD_FKB, value);
223                 break;
224         case TF_RESC_TYPE_HW_TBL_SCOPE:
225                 TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);
226                 break;
227         case TF_RESC_TYPE_HW_EPOCH0:
228                 TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);
229                 break;
230         case TF_RESC_TYPE_HW_EPOCH1:
231                 TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);
232                 break;
233         case TF_RESC_TYPE_HW_METADATA:
234                 TF_RESC_RSVD(dir, TF_RSVD_METADATA, value);
235                 break;
236         case TF_RESC_TYPE_HW_CT_STATE:
237                 TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);
238                 break;
239         case TF_RESC_TYPE_HW_RANGE_PROF:
240                 TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);
241                 break;
242         case TF_RESC_TYPE_HW_RANGE_ENTRY:
243                 TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);
244                 break;
245         case TF_RESC_TYPE_HW_LAG_ENTRY:
246                 TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);
247                 break;
248         default:
249                 break;
250         }
251
252         return value;
253 }
254
255 /**
256  * Helper function to perform a SRAM HCAPI resource type lookup
257  * against the reserved value of the same static type.
258  *
259  * Returns:
260  *   -EOPNOTSUPP - Reserved resource type not supported
261  *   Value       - Integer value of the reserved value for the requested type
262  */
263 static int
264 tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
265 {
266         uint32_t value = -EOPNOTSUPP;
267
268         switch (index) {
269         case TF_RESC_TYPE_SRAM_FULL_ACTION:
270                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
271                 break;
272         case TF_RESC_TYPE_SRAM_MCG:
273                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
274                 break;
275         case TF_RESC_TYPE_SRAM_ENCAP_8B:
276                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
277                 break;
278         case TF_RESC_TYPE_SRAM_ENCAP_16B:
279                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
280                 break;
281         case TF_RESC_TYPE_SRAM_ENCAP_64B:
282                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
283                 break;
284         case TF_RESC_TYPE_SRAM_SP_SMAC:
285                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
286                 break;
287         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
288                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
289                 break;
290         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
291                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
292                 break;
293         case TF_RESC_TYPE_SRAM_COUNTER_64B:
294                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
295                 break;
296         case TF_RESC_TYPE_SRAM_NAT_SPORT:
297                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
298                 break;
299         case TF_RESC_TYPE_SRAM_NAT_DPORT:
300                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
301                 break;
302         case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
303                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
304                 break;
305         case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
306                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
307                 break;
308         default:
309                 break;
310         }
311
312         return value;
313 }
314
/**
 * Helper function to print all the HW resource qcaps errors reported
 * in the error_flag.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] hw_query
 *   Pointer to the hw query result that the check was performed
 *   against; holds the firmware reported max per HCAPI type.
 *
 * [in] error_flag
 *   Pointer to the hw error flags created at time of the query check
 */
static void
tf_rm_print_hw_qcaps_error(enum tf_dir dir,
			   struct tf_rm_hw_query *hw_query,
			   uint32_t *error_flag)
{
	int i;

	PMD_DRV_LOG(ERR, "QCAPS errors HW\n");
	PMD_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
	PMD_DRV_LOG(ERR, "  Elements:\n");

	/* One bit per HCAPI type; for each flagged type log what the
	 * firmware offers vs what was statically reserved.
	 */
	for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
		if (*error_flag & 1 << i)
			PMD_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
				    tf_hcapi_hw_2_str(i),
				    hw_query->hw_query[i].max,
				    tf_rm_rsvd_hw_value(dir, i));
	}
}
344
/**
 * Helper function to print all the SRAM resource qcaps errors
 * reported in the error_flag.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] sram_query
 *   Pointer to the sram query result that the check was performed
 *   against; holds the firmware reported max per HCAPI type.
 *
 * [in] error_flag
 *   Pointer to the sram error flags created at time of the query check
 */
static void
tf_rm_print_sram_qcaps_error(enum tf_dir dir,
			     struct tf_rm_sram_query *sram_query,
			     uint32_t *error_flag)
{
	int i;

	PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n");
	PMD_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
	PMD_DRV_LOG(ERR, "  Elements:\n");

	/* One bit per HCAPI type; for each flagged type log what the
	 * firmware offers vs what was statically reserved.
	 */
	for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
		if (*error_flag & 1 << i)
			PMD_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
				    tf_hcapi_sram_2_str(i),
				    sram_query->sram_query[i].max,
				    tf_rm_rsvd_sram_value(dir, i));
	}
}
374
/**
 * Performs a HW resource check between what firmware capability
 * reports and what the core expects is available.
 *
 * Firmware performs the resource carving at AFM init time and the
 * resource capability is reported in the TruFlow qcaps msg.
 *
 * [in] query
 *   Pointer to HW Query data structure. Query holds what the firmware
 *   offers of the HW resources.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in/out] error_flag
 *   Pointer to a bit array indicating the error of a single HCAPI
 *   resource type. When a bit is set to 1, the HCAPI resource type
 *   failed static allocation.
 *
 * Returns:
 *  0       - Success
 *  -ENOMEM - Failure on one of the allocated resources. Check the
 *            error_flag for what types are flagged errored.
 */
static int
tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
			    enum tf_dir dir,
			    uint32_t *error_flag)
{
	*error_flag = 0;

	/* Check each HW HCAPI type against its direction specific
	 * static reservation; each failing type sets its bit in
	 * error_flag.
	 */
	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_L2_CTXT_TCAM,
			     TF_RSVD_L2_CTXT_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_PROF_FUNC,
			     TF_RSVD_PROF_FUNC,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_PROF_TCAM,
			     TF_RSVD_PROF_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EM_PROF_ID,
			     TF_RSVD_EM_PROF_ID,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EM_REC,
			     TF_RSVD_EM_REC,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,
			     TF_RSVD_WC_TCAM_PROF_ID,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_WC_TCAM,
			     TF_RSVD_WC_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_METER_PROF,
			     TF_RSVD_METER_PROF,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_METER_INST,
			     TF_RSVD_METER_INST,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_MIRROR,
			     TF_RSVD_MIRROR,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_UPAR,
			     TF_RSVD_UPAR,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_SP_TCAM,
			     TF_RSVD_SP_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_L2_FUNC,
			     TF_RSVD_L2_FUNC,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_FKB,
			     TF_RSVD_FKB,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_TBL_SCOPE,
			     TF_RSVD_TBL_SCOPE,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EPOCH0,
			     TF_RSVD_EPOCH0,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EPOCH1,
			     TF_RSVD_EPOCH1,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_METADATA,
			     TF_RSVD_METADATA,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_CT_STATE,
			     TF_RSVD_CT_STATE,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_RANGE_PROF,
			     TF_RSVD_RANGE_PROF,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_RANGE_ENTRY,
			     TF_RSVD_RANGE_ENTRY,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_LAG_ENTRY,
			     TF_RSVD_LAG_ENTRY,
			     error_flag);

	/* Any set bit means at least one type failed */
	if (*error_flag != 0)
		return -ENOMEM;

	return 0;
}
543
/**
 * Performs a SRAM resource check between what firmware capability
 * reports and what the core expects is available.
 *
 * Firmware performs the resource carving at AFM init time and the
 * resource capability is reported in the TruFlow qcaps msg.
 *
 * [in] query
 *   Pointer to SRAM Query data structure. Query holds what the
 *   firmware offers of the SRAM resources.
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in/out] error_flag
 *   Pointer to a bit array indicating the error of a single HCAPI
 *   resource type. When a bit is set to 1, the HCAPI resource type
 *   failed static allocation.
 *
 * Returns:
 *  0       - Success
 *  -ENOMEM - Failure on one of the allocated resources. Check the
 *            error_flag for what types are flagged errored.
 */
static int
tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
			      enum tf_dir dir,
			      uint32_t *error_flag)
{
	*error_flag = 0;

	/* Check each SRAM HCAPI type against its direction specific
	 * static reservation; each failing type sets its bit in
	 * error_flag.
	 */
	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_FULL_ACTION,
			       TF_RSVD_SRAM_FULL_ACTION,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_MCG,
			       TF_RSVD_SRAM_MCG,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_8B,
			       TF_RSVD_SRAM_ENCAP_8B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_16B,
			       TF_RSVD_SRAM_ENCAP_16B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_64B,
			       TF_RSVD_SRAM_ENCAP_64B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC,
			       TF_RSVD_SRAM_SP_SMAC,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
			       TF_RSVD_SRAM_SP_SMAC_IPV4,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
			       TF_RSVD_SRAM_SP_SMAC_IPV6,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_COUNTER_64B,
			       TF_RSVD_SRAM_COUNTER_64B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_SPORT,
			       TF_RSVD_SRAM_NAT_SPORT,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_DPORT,
			       TF_RSVD_SRAM_NAT_DPORT,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_S_IPV4,
			       TF_RSVD_SRAM_NAT_S_IPV4,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_D_IPV4,
			       TF_RSVD_SRAM_NAT_D_IPV4,
			       error_flag);

	/* Any set bit means at least one type failed */
	if (*error_flag != 0)
		return -ENOMEM;

	return 0;
}
658
/**
 * Internal function to mark pool entries used.
 *
 * Marks every index outside the inclusive reserved range
 * [rsv_begin, rsv_end] as allocated ('used') so that only the
 * reserved entries remain available in the pool.
 *
 * [in] count
 *   Number of reserved entries; 0 means nothing is reserved and the
 *   whole pool is marked used.
 *
 * [in] rsv_begin
 *   First index of the reserved range (inclusive)
 *
 * [in] rsv_end
 *   Last index of the reserved range (inclusive); callers pass
 *   start + stride - 1.
 *
 * [in] max
 *   Total number of entries in the pool
 *
 * [in] pool
 *   Pool to mark
 */
static void
tf_rm_reserve_range(uint32_t count,
		    uint32_t rsv_begin,
		    uint32_t rsv_end,
		    uint32_t max,
		    struct bitalloc *pool)
{
	uint32_t i;

	/* If no resources has been requested we mark everything
	 * 'used'
	 */
	if (count == 0) {
		for (i = 0; i < max; i++)
			ba_alloc_index(pool, i);
	} else {
		/* Support 2 main modes
		 * Reserved range starts from bottom up (with
		 * pre-reserved value or not)
		 * - begin = 0 to end xx
		 * - begin = 1 to end xx
		 *
		 * Reserved range starts from top down
		 * - begin = yy to end max
		 */

		/* Bottom up check, start from 0 */
		if (rsv_begin == 0) {
			for (i = rsv_end + 1; i < max; i++)
				ba_alloc_index(pool, i);
		}

		/* Bottom up check, start from 1 or higher OR
		 * Top Down
		 */
		if (rsv_begin >= 1) {
			/* Allocate from 0 until start */
			for (i = 0; i < rsv_begin; i++)
				ba_alloc_index(pool, i);

			/* Skip the reserved range and mark the
			 * remaining entries used. Start at rsv_end + 1
			 * (was rsv_end, which wrongly consumed the last
			 * reserved index); this matches the
			 * rsv_begin == 0 branch above and the
			 * rsv_end < max - 1 guard.
			 */
			if (rsv_end < max - 1) {
				for (i = rsv_end + 1; i < max; i++)
					ba_alloc_index(pool, i);
			}
		}
	}
}
710
711 /**
712  * Internal function to mark all the l2 ctxt allocated that Truflow
713  * does not own.
714  */
715 static void
716 tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
717 {
718         uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
719         uint32_t end = 0;
720
721         /* l2 ctxt rx direction */
722         if (tfs->resc.rx.hw_entry[index].stride > 0)
723                 end = tfs->resc.rx.hw_entry[index].start +
724                         tfs->resc.rx.hw_entry[index].stride - 1;
725
726         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
727                             tfs->resc.rx.hw_entry[index].start,
728                             end,
729                             TF_NUM_L2_CTXT_TCAM,
730                             tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
731
732         /* l2 ctxt tx direction */
733         if (tfs->resc.tx.hw_entry[index].stride > 0)
734                 end = tfs->resc.tx.hw_entry[index].start +
735                         tfs->resc.tx.hw_entry[index].stride - 1;
736
737         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
738                             tfs->resc.tx.hw_entry[index].start,
739                             end,
740                             TF_NUM_L2_CTXT_TCAM,
741                             tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
742 }
743
744 /**
745  * Internal function to mark all the profile tcam and profile func
746  * resources that Truflow does not own.
747  */
748 static void
749 tf_rm_rsvd_prof(struct tf_session *tfs)
750 {
751         uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;
752         uint32_t end = 0;
753
754         /* profile func rx direction */
755         if (tfs->resc.rx.hw_entry[index].stride > 0)
756                 end = tfs->resc.rx.hw_entry[index].start +
757                         tfs->resc.rx.hw_entry[index].stride - 1;
758
759         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
760                             tfs->resc.rx.hw_entry[index].start,
761                             end,
762                             TF_NUM_PROF_FUNC,
763                             tfs->TF_PROF_FUNC_POOL_NAME_RX);
764
765         /* profile func tx direction */
766         if (tfs->resc.tx.hw_entry[index].stride > 0)
767                 end = tfs->resc.tx.hw_entry[index].start +
768                         tfs->resc.tx.hw_entry[index].stride - 1;
769
770         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
771                             tfs->resc.tx.hw_entry[index].start,
772                             end,
773                             TF_NUM_PROF_FUNC,
774                             tfs->TF_PROF_FUNC_POOL_NAME_TX);
775
776         index = TF_RESC_TYPE_HW_PROF_TCAM;
777
778         /* profile tcam rx direction */
779         if (tfs->resc.rx.hw_entry[index].stride > 0)
780                 end = tfs->resc.rx.hw_entry[index].start +
781                         tfs->resc.rx.hw_entry[index].stride - 1;
782
783         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
784                             tfs->resc.rx.hw_entry[index].start,
785                             end,
786                             TF_NUM_PROF_TCAM,
787                             tfs->TF_PROF_TCAM_POOL_NAME_RX);
788
789         /* profile tcam tx direction */
790         if (tfs->resc.tx.hw_entry[index].stride > 0)
791                 end = tfs->resc.tx.hw_entry[index].start +
792                         tfs->resc.tx.hw_entry[index].stride - 1;
793
794         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
795                             tfs->resc.tx.hw_entry[index].start,
796                             end,
797                             TF_NUM_PROF_TCAM,
798                             tfs->TF_PROF_TCAM_POOL_NAME_TX);
799 }
800
801 /**
802  * Internal function to mark all the em profile id allocated that
803  * Truflow does not own.
804  */
805 static void
806 tf_rm_rsvd_em_prof(struct tf_session *tfs)
807 {
808         uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;
809         uint32_t end = 0;
810
811         /* em prof id rx direction */
812         if (tfs->resc.rx.hw_entry[index].stride > 0)
813                 end = tfs->resc.rx.hw_entry[index].start +
814                         tfs->resc.rx.hw_entry[index].stride - 1;
815
816         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
817                             tfs->resc.rx.hw_entry[index].start,
818                             end,
819                             TF_NUM_EM_PROF_ID,
820                             tfs->TF_EM_PROF_ID_POOL_NAME_RX);
821
822         /* em prof id tx direction */
823         if (tfs->resc.tx.hw_entry[index].stride > 0)
824                 end = tfs->resc.tx.hw_entry[index].start +
825                         tfs->resc.tx.hw_entry[index].stride - 1;
826
827         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
828                             tfs->resc.tx.hw_entry[index].start,
829                             end,
830                             TF_NUM_EM_PROF_ID,
831                             tfs->TF_EM_PROF_ID_POOL_NAME_TX);
832 }
833
834 /**
835  * Internal function to mark all the wildcard tcam and profile id
836  * resources that Truflow does not own.
837  */
838 static void
839 tf_rm_rsvd_wc(struct tf_session *tfs)
840 {
841         uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;
842         uint32_t end = 0;
843
844         /* wc profile id rx direction */
845         if (tfs->resc.rx.hw_entry[index].stride > 0)
846                 end = tfs->resc.rx.hw_entry[index].start +
847                         tfs->resc.rx.hw_entry[index].stride - 1;
848
849         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
850                             tfs->resc.rx.hw_entry[index].start,
851                             end,
852                             TF_NUM_WC_PROF_ID,
853                             tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);
854
855         /* wc profile id tx direction */
856         if (tfs->resc.tx.hw_entry[index].stride > 0)
857                 end = tfs->resc.tx.hw_entry[index].start +
858                         tfs->resc.tx.hw_entry[index].stride - 1;
859
860         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
861                             tfs->resc.tx.hw_entry[index].start,
862                             end,
863                             TF_NUM_WC_PROF_ID,
864                             tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);
865
866         index = TF_RESC_TYPE_HW_WC_TCAM;
867
868         /* wc tcam rx direction */
869         if (tfs->resc.rx.hw_entry[index].stride > 0)
870                 end = tfs->resc.rx.hw_entry[index].start +
871                         tfs->resc.rx.hw_entry[index].stride - 1;
872
873         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
874                             tfs->resc.rx.hw_entry[index].start,
875                             end,
876                             TF_NUM_WC_TCAM_ROW,
877                             tfs->TF_WC_TCAM_POOL_NAME_RX);
878
879         /* wc tcam tx direction */
880         if (tfs->resc.tx.hw_entry[index].stride > 0)
881                 end = tfs->resc.tx.hw_entry[index].start +
882                         tfs->resc.tx.hw_entry[index].stride - 1;
883
884         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
885                             tfs->resc.tx.hw_entry[index].start,
886                             end,
887                             TF_NUM_WC_TCAM_ROW,
888                             tfs->TF_WC_TCAM_POOL_NAME_TX);
889 }
890
891 /**
892  * Internal function to mark all the meter resources allocated that
893  * Truflow does not own.
894  */
895 static void
896 tf_rm_rsvd_meter(struct tf_session *tfs)
897 {
898         uint32_t index = TF_RESC_TYPE_HW_METER_PROF;
899         uint32_t end = 0;
900
901         /* meter profiles rx direction */
902         if (tfs->resc.rx.hw_entry[index].stride > 0)
903                 end = tfs->resc.rx.hw_entry[index].start +
904                         tfs->resc.rx.hw_entry[index].stride - 1;
905
906         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
907                             tfs->resc.rx.hw_entry[index].start,
908                             end,
909                             TF_NUM_METER_PROF,
910                             tfs->TF_METER_PROF_POOL_NAME_RX);
911
912         /* meter profiles tx direction */
913         if (tfs->resc.tx.hw_entry[index].stride > 0)
914                 end = tfs->resc.tx.hw_entry[index].start +
915                         tfs->resc.tx.hw_entry[index].stride - 1;
916
917         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
918                             tfs->resc.tx.hw_entry[index].start,
919                             end,
920                             TF_NUM_METER_PROF,
921                             tfs->TF_METER_PROF_POOL_NAME_TX);
922
923         index = TF_RESC_TYPE_HW_METER_INST;
924
925         /* meter rx direction */
926         if (tfs->resc.rx.hw_entry[index].stride > 0)
927                 end = tfs->resc.rx.hw_entry[index].start +
928                         tfs->resc.rx.hw_entry[index].stride - 1;
929
930         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
931                             tfs->resc.rx.hw_entry[index].start,
932                             end,
933                             TF_NUM_METER,
934                             tfs->TF_METER_INST_POOL_NAME_RX);
935
936         /* meter tx direction */
937         if (tfs->resc.tx.hw_entry[index].stride > 0)
938                 end = tfs->resc.tx.hw_entry[index].start +
939                         tfs->resc.tx.hw_entry[index].stride - 1;
940
941         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
942                             tfs->resc.tx.hw_entry[index].start,
943                             end,
944                             TF_NUM_METER,
945                             tfs->TF_METER_INST_POOL_NAME_TX);
946 }
947
948 /**
949  * Internal function to mark all the mirror resources allocated that
950  * Truflow does not own.
951  */
952 static void
953 tf_rm_rsvd_mirror(struct tf_session *tfs)
954 {
955         uint32_t index = TF_RESC_TYPE_HW_MIRROR;
956         uint32_t end = 0;
957
958         /* mirror rx direction */
959         if (tfs->resc.rx.hw_entry[index].stride > 0)
960                 end = tfs->resc.rx.hw_entry[index].start +
961                         tfs->resc.rx.hw_entry[index].stride - 1;
962
963         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
964                             tfs->resc.rx.hw_entry[index].start,
965                             end,
966                             TF_NUM_MIRROR,
967                             tfs->TF_MIRROR_POOL_NAME_RX);
968
969         /* mirror tx direction */
970         if (tfs->resc.tx.hw_entry[index].stride > 0)
971                 end = tfs->resc.tx.hw_entry[index].start +
972                         tfs->resc.tx.hw_entry[index].stride - 1;
973
974         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
975                             tfs->resc.tx.hw_entry[index].start,
976                             end,
977                             TF_NUM_MIRROR,
978                             tfs->TF_MIRROR_POOL_NAME_TX);
979 }
980
981 /**
982  * Internal function to mark all the upar resources allocated that
983  * Truflow does not own.
984  */
985 static void
986 tf_rm_rsvd_upar(struct tf_session *tfs)
987 {
988         uint32_t index = TF_RESC_TYPE_HW_UPAR;
989         uint32_t end = 0;
990
991         /* upar rx direction */
992         if (tfs->resc.rx.hw_entry[index].stride > 0)
993                 end = tfs->resc.rx.hw_entry[index].start +
994                         tfs->resc.rx.hw_entry[index].stride - 1;
995
996         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
997                             tfs->resc.rx.hw_entry[index].start,
998                             end,
999                             TF_NUM_UPAR,
1000                             tfs->TF_UPAR_POOL_NAME_RX);
1001
1002         /* upar tx direction */
1003         if (tfs->resc.tx.hw_entry[index].stride > 0)
1004                 end = tfs->resc.tx.hw_entry[index].start +
1005                         tfs->resc.tx.hw_entry[index].stride - 1;
1006
1007         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1008                             tfs->resc.tx.hw_entry[index].start,
1009                             end,
1010                             TF_NUM_UPAR,
1011                             tfs->TF_UPAR_POOL_NAME_TX);
1012 }
1013
1014 /**
1015  * Internal function to mark all the sp tcam resources allocated that
1016  * Truflow does not own.
1017  */
1018 static void
1019 tf_rm_rsvd_sp_tcam(struct tf_session *tfs)
1020 {
1021         uint32_t index = TF_RESC_TYPE_HW_SP_TCAM;
1022         uint32_t end = 0;
1023
1024         /* sp tcam rx direction */
1025         if (tfs->resc.rx.hw_entry[index].stride > 0)
1026                 end = tfs->resc.rx.hw_entry[index].start +
1027                         tfs->resc.rx.hw_entry[index].stride - 1;
1028
1029         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1030                             tfs->resc.rx.hw_entry[index].start,
1031                             end,
1032                             TF_NUM_SP_TCAM,
1033                             tfs->TF_SP_TCAM_POOL_NAME_RX);
1034
1035         /* sp tcam tx direction */
1036         if (tfs->resc.tx.hw_entry[index].stride > 0)
1037                 end = tfs->resc.tx.hw_entry[index].start +
1038                         tfs->resc.tx.hw_entry[index].stride - 1;
1039
1040         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1041                             tfs->resc.tx.hw_entry[index].start,
1042                             end,
1043                             TF_NUM_SP_TCAM,
1044                             tfs->TF_SP_TCAM_POOL_NAME_TX);
1045 }
1046
1047 /**
1048  * Internal function to mark all the l2 func resources allocated that
1049  * Truflow does not own.
1050  */
1051 static void
1052 tf_rm_rsvd_l2_func(struct tf_session *tfs)
1053 {
1054         uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
1055         uint32_t end = 0;
1056
1057         /* l2 func rx direction */
1058         if (tfs->resc.rx.hw_entry[index].stride > 0)
1059                 end = tfs->resc.rx.hw_entry[index].start +
1060                         tfs->resc.rx.hw_entry[index].stride - 1;
1061
1062         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1063                             tfs->resc.rx.hw_entry[index].start,
1064                             end,
1065                             TF_NUM_L2_FUNC,
1066                             tfs->TF_L2_FUNC_POOL_NAME_RX);
1067
1068         /* l2 func tx direction */
1069         if (tfs->resc.tx.hw_entry[index].stride > 0)
1070                 end = tfs->resc.tx.hw_entry[index].start +
1071                         tfs->resc.tx.hw_entry[index].stride - 1;
1072
1073         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1074                             tfs->resc.tx.hw_entry[index].start,
1075                             end,
1076                             TF_NUM_L2_FUNC,
1077                             tfs->TF_L2_FUNC_POOL_NAME_TX);
1078 }
1079
1080 /**
1081  * Internal function to mark all the fkb resources allocated that
1082  * Truflow does not own.
1083  */
1084 static void
1085 tf_rm_rsvd_fkb(struct tf_session *tfs)
1086 {
1087         uint32_t index = TF_RESC_TYPE_HW_FKB;
1088         uint32_t end = 0;
1089
1090         /* fkb rx direction */
1091         if (tfs->resc.rx.hw_entry[index].stride > 0)
1092                 end = tfs->resc.rx.hw_entry[index].start +
1093                         tfs->resc.rx.hw_entry[index].stride - 1;
1094
1095         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1096                             tfs->resc.rx.hw_entry[index].start,
1097                             end,
1098                             TF_NUM_FKB,
1099                             tfs->TF_FKB_POOL_NAME_RX);
1100
1101         /* fkb tx direction */
1102         if (tfs->resc.tx.hw_entry[index].stride > 0)
1103                 end = tfs->resc.tx.hw_entry[index].start +
1104                         tfs->resc.tx.hw_entry[index].stride - 1;
1105
1106         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1107                             tfs->resc.tx.hw_entry[index].start,
1108                             end,
1109                             TF_NUM_FKB,
1110                             tfs->TF_FKB_POOL_NAME_TX);
1111 }
1112
1113 /**
1114  * Internal function to mark all the tbld scope resources allocated
1115  * that Truflow does not own.
1116  */
1117 static void
1118 tf_rm_rsvd_tbl_scope(struct tf_session *tfs)
1119 {
1120         uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;
1121         uint32_t end = 0;
1122
1123         /* tbl scope rx direction */
1124         if (tfs->resc.rx.hw_entry[index].stride > 0)
1125                 end = tfs->resc.rx.hw_entry[index].start +
1126                         tfs->resc.rx.hw_entry[index].stride - 1;
1127
1128         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1129                             tfs->resc.rx.hw_entry[index].start,
1130                             end,
1131                             TF_NUM_TBL_SCOPE,
1132                             tfs->TF_TBL_SCOPE_POOL_NAME_RX);
1133
1134         /* tbl scope tx direction */
1135         if (tfs->resc.tx.hw_entry[index].stride > 0)
1136                 end = tfs->resc.tx.hw_entry[index].start +
1137                         tfs->resc.tx.hw_entry[index].stride - 1;
1138
1139         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1140                             tfs->resc.tx.hw_entry[index].start,
1141                             end,
1142                             TF_NUM_TBL_SCOPE,
1143                             tfs->TF_TBL_SCOPE_POOL_NAME_TX);
1144 }
1145
1146 /**
1147  * Internal function to mark all the l2 epoch resources allocated that
1148  * Truflow does not own.
1149  */
1150 static void
1151 tf_rm_rsvd_epoch(struct tf_session *tfs)
1152 {
1153         uint32_t index = TF_RESC_TYPE_HW_EPOCH0;
1154         uint32_t end = 0;
1155
1156         /* epoch0 rx direction */
1157         if (tfs->resc.rx.hw_entry[index].stride > 0)
1158                 end = tfs->resc.rx.hw_entry[index].start +
1159                         tfs->resc.rx.hw_entry[index].stride - 1;
1160
1161         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1162                             tfs->resc.rx.hw_entry[index].start,
1163                             end,
1164                             TF_NUM_EPOCH0,
1165                             tfs->TF_EPOCH0_POOL_NAME_RX);
1166
1167         /* epoch0 tx direction */
1168         if (tfs->resc.tx.hw_entry[index].stride > 0)
1169                 end = tfs->resc.tx.hw_entry[index].start +
1170                         tfs->resc.tx.hw_entry[index].stride - 1;
1171
1172         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1173                             tfs->resc.tx.hw_entry[index].start,
1174                             end,
1175                             TF_NUM_EPOCH0,
1176                             tfs->TF_EPOCH0_POOL_NAME_TX);
1177
1178         index = TF_RESC_TYPE_HW_EPOCH1;
1179
1180         /* epoch1 rx direction */
1181         if (tfs->resc.rx.hw_entry[index].stride > 0)
1182                 end = tfs->resc.rx.hw_entry[index].start +
1183                         tfs->resc.rx.hw_entry[index].stride - 1;
1184
1185         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1186                             tfs->resc.rx.hw_entry[index].start,
1187                             end,
1188                             TF_NUM_EPOCH1,
1189                             tfs->TF_EPOCH1_POOL_NAME_RX);
1190
1191         /* epoch1 tx direction */
1192         if (tfs->resc.tx.hw_entry[index].stride > 0)
1193                 end = tfs->resc.tx.hw_entry[index].start +
1194                         tfs->resc.tx.hw_entry[index].stride - 1;
1195
1196         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1197                             tfs->resc.tx.hw_entry[index].start,
1198                             end,
1199                             TF_NUM_EPOCH1,
1200                             tfs->TF_EPOCH1_POOL_NAME_TX);
1201 }
1202
1203 /**
1204  * Internal function to mark all the metadata resources allocated that
1205  * Truflow does not own.
1206  */
1207 static void
1208 tf_rm_rsvd_metadata(struct tf_session *tfs)
1209 {
1210         uint32_t index = TF_RESC_TYPE_HW_METADATA;
1211         uint32_t end = 0;
1212
1213         /* metadata rx direction */
1214         if (tfs->resc.rx.hw_entry[index].stride > 0)
1215                 end = tfs->resc.rx.hw_entry[index].start +
1216                         tfs->resc.rx.hw_entry[index].stride - 1;
1217
1218         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1219                             tfs->resc.rx.hw_entry[index].start,
1220                             end,
1221                             TF_NUM_METADATA,
1222                             tfs->TF_METADATA_POOL_NAME_RX);
1223
1224         /* metadata tx direction */
1225         if (tfs->resc.tx.hw_entry[index].stride > 0)
1226                 end = tfs->resc.tx.hw_entry[index].start +
1227                         tfs->resc.tx.hw_entry[index].stride - 1;
1228
1229         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1230                             tfs->resc.tx.hw_entry[index].start,
1231                             end,
1232                             TF_NUM_METADATA,
1233                             tfs->TF_METADATA_POOL_NAME_TX);
1234 }
1235
1236 /**
1237  * Internal function to mark all the ct state resources allocated that
1238  * Truflow does not own.
1239  */
1240 static void
1241 tf_rm_rsvd_ct_state(struct tf_session *tfs)
1242 {
1243         uint32_t index = TF_RESC_TYPE_HW_CT_STATE;
1244         uint32_t end = 0;
1245
1246         /* ct state rx direction */
1247         if (tfs->resc.rx.hw_entry[index].stride > 0)
1248                 end = tfs->resc.rx.hw_entry[index].start +
1249                         tfs->resc.rx.hw_entry[index].stride - 1;
1250
1251         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1252                             tfs->resc.rx.hw_entry[index].start,
1253                             end,
1254                             TF_NUM_CT_STATE,
1255                             tfs->TF_CT_STATE_POOL_NAME_RX);
1256
1257         /* ct state tx direction */
1258         if (tfs->resc.tx.hw_entry[index].stride > 0)
1259                 end = tfs->resc.tx.hw_entry[index].start +
1260                         tfs->resc.tx.hw_entry[index].stride - 1;
1261
1262         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1263                             tfs->resc.tx.hw_entry[index].start,
1264                             end,
1265                             TF_NUM_CT_STATE,
1266                             tfs->TF_CT_STATE_POOL_NAME_TX);
1267 }
1268
1269 /**
1270  * Internal function to mark all the range resources allocated that
1271  * Truflow does not own.
1272  */
1273 static void
1274 tf_rm_rsvd_range(struct tf_session *tfs)
1275 {
1276         uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;
1277         uint32_t end = 0;
1278
1279         /* range profile rx direction */
1280         if (tfs->resc.rx.hw_entry[index].stride > 0)
1281                 end = tfs->resc.rx.hw_entry[index].start +
1282                         tfs->resc.rx.hw_entry[index].stride - 1;
1283
1284         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1285                             tfs->resc.rx.hw_entry[index].start,
1286                             end,
1287                             TF_NUM_RANGE_PROF,
1288                             tfs->TF_RANGE_PROF_POOL_NAME_RX);
1289
1290         /* range profile tx direction */
1291         if (tfs->resc.tx.hw_entry[index].stride > 0)
1292                 end = tfs->resc.tx.hw_entry[index].start +
1293                         tfs->resc.tx.hw_entry[index].stride - 1;
1294
1295         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1296                             tfs->resc.tx.hw_entry[index].start,
1297                             end,
1298                             TF_NUM_RANGE_PROF,
1299                             tfs->TF_RANGE_PROF_POOL_NAME_TX);
1300
1301         index = TF_RESC_TYPE_HW_RANGE_ENTRY;
1302
1303         /* range entry rx direction */
1304         if (tfs->resc.rx.hw_entry[index].stride > 0)
1305                 end = tfs->resc.rx.hw_entry[index].start +
1306                         tfs->resc.rx.hw_entry[index].stride - 1;
1307
1308         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1309                             tfs->resc.rx.hw_entry[index].start,
1310                             end,
1311                             TF_NUM_RANGE_ENTRY,
1312                             tfs->TF_RANGE_ENTRY_POOL_NAME_RX);
1313
1314         /* range entry tx direction */
1315         if (tfs->resc.tx.hw_entry[index].stride > 0)
1316                 end = tfs->resc.tx.hw_entry[index].start +
1317                         tfs->resc.tx.hw_entry[index].stride - 1;
1318
1319         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1320                             tfs->resc.tx.hw_entry[index].start,
1321                             end,
1322                             TF_NUM_RANGE_ENTRY,
1323                             tfs->TF_RANGE_ENTRY_POOL_NAME_TX);
1324 }
1325
1326 /**
1327  * Internal function to mark all the lag resources allocated that
1328  * Truflow does not own.
1329  */
1330 static void
1331 tf_rm_rsvd_lag_entry(struct tf_session *tfs)
1332 {
1333         uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;
1334         uint32_t end = 0;
1335
1336         /* lag entry rx direction */
1337         if (tfs->resc.rx.hw_entry[index].stride > 0)
1338                 end = tfs->resc.rx.hw_entry[index].start +
1339                         tfs->resc.rx.hw_entry[index].stride - 1;
1340
1341         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1342                             tfs->resc.rx.hw_entry[index].start,
1343                             end,
1344                             TF_NUM_LAG_ENTRY,
1345                             tfs->TF_LAG_ENTRY_POOL_NAME_RX);
1346
1347         /* lag entry tx direction */
1348         if (tfs->resc.tx.hw_entry[index].stride > 0)
1349                 end = tfs->resc.tx.hw_entry[index].start +
1350                         tfs->resc.tx.hw_entry[index].stride - 1;
1351
1352         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1353                             tfs->resc.tx.hw_entry[index].start,
1354                             end,
1355                             TF_NUM_LAG_ENTRY,
1356                             tfs->TF_LAG_ENTRY_POOL_NAME_TX);
1357 }
1358
1359 /**
1360  * Internal function to mark all the full action resources allocated
1361  * that Truflow does not own.
1362  */
1363 static void
1364 tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
1365 {
1366         uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
1367         uint16_t end = 0;
1368
1369         /* full action rx direction */
1370         if (tfs->resc.rx.sram_entry[index].stride > 0)
1371                 end = tfs->resc.rx.sram_entry[index].start +
1372                         tfs->resc.rx.sram_entry[index].stride - 1;
1373
1374         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1375                             TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
1376                             end,
1377                             TF_RSVD_SRAM_FULL_ACTION_RX,
1378                             tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);
1379
1380         /* full action tx direction */
1381         if (tfs->resc.tx.sram_entry[index].stride > 0)
1382                 end = tfs->resc.tx.sram_entry[index].start +
1383                         tfs->resc.tx.sram_entry[index].stride - 1;
1384
1385         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1386                             TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
1387                             end,
1388                             TF_RSVD_SRAM_FULL_ACTION_TX,
1389                             tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
1390 }
1391
1392 /**
1393  * Internal function to mark all the multicast group resources
1394  * allocated that Truflow does not own.
1395  */
1396 static void
1397 tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
1398 {
1399         uint32_t index = TF_RESC_TYPE_SRAM_MCG;
1400         uint16_t end = 0;
1401
1402         /* multicast group rx direction */
1403         if (tfs->resc.rx.sram_entry[index].stride > 0)
1404                 end = tfs->resc.rx.sram_entry[index].start +
1405                         tfs->resc.rx.sram_entry[index].stride - 1;
1406
1407         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1408                             TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
1409                             end,
1410                             TF_RSVD_SRAM_MCG_RX,
1411                             tfs->TF_SRAM_MCG_POOL_NAME_RX);
1412
1413         /* Multicast Group on TX is not supported */
1414 }
1415
1416 /**
1417  * Internal function to mark all the encap resources allocated that
1418  * Truflow does not own.
1419  */
1420 static void
1421 tf_rm_rsvd_sram_encap(struct tf_session *tfs)
1422 {
1423         uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
1424         uint16_t end = 0;
1425
1426         /* encap 8b rx direction */
1427         if (tfs->resc.rx.sram_entry[index].stride > 0)
1428                 end = tfs->resc.rx.sram_entry[index].start +
1429                         tfs->resc.rx.sram_entry[index].stride - 1;
1430
1431         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1432                             TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
1433                             end,
1434                             TF_RSVD_SRAM_ENCAP_8B_RX,
1435                             tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);
1436
1437         /* encap 8b tx direction */
1438         if (tfs->resc.tx.sram_entry[index].stride > 0)
1439                 end = tfs->resc.tx.sram_entry[index].start +
1440                         tfs->resc.tx.sram_entry[index].stride - 1;
1441
1442         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1443                             TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
1444                             end,
1445                             TF_RSVD_SRAM_ENCAP_8B_TX,
1446                             tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);
1447
1448         index = TF_RESC_TYPE_SRAM_ENCAP_16B;
1449
1450         /* encap 16b rx direction */
1451         if (tfs->resc.rx.sram_entry[index].stride > 0)
1452                 end = tfs->resc.rx.sram_entry[index].start +
1453                         tfs->resc.rx.sram_entry[index].stride - 1;
1454
1455         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1456                             TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
1457                             end,
1458                             TF_RSVD_SRAM_ENCAP_16B_RX,
1459                             tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);
1460
1461         /* encap 16b tx direction */
1462         if (tfs->resc.tx.sram_entry[index].stride > 0)
1463                 end = tfs->resc.tx.sram_entry[index].start +
1464                         tfs->resc.tx.sram_entry[index].stride - 1;
1465
1466         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1467                             TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
1468                             end,
1469                             TF_RSVD_SRAM_ENCAP_16B_TX,
1470                             tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);
1471
1472         index = TF_RESC_TYPE_SRAM_ENCAP_64B;
1473
1474         /* Encap 64B not supported on RX */
1475
1476         /* Encap 64b tx direction */
1477         if (tfs->resc.tx.sram_entry[index].stride > 0)
1478                 end = tfs->resc.tx.sram_entry[index].start +
1479                         tfs->resc.tx.sram_entry[index].stride - 1;
1480
1481         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1482                             TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
1483                             end,
1484                             TF_RSVD_SRAM_ENCAP_64B_TX,
1485                             tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
1486 }
1487
1488 /**
1489  * Internal function to mark all the sp resources allocated that
1490  * Truflow does not own.
1491  */
1492 static void
1493 tf_rm_rsvd_sram_sp(struct tf_session *tfs)
1494 {
1495         uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
1496         uint16_t end = 0;
1497
1498         /* sp smac rx direction */
1499         if (tfs->resc.rx.sram_entry[index].stride > 0)
1500                 end = tfs->resc.rx.sram_entry[index].start +
1501                         tfs->resc.rx.sram_entry[index].stride - 1;
1502
1503         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1504                             TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
1505                             end,
1506                             TF_RSVD_SRAM_SP_SMAC_RX,
1507                             tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);
1508
1509         /* sp smac tx direction */
1510         if (tfs->resc.tx.sram_entry[index].stride > 0)
1511                 end = tfs->resc.tx.sram_entry[index].start +
1512                         tfs->resc.tx.sram_entry[index].stride - 1;
1513
1514         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1515                             TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
1516                             end,
1517                             TF_RSVD_SRAM_SP_SMAC_TX,
1518                             tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);
1519
1520         index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
1521
1522         /* SP SMAC IPv4 not supported on RX */
1523
1524         /* sp smac ipv4 tx direction */
1525         if (tfs->resc.tx.sram_entry[index].stride > 0)
1526                 end = tfs->resc.tx.sram_entry[index].start +
1527                         tfs->resc.tx.sram_entry[index].stride - 1;
1528
1529         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1530                             TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
1531                             end,
1532                             TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
1533                             tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);
1534
1535         index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
1536
1537         /* SP SMAC IPv6 not supported on RX */
1538
1539         /* sp smac ipv6 tx direction */
1540         if (tfs->resc.tx.sram_entry[index].stride > 0)
1541                 end = tfs->resc.tx.sram_entry[index].start +
1542                         tfs->resc.tx.sram_entry[index].stride - 1;
1543
1544         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1545                             TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
1546                             end,
1547                             TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
1548                             tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
1549 }
1550
1551 /**
1552  * Internal function to mark all the stat resources allocated that
1553  * Truflow does not own.
1554  */
1555 static void
1556 tf_rm_rsvd_sram_stats(struct tf_session *tfs)
1557 {
1558         uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
1559         uint16_t end = 0;
1560
1561         /* counter 64b rx direction */
1562         if (tfs->resc.rx.sram_entry[index].stride > 0)
1563                 end = tfs->resc.rx.sram_entry[index].start +
1564                         tfs->resc.rx.sram_entry[index].stride - 1;
1565
1566         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1567                             TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
1568                             end,
1569                             TF_RSVD_SRAM_COUNTER_64B_RX,
1570                             tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);
1571
1572         /* counter 64b tx direction */
1573         if (tfs->resc.tx.sram_entry[index].stride > 0)
1574                 end = tfs->resc.tx.sram_entry[index].start +
1575                         tfs->resc.tx.sram_entry[index].stride - 1;
1576
1577         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1578                             TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
1579                             end,
1580                             TF_RSVD_SRAM_COUNTER_64B_TX,
1581                             tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
1582 }
1583
1584 /**
1585  * Internal function to mark all the nat resources allocated that
1586  * Truflow does not own.
1587  */
1588 static void
1589 tf_rm_rsvd_sram_nat(struct tf_session *tfs)
1590 {
1591         uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
1592         uint16_t end = 0;
1593
1594         /* nat source port rx direction */
1595         if (tfs->resc.rx.sram_entry[index].stride > 0)
1596                 end = tfs->resc.rx.sram_entry[index].start +
1597                         tfs->resc.rx.sram_entry[index].stride - 1;
1598
1599         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1600                             TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
1601                             end,
1602                             TF_RSVD_SRAM_NAT_SPORT_RX,
1603                             tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);
1604
1605         /* nat source port tx direction */
1606         if (tfs->resc.tx.sram_entry[index].stride > 0)
1607                 end = tfs->resc.tx.sram_entry[index].start +
1608                         tfs->resc.tx.sram_entry[index].stride - 1;
1609
1610         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1611                             TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
1612                             end,
1613                             TF_RSVD_SRAM_NAT_SPORT_TX,
1614                             tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);
1615
1616         index = TF_RESC_TYPE_SRAM_NAT_DPORT;
1617
1618         /* nat destination port rx direction */
1619         if (tfs->resc.rx.sram_entry[index].stride > 0)
1620                 end = tfs->resc.rx.sram_entry[index].start +
1621                         tfs->resc.rx.sram_entry[index].stride - 1;
1622
1623         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1624                             TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
1625                             end,
1626                             TF_RSVD_SRAM_NAT_DPORT_RX,
1627                             tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);
1628
1629         /* nat destination port tx direction */
1630         if (tfs->resc.tx.sram_entry[index].stride > 0)
1631                 end = tfs->resc.tx.sram_entry[index].start +
1632                         tfs->resc.tx.sram_entry[index].stride - 1;
1633
1634         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1635                             TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
1636                             end,
1637                             TF_RSVD_SRAM_NAT_DPORT_TX,
1638                             tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);
1639
1640         index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
1641
1642         /* nat source port ipv4 rx direction */
1643         if (tfs->resc.rx.sram_entry[index].stride > 0)
1644                 end = tfs->resc.rx.sram_entry[index].start +
1645                         tfs->resc.rx.sram_entry[index].stride - 1;
1646
1647         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1648                             TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
1649                             end,
1650                             TF_RSVD_SRAM_NAT_S_IPV4_RX,
1651                             tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);
1652
1653         /* nat source ipv4 port tx direction */
1654         if (tfs->resc.tx.sram_entry[index].stride > 0)
1655                 end = tfs->resc.tx.sram_entry[index].start +
1656                         tfs->resc.tx.sram_entry[index].stride - 1;
1657
1658         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1659                             TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
1660                             end,
1661                             TF_RSVD_SRAM_NAT_S_IPV4_TX,
1662                             tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);
1663
1664         index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
1665
1666         /* nat destination port ipv4 rx direction */
1667         if (tfs->resc.rx.sram_entry[index].stride > 0)
1668                 end = tfs->resc.rx.sram_entry[index].start +
1669                         tfs->resc.rx.sram_entry[index].stride - 1;
1670
1671         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1672                             TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
1673                             end,
1674                             TF_RSVD_SRAM_NAT_D_IPV4_RX,
1675                             tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);
1676
1677         /* nat destination ipv4 port tx direction */
1678         if (tfs->resc.tx.sram_entry[index].stride > 0)
1679                 end = tfs->resc.tx.sram_entry[index].start +
1680                         tfs->resc.tx.sram_entry[index].stride - 1;
1681
1682         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1683                             TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
1684                             end,
1685                             TF_RSVD_SRAM_NAT_D_IPV4_TX,
1686                             tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
1687 }
1688
1689 /**
1690  * Internal function used to validate the HW allocated resources
1691  * against the requested values.
1692  */
1693 static int
1694 tf_rm_hw_alloc_validate(enum tf_dir dir,
1695                         struct tf_rm_hw_alloc *hw_alloc,
1696                         struct tf_rm_entry *hw_entry)
1697 {
1698         int error = 0;
1699         int i;
1700
1701         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
1702                 if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
1703                         PMD_DRV_LOG(ERR,
1704                                 "%s, Alloc failed id:%d expect:%d got:%d\n",
1705                                 tf_dir_2_str(dir),
1706                                 i,
1707                                 hw_alloc->hw_num[i],
1708                                 hw_entry[i].stride);
1709                         error = -1;
1710                 }
1711         }
1712
1713         return error;
1714 }
1715
1716 /**
1717  * Internal function used to validate the SRAM allocated resources
1718  * against the requested values.
1719  */
1720 static int
1721 tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused,
1722                           struct tf_rm_sram_alloc *sram_alloc,
1723                           struct tf_rm_entry *sram_entry)
1724 {
1725         int error = 0;
1726         int i;
1727
1728         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
1729                 if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
1730                         PMD_DRV_LOG(ERR,
1731                                 "%s, Alloc failed idx:%d expect:%d got:%d\n",
1732                                 tf_dir_2_str(dir),
1733                                 i,
1734                                 sram_alloc->sram_num[i],
1735                                 sram_entry[i].stride);
1736                         error = -1;
1737                 }
1738         }
1739
1740         return error;
1741 }
1742
1743 /**
1744  * Internal function used to mark all the HW resources allocated that
1745  * Truflow does not own.
1746  */
1747 static void
1748 tf_rm_reserve_hw(struct tf *tfp)
1749 {
1750         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1751
1752         /* TBD
1753          * There is no direct AFM resource allocation as it is carved
1754          * statically at AFM boot time. Thus the bit allocators work
1755          * on the full HW resource amount and we just mark everything
1756          * used except the resources that Truflow took ownership off.
1757          */
1758         tf_rm_rsvd_l2_ctxt(tfs);
1759         tf_rm_rsvd_prof(tfs);
1760         tf_rm_rsvd_em_prof(tfs);
1761         tf_rm_rsvd_wc(tfs);
1762         tf_rm_rsvd_mirror(tfs);
1763         tf_rm_rsvd_meter(tfs);
1764         tf_rm_rsvd_upar(tfs);
1765         tf_rm_rsvd_sp_tcam(tfs);
1766         tf_rm_rsvd_l2_func(tfs);
1767         tf_rm_rsvd_fkb(tfs);
1768         tf_rm_rsvd_tbl_scope(tfs);
1769         tf_rm_rsvd_epoch(tfs);
1770         tf_rm_rsvd_metadata(tfs);
1771         tf_rm_rsvd_ct_state(tfs);
1772         tf_rm_rsvd_range(tfs);
1773         tf_rm_rsvd_lag_entry(tfs);
1774 }
1775
1776 /**
1777  * Internal function used to mark all the SRAM resources allocated
1778  * that Truflow does not own.
1779  */
1780 static void
1781 tf_rm_reserve_sram(struct tf *tfp)
1782 {
1783         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1784
1785         /* TBD
1786          * There is no direct AFM resource allocation as it is carved
1787          * statically at AFM boot time. Thus the bit allocators work
1788          * on the full HW resource amount and we just mark everything
1789          * used except the resources that Truflow took ownership off.
1790          */
1791         tf_rm_rsvd_sram_full_action(tfs);
1792         tf_rm_rsvd_sram_mcg(tfs);
1793         tf_rm_rsvd_sram_encap(tfs);
1794         tf_rm_rsvd_sram_sp(tfs);
1795         tf_rm_rsvd_sram_stats(tfs);
1796         tf_rm_rsvd_sram_nat(tfs);
1797 }
1798
1799 /**
1800  * Internal function used to allocate and validate all HW resources.
1801  */
1802 static int
1803 tf_rm_allocate_validate_hw(struct tf *tfp,
1804                            enum tf_dir dir)
1805 {
1806         int rc;
1807         int i;
1808         struct tf_rm_hw_query hw_query;
1809         struct tf_rm_hw_alloc hw_alloc;
1810         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1811         struct tf_rm_entry *hw_entries;
1812         uint32_t error_flag;
1813
1814         if (dir == TF_DIR_RX)
1815                 hw_entries = tfs->resc.rx.hw_entry;
1816         else
1817                 hw_entries = tfs->resc.tx.hw_entry;
1818
1819         /* Query for Session HW Resources */
1820         rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
1821         if (rc) {
1822                 /* Log error */
1823                 PMD_DRV_LOG(ERR,
1824                             "%s, HW qcaps message send failed\n",
1825                             tf_dir_2_str(dir));
1826                 goto cleanup;
1827         }
1828
1829         rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
1830         if (rc) {
1831                 /* Log error */
1832                 PMD_DRV_LOG(ERR,
1833                         "%s, HW QCAPS validation failed, error_flag:0x%x\n",
1834                         tf_dir_2_str(dir),
1835                         error_flag);
1836                 tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
1837                 goto cleanup;
1838         }
1839
1840         /* Post process HW capability */
1841         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
1842                 hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
1843
1844         /* Allocate Session HW Resources */
1845         rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
1846         if (rc) {
1847                 /* Log error */
1848                 PMD_DRV_LOG(ERR,
1849                             "%s, HW alloc message send failed\n",
1850                             tf_dir_2_str(dir));
1851                 goto cleanup;
1852         }
1853
1854         /* Perform HW allocation validation as its possible the
1855          * resource availability changed between qcaps and alloc
1856          */
1857         rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
1858         if (rc) {
1859                 /* Log error */
1860                 PMD_DRV_LOG(ERR,
1861                             "%s, HW Resource validation failed\n",
1862                             tf_dir_2_str(dir));
1863                 goto cleanup;
1864         }
1865
1866         return 0;
1867
1868  cleanup:
1869         return -1;
1870 }
1871
1872 /**
1873  * Internal function used to allocate and validate all SRAM resources.
1874  *
1875  * [in] tfp
1876  *   Pointer to TF handle
1877  *
1878  * [in] dir
1879  *   Receive or transmit direction
1880  *
1881  * Returns:
1882  *   0  - Success
1883  *   -1 - Internal error
1884  */
1885 static int
1886 tf_rm_allocate_validate_sram(struct tf *tfp,
1887                              enum tf_dir dir)
1888 {
1889         int rc;
1890         int i;
1891         struct tf_rm_sram_query sram_query;
1892         struct tf_rm_sram_alloc sram_alloc;
1893         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1894         struct tf_rm_entry *sram_entries;
1895         uint32_t error_flag;
1896
1897         if (dir == TF_DIR_RX)
1898                 sram_entries = tfs->resc.rx.sram_entry;
1899         else
1900                 sram_entries = tfs->resc.tx.sram_entry;
1901
1902         /* Query for Session SRAM Resources */
1903         rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
1904         if (rc) {
1905                 /* Log error */
1906                 PMD_DRV_LOG(ERR,
1907                             "%s, SRAM qcaps message send failed\n",
1908                             tf_dir_2_str(dir));
1909                 goto cleanup;
1910         }
1911
1912         rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
1913         if (rc) {
1914                 /* Log error */
1915                 PMD_DRV_LOG(ERR,
1916                         "%s, SRAM QCAPS validation failed, error_flag:%x\n",
1917                         tf_dir_2_str(dir),
1918                         error_flag);
1919                 tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
1920                 goto cleanup;
1921         }
1922
1923         /* Post process SRAM capability */
1924         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
1925                 sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
1926
1927         /* Allocate Session SRAM Resources */
1928         rc = tf_msg_session_sram_resc_alloc(tfp,
1929                                             dir,
1930                                             &sram_alloc,
1931                                             sram_entries);
1932         if (rc) {
1933                 /* Log error */
1934                 PMD_DRV_LOG(ERR,
1935                             "%s, SRAM alloc message send failed\n",
1936                             tf_dir_2_str(dir));
1937                 goto cleanup;
1938         }
1939
1940         /* Perform SRAM allocation validation as its possible the
1941          * resource availability changed between qcaps and alloc
1942          */
1943         rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
1944         if (rc) {
1945                 /* Log error */
1946                 PMD_DRV_LOG(ERR,
1947                             "%s, SRAM Resource allocation validation failed\n",
1948                             tf_dir_2_str(dir));
1949                 goto cleanup;
1950         }
1951
1952         return 0;
1953
1954  cleanup:
1955         return -1;
1956 }
1957
/**
 * Helper function used to prune a HW resource array to only hold
 * elements that needs to be flushed.
 *
 * For each HW pool the check is: if the pool's free count equals the
 * session's allocated stride, nothing was left allocated and that
 * type's flush entry is zeroed (pruned). Any pool still holding
 * elements keeps its flush entry and forces the "flush required"
 * result.
 *
 * [in] tfs
 *   Session handle
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] hw_entries
 *   Master HW Resource database
 *
 * [in/out] flush_entries
 *   Pruned HW Resource database of entries to be flushed. This
 *   array should be passed in as a complete copy of the master HW
 *   Resource database. The outgoing result will be a pruned version
 *   based on the result of the requested checking
 *
 * Returns:
 *    0 - Success, no flush required
 *    1 - Success, flush required
 *   -1 - Internal error
 */
static int
tf_rm_hw_to_flush(struct tf_session *tfs,
		  enum tf_dir dir,
		  struct tf_rm_entry *hw_entries,
		  struct tf_rm_entry *flush_entries)
{
	int rc;
	int flush_rc = 0;
	int free_cnt;
	struct bitalloc *pool;

	/* Check all the hw resource pools and check for left over
	 * elements. Any found will result in the complete pool of a
	 * type to get invalidated.
	 */

	/* L2 context TCAM */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_L2_CTXT_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Profile function */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_PROF_FUNC_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {
		flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;
		flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Profile TCAM */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_PROF_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* EM profile id */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_EM_PROF_ID_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) {
		flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;
		flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* EM records have no session pool to inspect here; they are
	 * always pruned (never flushed by this path) — presumably
	 * handled by the EM subsystem. NOTE(review): confirm.
	 */
	flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;
	flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;

	/* WC TCAM profile id */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_WC_TCAM_PROF_ID_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* WC TCAM */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_WC_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Meter profile */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_METER_PROF_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {
		flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;
		flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Meter instance */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_METER_INST_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {
		flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;
		flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Mirror */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_MIRROR_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) {
		flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;
		flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* UPAR */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_UPAR_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {
		flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;
		flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Source property TCAM */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SP_TCAM_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {
		flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;
		flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* L2 function */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_L2_FUNC_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {
		flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;
		flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* FKB */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_FKB_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {
		flush_entries[TF_RESC_TYPE_HW_FKB].start = 0;
		flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Table scope — unlike the other pools, a leak here is also
	 * logged explicitly (extra diagnostic, same flush behavior).
	 */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_TBL_SCOPE_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {
		flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
		flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
	} else {
		PMD_DRV_LOG(ERR, "%s: TBL_SCOPE free_cnt:%d, entries:%d\n",
			    tf_dir_2_str(dir),
			    free_cnt,
			    hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
		flush_rc = 1;
	}

	/* Epoch 0 */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_EPOCH0_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {
		flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;
		flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Epoch 1 */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_EPOCH1_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {
		flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;
		flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Metadata */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_METADATA_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {
		flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;
		flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Connection tracking state */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_CT_STATE_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {
		flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;
		flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Range profile */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_RANGE_PROF_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {
		flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;
		flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Range entry */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_RANGE_ENTRY_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {
		flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;
		flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* LAG entry */
	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_LAG_ENTRY_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {
		flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;
		flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;
	} else {
		flush_rc = 1;
	}

	return flush_rc;
}
2280
2281 /**
2282  * Helper function used to prune a SRAM resource array to only hold
2283  * elements that needs to be flushed.
2284  *
2285  * [in] tfs
2286  *   Session handle
2287  *
2288  * [in] dir
2289  *   Receive or transmit direction
2290  *
2291  * [in] hw_entries
2292  *   Master SRAM Resource data base
2293  *
2294  * [in/out] flush_entries
2295  *   Pruned SRAM Resource database of entries to be flushed. This
2296  *   array should be passed in as a complete copy of the master SRAM
2297  *   Resource database. The outgoing result will be a pruned version
2298  *   based on the result of the requested checking
2299  *
2300  * Returns:
2301  *    0 - Success, no flush required
2302  *    1 - Success, flush required
2303  *   -1 - Internal error
2304  */
2305 static int
2306 tf_rm_sram_to_flush(struct tf_session *tfs,
2307                     enum tf_dir dir,
2308                     struct tf_rm_entry *sram_entries,
2309                     struct tf_rm_entry *flush_entries)
2310 {
2311         int rc;
2312         int flush_rc = 0;
2313         int free_cnt;
2314         struct bitalloc *pool;
2315
2316         /* Check all the sram resource pools and check for left over
2317          * elements. Any found will result in the complete pool of a
2318          * type to get invalidated.
2319          */
2320
2321         TF_RM_GET_POOLS(tfs, dir, &pool,
2322                         TF_SRAM_FULL_ACTION_POOL_NAME,
2323                         rc);
2324         if (rc)
2325                 return rc;
2326         free_cnt = ba_free_count(pool);
2327         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
2328                 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
2329                 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
2330         } else {
2331                 flush_rc = 1;
2332         }
2333
2334         /* Only pools for RX direction */
2335         if (dir == TF_DIR_RX) {
2336                 TF_RM_GET_POOLS_RX(tfs, &pool,
2337                                    TF_SRAM_MCG_POOL_NAME);
2338                 if (rc)
2339                         return rc;
2340                 free_cnt = ba_free_count(pool);
2341                 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
2342                         flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2343                         flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2344                 } else {
2345                         flush_rc = 1;
2346                 }
2347         } else {
2348                 /* Always prune TX direction */
2349                 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2350                 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2351         }
2352
2353         TF_RM_GET_POOLS(tfs, dir, &pool,
2354                         TF_SRAM_ENCAP_8B_POOL_NAME,
2355                         rc);
2356         if (rc)
2357                 return rc;
2358         free_cnt = ba_free_count(pool);
2359         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
2360                 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
2361                 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
2362         } else {
2363                 flush_rc = 1;
2364         }
2365
2366         TF_RM_GET_POOLS(tfs, dir, &pool,
2367                         TF_SRAM_ENCAP_16B_POOL_NAME,
2368                         rc);
2369         if (rc)
2370                 return rc;
2371         free_cnt = ba_free_count(pool);
2372         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
2373                 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
2374                 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
2375         } else {
2376                 flush_rc = 1;
2377         }
2378
2379         /* Only pools for TX direction */
2380         if (dir == TF_DIR_TX) {
2381                 TF_RM_GET_POOLS_TX(tfs, &pool,
2382                                    TF_SRAM_ENCAP_64B_POOL_NAME);
2383                 if (rc)
2384                         return rc;
2385                 free_cnt = ba_free_count(pool);
2386                 if (free_cnt ==
2387                     sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
2388                         flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2389                         flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2390                 } else {
2391                         flush_rc = 1;
2392                 }
2393         } else {
2394                 /* Always prune RX direction */
2395                 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2396                 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2397         }
2398
2399         TF_RM_GET_POOLS(tfs, dir, &pool,
2400                         TF_SRAM_SP_SMAC_POOL_NAME,
2401                         rc);
2402         if (rc)
2403                 return rc;
2404         free_cnt = ba_free_count(pool);
2405         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
2406                 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
2407                 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
2408         } else {
2409                 flush_rc = 1;
2410         }
2411
2412         /* Only pools for TX direction */
2413         if (dir == TF_DIR_TX) {
2414                 TF_RM_GET_POOLS_TX(tfs, &pool,
2415                                    TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
2416                 if (rc)
2417                         return rc;
2418                 free_cnt = ba_free_count(pool);
2419                 if (free_cnt ==
2420                     sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
2421                         flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2422                         flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
2423                                 0;
2424                 } else {
2425                         flush_rc = 1;
2426                 }
2427         } else {
2428                 /* Always prune RX direction */
2429                 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2430                 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
2431         }
2432
2433         /* Only pools for TX direction */
2434         if (dir == TF_DIR_TX) {
2435                 TF_RM_GET_POOLS_TX(tfs, &pool,
2436                                    TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
2437                 if (rc)
2438                         return rc;
2439                 free_cnt = ba_free_count(pool);
2440                 if (free_cnt ==
2441                     sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
2442                         flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2443                         flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
2444                                 0;
2445                 } else {
2446                         flush_rc = 1;
2447                 }
2448         } else {
2449                 /* Always prune RX direction */
2450                 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2451                 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
2452         }
2453
2454         TF_RM_GET_POOLS(tfs, dir, &pool,
2455                         TF_SRAM_STATS_64B_POOL_NAME,
2456                         rc);
2457         if (rc)
2458                 return rc;
2459         free_cnt = ba_free_count(pool);
2460         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
2461                 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
2462                 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
2463         } else {
2464                 flush_rc = 1;
2465         }
2466
2467         TF_RM_GET_POOLS(tfs, dir, &pool,
2468                         TF_SRAM_NAT_SPORT_POOL_NAME,
2469                         rc);
2470         if (rc)
2471                 return rc;
2472         free_cnt = ba_free_count(pool);
2473         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
2474                 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
2475                 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
2476         } else {
2477                 flush_rc = 1;
2478         }
2479
2480         TF_RM_GET_POOLS(tfs, dir, &pool,
2481                         TF_SRAM_NAT_DPORT_POOL_NAME,
2482                         rc);
2483         if (rc)
2484                 return rc;
2485         free_cnt = ba_free_count(pool);
2486         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
2487                 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
2488                 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
2489         } else {
2490                 flush_rc = 1;
2491         }
2492
2493         TF_RM_GET_POOLS(tfs, dir, &pool,
2494                         TF_SRAM_NAT_S_IPV4_POOL_NAME,
2495                         rc);
2496         if (rc)
2497                 return rc;
2498         free_cnt = ba_free_count(pool);
2499         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
2500                 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
2501                 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
2502         } else {
2503                 flush_rc = 1;
2504         }
2505
2506         TF_RM_GET_POOLS(tfs, dir, &pool,
2507                         TF_SRAM_NAT_D_IPV4_POOL_NAME,
2508                         rc);
2509         if (rc)
2510                 return rc;
2511         free_cnt = ba_free_count(pool);
2512         if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
2513                 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
2514                 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
2515         } else {
2516                 flush_rc = 1;
2517         }
2518
2519         return flush_rc;
2520 }
2521
2522 /**
2523  * Helper function used to generate an error log for the HW types that
2524  * needs to be flushed. The types should have been cleaned up ahead of
2525  * invoking tf_close_session.
2526  *
2527  * [in] hw_entries
2528  *   HW Resource database holding elements to be flushed
2529  */
2530 static void
2531 tf_rm_log_hw_flush(enum tf_dir dir,
2532                    struct tf_rm_entry *hw_entries)
2533 {
2534         int i;
2535
2536         /* Walk the hw flush array and log the types that wasn't
2537          * cleaned up.
2538          */
2539         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
2540                 if (hw_entries[i].stride != 0)
2541                         PMD_DRV_LOG(ERR,
2542                                     "%s: %s was not cleaned up\n",
2543                                     tf_dir_2_str(dir),
2544                                     tf_hcapi_hw_2_str(i));
2545         }
2546 }
2547
2548 /**
2549  * Helper function used to generate an error log for the SRAM types
2550  * that needs to be flushed. The types should have been cleaned up
2551  * ahead of invoking tf_close_session.
2552  *
2553  * [in] sram_entries
2554  *   SRAM Resource database holding elements to be flushed
2555  */
2556 static void
2557 tf_rm_log_sram_flush(enum tf_dir dir,
2558                      struct tf_rm_entry *sram_entries)
2559 {
2560         int i;
2561
2562         /* Walk the sram flush array and log the types that wasn't
2563          * cleaned up.
2564          */
2565         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
2566                 if (sram_entries[i].stride != 0)
2567                         PMD_DRV_LOG(ERR,
2568                                     "%s: %s was not cleaned up\n",
2569                                     tf_dir_2_str(dir),
2570                                     tf_hcapi_sram_2_str(i));
2571         }
2572 }
2573
2574 void
2575 tf_rm_init(struct tf *tfp __rte_unused)
2576 {
2577         struct tf_session *tfs =
2578                 (struct tf_session *)(tfp->session->core_data);
2579
2580         /* This version is host specific and should be checked against
2581          * when attaching as there is no guarantee that a secondary
2582          * would run from same image version.
2583          */
2584         tfs->ver.major = TF_SESSION_VER_MAJOR;
2585         tfs->ver.minor = TF_SESSION_VER_MINOR;
2586         tfs->ver.update = TF_SESSION_VER_UPDATE;
2587
2588         tfs->session_id.id = 0;
2589         tfs->ref_count = 0;
2590
2591         /* Initialization of Table Scopes */
2592         /* ll_init(&tfs->tbl_scope_ll); */
2593
2594         /* Initialization of HW and SRAM resource DB */
2595         memset(&tfs->resc, 0, sizeof(struct tf_rm_db));
2596
2597         /* Initialization of HW Resource Pools */
2598         ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2599         ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2600         ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
2601         ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
2602         ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
2603         ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
2604         ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
2605         ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
2606
2607         /* TBD, how do we want to handle EM records ?*/
2608         /* EM Records should not be controlled by way of a pool */
2609
2610         ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
2611         ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
2612         ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
2613         ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
2614         ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
2615         ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
2616         ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
2617         ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
2618         ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
2619         ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
2620         ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
2621         ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
2622
2623         ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
2624         ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
2625
2626         ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
2627         ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
2628
2629         ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
2630         ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
2631         ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
2632         ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
2633         ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
2634         ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
2635         ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
2636         ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
2637         ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
2638         ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
2639         ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
2640         ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
2641         ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
2642         ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);
2643         ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);
2644         ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);
2645         ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);
2646         ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);
2647
2648         /* Initialization of SRAM Resource Pools
2649          * These pools are set to the TFLIB defined MAX sizes not
2650          * AFM's HW max as to limit the memory consumption
2651          */
2652         ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
2653                 TF_RSVD_SRAM_FULL_ACTION_RX);
2654         ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
2655                 TF_RSVD_SRAM_FULL_ACTION_TX);
2656         /* Only Multicast Group on RX is supported */
2657         ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
2658                 TF_RSVD_SRAM_MCG_RX);
2659         ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
2660                 TF_RSVD_SRAM_ENCAP_8B_RX);
2661         ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
2662                 TF_RSVD_SRAM_ENCAP_8B_TX);
2663         ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
2664                 TF_RSVD_SRAM_ENCAP_16B_RX);
2665         ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
2666                 TF_RSVD_SRAM_ENCAP_16B_TX);
2667         /* Only Encap 64B on TX is supported */
2668         ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
2669                 TF_RSVD_SRAM_ENCAP_64B_TX);
2670         ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
2671                 TF_RSVD_SRAM_SP_SMAC_RX);
2672         ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
2673                 TF_RSVD_SRAM_SP_SMAC_TX);
2674         /* Only SP SMAC IPv4 on TX is supported */
2675         ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
2676                 TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
2677         /* Only SP SMAC IPv6 on TX is supported */
2678         ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
2679                 TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
2680         ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
2681                 TF_RSVD_SRAM_COUNTER_64B_RX);
2682         ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
2683                 TF_RSVD_SRAM_COUNTER_64B_TX);
2684         ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
2685                 TF_RSVD_SRAM_NAT_SPORT_RX);
2686         ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
2687                 TF_RSVD_SRAM_NAT_SPORT_TX);
2688         ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
2689                 TF_RSVD_SRAM_NAT_DPORT_RX);
2690         ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
2691                 TF_RSVD_SRAM_NAT_DPORT_TX);
2692         ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
2693                 TF_RSVD_SRAM_NAT_S_IPV4_RX);
2694         ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
2695                 TF_RSVD_SRAM_NAT_S_IPV4_TX);
2696         ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
2697                 TF_RSVD_SRAM_NAT_D_IPV4_RX);
2698         ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
2699                 TF_RSVD_SRAM_NAT_D_IPV4_TX);
2700
2701         /* Initialization of pools local to TF Core */
2702         ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2703         ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2704 }
2705
2706 int
2707 tf_rm_allocate_validate(struct tf *tfp)
2708 {
2709         int rc;
2710         int i;
2711
2712         for (i = 0; i < TF_DIR_MAX; i++) {
2713                 rc = tf_rm_allocate_validate_hw(tfp, i);
2714                 if (rc)
2715                         return rc;
2716                 rc = tf_rm_allocate_validate_sram(tfp, i);
2717                 if (rc)
2718                         return rc;
2719         }
2720
2721         /* With both HW and SRAM allocated and validated we can
2722          * 'scrub' the reservation on the pools.
2723          */
2724         tf_rm_reserve_hw(tfp);
2725         tf_rm_reserve_sram(tfp);
2726
2727         return rc;
2728 }
2729
2730 int
2731 tf_rm_close(struct tf *tfp)
2732 {
2733         int rc;
2734         int rc_close = 0;
2735         int i;
2736         struct tf_rm_entry *hw_entries;
2737         struct tf_rm_entry *hw_flush_entries;
2738         struct tf_rm_entry *sram_entries;
2739         struct tf_rm_entry *sram_flush_entries;
2740         struct tf_session *tfs __rte_unused =
2741                 (struct tf_session *)(tfp->session->core_data);
2742
2743         struct tf_rm_db flush_resc = tfs->resc;
2744
2745         /* On close it is assumed that the session has already cleaned
2746          * up all its resources, individually, while destroying its
2747          * flows. No checking is performed thus the behavior is as
2748          * follows.
2749          *
2750          * Session RM will signal FW to release session resources. FW
2751          * will perform invalidation of all the allocated entries
2752          * (assures any outstanding resources has been cleared, then
2753          * free the FW RM instance.
2754          *
2755          * Session will then be freed by tf_close_session() thus there
2756          * is no need to clean each resource pool as the whole session
2757          * is going away.
2758          */
2759
2760         for (i = 0; i < TF_DIR_MAX; i++) {
2761                 if (i == TF_DIR_RX) {
2762                         hw_entries = tfs->resc.rx.hw_entry;
2763                         hw_flush_entries = flush_resc.rx.hw_entry;
2764                         sram_entries = tfs->resc.rx.sram_entry;
2765                         sram_flush_entries = flush_resc.rx.sram_entry;
2766                 } else {
2767                         hw_entries = tfs->resc.tx.hw_entry;
2768                         hw_flush_entries = flush_resc.tx.hw_entry;
2769                         sram_entries = tfs->resc.tx.sram_entry;
2770                         sram_flush_entries = flush_resc.tx.sram_entry;
2771                 }
2772
2773                 /* Check for any not previously freed HW resources and
2774                  * flush if required.
2775                  */
2776                 rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);
2777                 if (rc) {
2778                         rc_close = -ENOTEMPTY;
2779                         /* Log error */
2780                         PMD_DRV_LOG(ERR,
2781                                     "%s, lingering HW resources\n",
2782                                     tf_dir_2_str(i));
2783
2784                         /* Log the entries to be flushed */
2785                         tf_rm_log_hw_flush(i, hw_flush_entries);
2786                         rc = tf_msg_session_hw_resc_flush(tfp,
2787                                                           i,
2788                                                           hw_flush_entries);
2789                         if (rc) {
2790                                 rc_close = rc;
2791                                 /* Log error */
2792                                 PMD_DRV_LOG(ERR,
2793                                             "%s, HW flush failed\n",
2794                                             tf_dir_2_str(i));
2795                         }
2796                 }
2797
2798                 /* Check for any not previously freed SRAM resources
2799                  * and flush if required.
2800                  */
2801                 rc = tf_rm_sram_to_flush(tfs,
2802                                          i,
2803                                          sram_entries,
2804                                          sram_flush_entries);
2805                 if (rc) {
2806                         rc_close = -ENOTEMPTY;
2807                         /* Log error */
2808                         PMD_DRV_LOG(ERR,
2809                                     "%s, lingering SRAM resources\n",
2810                                     tf_dir_2_str(i));
2811
2812                         /* Log the entries to be flushed */
2813                         tf_rm_log_sram_flush(i, sram_flush_entries);
2814
2815                         rc = tf_msg_session_sram_resc_flush(tfp,
2816                                                             i,
2817                                                             sram_flush_entries);
2818                         if (rc) {
2819                                 rc_close = rc;
2820                                 /* Log error */
2821                                 PMD_DRV_LOG(ERR,
2822                                             "%s, HW flush failed\n",
2823                                             tf_dir_2_str(i));
2824                         }
2825                 }
2826
2827                 rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
2828                 if (rc) {
2829                         rc_close = rc;
2830                         /* Log error */
2831                         PMD_DRV_LOG(ERR,
2832                                     "%s, HW free failed\n",
2833                                     tf_dir_2_str(i));
2834                 }
2835
2836                 rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
2837                 if (rc) {
2838                         rc_close = rc;
2839                         /* Log error */
2840                         PMD_DRV_LOG(ERR,
2841                                     "%s, SRAM free failed\n",
2842                                     tf_dir_2_str(i));
2843                 }
2844         }
2845
2846         return rc_close;
2847 }
2848
#if (TF_SHADOW == 1)
/**
 * Initializes the Shadow DB (stub implementation).
 *
 * [in] tfs
 *   Pointer to TF Session (currently unused by the stub)
 *
 * Returns:
 *   - (1) always; shadow DB support is not yet implemented.
 */
int
tf_rm_shadow_db_init(struct tf_session *tfs __rte_unused)
{
        /* 'rc' previously lacked a declaration, so this function did
         * not compile when TF_SHADOW was enabled.
         */
        int rc = 1;

        return rc;
}
#endif /* TF_SHADOW */
2858
2859 int
2860 tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
2861                             enum tf_dir dir,
2862                             enum tf_tcam_tbl_type type,
2863                             struct bitalloc **pool)
2864 {
2865         int rc = -EOPNOTSUPP;
2866
2867         *pool = NULL;
2868
2869         switch (type) {
2870         case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
2871                 TF_RM_GET_POOLS(tfs, dir, pool,
2872                                 TF_L2_CTXT_TCAM_POOL_NAME,
2873                                 rc);
2874                 break;
2875         case TF_TCAM_TBL_TYPE_PROF_TCAM:
2876                 TF_RM_GET_POOLS(tfs, dir, pool,
2877                                 TF_PROF_TCAM_POOL_NAME,
2878                                 rc);
2879                 break;
2880         case TF_TCAM_TBL_TYPE_WC_TCAM:
2881                 TF_RM_GET_POOLS(tfs, dir, pool,
2882                                 TF_WC_TCAM_POOL_NAME,
2883                                 rc);
2884                 break;
2885         case TF_TCAM_TBL_TYPE_VEB_TCAM:
2886         case TF_TCAM_TBL_TYPE_SP_TCAM:
2887         case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
2888         default:
2889                 break;
2890         }
2891
2892         if (rc == -EOPNOTSUPP) {
2893                 PMD_DRV_LOG(ERR,
2894                             "dir:%d, Tcam type not supported, type:%d\n",
2895                             dir,
2896                             type);
2897                 return rc;
2898         } else if (rc == -1) {
2899                 PMD_DRV_LOG(ERR,
2900                             "%s:, Tcam type lookup failed, type:%d\n",
2901                             tf_dir_2_str(dir),
2902                             type);
2903                 return rc;
2904         }
2905
2906         return 0;
2907 }
2908
/**
 * Looks up the bit allocator pool backing a given table type.
 *
 * [in] tfs
 *   Pointer to TF Session
 *
 * [in] dir
 *   Receive or transmit direction
 *
 * [in] type
 *   Table type to look up
 *
 * [out] pool
 *   Pointer to the found pool, NULL when the type has none
 *
 * Returns:
 *   - (0) if successful, pool is valid.
 *   - (-EOPNOTSUPP) if the table type has no pool / is not supported.
 *   - (-1) if the pool lookup itself failed.
 */
int
tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
                           enum tf_dir dir,
                           enum tf_tbl_type type,
                           struct bitalloc **pool)
{
        int rc = -EOPNOTSUPP;

        *pool = NULL;

        switch (type) {
        case TF_TBL_TYPE_FULL_ACT_RECORD:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_FULL_ACTION_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_MCAST_GROUPS:
                /* No pools for TX direction, so bail out */
                if (dir == TF_DIR_TX)
                        break;
                TF_RM_GET_POOLS_RX(tfs, pool,
                                   TF_SRAM_MCG_POOL_NAME);
                rc = 0;
                break;
        case TF_TBL_TYPE_ACT_ENCAP_8B:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_ENCAP_8B_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_ACT_ENCAP_16B:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_ENCAP_16B_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_ACT_ENCAP_64B:
                /* No pools for RX direction, so bail out */
                if (dir == TF_DIR_RX)
                        break;
                TF_RM_GET_POOLS_TX(tfs, pool,
                                   TF_SRAM_ENCAP_64B_POOL_NAME);
                rc = 0;
                break;
        case TF_TBL_TYPE_ACT_SP_SMAC:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_SP_SMAC_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
                /* No pools for RX direction, so bail out.
                 * (Comment previously said TX, contradicting the
                 * check below; SP SMAC IPv4 is a TX-only pool.)
                 */
                if (dir == TF_DIR_RX)
                        break;
                TF_RM_GET_POOLS_TX(tfs, pool,
                                   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
                rc = 0;
                break;
        case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
                /* No pools for RX direction, so bail out.
                 * (Comment previously said TX, contradicting the
                 * check below; SP SMAC IPv6 is a TX-only pool.)
                 */
                if (dir == TF_DIR_RX)
                        break;
                TF_RM_GET_POOLS_TX(tfs, pool,
                                   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
                rc = 0;
                break;
        case TF_TBL_TYPE_ACT_STATS_64:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_STATS_64B_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_ACT_MODIFY_SPORT:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_NAT_SPORT_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_NAT_S_IPV4_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_SRAM_NAT_D_IPV4_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_METER_PROF:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_METER_PROF_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_METER_INST:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_METER_INST_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_MIRROR_CONFIG:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_MIRROR_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_UPAR:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_UPAR_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_EPOCH0:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_EPOCH0_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_EPOCH1:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_EPOCH1_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_METADATA:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_METADATA_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_CT_STATE:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_CT_STATE_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_RANGE_PROF:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_RANGE_PROF_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_RANGE_ENTRY:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_RANGE_ENTRY_POOL_NAME,
                                rc);
                break;
        case TF_TBL_TYPE_LAG:
                TF_RM_GET_POOLS(tfs, dir, pool,
                                TF_LAG_ENTRY_POOL_NAME,
                                rc);
                break;
        /* Not yet supported */
        case TF_TBL_TYPE_ACT_ENCAP_32B:
        case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
        case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
        case TF_TBL_TYPE_VNIC_SVIF:
                break;
        /* No bitalloc pools for these types */
        case TF_TBL_TYPE_EXT:
        default:
                break;
        }

        if (rc == -EOPNOTSUPP) {
                PMD_DRV_LOG(ERR,
                            "dir:%d, Table type not supported, type:%d\n",
                            dir,
                            type);
                return rc;
        } else if (rc == -1) {
                PMD_DRV_LOG(ERR,
                            "dir:%d, Table type lookup failed, type:%d\n",
                            dir,
                            type);
                return rc;
        }

        return 0;
}
3075
3076 int
3077 tf_rm_convert_tbl_type(enum tf_tbl_type type,
3078                        uint32_t *hcapi_type)
3079 {
3080         int rc = 0;
3081
3082         switch (type) {
3083         case TF_TBL_TYPE_FULL_ACT_RECORD:
3084                 *hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
3085                 break;
3086         case TF_TBL_TYPE_MCAST_GROUPS:
3087                 *hcapi_type = TF_RESC_TYPE_SRAM_MCG;
3088                 break;
3089         case TF_TBL_TYPE_ACT_ENCAP_8B:
3090                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
3091                 break;
3092         case TF_TBL_TYPE_ACT_ENCAP_16B:
3093                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
3094                 break;
3095         case TF_TBL_TYPE_ACT_ENCAP_64B:
3096                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
3097                 break;
3098         case TF_TBL_TYPE_ACT_SP_SMAC:
3099                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
3100                 break;
3101         case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3102                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
3103                 break;
3104         case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3105                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
3106                 break;
3107         case TF_TBL_TYPE_ACT_STATS_64:
3108                 *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
3109                 break;
3110         case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3111                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
3112                 break;
3113         case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3114                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
3115                 break;
3116         case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3117                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
3118                 break;
3119         case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3120                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
3121                 break;
3122         case TF_TBL_TYPE_METER_PROF:
3123                 *hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
3124                 break;
3125         case TF_TBL_TYPE_METER_INST:
3126                 *hcapi_type = TF_RESC_TYPE_HW_METER_INST;
3127                 break;
3128         case TF_TBL_TYPE_MIRROR_CONFIG:
3129                 *hcapi_type = TF_RESC_TYPE_HW_MIRROR;
3130                 break;
3131         case TF_TBL_TYPE_UPAR:
3132                 *hcapi_type = TF_RESC_TYPE_HW_UPAR;
3133                 break;
3134         case TF_TBL_TYPE_EPOCH0:
3135                 *hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
3136                 break;
3137         case TF_TBL_TYPE_EPOCH1:
3138                 *hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
3139                 break;
3140         case TF_TBL_TYPE_METADATA:
3141                 *hcapi_type = TF_RESC_TYPE_HW_METADATA;
3142                 break;
3143         case TF_TBL_TYPE_CT_STATE:
3144                 *hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
3145                 break;
3146         case TF_TBL_TYPE_RANGE_PROF:
3147                 *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
3148                 break;
3149         case TF_TBL_TYPE_RANGE_ENTRY:
3150                 *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
3151                 break;
3152         case TF_TBL_TYPE_LAG:
3153                 *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
3154                 break;
3155         /* Not yet supported */
3156         case TF_TBL_TYPE_ACT_ENCAP_32B:
3157         case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3158         case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3159         case TF_TBL_TYPE_VNIC_SVIF:
3160         case TF_TBL_TYPE_EXT:   /* No pools for this type */
3161         default:
3162                 *hcapi_type = -1;
3163                 rc = -EOPNOTSUPP;
3164         }
3165
3166         return rc;
3167 }
3168
3169 int
3170 tf_rm_convert_index(struct tf_session *tfs,
3171                     enum tf_dir dir,
3172                     enum tf_tbl_type type,
3173                     enum tf_rm_convert_type c_type,
3174                     uint32_t index,
3175                     uint32_t *convert_index)
3176 {
3177         int rc;
3178         struct tf_rm_resc *resc;
3179         uint32_t hcapi_type;
3180         uint32_t base_index;
3181
3182         if (dir == TF_DIR_RX)
3183                 resc = &tfs->resc.rx;
3184         else if (dir == TF_DIR_TX)
3185                 resc = &tfs->resc.tx;
3186         else
3187                 return -EOPNOTSUPP;
3188
3189         rc = tf_rm_convert_tbl_type(type, &hcapi_type);
3190         if (rc)
3191                 return -1;
3192
3193         switch (type) {
3194         case TF_TBL_TYPE_FULL_ACT_RECORD:
3195         case TF_TBL_TYPE_MCAST_GROUPS:
3196         case TF_TBL_TYPE_ACT_ENCAP_8B:
3197         case TF_TBL_TYPE_ACT_ENCAP_16B:
3198         case TF_TBL_TYPE_ACT_ENCAP_32B:
3199         case TF_TBL_TYPE_ACT_ENCAP_64B:
3200         case TF_TBL_TYPE_ACT_SP_SMAC:
3201         case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3202         case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3203         case TF_TBL_TYPE_ACT_STATS_64:
3204         case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3205         case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3206         case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3207         case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3208                 base_index = resc->sram_entry[hcapi_type].start;
3209                 break;
3210         case TF_TBL_TYPE_MIRROR_CONFIG:
3211         case TF_TBL_TYPE_METER_PROF:
3212         case TF_TBL_TYPE_METER_INST:
3213         case TF_TBL_TYPE_UPAR:
3214         case TF_TBL_TYPE_EPOCH0:
3215         case TF_TBL_TYPE_EPOCH1:
3216         case TF_TBL_TYPE_METADATA:
3217         case TF_TBL_TYPE_CT_STATE:
3218         case TF_TBL_TYPE_RANGE_PROF:
3219         case TF_TBL_TYPE_RANGE_ENTRY:
3220         case TF_TBL_TYPE_LAG:
3221                 base_index = resc->hw_entry[hcapi_type].start;
3222                 break;
3223         /* Not yet supported */
3224         case TF_TBL_TYPE_VNIC_SVIF:
3225         case TF_TBL_TYPE_EXT:   /* No pools for this type */
3226         default:
3227                 return -EOPNOTSUPP;
3228         }
3229
3230         switch (c_type) {
3231         case TF_RM_CONVERT_RM_BASE:
3232                 *convert_index = index - base_index;
3233                 break;
3234         case TF_RM_CONVERT_ADD_BASE:
3235                 *convert_index = index + base_index;
3236                 break;
3237         default:
3238                 return -EOPNOTSUPP;
3239         }
3240
3241         return 0;
3242 }