net/bnxt: add core changes for EM and EEM lookups
[dpdk.git] / drivers / net / bnxt / tf_core / tf_rm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include <string.h>
7
8 #include <rte_common.h>
9
10 #include "tf_rm.h"
11 #include "tf_core.h"
12 #include "tf_util.h"
13 #include "tf_session.h"
14 #include "tf_resources.h"
15 #include "tf_msg.h"
16 #include "bnxt.h"
17 #include "tfp.h"
18
/**
 * Internal macro to perform HW resource allocation check between what
 * firmware reports vs what was statically requested.
 *
 * Sets the bit matching hcapi_type in *eflag when the firmware
 * reported max does not equal the direction-specific reserved define.
 *
 * Parameters:
 *   struct tf_rm_hw_query    *hquery      - Pointer to the hw query result
 *   enum tf_dir               dir         - Direction to process
 *   enum tf_resource_type_hw  hcapi_type  - HCAPI type, the index element
 *                                           in the hw query structure
 *   define                    def_value   - Define value to check against
 *   uint32_t                 *eflag       - Result of the check
 */
#define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do {  \
	if ((dir) == TF_DIR_RX) {                                             \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
			*(eflag) |= 1U << (hcapi_type);                       \
	} else {                                                              \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
			*(eflag) |= 1U << (hcapi_type);                       \
	}                                                                     \
} while (0)
40
/**
 * Internal macro to perform SRAM resource allocation check between
 * what firmware reports vs what was statically requested.
 *
 * Sets the bit matching hcapi_type in *eflag when the firmware
 * reported max does not equal the direction-specific reserved define.
 *
 * Parameters:
 *   struct tf_rm_sram_query   *squery      - Pointer to the sram query result
 *   enum tf_dir                dir         - Direction to process
 *   enum tf_resource_type_sram hcapi_type  - HCAPI type, the index element
 *                                            in the sram query structure
 *   define                     def_value   - Define value to check against
 *   uint32_t                  *eflag       - Result of the check
 */
#define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
	if ((dir) == TF_DIR_RX) {                                              \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
			*(eflag) |= 1U << (hcapi_type);                        \
	} else {                                                               \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
			*(eflag) |= 1U << (hcapi_type);                        \
	}                                                                      \
} while (0)
62
/**
 * Internal macro to convert a reserved resource define name to be
 * direction specific. Appends _RX or _TX to 'type' via token pasting
 * and assigns the resulting define's value to 'dtype'.
 *
 * Parameters:
 *   enum tf_dir    dir         - Direction to process
 *   string         type        - Type name to append RX or TX to
 *   string         dtype       - Direction specific type
 */
#define TF_RESC_RSVD(dir, type, dtype) do {     \
		if ((dir) == TF_DIR_RX)         \
			(dtype) = type ## _RX;  \
		else                            \
			(dtype) = type ## _TX;  \
	} while (0)
80
81 const char
82 *tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)
83 {
84         switch (hw_type) {
85         case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
86                 return "L2 ctxt tcam";
87         case TF_RESC_TYPE_HW_PROF_FUNC:
88                 return "Profile Func";
89         case TF_RESC_TYPE_HW_PROF_TCAM:
90                 return "Profile tcam";
91         case TF_RESC_TYPE_HW_EM_PROF_ID:
92                 return "EM profile id";
93         case TF_RESC_TYPE_HW_EM_REC:
94                 return "EM record";
95         case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
96                 return "WC tcam profile id";
97         case TF_RESC_TYPE_HW_WC_TCAM:
98                 return "WC tcam";
99         case TF_RESC_TYPE_HW_METER_PROF:
100                 return "Meter profile";
101         case TF_RESC_TYPE_HW_METER_INST:
102                 return "Meter instance";
103         case TF_RESC_TYPE_HW_MIRROR:
104                 return "Mirror";
105         case TF_RESC_TYPE_HW_UPAR:
106                 return "UPAR";
107         case TF_RESC_TYPE_HW_SP_TCAM:
108                 return "Source properties tcam";
109         case TF_RESC_TYPE_HW_L2_FUNC:
110                 return "L2 Function";
111         case TF_RESC_TYPE_HW_FKB:
112                 return "FKB";
113         case TF_RESC_TYPE_HW_TBL_SCOPE:
114                 return "Table scope";
115         case TF_RESC_TYPE_HW_EPOCH0:
116                 return "EPOCH0";
117         case TF_RESC_TYPE_HW_EPOCH1:
118                 return "EPOCH1";
119         case TF_RESC_TYPE_HW_METADATA:
120                 return "Metadata";
121         case TF_RESC_TYPE_HW_CT_STATE:
122                 return "Connection tracking state";
123         case TF_RESC_TYPE_HW_RANGE_PROF:
124                 return "Range profile";
125         case TF_RESC_TYPE_HW_RANGE_ENTRY:
126                 return "Range entry";
127         case TF_RESC_TYPE_HW_LAG_ENTRY:
128                 return "LAG";
129         default:
130                 return "Invalid identifier";
131         }
132 }
133
134 const char
135 *tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
136 {
137         switch (sram_type) {
138         case TF_RESC_TYPE_SRAM_FULL_ACTION:
139                 return "Full action";
140         case TF_RESC_TYPE_SRAM_MCG:
141                 return "MCG";
142         case TF_RESC_TYPE_SRAM_ENCAP_8B:
143                 return "Encap 8B";
144         case TF_RESC_TYPE_SRAM_ENCAP_16B:
145                 return "Encap 16B";
146         case TF_RESC_TYPE_SRAM_ENCAP_64B:
147                 return "Encap 64B";
148         case TF_RESC_TYPE_SRAM_SP_SMAC:
149                 return "Source properties SMAC";
150         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
151                 return "Source properties SMAC IPv4";
152         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
153                 return "Source properties IPv6";
154         case TF_RESC_TYPE_SRAM_COUNTER_64B:
155                 return "Counter 64B";
156         case TF_RESC_TYPE_SRAM_NAT_SPORT:
157                 return "NAT source port";
158         case TF_RESC_TYPE_SRAM_NAT_DPORT:
159                 return "NAT destination port";
160         case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
161                 return "NAT source IPv4";
162         case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
163                 return "NAT destination IPv4";
164         default:
165                 return "Invalid identifier";
166         }
167 }
168
169 /**
170  * Helper function to perform a HW HCAPI resource type lookup against
171  * the reserved value of the same static type.
172  *
173  * Returns:
174  *   -EOPNOTSUPP - Reserved resource type not supported
175  *   Value       - Integer value of the reserved value for the requested type
176  */
177 static int
178 tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)
179 {
180         uint32_t value = -EOPNOTSUPP;
181
182         switch (index) {
183         case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
184                 TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);
185                 break;
186         case TF_RESC_TYPE_HW_PROF_FUNC:
187                 TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);
188                 break;
189         case TF_RESC_TYPE_HW_PROF_TCAM:
190                 TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);
191                 break;
192         case TF_RESC_TYPE_HW_EM_PROF_ID:
193                 TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value);
194                 break;
195         case TF_RESC_TYPE_HW_EM_REC:
196                 TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);
197                 break;
198         case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
199                 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);
200                 break;
201         case TF_RESC_TYPE_HW_WC_TCAM:
202                 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);
203                 break;
204         case TF_RESC_TYPE_HW_METER_PROF:
205                 TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);
206                 break;
207         case TF_RESC_TYPE_HW_METER_INST:
208                 TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);
209                 break;
210         case TF_RESC_TYPE_HW_MIRROR:
211                 TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);
212                 break;
213         case TF_RESC_TYPE_HW_UPAR:
214                 TF_RESC_RSVD(dir, TF_RSVD_UPAR, value);
215                 break;
216         case TF_RESC_TYPE_HW_SP_TCAM:
217                 TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);
218                 break;
219         case TF_RESC_TYPE_HW_L2_FUNC:
220                 TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);
221                 break;
222         case TF_RESC_TYPE_HW_FKB:
223                 TF_RESC_RSVD(dir, TF_RSVD_FKB, value);
224                 break;
225         case TF_RESC_TYPE_HW_TBL_SCOPE:
226                 TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);
227                 break;
228         case TF_RESC_TYPE_HW_EPOCH0:
229                 TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);
230                 break;
231         case TF_RESC_TYPE_HW_EPOCH1:
232                 TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);
233                 break;
234         case TF_RESC_TYPE_HW_METADATA:
235                 TF_RESC_RSVD(dir, TF_RSVD_METADATA, value);
236                 break;
237         case TF_RESC_TYPE_HW_CT_STATE:
238                 TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);
239                 break;
240         case TF_RESC_TYPE_HW_RANGE_PROF:
241                 TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);
242                 break;
243         case TF_RESC_TYPE_HW_RANGE_ENTRY:
244                 TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);
245                 break;
246         case TF_RESC_TYPE_HW_LAG_ENTRY:
247                 TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);
248                 break;
249         default:
250                 break;
251         }
252
253         return value;
254 }
255
256 /**
257  * Helper function to perform a SRAM HCAPI resource type lookup
258  * against the reserved value of the same static type.
259  *
260  * Returns:
261  *   -EOPNOTSUPP - Reserved resource type not supported
262  *   Value       - Integer value of the reserved value for the requested type
263  */
264 static int
265 tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
266 {
267         uint32_t value = -EOPNOTSUPP;
268
269         switch (index) {
270         case TF_RESC_TYPE_SRAM_FULL_ACTION:
271                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
272                 break;
273         case TF_RESC_TYPE_SRAM_MCG:
274                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
275                 break;
276         case TF_RESC_TYPE_SRAM_ENCAP_8B:
277                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
278                 break;
279         case TF_RESC_TYPE_SRAM_ENCAP_16B:
280                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
281                 break;
282         case TF_RESC_TYPE_SRAM_ENCAP_64B:
283                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
284                 break;
285         case TF_RESC_TYPE_SRAM_SP_SMAC:
286                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
287                 break;
288         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
289                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
290                 break;
291         case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
292                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
293                 break;
294         case TF_RESC_TYPE_SRAM_COUNTER_64B:
295                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
296                 break;
297         case TF_RESC_TYPE_SRAM_NAT_SPORT:
298                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
299                 break;
300         case TF_RESC_TYPE_SRAM_NAT_DPORT:
301                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
302                 break;
303         case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
304                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
305                 break;
306         case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
307                 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
308                 break;
309         default:
310                 break;
311         }
312
313         return value;
314 }
315
316 /**
317  * Helper function to print all the HW resource qcaps errors reported
318  * in the error_flag.
319  *
320  * [in] dir
321  *   Receive or transmit direction
322  *
323  * [in] error_flag
324  *   Pointer to the hw error flags created at time of the query check
325  */
326 static void
327 tf_rm_print_hw_qcaps_error(enum tf_dir dir,
328                            struct tf_rm_hw_query *hw_query,
329                            uint32_t *error_flag)
330 {
331         int i;
332
333         TFP_DRV_LOG(ERR, "QCAPS errors HW\n");
334         TFP_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
335         TFP_DRV_LOG(ERR, "  Elements:\n");
336
337         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
338                 if (*error_flag & 1 << i)
339                         TFP_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
340                                     tf_hcapi_hw_2_str(i),
341                                     hw_query->hw_query[i].max,
342                                     tf_rm_rsvd_hw_value(dir, i));
343         }
344 }
345
346 /**
347  * Helper function to print all the SRAM resource qcaps errors
348  * reported in the error_flag.
349  *
350  * [in] dir
351  *   Receive or transmit direction
352  *
353  * [in] error_flag
354  *   Pointer to the sram error flags created at time of the query check
355  */
356 static void
357 tf_rm_print_sram_qcaps_error(enum tf_dir dir,
358                              struct tf_rm_sram_query *sram_query,
359                              uint32_t *error_flag)
360 {
361         int i;
362
363         TFP_DRV_LOG(ERR, "QCAPS errors SRAM\n");
364         TFP_DRV_LOG(ERR, "  Direction: %s\n", tf_dir_2_str(dir));
365         TFP_DRV_LOG(ERR, "  Elements:\n");
366
367         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
368                 if (*error_flag & 1 << i)
369                         TFP_DRV_LOG(ERR, "    %s, %d elem available, req:%d\n",
370                                     tf_hcapi_sram_2_str(i),
371                                     sram_query->sram_query[i].max,
372                                     tf_rm_rsvd_sram_value(dir, i));
373         }
374 }
375
376 /**
377  * Performs a HW resource check between what firmware capability
378  * reports and what the core expects is available.
379  *
380  * Firmware performs the resource carving at AFM init time and the
381  * resource capability is reported in the TruFlow qcaps msg.
382  *
383  * [in] query
384  *   Pointer to HW Query data structure. Query holds what the firmware
385  *   offers of the HW resources.
386  *
387  * [in] dir
388  *   Receive or transmit direction
389  *
390  * [in/out] error_flag
391  *   Pointer to a bit array indicating the error of a single HCAPI
392  *   resource type. When a bit is set to 1, the HCAPI resource type
393  *   failed static allocation.
394  *
395  * Returns:
396  *  0       - Success
397  *  -ENOMEM - Failure on one of the allocated resources. Check the
398  *            error_flag for what types are flagged errored.
399  */
static int
tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
			    enum tf_dir dir,
			    uint32_t *error_flag)
{
	/* Start with a clean mask; each failing HCAPI type sets its
	 * own bit via TF_RM_CHECK_HW_ALLOC below.
	 */
	*error_flag = 0;

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_L2_CTXT_TCAM,
			     TF_RSVD_L2_CTXT_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_PROF_FUNC,
			     TF_RSVD_PROF_FUNC,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_PROF_TCAM,
			     TF_RSVD_PROF_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EM_PROF_ID,
			     TF_RSVD_EM_PROF_ID,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EM_REC,
			     TF_RSVD_EM_REC,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,
			     TF_RSVD_WC_TCAM_PROF_ID,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_WC_TCAM,
			     TF_RSVD_WC_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_METER_PROF,
			     TF_RSVD_METER_PROF,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_METER_INST,
			     TF_RSVD_METER_INST,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_MIRROR,
			     TF_RSVD_MIRROR,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_UPAR,
			     TF_RSVD_UPAR,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_SP_TCAM,
			     TF_RSVD_SP_TCAM,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_L2_FUNC,
			     TF_RSVD_L2_FUNC,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_FKB,
			     TF_RSVD_FKB,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_TBL_SCOPE,
			     TF_RSVD_TBL_SCOPE,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EPOCH0,
			     TF_RSVD_EPOCH0,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_EPOCH1,
			     TF_RSVD_EPOCH1,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_METADATA,
			     TF_RSVD_METADATA,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_CT_STATE,
			     TF_RSVD_CT_STATE,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_RANGE_PROF,
			     TF_RSVD_RANGE_PROF,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_RANGE_ENTRY,
			     TF_RSVD_RANGE_ENTRY,
			     error_flag);

	TF_RM_CHECK_HW_ALLOC(query,
			     dir,
			     TF_RESC_TYPE_HW_LAG_ENTRY,
			     TF_RSVD_LAG_ENTRY,
			     error_flag);

	/* Any set bit means at least one resource type came up short */
	if (*error_flag != 0)
		return -ENOMEM;

	return 0;
}
544
545 /**
546  * Performs a SRAM resource check between what firmware capability
547  * reports and what the core expects is available.
548  *
549  * Firmware performs the resource carving at AFM init time and the
550  * resource capability is reported in the TruFlow qcaps msg.
551  *
552  * [in] query
553  *   Pointer to SRAM Query data structure. Query holds what the
554  *   firmware offers of the SRAM resources.
555  *
556  * [in] dir
557  *   Receive or transmit direction
558  *
559  * [in/out] error_flag
560  *   Pointer to a bit array indicating the error of a single HCAPI
561  *   resource type. When a bit is set to 1, the HCAPI resource type
562  *   failed static allocation.
563  *
564  * Returns:
565  *  0       - Success
566  *  -ENOMEM - Failure on one of the allocated resources. Check the
567  *            error_flag for what types are flagged errored.
568  */
static int
tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
			      enum tf_dir dir,
			      uint32_t *error_flag)
{
	/* Start with a clean mask; each failing HCAPI type sets its
	 * own bit via TF_RM_CHECK_SRAM_ALLOC below.
	 */
	*error_flag = 0;

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_FULL_ACTION,
			       TF_RSVD_SRAM_FULL_ACTION,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_MCG,
			       TF_RSVD_SRAM_MCG,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_8B,
			       TF_RSVD_SRAM_ENCAP_8B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_16B,
			       TF_RSVD_SRAM_ENCAP_16B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_ENCAP_64B,
			       TF_RSVD_SRAM_ENCAP_64B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC,
			       TF_RSVD_SRAM_SP_SMAC,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
			       TF_RSVD_SRAM_SP_SMAC_IPV4,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
			       TF_RSVD_SRAM_SP_SMAC_IPV6,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_COUNTER_64B,
			       TF_RSVD_SRAM_COUNTER_64B,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_SPORT,
			       TF_RSVD_SRAM_NAT_SPORT,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_DPORT,
			       TF_RSVD_SRAM_NAT_DPORT,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_S_IPV4,
			       TF_RSVD_SRAM_NAT_S_IPV4,
			       error_flag);

	TF_RM_CHECK_SRAM_ALLOC(query,
			       dir,
			       TF_RESC_TYPE_SRAM_NAT_D_IPV4,
			       TF_RSVD_SRAM_NAT_D_IPV4,
			       error_flag);

	/* Any set bit means at least one resource type came up short */
	if (*error_flag != 0)
		return -ENOMEM;

	return 0;
}
659
660 /**
661  * Internal function to mark pool entries used.
662  */
663 static void
664 tf_rm_reserve_range(uint32_t count,
665                     uint32_t rsv_begin,
666                     uint32_t rsv_end,
667                     uint32_t max,
668                     struct bitalloc *pool)
669 {
670         uint32_t i;
671
672         /* If no resources has been requested we mark everything
673          * 'used'
674          */
675         if (count == 0) {
676                 for (i = 0; i < max; i++)
677                         ba_alloc_index(pool, i);
678         } else {
679                 /* Support 2 main modes
680                  * Reserved range starts from bottom up (with
681                  * pre-reserved value or not)
682                  * - begin = 0 to end xx
683                  * - begin = 1 to end xx
684                  *
685                  * Reserved range starts from top down
686                  * - begin = yy to end max
687                  */
688
689                 /* Bottom up check, start from 0 */
690                 if (rsv_begin == 0) {
691                         for (i = rsv_end + 1; i < max; i++)
692                                 ba_alloc_index(pool, i);
693                 }
694
695                 /* Bottom up check, start from 1 or higher OR
696                  * Top Down
697                  */
698                 if (rsv_begin >= 1) {
699                         /* Allocate from 0 until start */
700                         for (i = 0; i < rsv_begin; i++)
701                                 ba_alloc_index(pool, i);
702
703                         /* Skip and then do the remaining */
704                         if (rsv_end < max - 1) {
705                                 for (i = rsv_end; i < max; i++)
706                                         ba_alloc_index(pool, i);
707                         }
708                 }
709         }
710 }
711
/**
 * Internal function to mark all the l2 ctxt allocated that Truflow
 * does not own.
 *
 * [in] tfs
 *   Pointer to the session holding the firmware-provided resource
 *   start/stride info and the per-direction allocation pools
 */
static void
tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
	uint32_t end = 0;

	/* l2 ctxt rx direction */
	/* end is the inclusive last reserved index; only meaningful
	 * when stride > 0 (with stride == 0 tf_rm_reserve_range
	 * ignores the range and marks the whole pool used)
	 */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
			tfs->resc.rx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    end,
			    TF_NUM_L2_CTXT_TCAM,
			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);

	/* l2 ctxt tx direction */
	/* NOTE(review): if tx stride is 0, end keeps the stale rx
	 * value; harmless today because a 0 count makes the range
	 * arguments unused, but worth confirming intent.
	 */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
			tfs->resc.tx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    end,
			    TF_NUM_L2_CTXT_TCAM,
			    tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
}
744
/**
 * Internal function to mark all the profile tcam and profile func
 * resources that Truflow does not own.
 *
 * [in] tfs
 *   Pointer to the session holding the firmware-provided resource
 *   start/stride info and the per-direction allocation pools
 */
static void
tf_rm_rsvd_prof(struct tf_session *tfs)
{
	uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;
	uint32_t end = 0;

	/* profile func rx direction */
	/* end is the inclusive last reserved index; only meaningful
	 * when stride > 0
	 */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
			tfs->resc.rx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    end,
			    TF_NUM_PROF_FUNC,
			    tfs->TF_PROF_FUNC_POOL_NAME_RX);

	/* profile func tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
			tfs->resc.tx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    end,
			    TF_NUM_PROF_FUNC,
			    tfs->TF_PROF_FUNC_POOL_NAME_TX);

	/* Reuse index/end for the profile tcam resource */
	index = TF_RESC_TYPE_HW_PROF_TCAM;

	/* profile tcam rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
			tfs->resc.rx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    end,
			    TF_NUM_PROF_TCAM,
			    tfs->TF_PROF_TCAM_POOL_NAME_RX);

	/* profile tcam tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
			tfs->resc.tx.hw_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    end,
			    TF_NUM_PROF_TCAM,
			    tfs->TF_PROF_TCAM_POOL_NAME_TX);
}
801
802 /**
803  * Internal function to mark all the em profile id allocated that
804  * Truflow does not own.
805  */
806 static void
807 tf_rm_rsvd_em_prof(struct tf_session *tfs)
808 {
809         uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;
810         uint32_t end = 0;
811
812         /* em prof id rx direction */
813         if (tfs->resc.rx.hw_entry[index].stride > 0)
814                 end = tfs->resc.rx.hw_entry[index].start +
815                         tfs->resc.rx.hw_entry[index].stride - 1;
816
817         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
818                             tfs->resc.rx.hw_entry[index].start,
819                             end,
820                             TF_NUM_EM_PROF_ID,
821                             tfs->TF_EM_PROF_ID_POOL_NAME_RX);
822
823         /* em prof id tx direction */
824         if (tfs->resc.tx.hw_entry[index].stride > 0)
825                 end = tfs->resc.tx.hw_entry[index].start +
826                         tfs->resc.tx.hw_entry[index].stride - 1;
827
828         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
829                             tfs->resc.tx.hw_entry[index].start,
830                             end,
831                             TF_NUM_EM_PROF_ID,
832                             tfs->TF_EM_PROF_ID_POOL_NAME_TX);
833 }
834
835 /**
836  * Internal function to mark all the wildcard tcam and profile id
837  * resources that Truflow does not own.
838  */
839 static void
840 tf_rm_rsvd_wc(struct tf_session *tfs)
841 {
842         uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;
843         uint32_t end = 0;
844
845         /* wc profile id rx direction */
846         if (tfs->resc.rx.hw_entry[index].stride > 0)
847                 end = tfs->resc.rx.hw_entry[index].start +
848                         tfs->resc.rx.hw_entry[index].stride - 1;
849
850         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
851                             tfs->resc.rx.hw_entry[index].start,
852                             end,
853                             TF_NUM_WC_PROF_ID,
854                             tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);
855
856         /* wc profile id tx direction */
857         if (tfs->resc.tx.hw_entry[index].stride > 0)
858                 end = tfs->resc.tx.hw_entry[index].start +
859                         tfs->resc.tx.hw_entry[index].stride - 1;
860
861         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
862                             tfs->resc.tx.hw_entry[index].start,
863                             end,
864                             TF_NUM_WC_PROF_ID,
865                             tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);
866
867         index = TF_RESC_TYPE_HW_WC_TCAM;
868
869         /* wc tcam rx direction */
870         if (tfs->resc.rx.hw_entry[index].stride > 0)
871                 end = tfs->resc.rx.hw_entry[index].start +
872                         tfs->resc.rx.hw_entry[index].stride - 1;
873
874         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
875                             tfs->resc.rx.hw_entry[index].start,
876                             end,
877                             TF_NUM_WC_TCAM_ROW,
878                             tfs->TF_WC_TCAM_POOL_NAME_RX);
879
880         /* wc tcam tx direction */
881         if (tfs->resc.tx.hw_entry[index].stride > 0)
882                 end = tfs->resc.tx.hw_entry[index].start +
883                         tfs->resc.tx.hw_entry[index].stride - 1;
884
885         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
886                             tfs->resc.tx.hw_entry[index].start,
887                             end,
888                             TF_NUM_WC_TCAM_ROW,
889                             tfs->TF_WC_TCAM_POOL_NAME_TX);
890 }
891
892 /**
893  * Internal function to mark all the meter resources allocated that
894  * Truflow does not own.
895  */
896 static void
897 tf_rm_rsvd_meter(struct tf_session *tfs)
898 {
899         uint32_t index = TF_RESC_TYPE_HW_METER_PROF;
900         uint32_t end = 0;
901
902         /* meter profiles rx direction */
903         if (tfs->resc.rx.hw_entry[index].stride > 0)
904                 end = tfs->resc.rx.hw_entry[index].start +
905                         tfs->resc.rx.hw_entry[index].stride - 1;
906
907         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
908                             tfs->resc.rx.hw_entry[index].start,
909                             end,
910                             TF_NUM_METER_PROF,
911                             tfs->TF_METER_PROF_POOL_NAME_RX);
912
913         /* meter profiles tx direction */
914         if (tfs->resc.tx.hw_entry[index].stride > 0)
915                 end = tfs->resc.tx.hw_entry[index].start +
916                         tfs->resc.tx.hw_entry[index].stride - 1;
917
918         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
919                             tfs->resc.tx.hw_entry[index].start,
920                             end,
921                             TF_NUM_METER_PROF,
922                             tfs->TF_METER_PROF_POOL_NAME_TX);
923
924         index = TF_RESC_TYPE_HW_METER_INST;
925
926         /* meter rx direction */
927         if (tfs->resc.rx.hw_entry[index].stride > 0)
928                 end = tfs->resc.rx.hw_entry[index].start +
929                         tfs->resc.rx.hw_entry[index].stride - 1;
930
931         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
932                             tfs->resc.rx.hw_entry[index].start,
933                             end,
934                             TF_NUM_METER,
935                             tfs->TF_METER_INST_POOL_NAME_RX);
936
937         /* meter tx direction */
938         if (tfs->resc.tx.hw_entry[index].stride > 0)
939                 end = tfs->resc.tx.hw_entry[index].start +
940                         tfs->resc.tx.hw_entry[index].stride - 1;
941
942         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
943                             tfs->resc.tx.hw_entry[index].start,
944                             end,
945                             TF_NUM_METER,
946                             tfs->TF_METER_INST_POOL_NAME_TX);
947 }
948
949 /**
950  * Internal function to mark all the mirror resources allocated that
951  * Truflow does not own.
952  */
953 static void
954 tf_rm_rsvd_mirror(struct tf_session *tfs)
955 {
956         uint32_t index = TF_RESC_TYPE_HW_MIRROR;
957         uint32_t end = 0;
958
959         /* mirror rx direction */
960         if (tfs->resc.rx.hw_entry[index].stride > 0)
961                 end = tfs->resc.rx.hw_entry[index].start +
962                         tfs->resc.rx.hw_entry[index].stride - 1;
963
964         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
965                             tfs->resc.rx.hw_entry[index].start,
966                             end,
967                             TF_NUM_MIRROR,
968                             tfs->TF_MIRROR_POOL_NAME_RX);
969
970         /* mirror tx direction */
971         if (tfs->resc.tx.hw_entry[index].stride > 0)
972                 end = tfs->resc.tx.hw_entry[index].start +
973                         tfs->resc.tx.hw_entry[index].stride - 1;
974
975         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
976                             tfs->resc.tx.hw_entry[index].start,
977                             end,
978                             TF_NUM_MIRROR,
979                             tfs->TF_MIRROR_POOL_NAME_TX);
980 }
981
982 /**
983  * Internal function to mark all the upar resources allocated that
984  * Truflow does not own.
985  */
986 static void
987 tf_rm_rsvd_upar(struct tf_session *tfs)
988 {
989         uint32_t index = TF_RESC_TYPE_HW_UPAR;
990         uint32_t end = 0;
991
992         /* upar rx direction */
993         if (tfs->resc.rx.hw_entry[index].stride > 0)
994                 end = tfs->resc.rx.hw_entry[index].start +
995                         tfs->resc.rx.hw_entry[index].stride - 1;
996
997         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
998                             tfs->resc.rx.hw_entry[index].start,
999                             end,
1000                             TF_NUM_UPAR,
1001                             tfs->TF_UPAR_POOL_NAME_RX);
1002
1003         /* upar tx direction */
1004         if (tfs->resc.tx.hw_entry[index].stride > 0)
1005                 end = tfs->resc.tx.hw_entry[index].start +
1006                         tfs->resc.tx.hw_entry[index].stride - 1;
1007
1008         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1009                             tfs->resc.tx.hw_entry[index].start,
1010                             end,
1011                             TF_NUM_UPAR,
1012                             tfs->TF_UPAR_POOL_NAME_TX);
1013 }
1014
1015 /**
1016  * Internal function to mark all the sp tcam resources allocated that
1017  * Truflow does not own.
1018  */
1019 static void
1020 tf_rm_rsvd_sp_tcam(struct tf_session *tfs)
1021 {
1022         uint32_t index = TF_RESC_TYPE_HW_SP_TCAM;
1023         uint32_t end = 0;
1024
1025         /* sp tcam rx direction */
1026         if (tfs->resc.rx.hw_entry[index].stride > 0)
1027                 end = tfs->resc.rx.hw_entry[index].start +
1028                         tfs->resc.rx.hw_entry[index].stride - 1;
1029
1030         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1031                             tfs->resc.rx.hw_entry[index].start,
1032                             end,
1033                             TF_NUM_SP_TCAM,
1034                             tfs->TF_SP_TCAM_POOL_NAME_RX);
1035
1036         /* sp tcam tx direction */
1037         if (tfs->resc.tx.hw_entry[index].stride > 0)
1038                 end = tfs->resc.tx.hw_entry[index].start +
1039                         tfs->resc.tx.hw_entry[index].stride - 1;
1040
1041         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1042                             tfs->resc.tx.hw_entry[index].start,
1043                             end,
1044                             TF_NUM_SP_TCAM,
1045                             tfs->TF_SP_TCAM_POOL_NAME_TX);
1046 }
1047
1048 /**
1049  * Internal function to mark all the l2 func resources allocated that
1050  * Truflow does not own.
1051  */
1052 static void
1053 tf_rm_rsvd_l2_func(struct tf_session *tfs)
1054 {
1055         uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
1056         uint32_t end = 0;
1057
1058         /* l2 func rx direction */
1059         if (tfs->resc.rx.hw_entry[index].stride > 0)
1060                 end = tfs->resc.rx.hw_entry[index].start +
1061                         tfs->resc.rx.hw_entry[index].stride - 1;
1062
1063         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1064                             tfs->resc.rx.hw_entry[index].start,
1065                             end,
1066                             TF_NUM_L2_FUNC,
1067                             tfs->TF_L2_FUNC_POOL_NAME_RX);
1068
1069         /* l2 func tx direction */
1070         if (tfs->resc.tx.hw_entry[index].stride > 0)
1071                 end = tfs->resc.tx.hw_entry[index].start +
1072                         tfs->resc.tx.hw_entry[index].stride - 1;
1073
1074         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1075                             tfs->resc.tx.hw_entry[index].start,
1076                             end,
1077                             TF_NUM_L2_FUNC,
1078                             tfs->TF_L2_FUNC_POOL_NAME_TX);
1079 }
1080
1081 /**
1082  * Internal function to mark all the fkb resources allocated that
1083  * Truflow does not own.
1084  */
1085 static void
1086 tf_rm_rsvd_fkb(struct tf_session *tfs)
1087 {
1088         uint32_t index = TF_RESC_TYPE_HW_FKB;
1089         uint32_t end = 0;
1090
1091         /* fkb rx direction */
1092         if (tfs->resc.rx.hw_entry[index].stride > 0)
1093                 end = tfs->resc.rx.hw_entry[index].start +
1094                         tfs->resc.rx.hw_entry[index].stride - 1;
1095
1096         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1097                             tfs->resc.rx.hw_entry[index].start,
1098                             end,
1099                             TF_NUM_FKB,
1100                             tfs->TF_FKB_POOL_NAME_RX);
1101
1102         /* fkb tx direction */
1103         if (tfs->resc.tx.hw_entry[index].stride > 0)
1104                 end = tfs->resc.tx.hw_entry[index].start +
1105                         tfs->resc.tx.hw_entry[index].stride - 1;
1106
1107         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1108                             tfs->resc.tx.hw_entry[index].start,
1109                             end,
1110                             TF_NUM_FKB,
1111                             tfs->TF_FKB_POOL_NAME_TX);
1112 }
1113
1114 /**
1115  * Internal function to mark all the tbld scope resources allocated
1116  * that Truflow does not own.
1117  */
1118 static void
1119 tf_rm_rsvd_tbl_scope(struct tf_session *tfs)
1120 {
1121         uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;
1122         uint32_t end = 0;
1123
1124         /* tbl scope rx direction */
1125         if (tfs->resc.rx.hw_entry[index].stride > 0)
1126                 end = tfs->resc.rx.hw_entry[index].start +
1127                         tfs->resc.rx.hw_entry[index].stride - 1;
1128
1129         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1130                             tfs->resc.rx.hw_entry[index].start,
1131                             end,
1132                             TF_NUM_TBL_SCOPE,
1133                             tfs->TF_TBL_SCOPE_POOL_NAME_RX);
1134
1135         /* tbl scope tx direction */
1136         if (tfs->resc.tx.hw_entry[index].stride > 0)
1137                 end = tfs->resc.tx.hw_entry[index].start +
1138                         tfs->resc.tx.hw_entry[index].stride - 1;
1139
1140         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1141                             tfs->resc.tx.hw_entry[index].start,
1142                             end,
1143                             TF_NUM_TBL_SCOPE,
1144                             tfs->TF_TBL_SCOPE_POOL_NAME_TX);
1145 }
1146
1147 /**
1148  * Internal function to mark all the l2 epoch resources allocated that
1149  * Truflow does not own.
1150  */
1151 static void
1152 tf_rm_rsvd_epoch(struct tf_session *tfs)
1153 {
1154         uint32_t index = TF_RESC_TYPE_HW_EPOCH0;
1155         uint32_t end = 0;
1156
1157         /* epoch0 rx direction */
1158         if (tfs->resc.rx.hw_entry[index].stride > 0)
1159                 end = tfs->resc.rx.hw_entry[index].start +
1160                         tfs->resc.rx.hw_entry[index].stride - 1;
1161
1162         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1163                             tfs->resc.rx.hw_entry[index].start,
1164                             end,
1165                             TF_NUM_EPOCH0,
1166                             tfs->TF_EPOCH0_POOL_NAME_RX);
1167
1168         /* epoch0 tx direction */
1169         if (tfs->resc.tx.hw_entry[index].stride > 0)
1170                 end = tfs->resc.tx.hw_entry[index].start +
1171                         tfs->resc.tx.hw_entry[index].stride - 1;
1172
1173         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1174                             tfs->resc.tx.hw_entry[index].start,
1175                             end,
1176                             TF_NUM_EPOCH0,
1177                             tfs->TF_EPOCH0_POOL_NAME_TX);
1178
1179         index = TF_RESC_TYPE_HW_EPOCH1;
1180
1181         /* epoch1 rx direction */
1182         if (tfs->resc.rx.hw_entry[index].stride > 0)
1183                 end = tfs->resc.rx.hw_entry[index].start +
1184                         tfs->resc.rx.hw_entry[index].stride - 1;
1185
1186         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1187                             tfs->resc.rx.hw_entry[index].start,
1188                             end,
1189                             TF_NUM_EPOCH1,
1190                             tfs->TF_EPOCH1_POOL_NAME_RX);
1191
1192         /* epoch1 tx direction */
1193         if (tfs->resc.tx.hw_entry[index].stride > 0)
1194                 end = tfs->resc.tx.hw_entry[index].start +
1195                         tfs->resc.tx.hw_entry[index].stride - 1;
1196
1197         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1198                             tfs->resc.tx.hw_entry[index].start,
1199                             end,
1200                             TF_NUM_EPOCH1,
1201                             tfs->TF_EPOCH1_POOL_NAME_TX);
1202 }
1203
1204 /**
1205  * Internal function to mark all the metadata resources allocated that
1206  * Truflow does not own.
1207  */
1208 static void
1209 tf_rm_rsvd_metadata(struct tf_session *tfs)
1210 {
1211         uint32_t index = TF_RESC_TYPE_HW_METADATA;
1212         uint32_t end = 0;
1213
1214         /* metadata rx direction */
1215         if (tfs->resc.rx.hw_entry[index].stride > 0)
1216                 end = tfs->resc.rx.hw_entry[index].start +
1217                         tfs->resc.rx.hw_entry[index].stride - 1;
1218
1219         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1220                             tfs->resc.rx.hw_entry[index].start,
1221                             end,
1222                             TF_NUM_METADATA,
1223                             tfs->TF_METADATA_POOL_NAME_RX);
1224
1225         /* metadata tx direction */
1226         if (tfs->resc.tx.hw_entry[index].stride > 0)
1227                 end = tfs->resc.tx.hw_entry[index].start +
1228                         tfs->resc.tx.hw_entry[index].stride - 1;
1229
1230         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1231                             tfs->resc.tx.hw_entry[index].start,
1232                             end,
1233                             TF_NUM_METADATA,
1234                             tfs->TF_METADATA_POOL_NAME_TX);
1235 }
1236
1237 /**
1238  * Internal function to mark all the ct state resources allocated that
1239  * Truflow does not own.
1240  */
1241 static void
1242 tf_rm_rsvd_ct_state(struct tf_session *tfs)
1243 {
1244         uint32_t index = TF_RESC_TYPE_HW_CT_STATE;
1245         uint32_t end = 0;
1246
1247         /* ct state rx direction */
1248         if (tfs->resc.rx.hw_entry[index].stride > 0)
1249                 end = tfs->resc.rx.hw_entry[index].start +
1250                         tfs->resc.rx.hw_entry[index].stride - 1;
1251
1252         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1253                             tfs->resc.rx.hw_entry[index].start,
1254                             end,
1255                             TF_NUM_CT_STATE,
1256                             tfs->TF_CT_STATE_POOL_NAME_RX);
1257
1258         /* ct state tx direction */
1259         if (tfs->resc.tx.hw_entry[index].stride > 0)
1260                 end = tfs->resc.tx.hw_entry[index].start +
1261                         tfs->resc.tx.hw_entry[index].stride - 1;
1262
1263         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1264                             tfs->resc.tx.hw_entry[index].start,
1265                             end,
1266                             TF_NUM_CT_STATE,
1267                             tfs->TF_CT_STATE_POOL_NAME_TX);
1268 }
1269
1270 /**
1271  * Internal function to mark all the range resources allocated that
1272  * Truflow does not own.
1273  */
1274 static void
1275 tf_rm_rsvd_range(struct tf_session *tfs)
1276 {
1277         uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;
1278         uint32_t end = 0;
1279
1280         /* range profile rx direction */
1281         if (tfs->resc.rx.hw_entry[index].stride > 0)
1282                 end = tfs->resc.rx.hw_entry[index].start +
1283                         tfs->resc.rx.hw_entry[index].stride - 1;
1284
1285         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1286                             tfs->resc.rx.hw_entry[index].start,
1287                             end,
1288                             TF_NUM_RANGE_PROF,
1289                             tfs->TF_RANGE_PROF_POOL_NAME_RX);
1290
1291         /* range profile tx direction */
1292         if (tfs->resc.tx.hw_entry[index].stride > 0)
1293                 end = tfs->resc.tx.hw_entry[index].start +
1294                         tfs->resc.tx.hw_entry[index].stride - 1;
1295
1296         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1297                             tfs->resc.tx.hw_entry[index].start,
1298                             end,
1299                             TF_NUM_RANGE_PROF,
1300                             tfs->TF_RANGE_PROF_POOL_NAME_TX);
1301
1302         index = TF_RESC_TYPE_HW_RANGE_ENTRY;
1303
1304         /* range entry rx direction */
1305         if (tfs->resc.rx.hw_entry[index].stride > 0)
1306                 end = tfs->resc.rx.hw_entry[index].start +
1307                         tfs->resc.rx.hw_entry[index].stride - 1;
1308
1309         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1310                             tfs->resc.rx.hw_entry[index].start,
1311                             end,
1312                             TF_NUM_RANGE_ENTRY,
1313                             tfs->TF_RANGE_ENTRY_POOL_NAME_RX);
1314
1315         /* range entry tx direction */
1316         if (tfs->resc.tx.hw_entry[index].stride > 0)
1317                 end = tfs->resc.tx.hw_entry[index].start +
1318                         tfs->resc.tx.hw_entry[index].stride - 1;
1319
1320         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1321                             tfs->resc.tx.hw_entry[index].start,
1322                             end,
1323                             TF_NUM_RANGE_ENTRY,
1324                             tfs->TF_RANGE_ENTRY_POOL_NAME_TX);
1325 }
1326
1327 /**
1328  * Internal function to mark all the lag resources allocated that
1329  * Truflow does not own.
1330  */
1331 static void
1332 tf_rm_rsvd_lag_entry(struct tf_session *tfs)
1333 {
1334         uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;
1335         uint32_t end = 0;
1336
1337         /* lag entry rx direction */
1338         if (tfs->resc.rx.hw_entry[index].stride > 0)
1339                 end = tfs->resc.rx.hw_entry[index].start +
1340                         tfs->resc.rx.hw_entry[index].stride - 1;
1341
1342         tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1343                             tfs->resc.rx.hw_entry[index].start,
1344                             end,
1345                             TF_NUM_LAG_ENTRY,
1346                             tfs->TF_LAG_ENTRY_POOL_NAME_RX);
1347
1348         /* lag entry tx direction */
1349         if (tfs->resc.tx.hw_entry[index].stride > 0)
1350                 end = tfs->resc.tx.hw_entry[index].start +
1351                         tfs->resc.tx.hw_entry[index].stride - 1;
1352
1353         tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1354                             tfs->resc.tx.hw_entry[index].start,
1355                             end,
1356                             TF_NUM_LAG_ENTRY,
1357                             tfs->TF_LAG_ENTRY_POOL_NAME_TX);
1358 }
1359
1360 /**
1361  * Internal function to mark all the full action resources allocated
1362  * that Truflow does not own.
1363  */
1364 static void
1365 tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
1366 {
1367         uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
1368         uint16_t end = 0;
1369
1370         /* full action rx direction */
1371         if (tfs->resc.rx.sram_entry[index].stride > 0)
1372                 end = tfs->resc.rx.sram_entry[index].start +
1373                         tfs->resc.rx.sram_entry[index].stride - 1;
1374
1375         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1376                             TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
1377                             end,
1378                             TF_RSVD_SRAM_FULL_ACTION_RX,
1379                             tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);
1380
1381         /* full action tx direction */
1382         if (tfs->resc.tx.sram_entry[index].stride > 0)
1383                 end = tfs->resc.tx.sram_entry[index].start +
1384                         tfs->resc.tx.sram_entry[index].stride - 1;
1385
1386         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1387                             TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
1388                             end,
1389                             TF_RSVD_SRAM_FULL_ACTION_TX,
1390                             tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
1391 }
1392
1393 /**
1394  * Internal function to mark all the multicast group resources
1395  * allocated that Truflow does not own.
1396  */
1397 static void
1398 tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
1399 {
1400         uint32_t index = TF_RESC_TYPE_SRAM_MCG;
1401         uint16_t end = 0;
1402
1403         /* multicast group rx direction */
1404         if (tfs->resc.rx.sram_entry[index].stride > 0)
1405                 end = tfs->resc.rx.sram_entry[index].start +
1406                         tfs->resc.rx.sram_entry[index].stride - 1;
1407
1408         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1409                             TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
1410                             end,
1411                             TF_RSVD_SRAM_MCG_RX,
1412                             tfs->TF_SRAM_MCG_POOL_NAME_RX);
1413
1414         /* Multicast Group on TX is not supported */
1415 }
1416
1417 /**
1418  * Internal function to mark all the encap resources allocated that
1419  * Truflow does not own.
1420  */
1421 static void
1422 tf_rm_rsvd_sram_encap(struct tf_session *tfs)
1423 {
1424         uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
1425         uint16_t end = 0;
1426
1427         /* encap 8b rx direction */
1428         if (tfs->resc.rx.sram_entry[index].stride > 0)
1429                 end = tfs->resc.rx.sram_entry[index].start +
1430                         tfs->resc.rx.sram_entry[index].stride - 1;
1431
1432         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1433                             TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
1434                             end,
1435                             TF_RSVD_SRAM_ENCAP_8B_RX,
1436                             tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);
1437
1438         /* encap 8b tx direction */
1439         if (tfs->resc.tx.sram_entry[index].stride > 0)
1440                 end = tfs->resc.tx.sram_entry[index].start +
1441                         tfs->resc.tx.sram_entry[index].stride - 1;
1442
1443         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1444                             TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
1445                             end,
1446                             TF_RSVD_SRAM_ENCAP_8B_TX,
1447                             tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);
1448
1449         index = TF_RESC_TYPE_SRAM_ENCAP_16B;
1450
1451         /* encap 16b rx direction */
1452         if (tfs->resc.rx.sram_entry[index].stride > 0)
1453                 end = tfs->resc.rx.sram_entry[index].start +
1454                         tfs->resc.rx.sram_entry[index].stride - 1;
1455
1456         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1457                             TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
1458                             end,
1459                             TF_RSVD_SRAM_ENCAP_16B_RX,
1460                             tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);
1461
1462         /* encap 16b tx direction */
1463         if (tfs->resc.tx.sram_entry[index].stride > 0)
1464                 end = tfs->resc.tx.sram_entry[index].start +
1465                         tfs->resc.tx.sram_entry[index].stride - 1;
1466
1467         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1468                             TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
1469                             end,
1470                             TF_RSVD_SRAM_ENCAP_16B_TX,
1471                             tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);
1472
1473         index = TF_RESC_TYPE_SRAM_ENCAP_64B;
1474
1475         /* Encap 64B not supported on RX */
1476
1477         /* Encap 64b tx direction */
1478         if (tfs->resc.tx.sram_entry[index].stride > 0)
1479                 end = tfs->resc.tx.sram_entry[index].start +
1480                         tfs->resc.tx.sram_entry[index].stride - 1;
1481
1482         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1483                             TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
1484                             end,
1485                             TF_RSVD_SRAM_ENCAP_64B_TX,
1486                             tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
1487 }
1488
1489 /**
1490  * Internal function to mark all the sp resources allocated that
1491  * Truflow does not own.
1492  */
1493 static void
1494 tf_rm_rsvd_sram_sp(struct tf_session *tfs)
1495 {
1496         uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
1497         uint16_t end = 0;
1498
1499         /* sp smac rx direction */
1500         if (tfs->resc.rx.sram_entry[index].stride > 0)
1501                 end = tfs->resc.rx.sram_entry[index].start +
1502                         tfs->resc.rx.sram_entry[index].stride - 1;
1503
1504         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1505                             TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
1506                             end,
1507                             TF_RSVD_SRAM_SP_SMAC_RX,
1508                             tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);
1509
1510         /* sp smac tx direction */
1511         if (tfs->resc.tx.sram_entry[index].stride > 0)
1512                 end = tfs->resc.tx.sram_entry[index].start +
1513                         tfs->resc.tx.sram_entry[index].stride - 1;
1514
1515         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1516                             TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
1517                             end,
1518                             TF_RSVD_SRAM_SP_SMAC_TX,
1519                             tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);
1520
1521         index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
1522
1523         /* SP SMAC IPv4 not supported on RX */
1524
1525         /* sp smac ipv4 tx direction */
1526         if (tfs->resc.tx.sram_entry[index].stride > 0)
1527                 end = tfs->resc.tx.sram_entry[index].start +
1528                         tfs->resc.tx.sram_entry[index].stride - 1;
1529
1530         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1531                             TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
1532                             end,
1533                             TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
1534                             tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);
1535
1536         index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
1537
1538         /* SP SMAC IPv6 not supported on RX */
1539
1540         /* sp smac ipv6 tx direction */
1541         if (tfs->resc.tx.sram_entry[index].stride > 0)
1542                 end = tfs->resc.tx.sram_entry[index].start +
1543                         tfs->resc.tx.sram_entry[index].stride - 1;
1544
1545         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1546                             TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
1547                             end,
1548                             TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
1549                             tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
1550 }
1551
1552 /**
1553  * Internal function to mark all the stat resources allocated that
1554  * Truflow does not own.
1555  */
1556 static void
1557 tf_rm_rsvd_sram_stats(struct tf_session *tfs)
1558 {
1559         uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
1560         uint16_t end = 0;
1561
1562         /* counter 64b rx direction */
1563         if (tfs->resc.rx.sram_entry[index].stride > 0)
1564                 end = tfs->resc.rx.sram_entry[index].start +
1565                         tfs->resc.rx.sram_entry[index].stride - 1;
1566
1567         tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1568                             TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
1569                             end,
1570                             TF_RSVD_SRAM_COUNTER_64B_RX,
1571                             tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);
1572
1573         /* counter 64b tx direction */
1574         if (tfs->resc.tx.sram_entry[index].stride > 0)
1575                 end = tfs->resc.tx.sram_entry[index].start +
1576                         tfs->resc.tx.sram_entry[index].stride - 1;
1577
1578         tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1579                             TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
1580                             end,
1581                             TF_RSVD_SRAM_COUNTER_64B_TX,
1582                             tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
1583 }
1584
1585 /**
1586  * Internal function to mark all the nat resources allocated that
1587  * Truflow does not own.
1588  */
static void
tf_rm_rsvd_sram_nat(struct tf_session *tfs)
{
	/* Index into the per-direction sram_entry tables; advanced
	 * below through each NAT resource type.
	 */
	uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
	/* Last index of the current reserved range. Only recomputed
	 * when a range is non-empty (stride > 0), so for an empty
	 * range the previous value is passed through —
	 * NOTE(review): assumes tf_rm_reserve_range() ignores the end
	 * index when the count argument is 0; confirm against its
	 * implementation.
	 */
	uint16_t end = 0;

	/* nat source port rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_SPORT_RX,
			    tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);

	/* nat source port tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_SPORT_TX,
			    tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_NAT_DPORT;

	/* nat destination port rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_DPORT_RX,
			    tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);

	/* nat destination port tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_DPORT_TX,
			    tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;

	/* nat source ipv4 rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_S_IPV4_RX,
			    tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);

	/* nat source ipv4 tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_S_IPV4_TX,
			    tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);

	index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;

	/* nat destination ipv4 rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
			tfs->resc.rx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
			    end,
			    TF_RSVD_SRAM_NAT_D_IPV4_RX,
			    tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);

	/* nat destination ipv4 tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
			tfs->resc.tx.sram_entry[index].stride - 1;

	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
			    end,
			    TF_RSVD_SRAM_NAT_D_IPV4_TX,
			    tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
}
1689
1690 /**
1691  * Internal function used to validate the HW allocated resources
1692  * against the requested values.
1693  */
1694 static int
1695 tf_rm_hw_alloc_validate(enum tf_dir dir,
1696                         struct tf_rm_hw_alloc *hw_alloc,
1697                         struct tf_rm_entry *hw_entry)
1698 {
1699         int error = 0;
1700         int i;
1701
1702         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
1703                 if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
1704                         TFP_DRV_LOG(ERR,
1705                                 "%s, Alloc failed id:%d expect:%d got:%d\n",
1706                                 tf_dir_2_str(dir),
1707                                 i,
1708                                 hw_alloc->hw_num[i],
1709                                 hw_entry[i].stride);
1710                         error = -1;
1711                 }
1712         }
1713
1714         return error;
1715 }
1716
1717 /**
1718  * Internal function used to validate the SRAM allocated resources
1719  * against the requested values.
1720  */
1721 static int
1722 tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused,
1723                           struct tf_rm_sram_alloc *sram_alloc,
1724                           struct tf_rm_entry *sram_entry)
1725 {
1726         int error = 0;
1727         int i;
1728
1729         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
1730                 if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
1731                         TFP_DRV_LOG(ERR,
1732                                 "%s, Alloc failed idx:%d expect:%d got:%d\n",
1733                                 tf_dir_2_str(dir),
1734                                 i,
1735                                 sram_alloc->sram_num[i],
1736                                 sram_entry[i].stride);
1737                         error = -1;
1738                 }
1739         }
1740
1741         return error;
1742 }
1743
1744 /**
1745  * Internal function used to mark all the HW resources allocated that
1746  * Truflow does not own.
1747  */
1748 static void
1749 tf_rm_reserve_hw(struct tf *tfp)
1750 {
1751         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1752
1753         /* TBD
1754          * There is no direct AFM resource allocation as it is carved
1755          * statically at AFM boot time. Thus the bit allocators work
1756          * on the full HW resource amount and we just mark everything
1757          * used except the resources that Truflow took ownership off.
1758          */
1759         tf_rm_rsvd_l2_ctxt(tfs);
1760         tf_rm_rsvd_prof(tfs);
1761         tf_rm_rsvd_em_prof(tfs);
1762         tf_rm_rsvd_wc(tfs);
1763         tf_rm_rsvd_mirror(tfs);
1764         tf_rm_rsvd_meter(tfs);
1765         tf_rm_rsvd_upar(tfs);
1766         tf_rm_rsvd_sp_tcam(tfs);
1767         tf_rm_rsvd_l2_func(tfs);
1768         tf_rm_rsvd_fkb(tfs);
1769         tf_rm_rsvd_tbl_scope(tfs);
1770         tf_rm_rsvd_epoch(tfs);
1771         tf_rm_rsvd_metadata(tfs);
1772         tf_rm_rsvd_ct_state(tfs);
1773         tf_rm_rsvd_range(tfs);
1774         tf_rm_rsvd_lag_entry(tfs);
1775 }
1776
1777 /**
1778  * Internal function used to mark all the SRAM resources allocated
1779  * that Truflow does not own.
1780  */
1781 static void
1782 tf_rm_reserve_sram(struct tf *tfp)
1783 {
1784         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1785
1786         /* TBD
1787          * There is no direct AFM resource allocation as it is carved
1788          * statically at AFM boot time. Thus the bit allocators work
1789          * on the full HW resource amount and we just mark everything
1790          * used except the resources that Truflow took ownership off.
1791          */
1792         tf_rm_rsvd_sram_full_action(tfs);
1793         tf_rm_rsvd_sram_mcg(tfs);
1794         tf_rm_rsvd_sram_encap(tfs);
1795         tf_rm_rsvd_sram_sp(tfs);
1796         tf_rm_rsvd_sram_stats(tfs);
1797         tf_rm_rsvd_sram_nat(tfs);
1798 }
1799
1800 /**
1801  * Internal function used to allocate and validate all HW resources.
1802  */
1803 static int
1804 tf_rm_allocate_validate_hw(struct tf *tfp,
1805                            enum tf_dir dir)
1806 {
1807         int rc;
1808         int i;
1809         struct tf_rm_hw_query hw_query;
1810         struct tf_rm_hw_alloc hw_alloc;
1811         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1812         struct tf_rm_entry *hw_entries;
1813         uint32_t error_flag;
1814
1815         if (dir == TF_DIR_RX)
1816                 hw_entries = tfs->resc.rx.hw_entry;
1817         else
1818                 hw_entries = tfs->resc.tx.hw_entry;
1819
1820         /* Query for Session HW Resources */
1821         rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
1822         if (rc) {
1823                 /* Log error */
1824                 TFP_DRV_LOG(ERR,
1825                             "%s, HW qcaps message send failed, rc:%s\n",
1826                             tf_dir_2_str(dir),
1827                             strerror(-rc));
1828                 goto cleanup;
1829         }
1830
1831         rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
1832         if (rc) {
1833                 /* Log error */
1834                 TFP_DRV_LOG(ERR,
1835                         "%s, HW QCAPS validation failed,"
1836                         "error_flag:0x%x, rc:%s\n",
1837                         tf_dir_2_str(dir),
1838                         error_flag,
1839                         strerror(-rc));
1840                 tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
1841                 goto cleanup;
1842         }
1843
1844         /* Post process HW capability */
1845         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
1846                 hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
1847
1848         /* Allocate Session HW Resources */
1849         rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
1850         if (rc) {
1851                 /* Log error */
1852                 TFP_DRV_LOG(ERR,
1853                             "%s, HW alloc message send failed, rc:%s\n",
1854                             tf_dir_2_str(dir),
1855                             strerror(-rc));
1856                 goto cleanup;
1857         }
1858
1859         /* Perform HW allocation validation as its possible the
1860          * resource availability changed between qcaps and alloc
1861          */
1862         rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
1863         if (rc) {
1864                 /* Log error */
1865                 TFP_DRV_LOG(ERR,
1866                             "%s, HW Resource validation failed, rc:%s\n",
1867                             tf_dir_2_str(dir),
1868                             strerror(-rc));
1869                 goto cleanup;
1870         }
1871
1872         return 0;
1873
1874  cleanup:
1875
1876         return -1;
1877 }
1878
1879 /**
1880  * Internal function used to allocate and validate all SRAM resources.
1881  *
1882  * [in] tfp
1883  *   Pointer to TF handle
1884  *
1885  * [in] dir
1886  *   Receive or transmit direction
1887  *
1888  * Returns:
1889  *   0  - Success
1890  *   -1 - Internal error
1891  */
1892 static int
1893 tf_rm_allocate_validate_sram(struct tf *tfp,
1894                              enum tf_dir dir)
1895 {
1896         int rc;
1897         int i;
1898         struct tf_rm_sram_query sram_query;
1899         struct tf_rm_sram_alloc sram_alloc;
1900         struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1901         struct tf_rm_entry *sram_entries;
1902         uint32_t error_flag;
1903
1904         if (dir == TF_DIR_RX)
1905                 sram_entries = tfs->resc.rx.sram_entry;
1906         else
1907                 sram_entries = tfs->resc.tx.sram_entry;
1908
1909         /* Query for Session SRAM Resources */
1910         rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
1911         if (rc) {
1912                 /* Log error */
1913                 TFP_DRV_LOG(ERR,
1914                             "%s, SRAM qcaps message send failed, rc:%s\n",
1915                             tf_dir_2_str(dir),
1916                             strerror(-rc));
1917                 goto cleanup;
1918         }
1919
1920         rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
1921         if (rc) {
1922                 /* Log error */
1923                 TFP_DRV_LOG(ERR,
1924                         "%s, SRAM QCAPS validation failed,"
1925                         "error_flag:%x, rc:%s\n",
1926                         tf_dir_2_str(dir),
1927                         error_flag,
1928                         strerror(-rc));
1929                 tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
1930                 goto cleanup;
1931         }
1932
1933         /* Post process SRAM capability */
1934         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
1935                 sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
1936
1937         /* Allocate Session SRAM Resources */
1938         rc = tf_msg_session_sram_resc_alloc(tfp,
1939                                             dir,
1940                                             &sram_alloc,
1941                                             sram_entries);
1942         if (rc) {
1943                 /* Log error */
1944                 TFP_DRV_LOG(ERR,
1945                             "%s, SRAM alloc message send failed, rc:%s\n",
1946                             tf_dir_2_str(dir),
1947                             strerror(-rc));
1948                 goto cleanup;
1949         }
1950
1951         /* Perform SRAM allocation validation as its possible the
1952          * resource availability changed between qcaps and alloc
1953          */
1954         rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
1955         if (rc) {
1956                 /* Log error */
1957                 TFP_DRV_LOG(ERR,
1958                             "%s, SRAM Resource allocation validation failed,"
1959                             " rc:%s\n",
1960                             tf_dir_2_str(dir),
1961                             strerror(-rc));
1962                 goto cleanup;
1963         }
1964
1965         return 0;
1966
1967  cleanup:
1968
1969         return -1;
1970 }
1971
1972 /**
1973  * Helper function used to prune a HW resource array to only hold
1974  * elements that needs to be flushed.
1975  *
1976  * [in] tfs
1977  *   Session handle
1978  *
1979  * [in] dir
1980  *   Receive or transmit direction
1981  *
1982  * [in] hw_entries
1983  *   Master HW Resource database
1984  *
1985  * [in/out] flush_entries
1986  *   Pruned HW Resource database of entries to be flushed. This
1987  *   array should be passed in as a complete copy of the master HW
1988  *   Resource database. The outgoing result will be a pruned version
1989  *   based on the result of the requested checking
1990  *
1991  * Returns:
1992  *    0 - Success, no flush required
1993  *    1 - Success, flush required
1994  *   -1 - Internal error
1995  */
1996 static int
1997 tf_rm_hw_to_flush(struct tf_session *tfs,
1998                   enum tf_dir dir,
1999                   struct tf_rm_entry *hw_entries,
2000                   struct tf_rm_entry *flush_entries)
2001 {
2002         int rc;
2003         int flush_rc = 0;
2004         int free_cnt;
2005         struct bitalloc *pool;
2006
2007         /* Check all the hw resource pools and check for left over
2008          * elements. Any found will result in the complete pool of a
2009          * type to get invalidated.
2010          */
2011
2012         TF_RM_GET_POOLS(tfs, dir, &pool,
2013                         TF_L2_CTXT_TCAM_POOL_NAME,
2014                         rc);
2015         if (rc)
2016                 return rc;
2017         free_cnt = ba_free_count(pool);
2018         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {
2019                 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;
2020                 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;
2021         } else {
2022                 flush_rc = 1;
2023         }
2024
2025         TF_RM_GET_POOLS(tfs, dir, &pool,
2026                         TF_PROF_FUNC_POOL_NAME,
2027                         rc);
2028         if (rc)
2029                 return rc;
2030         free_cnt = ba_free_count(pool);
2031         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {
2032                 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;
2033                 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;
2034         } else {
2035                 flush_rc = 1;
2036         }
2037
2038         TF_RM_GET_POOLS(tfs, dir, &pool,
2039                         TF_PROF_TCAM_POOL_NAME,
2040                         rc);
2041         if (rc)
2042                 return rc;
2043         free_cnt = ba_free_count(pool);
2044         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {
2045                 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;
2046                 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;
2047         } else {
2048                 flush_rc = 1;
2049         }
2050
2051         TF_RM_GET_POOLS(tfs, dir, &pool,
2052                         TF_EM_PROF_ID_POOL_NAME,
2053                         rc);
2054         if (rc)
2055                 return rc;
2056         free_cnt = ba_free_count(pool);
2057         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) {
2058                 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;
2059                 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;
2060         } else {
2061                 flush_rc = 1;
2062         }
2063
2064         flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;
2065         flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;
2066
2067         TF_RM_GET_POOLS(tfs, dir, &pool,
2068                         TF_WC_TCAM_PROF_ID_POOL_NAME,
2069                         rc);
2070         if (rc)
2071                 return rc;
2072         free_cnt = ba_free_count(pool);
2073         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {
2074                 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;
2075                 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;
2076         } else {
2077                 flush_rc = 1;
2078         }
2079
2080         TF_RM_GET_POOLS(tfs, dir, &pool,
2081                         TF_WC_TCAM_POOL_NAME,
2082                         rc);
2083         if (rc)
2084                 return rc;
2085         free_cnt = ba_free_count(pool);
2086         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {
2087                 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;
2088                 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;
2089         } else {
2090                 flush_rc = 1;
2091         }
2092
2093         TF_RM_GET_POOLS(tfs, dir, &pool,
2094                         TF_METER_PROF_POOL_NAME,
2095                         rc);
2096         if (rc)
2097                 return rc;
2098         free_cnt = ba_free_count(pool);
2099         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {
2100                 flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;
2101                 flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;
2102         } else {
2103                 flush_rc = 1;
2104         }
2105
2106         TF_RM_GET_POOLS(tfs, dir, &pool,
2107                         TF_METER_INST_POOL_NAME,
2108                         rc);
2109         if (rc)
2110                 return rc;
2111         free_cnt = ba_free_count(pool);
2112         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {
2113                 flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;
2114                 flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;
2115         } else {
2116                 flush_rc = 1;
2117         }
2118
2119         TF_RM_GET_POOLS(tfs, dir, &pool,
2120                         TF_MIRROR_POOL_NAME,
2121                         rc);
2122         if (rc)
2123                 return rc;
2124         free_cnt = ba_free_count(pool);
2125         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) {
2126                 flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;
2127                 flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;
2128         } else {
2129                 flush_rc = 1;
2130         }
2131
2132         TF_RM_GET_POOLS(tfs, dir, &pool,
2133                         TF_UPAR_POOL_NAME,
2134                         rc);
2135         if (rc)
2136                 return rc;
2137         free_cnt = ba_free_count(pool);
2138         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {
2139                 flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;
2140                 flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;
2141         } else {
2142                 flush_rc = 1;
2143         }
2144
2145         TF_RM_GET_POOLS(tfs, dir, &pool,
2146                         TF_SP_TCAM_POOL_NAME,
2147                         rc);
2148         if (rc)
2149                 return rc;
2150         free_cnt = ba_free_count(pool);
2151         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {
2152                 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;
2153                 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;
2154         } else {
2155                 flush_rc = 1;
2156         }
2157
2158         TF_RM_GET_POOLS(tfs, dir, &pool,
2159                         TF_L2_FUNC_POOL_NAME,
2160                         rc);
2161         if (rc)
2162                 return rc;
2163         free_cnt = ba_free_count(pool);
2164         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {
2165                 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;
2166                 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;
2167         } else {
2168                 flush_rc = 1;
2169         }
2170
2171         TF_RM_GET_POOLS(tfs, dir, &pool,
2172                         TF_FKB_POOL_NAME,
2173                         rc);
2174         if (rc)
2175                 return rc;
2176         free_cnt = ba_free_count(pool);
2177         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {
2178                 flush_entries[TF_RESC_TYPE_HW_FKB].start = 0;
2179                 flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;
2180         } else {
2181                 flush_rc = 1;
2182         }
2183
2184         TF_RM_GET_POOLS(tfs, dir, &pool,
2185                         TF_TBL_SCOPE_POOL_NAME,
2186                         rc);
2187         if (rc)
2188                 return rc;
2189         free_cnt = ba_free_count(pool);
2190         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {
2191                 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
2192                 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
2193         } else {
2194                 TFP_DRV_LOG(ERR, "%s, TBL_SCOPE free_cnt:%d, entries:%d\n",
2195                             tf_dir_2_str(dir),
2196                             free_cnt,
2197                             hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
2198                 flush_rc = 1;
2199         }
2200
2201         TF_RM_GET_POOLS(tfs, dir, &pool,
2202                         TF_EPOCH0_POOL_NAME,
2203                         rc);
2204         if (rc)
2205                 return rc;
2206         free_cnt = ba_free_count(pool);
2207         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {
2208                 flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;
2209                 flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;
2210         } else {
2211                 flush_rc = 1;
2212         }
2213
2214         TF_RM_GET_POOLS(tfs, dir, &pool,
2215                         TF_EPOCH1_POOL_NAME,
2216                         rc);
2217         if (rc)
2218                 return rc;
2219         free_cnt = ba_free_count(pool);
2220         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {
2221                 flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;
2222                 flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;
2223         } else {
2224                 flush_rc = 1;
2225         }
2226
2227         TF_RM_GET_POOLS(tfs, dir, &pool,
2228                         TF_METADATA_POOL_NAME,
2229                         rc);
2230         if (rc)
2231                 return rc;
2232         free_cnt = ba_free_count(pool);
2233         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {
2234                 flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;
2235                 flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;
2236         } else {
2237                 flush_rc = 1;
2238         }
2239
2240         TF_RM_GET_POOLS(tfs, dir, &pool,
2241                         TF_CT_STATE_POOL_NAME,
2242                         rc);
2243         if (rc)
2244                 return rc;
2245         free_cnt = ba_free_count(pool);
2246         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {
2247                 flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;
2248                 flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;
2249         } else {
2250                 flush_rc = 1;
2251         }
2252
2253         TF_RM_GET_POOLS(tfs, dir, &pool,
2254                         TF_RANGE_PROF_POOL_NAME,
2255                         rc);
2256         if (rc)
2257                 return rc;
2258         free_cnt = ba_free_count(pool);
2259         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {
2260                 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;
2261                 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;
2262         } else {
2263                 flush_rc = 1;
2264         }
2265
2266         TF_RM_GET_POOLS(tfs, dir, &pool,
2267                         TF_RANGE_ENTRY_POOL_NAME,
2268                         rc);
2269         if (rc)
2270                 return rc;
2271         free_cnt = ba_free_count(pool);
2272         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {
2273                 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;
2274                 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;
2275         } else {
2276                 flush_rc = 1;
2277         }
2278
2279         TF_RM_GET_POOLS(tfs, dir, &pool,
2280                         TF_LAG_ENTRY_POOL_NAME,
2281                         rc);
2282         if (rc)
2283                 return rc;
2284         free_cnt = ba_free_count(pool);
2285         if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {
2286                 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;
2287                 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;
2288         } else {
2289                 flush_rc = 1;
2290         }
2291
2292         return flush_rc;
2293 }
2294
2295 /**
2296  * Helper function used to prune a SRAM resource array to only hold
2297  * elements that needs to be flushed.
2298  *
2299  * [in] tfs
2300  *   Session handle
2301  *
2302  * [in] dir
2303  *   Receive or transmit direction
2304  *
2305  * [in] hw_entries
2306  *   Master SRAM Resource data base
2307  *
2308  * [in/out] flush_entries
2309  *   Pruned SRAM Resource database of entries to be flushed. This
2310  *   array should be passed in as a complete copy of the master SRAM
2311  *   Resource database. The outgoing result will be a pruned version
2312  *   based on the result of the requested checking
2313  *
2314  * Returns:
2315  *    0 - Success, no flush required
2316  *    1 - Success, flush required
2317  *   -1 - Internal error
2318  */
static int
tf_rm_sram_to_flush(struct tf_session *tfs,
		    enum tf_dir dir,
		    struct tf_rm_entry *sram_entries,
		    struct tf_rm_entry *flush_entries)
{
	int rc;
	/* 0 = no flush required; flipped to 1 as soon as any pool is
	 * found with outstanding (un-freed) elements.
	 */
	int flush_rc = 0;
	int free_cnt;
	struct bitalloc *pool;

	/* Check all the sram resource pools and check for left over
	 * elements. Any found will result in the complete pool of a
	 * type to get invalidated.
	 *
	 * Per-type logic: if the pool's free count equals the full
	 * reserved stride, every element was returned, so the type is
	 * pruned from the flush set (start/stride zeroed). Otherwise
	 * the flush entry is left intact and flush_rc is raised.
	 */

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_FULL_ACTION_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Only pools for RX direction */
	if (dir == TF_DIR_RX) {
		TF_RM_GET_POOLS_RX(tfs, &pool,
				   TF_SRAM_MCG_POOL_NAME);
		/* NOTE(review): TF_RM_GET_POOLS_RX takes no 'rc'
		 * argument, so this check re-tests the previous
		 * lookup's result (0 at this point). Confirm whether
		 * the macro sets 'rc' internally; if not, the check
		 * is dead code. Same applies to the _TX sections
		 * below.
		 */
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune TX direction */
		flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_ENCAP_8B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_ENCAP_16B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_ENCAP_64B_POOL_NAME);
		/* See NOTE(review) above re: stale 'rc' check */
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_SP_SMAC_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
	} else {
		flush_rc = 1;
	}

	/* Only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
		/* See NOTE(review) above re: stale 'rc' check */
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
				0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
	}

	/* Only pools for TX direction */
	if (dir == TF_DIR_TX) {
		TF_RM_GET_POOLS_TX(tfs, &pool,
				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
		/* See NOTE(review) above re: stale 'rc' check */
		if (rc)
			return rc;
		free_cnt = ba_free_count(pool);
		if (free_cnt ==
		    sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
			flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
				0;
		} else {
			flush_rc = 1;
		}
	} else {
		/* Always prune RX direction */
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_STATS_64B_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_SPORT_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_DPORT_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_S_IPV4_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
	} else {
		flush_rc = 1;
	}

	TF_RM_GET_POOLS(tfs, dir, &pool,
			TF_SRAM_NAT_D_IPV4_POOL_NAME,
			rc);
	if (rc)
		return rc;
	free_cnt = ba_free_count(pool);
	if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
		flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
		flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
	} else {
		flush_rc = 1;
	}

	return flush_rc;
}
2535
2536 /**
2537  * Helper function used to generate an error log for the HW types that
2538  * needs to be flushed. The types should have been cleaned up ahead of
2539  * invoking tf_close_session.
2540  *
2541  * [in] hw_entries
2542  *   HW Resource database holding elements to be flushed
2543  */
2544 static void
2545 tf_rm_log_hw_flush(enum tf_dir dir,
2546                    struct tf_rm_entry *hw_entries)
2547 {
2548         int i;
2549
2550         /* Walk the hw flush array and log the types that wasn't
2551          * cleaned up.
2552          */
2553         for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
2554                 if (hw_entries[i].stride != 0)
2555                         TFP_DRV_LOG(ERR,
2556                                     "%s, %s was not cleaned up\n",
2557                                     tf_dir_2_str(dir),
2558                                     tf_hcapi_hw_2_str(i));
2559         }
2560 }
2561
2562 /**
2563  * Helper function used to generate an error log for the SRAM types
2564  * that needs to be flushed. The types should have been cleaned up
2565  * ahead of invoking tf_close_session.
2566  *
2567  * [in] sram_entries
2568  *   SRAM Resource database holding elements to be flushed
2569  */
2570 static void
2571 tf_rm_log_sram_flush(enum tf_dir dir,
2572                      struct tf_rm_entry *sram_entries)
2573 {
2574         int i;
2575
2576         /* Walk the sram flush array and log the types that wasn't
2577          * cleaned up.
2578          */
2579         for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
2580                 if (sram_entries[i].stride != 0)
2581                         TFP_DRV_LOG(ERR,
2582                                     "%s, %s was not cleaned up\n",
2583                                     tf_dir_2_str(dir),
2584                                     tf_hcapi_sram_2_str(i));
2585         }
2586 }
2587
2588 void
2589 tf_rm_init(struct tf *tfp __rte_unused)
2590 {
2591         struct tf_session *tfs =
2592                 (struct tf_session *)(tfp->session->core_data);
2593
2594         /* This version is host specific and should be checked against
2595          * when attaching as there is no guarantee that a secondary
2596          * would run from same image version.
2597          */
2598         tfs->ver.major = TF_SESSION_VER_MAJOR;
2599         tfs->ver.minor = TF_SESSION_VER_MINOR;
2600         tfs->ver.update = TF_SESSION_VER_UPDATE;
2601
2602         tfs->session_id.id = 0;
2603         tfs->ref_count = 0;
2604
2605         /* Initialization of Table Scopes */
2606         /* ll_init(&tfs->tbl_scope_ll); */
2607
2608         /* Initialization of HW and SRAM resource DB */
2609         memset(&tfs->resc, 0, sizeof(struct tf_rm_db));
2610
2611         /* Initialization of HW Resource Pools */
2612         ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2613         ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2614         ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
2615         ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
2616         ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
2617         ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
2618         ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
2619         ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
2620
2621         /* TBD, how do we want to handle EM records ?*/
2622         /* EM Records should not be controlled by way of a pool */
2623
2624         ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
2625         ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
2626         ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
2627         ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
2628         ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
2629         ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
2630         ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
2631         ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
2632         ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
2633         ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
2634         ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
2635         ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
2636
2637         ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
2638         ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
2639
2640         ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
2641         ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
2642
2643         ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
2644         ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
2645         ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
2646         ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
2647         ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
2648         ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
2649         ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
2650         ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
2651         ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
2652         ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
2653         ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
2654         ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
2655         ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
2656         ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);
2657         ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);
2658         ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);
2659         ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);
2660         ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);
2661
2662         /* Initialization of SRAM Resource Pools
2663          * These pools are set to the TFLIB defined MAX sizes not
2664          * AFM's HW max as to limit the memory consumption
2665          */
2666         ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
2667                 TF_RSVD_SRAM_FULL_ACTION_RX);
2668         ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
2669                 TF_RSVD_SRAM_FULL_ACTION_TX);
2670         /* Only Multicast Group on RX is supported */
2671         ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
2672                 TF_RSVD_SRAM_MCG_RX);
2673         ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
2674                 TF_RSVD_SRAM_ENCAP_8B_RX);
2675         ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
2676                 TF_RSVD_SRAM_ENCAP_8B_TX);
2677         ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
2678                 TF_RSVD_SRAM_ENCAP_16B_RX);
2679         ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
2680                 TF_RSVD_SRAM_ENCAP_16B_TX);
2681         /* Only Encap 64B on TX is supported */
2682         ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
2683                 TF_RSVD_SRAM_ENCAP_64B_TX);
2684         ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
2685                 TF_RSVD_SRAM_SP_SMAC_RX);
2686         ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
2687                 TF_RSVD_SRAM_SP_SMAC_TX);
2688         /* Only SP SMAC IPv4 on TX is supported */
2689         ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
2690                 TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
2691         /* Only SP SMAC IPv6 on TX is supported */
2692         ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
2693                 TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
2694         ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
2695                 TF_RSVD_SRAM_COUNTER_64B_RX);
2696         ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
2697                 TF_RSVD_SRAM_COUNTER_64B_TX);
2698         ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
2699                 TF_RSVD_SRAM_NAT_SPORT_RX);
2700         ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
2701                 TF_RSVD_SRAM_NAT_SPORT_TX);
2702         ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
2703                 TF_RSVD_SRAM_NAT_DPORT_RX);
2704         ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
2705                 TF_RSVD_SRAM_NAT_DPORT_TX);
2706         ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
2707                 TF_RSVD_SRAM_NAT_S_IPV4_RX);
2708         ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
2709                 TF_RSVD_SRAM_NAT_S_IPV4_TX);
2710         ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
2711                 TF_RSVD_SRAM_NAT_D_IPV4_RX);
2712         ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
2713                 TF_RSVD_SRAM_NAT_D_IPV4_TX);
2714
2715         /* Initialization of pools local to TF Core */
2716         ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2717         ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2718 }
2719
2720 int
2721 tf_rm_allocate_validate(struct tf *tfp)
2722 {
2723         int rc;
2724         int i;
2725
2726         for (i = 0; i < TF_DIR_MAX; i++) {
2727                 rc = tf_rm_allocate_validate_hw(tfp, i);
2728                 if (rc)
2729                         return rc;
2730                 rc = tf_rm_allocate_validate_sram(tfp, i);
2731                 if (rc)
2732                         return rc;
2733         }
2734
2735         /* With both HW and SRAM allocated and validated we can
2736          * 'scrub' the reservation on the pools.
2737          */
2738         tf_rm_reserve_hw(tfp);
2739         tf_rm_reserve_sram(tfp);
2740
2741         return rc;
2742 }
2743
2744 int
2745 tf_rm_close(struct tf *tfp)
2746 {
2747         int rc;
2748         int rc_close = 0;
2749         int i;
2750         struct tf_rm_entry *hw_entries;
2751         struct tf_rm_entry *hw_flush_entries;
2752         struct tf_rm_entry *sram_entries;
2753         struct tf_rm_entry *sram_flush_entries;
2754         struct tf_session *tfs __rte_unused =
2755                 (struct tf_session *)(tfp->session->core_data);
2756
2757         struct tf_rm_db flush_resc = tfs->resc;
2758
2759         /* On close it is assumed that the session has already cleaned
2760          * up all its resources, individually, while destroying its
2761          * flows. No checking is performed thus the behavior is as
2762          * follows.
2763          *
2764          * Session RM will signal FW to release session resources. FW
2765          * will perform invalidation of all the allocated entries
2766          * (assures any outstanding resources has been cleared, then
2767          * free the FW RM instance.
2768          *
2769          * Session will then be freed by tf_close_session() thus there
2770          * is no need to clean each resource pool as the whole session
2771          * is going away.
2772          */
2773
2774         for (i = 0; i < TF_DIR_MAX; i++) {
2775                 if (i == TF_DIR_RX) {
2776                         hw_entries = tfs->resc.rx.hw_entry;
2777                         hw_flush_entries = flush_resc.rx.hw_entry;
2778                         sram_entries = tfs->resc.rx.sram_entry;
2779                         sram_flush_entries = flush_resc.rx.sram_entry;
2780                 } else {
2781                         hw_entries = tfs->resc.tx.hw_entry;
2782                         hw_flush_entries = flush_resc.tx.hw_entry;
2783                         sram_entries = tfs->resc.tx.sram_entry;
2784                         sram_flush_entries = flush_resc.tx.sram_entry;
2785                 }
2786
2787                 /* Check for any not previously freed HW resources and
2788                  * flush if required.
2789                  */
2790                 rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);
2791                 if (rc) {
2792                         rc_close = -ENOTEMPTY;
2793                         /* Log error */
2794                         TFP_DRV_LOG(ERR,
2795                                     "%s, lingering HW resources, rc:%s\n",
2796                                     tf_dir_2_str(i),
2797                                     strerror(-rc));
2798
2799                         /* Log the entries to be flushed */
2800                         tf_rm_log_hw_flush(i, hw_flush_entries);
2801                         rc = tf_msg_session_hw_resc_flush(tfp,
2802                                                           i,
2803                                                           hw_flush_entries);
2804                         if (rc) {
2805                                 rc_close = rc;
2806                                 /* Log error */
2807                                 TFP_DRV_LOG(ERR,
2808                                             "%s, HW flush failed, rc:%s\n",
2809                                             tf_dir_2_str(i),
2810                                             strerror(-rc));
2811                         }
2812                 }
2813
2814                 /* Check for any not previously freed SRAM resources
2815                  * and flush if required.
2816                  */
2817                 rc = tf_rm_sram_to_flush(tfs,
2818                                          i,
2819                                          sram_entries,
2820                                          sram_flush_entries);
2821                 if (rc) {
2822                         rc_close = -ENOTEMPTY;
2823                         /* Log error */
2824                         TFP_DRV_LOG(ERR,
2825                                     "%s, lingering SRAM resources, rc:%s\n",
2826                                     tf_dir_2_str(i),
2827                                     strerror(-rc));
2828
2829                         /* Log the entries to be flushed */
2830                         tf_rm_log_sram_flush(i, sram_flush_entries);
2831
2832                         rc = tf_msg_session_sram_resc_flush(tfp,
2833                                                             i,
2834                                                             sram_flush_entries);
2835                         if (rc) {
2836                                 rc_close = rc;
2837                                 /* Log error */
2838                                 TFP_DRV_LOG(ERR,
2839                                             "%s, HW flush failed, rc:%s\n",
2840                                             tf_dir_2_str(i),
2841                                             strerror(-rc));
2842                         }
2843                 }
2844
2845                 rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
2846                 if (rc) {
2847                         rc_close = rc;
2848                         /* Log error */
2849                         TFP_DRV_LOG(ERR,
2850                                     "%s, HW free failed, rc:%s\n",
2851                                     tf_dir_2_str(i),
2852                                     strerror(-rc));
2853                 }
2854
2855                 rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
2856                 if (rc) {
2857                         rc_close = rc;
2858                         /* Log error */
2859                         TFP_DRV_LOG(ERR,
2860                                     "%s, SRAM free failed, rc:%s\n",
2861                                     tf_dir_2_str(i),
2862                                     strerror(-rc));
2863                 }
2864         }
2865
2866         return rc_close;
2867 }
2868
2869 #if (TF_SHADOW == 1)
/**
 * Initializes the shadow DB for the session (stub).
 *
 * [in] tfs
 *   Pointer to TF Session (currently unused by the stub)
 *
 * Returns:
 *   1 - presumably "not yet implemented"; confirm the intended
 *       return convention with callers when TF_SHADOW is enabled.
 */
int
tf_rm_shadow_db_init(struct tf_session *tfs)
{
	/* Fix: 'rc' was previously used without a declaration, which
	 * would fail to compile whenever TF_SHADOW == 1.
	 */
	int rc = 1;

	return rc;
}
2877 #endif /* TF_SHADOW */
2878
/**
 * Maps a TCAM table type to the session's per-direction bitalloc
 * pool for that type.
 *
 * [in] tfs
 *   Pointer to TF Session
 * [in] dir
 *   Receive or transmit direction
 * [in] type
 *   TCAM table type to look up
 * [out] pool
 *   On success points to the session pool; NULL on any failure
 *
 * Returns:
 *   0           - Success, *pool is valid
 *   -EOPNOTSUPP - Type has no pool / is not supported
 *   -1          - Pool lookup failed inside TF_RM_GET_POOLS
 */
int
tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
			    enum tf_dir dir,
			    enum tf_tcam_tbl_type type,
			    struct bitalloc **pool)
{
	/* Stays -EOPNOTSUPP unless a case below runs TF_RM_GET_POOLS,
	 * which overwrites it with the lookup result (0 or -1).
	 */
	int rc = -EOPNOTSUPP;

	*pool = NULL;

	switch (type) {
	case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_L2_CTXT_TCAM_POOL_NAME,
				rc);
		break;
	case TF_TCAM_TBL_TYPE_PROF_TCAM:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_PROF_TCAM_POOL_NAME,
				rc);
		break;
	case TF_TCAM_TBL_TYPE_WC_TCAM:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_WC_TCAM_POOL_NAME,
				rc);
		break;
	/* No pools exist for the remaining TCAM types */
	case TF_TCAM_TBL_TYPE_VEB_TCAM:
	case TF_TCAM_TBL_TYPE_SP_TCAM:
	case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
	default:
		break;
	}

	if (rc == -EOPNOTSUPP) {
		TFP_DRV_LOG(ERR,
			    "%s, Tcam type not supported, type:%d\n",
			    tf_dir_2_str(dir),
			    type);
		return rc;
	} else if (rc == -1) {
		TFP_DRV_LOG(ERR,
			    "%s, Tcam type lookup failed, type:%d\n",
			    tf_dir_2_str(dir),
			    type);
		return rc;
	}

	return 0;
}
2928
/**
 * Maps an index-table type to the session's per-direction bitalloc
 * pool for that type. Direction-specific types (MCG is RX only;
 * Encap 64B and SP SMAC IPv4/IPv6 are TX only) return -EOPNOTSUPP
 * when requested for the unsupported direction.
 *
 * [in] tfs
 *   Pointer to TF Session
 * [in] dir
 *   Receive or transmit direction
 * [in] type
 *   Table type to look up
 * [out] pool
 *   On success points to the session pool; NULL on any failure
 *
 * Returns:
 *   0           - Success, *pool is valid
 *   -EOPNOTSUPP - Type has no pool for this direction / unsupported
 *   -1          - Pool lookup failed inside TF_RM_GET_POOLS
 */
int
tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
			   enum tf_dir dir,
			   enum tf_tbl_type type,
			   struct bitalloc **pool)
{
	/* Stays -EOPNOTSUPP unless a case below sets it (via the
	 * TF_RM_GET_POOLS macro or an explicit rc = 0).
	 */
	int rc = -EOPNOTSUPP;

	*pool = NULL;

	switch (type) {
	case TF_TBL_TYPE_FULL_ACT_RECORD:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_FULL_ACTION_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_MCAST_GROUPS:
		/* No pools for TX direction, so bail out */
		if (dir == TF_DIR_TX)
			break;
		TF_RM_GET_POOLS_RX(tfs, pool,
				   TF_SRAM_MCG_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_ENCAP_8B:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_ENCAP_8B_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_ENCAP_16B:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_ENCAP_16B_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_ENCAP_64B:
		/* No pools for RX direction, so bail out */
		if (dir == TF_DIR_RX)
			break;
		TF_RM_GET_POOLS_TX(tfs, pool,
				   TF_SRAM_ENCAP_64B_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_SP_SMAC_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
		/* No pools for RX direction, so bail out */
		if (dir == TF_DIR_RX)
			break;
		TF_RM_GET_POOLS_TX(tfs, pool,
				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
		/* No pools for RX direction, so bail out */
		if (dir == TF_DIR_RX)
			break;
		TF_RM_GET_POOLS_TX(tfs, pool,
				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
		rc = 0;
		break;
	case TF_TBL_TYPE_ACT_STATS_64:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_STATS_64B_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_NAT_SPORT_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_NAT_S_IPV4_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_SRAM_NAT_D_IPV4_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_METER_PROF:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_METER_PROF_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_METER_INST:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_METER_INST_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_MIRROR_CONFIG:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_MIRROR_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_UPAR:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_UPAR_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_EPOCH0:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_EPOCH0_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_EPOCH1:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_EPOCH1_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_METADATA:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_METADATA_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_CT_STATE:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_CT_STATE_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_RANGE_PROF:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_RANGE_PROF_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_RANGE_ENTRY:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_RANGE_ENTRY_POOL_NAME,
				rc);
		break;
	case TF_TBL_TYPE_LAG:
		TF_RM_GET_POOLS(tfs, dir, pool,
				TF_LAG_ENTRY_POOL_NAME,
				rc);
		break;
	/* Not yet supported */
	case TF_TBL_TYPE_ACT_ENCAP_32B:
	case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
	case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
	case TF_TBL_TYPE_VNIC_SVIF:
		break;
	/* No bitalloc pools for these types */
	case TF_TBL_TYPE_EXT:
	default:
		break;
	}

	if (rc == -EOPNOTSUPP) {
		TFP_DRV_LOG(ERR,
			    "%s, Table type not supported, type:%d\n",
			    tf_dir_2_str(dir),
			    type);
		return rc;
	} else if (rc == -1) {
		TFP_DRV_LOG(ERR,
			    "%s, Table type lookup failed, type:%d\n",
			    tf_dir_2_str(dir),
			    type);
		return rc;
	}

	return 0;
}
3095
3096 int
3097 tf_rm_convert_tbl_type(enum tf_tbl_type type,
3098                        uint32_t *hcapi_type)
3099 {
3100         int rc = 0;
3101
3102         switch (type) {
3103         case TF_TBL_TYPE_FULL_ACT_RECORD:
3104                 *hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
3105                 break;
3106         case TF_TBL_TYPE_MCAST_GROUPS:
3107                 *hcapi_type = TF_RESC_TYPE_SRAM_MCG;
3108                 break;
3109         case TF_TBL_TYPE_ACT_ENCAP_8B:
3110                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
3111                 break;
3112         case TF_TBL_TYPE_ACT_ENCAP_16B:
3113                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
3114                 break;
3115         case TF_TBL_TYPE_ACT_ENCAP_64B:
3116                 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
3117                 break;
3118         case TF_TBL_TYPE_ACT_SP_SMAC:
3119                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
3120                 break;
3121         case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3122                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
3123                 break;
3124         case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3125                 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
3126                 break;
3127         case TF_TBL_TYPE_ACT_STATS_64:
3128                 *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
3129                 break;
3130         case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3131                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
3132                 break;
3133         case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3134                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
3135                 break;
3136         case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3137                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
3138                 break;
3139         case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3140                 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
3141                 break;
3142         case TF_TBL_TYPE_METER_PROF:
3143                 *hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
3144                 break;
3145         case TF_TBL_TYPE_METER_INST:
3146                 *hcapi_type = TF_RESC_TYPE_HW_METER_INST;
3147                 break;
3148         case TF_TBL_TYPE_MIRROR_CONFIG:
3149                 *hcapi_type = TF_RESC_TYPE_HW_MIRROR;
3150                 break;
3151         case TF_TBL_TYPE_UPAR:
3152                 *hcapi_type = TF_RESC_TYPE_HW_UPAR;
3153                 break;
3154         case TF_TBL_TYPE_EPOCH0:
3155                 *hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
3156                 break;
3157         case TF_TBL_TYPE_EPOCH1:
3158                 *hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
3159                 break;
3160         case TF_TBL_TYPE_METADATA:
3161                 *hcapi_type = TF_RESC_TYPE_HW_METADATA;
3162                 break;
3163         case TF_TBL_TYPE_CT_STATE:
3164                 *hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
3165                 break;
3166         case TF_TBL_TYPE_RANGE_PROF:
3167                 *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
3168                 break;
3169         case TF_TBL_TYPE_RANGE_ENTRY:
3170                 *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
3171                 break;
3172         case TF_TBL_TYPE_LAG:
3173                 *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
3174                 break;
3175         /* Not yet supported */
3176         case TF_TBL_TYPE_ACT_ENCAP_32B:
3177         case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3178         case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3179         case TF_TBL_TYPE_VNIC_SVIF:
3180         case TF_TBL_TYPE_EXT:   /* No pools for this type */
3181         default:
3182                 *hcapi_type = -1;
3183                 rc = -EOPNOTSUPP;
3184         }
3185
3186         return rc;
3187 }
3188
3189 int
3190 tf_rm_convert_index(struct tf_session *tfs,
3191                     enum tf_dir dir,
3192                     enum tf_tbl_type type,
3193                     enum tf_rm_convert_type c_type,
3194                     uint32_t index,
3195                     uint32_t *convert_index)
3196 {
3197         int rc;
3198         struct tf_rm_resc *resc;
3199         uint32_t hcapi_type;
3200         uint32_t base_index;
3201
3202         if (dir == TF_DIR_RX)
3203                 resc = &tfs->resc.rx;
3204         else if (dir == TF_DIR_TX)
3205                 resc = &tfs->resc.tx;
3206         else
3207                 return -EOPNOTSUPP;
3208
3209         rc = tf_rm_convert_tbl_type(type, &hcapi_type);
3210         if (rc)
3211                 return -1;
3212
3213         switch (type) {
3214         case TF_TBL_TYPE_FULL_ACT_RECORD:
3215         case TF_TBL_TYPE_MCAST_GROUPS:
3216         case TF_TBL_TYPE_ACT_ENCAP_8B:
3217         case TF_TBL_TYPE_ACT_ENCAP_16B:
3218         case TF_TBL_TYPE_ACT_ENCAP_32B:
3219         case TF_TBL_TYPE_ACT_ENCAP_64B:
3220         case TF_TBL_TYPE_ACT_SP_SMAC:
3221         case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3222         case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3223         case TF_TBL_TYPE_ACT_STATS_64:
3224         case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3225         case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3226         case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3227         case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3228                 base_index = resc->sram_entry[hcapi_type].start;
3229                 break;
3230         case TF_TBL_TYPE_MIRROR_CONFIG:
3231         case TF_TBL_TYPE_METER_PROF:
3232         case TF_TBL_TYPE_METER_INST:
3233         case TF_TBL_TYPE_UPAR:
3234         case TF_TBL_TYPE_EPOCH0:
3235         case TF_TBL_TYPE_EPOCH1:
3236         case TF_TBL_TYPE_METADATA:
3237         case TF_TBL_TYPE_CT_STATE:
3238         case TF_TBL_TYPE_RANGE_PROF:
3239         case TF_TBL_TYPE_RANGE_ENTRY:
3240         case TF_TBL_TYPE_LAG:
3241                 base_index = resc->hw_entry[hcapi_type].start;
3242                 break;
3243         /* Not yet supported */
3244         case TF_TBL_TYPE_VNIC_SVIF:
3245         case TF_TBL_TYPE_EXT:   /* No pools for this type */
3246         default:
3247                 return -EOPNOTSUPP;
3248         }
3249
3250         switch (c_type) {
3251         case TF_RM_CONVERT_RM_BASE:
3252                 *convert_index = index - base_index;
3253                 break;
3254         case TF_RM_CONVERT_ADD_BASE:
3255                 *convert_index = index + base_index;
3256                 break;
3257         default:
3258                 return -EOPNOTSUPP;
3259         }
3260
3261         return 0;
3262 }