1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
13 #include "tf_session.h"
14 #include "tf_resources.h"
19 * Internal macro to perform HW resource allocation check between what
20 * firmware reports vs what was statically requested.
23 * struct tf_rm_hw_query *hquery - Pointer to the hw query result
24 * enum tf_dir dir - Direction to process
25 * enum tf_resource_type_hw hcapi_type - HCAPI type, the index element
26 * in the hw query structure
27 * define def_value - Define value to check against
28 * uint32_t *eflag - Result of the check
 *
 * Sets bit 'hcapi_type' of *eflag when the firmware-reported max for
 * that HW type differs from the direction-specific reserved define
 * (def_value ## _RX for RX, def_value ## _TX otherwise).
30 #define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do { \
31 if ((dir) == TF_DIR_RX) { \
32 if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
33 *(eflag) |= 1 << (hcapi_type); \
35 if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
36 *(eflag) |= 1 << (hcapi_type); \
41 * Internal macro to perform SRAM resource allocation check between what
42 * firmware reports vs what was statically requested.
45 * struct tf_rm_sram_query *squery - Pointer to the sram query result
46 * enum tf_dir dir - Direction to process
47 * enum tf_resource_type_sram hcapi_type - HCAPI type, the index element
48 * in the sram query structure
49 * define def_value - Define value to check against
50 * uint32_t *eflag - Result of the check
 *
 * Sets bit 'hcapi_type' of *eflag when the firmware-reported max for
 * that SRAM type differs from the direction-specific reserved define
 * (def_value ## _RX for RX, def_value ## _TX otherwise).
52 #define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
53 if ((dir) == TF_DIR_RX) { \
54 if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
55 *(eflag) |= 1 << (hcapi_type); \
57 if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
58 *(eflag) |= 1 << (hcapi_type); \
63 * Internal macro to convert a reserved resource define name to be
 * direction specific: assigns type ## _RX or type ## _TX to 'dtype'
 * based on 'dir'.
67 * enum tf_dir dir - Direction to process
68 * string type - Type name to append RX or TX to
69 * string dtype - Direction specific type
73 #define TF_RESC_RSVD(dir, type, dtype) do { \
74 if ((dir) == TF_DIR_RX) \
75 (dtype) = type ## _RX; \
77 (dtype) = type ## _TX; \
/*
 * Converts a HW resource type (enum tf_resource_type_hw) to a
 * human-readable name for qcaps error reporting; the catch-all path
 * returns "Invalid identifier".
 */
81 *tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)
84 case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
85 return "L2 ctxt tcam";
86 case TF_RESC_TYPE_HW_PROF_FUNC:
87 return "Profile Func";
88 case TF_RESC_TYPE_HW_PROF_TCAM:
89 return "Profile tcam";
90 case TF_RESC_TYPE_HW_EM_PROF_ID:
91 return "EM profile id";
92 case TF_RESC_TYPE_HW_EM_REC:
94 case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
95 return "WC tcam profile id";
96 case TF_RESC_TYPE_HW_WC_TCAM:
98 case TF_RESC_TYPE_HW_METER_PROF:
99 return "Meter profile";
100 case TF_RESC_TYPE_HW_METER_INST:
101 return "Meter instance";
102 case TF_RESC_TYPE_HW_MIRROR:
104 case TF_RESC_TYPE_HW_UPAR:
106 case TF_RESC_TYPE_HW_SP_TCAM:
107 return "Source properties tcam";
108 case TF_RESC_TYPE_HW_L2_FUNC:
109 return "L2 Function";
110 case TF_RESC_TYPE_HW_FKB:
112 case TF_RESC_TYPE_HW_TBL_SCOPE:
113 return "Table scope";
114 case TF_RESC_TYPE_HW_EPOCH0:
116 case TF_RESC_TYPE_HW_EPOCH1:
118 case TF_RESC_TYPE_HW_METADATA:
120 case TF_RESC_TYPE_HW_CT_STATE:
121 return "Connection tracking state";
122 case TF_RESC_TYPE_HW_RANGE_PROF:
123 return "Range profile";
124 case TF_RESC_TYPE_HW_RANGE_ENTRY:
125 return "Range entry";
126 case TF_RESC_TYPE_HW_LAG_ENTRY:
129 return "Invalid identifier";
/*
 * Converts a SRAM resource type (enum tf_resource_type_sram) to a
 * human-readable name for qcaps error reporting; the catch-all path
 * returns "Invalid identifier".
 */
134 *tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
137 case TF_RESC_TYPE_SRAM_FULL_ACTION:
138 return "Full action";
139 case TF_RESC_TYPE_SRAM_MCG:
141 case TF_RESC_TYPE_SRAM_ENCAP_8B:
143 case TF_RESC_TYPE_SRAM_ENCAP_16B:
145 case TF_RESC_TYPE_SRAM_ENCAP_64B:
147 case TF_RESC_TYPE_SRAM_SP_SMAC:
148 return "Source properties SMAC";
149 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
150 return "Source properties SMAC IPv4";
151 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
152 return "Source properties IPv6";
153 case TF_RESC_TYPE_SRAM_COUNTER_64B:
154 return "Counter 64B";
155 case TF_RESC_TYPE_SRAM_NAT_SPORT:
156 return "NAT source port";
157 case TF_RESC_TYPE_SRAM_NAT_DPORT:
158 return "NAT destination port";
159 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
160 return "NAT source IPv4";
161 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
162 return "NAT destination IPv4";
164 return "Invalid identifier";
169 * Helper function to perform a HW HCAPI resource type lookup against
170 * the reserved value of the same static type.
 *
 * [in] dir - Receive or transmit direction; selects the _RX or _TX
 * variant of the reserved define via TF_RESC_RSVD.
 * [in] index - HW resource type to look up.
173 * -EOPNOTSUPP - Reserved resource type not supported
174 * Value - Integer value of the reserved value for the requested type
177 tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)
 /* NOTE(review): negated errno stored in a uint32_t; callers print it
  * with %d, which recovers the signed value.
  */
179 uint32_t value = -EOPNOTSUPP;
182 case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
183 TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);
185 case TF_RESC_TYPE_HW_PROF_FUNC:
186 TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);
188 case TF_RESC_TYPE_HW_PROF_TCAM:
189 TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);
191 case TF_RESC_TYPE_HW_EM_PROF_ID:
192 TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value);
194 case TF_RESC_TYPE_HW_EM_REC:
195 TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);
197 case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
198 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);
200 case TF_RESC_TYPE_HW_WC_TCAM:
201 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);
203 case TF_RESC_TYPE_HW_METER_PROF:
204 TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);
206 case TF_RESC_TYPE_HW_METER_INST:
207 TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);
209 case TF_RESC_TYPE_HW_MIRROR:
210 TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);
212 case TF_RESC_TYPE_HW_UPAR:
213 TF_RESC_RSVD(dir, TF_RSVD_UPAR, value);
215 case TF_RESC_TYPE_HW_SP_TCAM:
216 TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);
218 case TF_RESC_TYPE_HW_L2_FUNC:
219 TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);
221 case TF_RESC_TYPE_HW_FKB:
222 TF_RESC_RSVD(dir, TF_RSVD_FKB, value);
224 case TF_RESC_TYPE_HW_TBL_SCOPE:
225 TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);
227 case TF_RESC_TYPE_HW_EPOCH0:
228 TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);
230 case TF_RESC_TYPE_HW_EPOCH1:
231 TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);
233 case TF_RESC_TYPE_HW_METADATA:
234 TF_RESC_RSVD(dir, TF_RSVD_METADATA, value);
236 case TF_RESC_TYPE_HW_CT_STATE:
237 TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);
239 case TF_RESC_TYPE_HW_RANGE_PROF:
240 TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);
242 case TF_RESC_TYPE_HW_RANGE_ENTRY:
243 TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);
245 case TF_RESC_TYPE_HW_LAG_ENTRY:
246 TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);
256 * Helper function to perform a SRAM HCAPI resource type lookup
257 * against the reserved value of the same static type.
 *
 * [in] dir - Receive or transmit direction; selects the _RX or _TX
 * variant of the reserved define via TF_RESC_RSVD.
 * [in] index - SRAM resource type to look up.
260 * -EOPNOTSUPP - Reserved resource type not supported
261 * Value - Integer value of the reserved value for the requested type
264 tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
 /* NOTE(review): negated errno stored in a uint32_t; callers print it
  * with %d, which recovers the signed value.
  */
266 uint32_t value = -EOPNOTSUPP;
269 case TF_RESC_TYPE_SRAM_FULL_ACTION:
270 TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
272 case TF_RESC_TYPE_SRAM_MCG:
273 TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
275 case TF_RESC_TYPE_SRAM_ENCAP_8B:
276 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
278 case TF_RESC_TYPE_SRAM_ENCAP_16B:
279 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
281 case TF_RESC_TYPE_SRAM_ENCAP_64B:
282 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
284 case TF_RESC_TYPE_SRAM_SP_SMAC:
285 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
287 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
288 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
290 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
291 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
293 case TF_RESC_TYPE_SRAM_COUNTER_64B:
294 TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
296 case TF_RESC_TYPE_SRAM_NAT_SPORT:
297 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
299 case TF_RESC_TYPE_SRAM_NAT_DPORT:
300 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
302 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
303 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
305 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
306 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
316 * Helper function to print all the HW resource qcaps errors reported
 * in the error_flag.
320 * Receive or transmit direction
 * [in] hw_query - Firmware-reported HW capabilities (per-type max).
323 * Pointer to the hw error flags created at time of the query check
326 tf_rm_print_hw_qcaps_error(enum tf_dir dir,
327 struct tf_rm_hw_query *hw_query,
328 uint32_t *error_flag)
332 PMD_DRV_LOG(ERR, "QCAPS errors HW\n");
333 PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
334 PMD_DRV_LOG(ERR, " Elements:\n");
336 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
 /* '&' binds looser than '<<', so this tests bit i of *error_flag. */
337 if (*error_flag & 1 << i)
338 PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
339 tf_hcapi_hw_2_str(i),
340 hw_query->hw_query[i].max,
341 tf_rm_rsvd_hw_value(dir, i));
346 * Helper function to print all the SRAM resource qcaps errors
347 * reported in the error_flag.
350 * Receive or transmit direction
 * [in] sram_query - Firmware-reported SRAM capabilities (per-type max).
353 * Pointer to the sram error flags created at time of the query check
356 tf_rm_print_sram_qcaps_error(enum tf_dir dir,
357 struct tf_rm_sram_query *sram_query,
358 uint32_t *error_flag)
362 PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n");
363 PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
364 PMD_DRV_LOG(ERR, " Elements:\n");
366 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
 /* '&' binds looser than '<<', so this tests bit i of *error_flag. */
367 if (*error_flag & 1 << i)
368 PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
369 tf_hcapi_sram_2_str(i),
370 sram_query->sram_query[i].max,
371 tf_rm_rsvd_sram_value(dir, i));
376 * Performs a HW resource check between what firmware capability
377 * reports and what the core expects is available.
379 * Firmware performs the resource carving at AFM init time and the
380 * resource capability is reported in the TruFlow qcaps msg.
383 * Pointer to HW Query data structure. Query holds what the firmware
384 * offers of the HW resources.
387 * Receive or transmit direction
389 * [in/out] error_flag
390 * Pointer to a bit array indicating the error of a single HCAPI
391 * resource type. When a bit is set to 1, the HCAPI resource type
392 * failed static allocation.
396 * -ENOMEM - Failure on one of the allocated resources. Check the
397 * error_flag for what types are flagged errored.
400 tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
402 uint32_t *error_flag)
 /* One TF_RM_CHECK_HW_ALLOC per HW type; each mismatch sets the
  * type's bit in *error_flag.
  */
406 TF_RM_CHECK_HW_ALLOC(query,
408 TF_RESC_TYPE_HW_L2_CTXT_TCAM,
409 TF_RSVD_L2_CTXT_TCAM,
412 TF_RM_CHECK_HW_ALLOC(query,
414 TF_RESC_TYPE_HW_PROF_FUNC,
418 TF_RM_CHECK_HW_ALLOC(query,
420 TF_RESC_TYPE_HW_PROF_TCAM,
424 TF_RM_CHECK_HW_ALLOC(query,
426 TF_RESC_TYPE_HW_EM_PROF_ID,
430 TF_RM_CHECK_HW_ALLOC(query,
432 TF_RESC_TYPE_HW_EM_REC,
436 TF_RM_CHECK_HW_ALLOC(query,
438 TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,
439 TF_RSVD_WC_TCAM_PROF_ID,
442 TF_RM_CHECK_HW_ALLOC(query,
444 TF_RESC_TYPE_HW_WC_TCAM,
448 TF_RM_CHECK_HW_ALLOC(query,
450 TF_RESC_TYPE_HW_METER_PROF,
454 TF_RM_CHECK_HW_ALLOC(query,
456 TF_RESC_TYPE_HW_METER_INST,
460 TF_RM_CHECK_HW_ALLOC(query,
462 TF_RESC_TYPE_HW_MIRROR,
466 TF_RM_CHECK_HW_ALLOC(query,
468 TF_RESC_TYPE_HW_UPAR,
472 TF_RM_CHECK_HW_ALLOC(query,
474 TF_RESC_TYPE_HW_SP_TCAM,
478 TF_RM_CHECK_HW_ALLOC(query,
480 TF_RESC_TYPE_HW_L2_FUNC,
484 TF_RM_CHECK_HW_ALLOC(query,
490 TF_RM_CHECK_HW_ALLOC(query,
492 TF_RESC_TYPE_HW_TBL_SCOPE,
496 TF_RM_CHECK_HW_ALLOC(query,
498 TF_RESC_TYPE_HW_EPOCH0,
502 TF_RM_CHECK_HW_ALLOC(query,
504 TF_RESC_TYPE_HW_EPOCH1,
508 TF_RM_CHECK_HW_ALLOC(query,
510 TF_RESC_TYPE_HW_METADATA,
514 TF_RM_CHECK_HW_ALLOC(query,
516 TF_RESC_TYPE_HW_CT_STATE,
520 TF_RM_CHECK_HW_ALLOC(query,
522 TF_RESC_TYPE_HW_RANGE_PROF,
526 TF_RM_CHECK_HW_ALLOC(query,
528 TF_RESC_TYPE_HW_RANGE_ENTRY,
532 TF_RM_CHECK_HW_ALLOC(query,
534 TF_RESC_TYPE_HW_LAG_ENTRY,
 /* Any bit set means a static reservation exceeds the firmware offer. */
538 if (*error_flag != 0)
545 * Performs a SRAM resource check between what firmware capability
546 * reports and what the core expects is available.
548 * Firmware performs the resource carving at AFM init time and the
549 * resource capability is reported in the TruFlow qcaps msg.
552 * Pointer to SRAM Query data structure. Query holds what the
553 * firmware offers of the SRAM resources.
556 * Receive or transmit direction
558 * [in/out] error_flag
559 * Pointer to a bit array indicating the error of a single HCAPI
560 * resource type. When a bit is set to 1, the HCAPI resource type
561 * failed static allocation.
565 * -ENOMEM - Failure on one of the allocated resources. Check the
566 * error_flag for what types are flagged errored.
569 tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
571 uint32_t *error_flag)
 /* One TF_RM_CHECK_SRAM_ALLOC per SRAM type; each mismatch sets the
  * type's bit in *error_flag.
  */
575 TF_RM_CHECK_SRAM_ALLOC(query,
577 TF_RESC_TYPE_SRAM_FULL_ACTION,
578 TF_RSVD_SRAM_FULL_ACTION,
581 TF_RM_CHECK_SRAM_ALLOC(query,
583 TF_RESC_TYPE_SRAM_MCG,
587 TF_RM_CHECK_SRAM_ALLOC(query,
589 TF_RESC_TYPE_SRAM_ENCAP_8B,
590 TF_RSVD_SRAM_ENCAP_8B,
593 TF_RM_CHECK_SRAM_ALLOC(query,
595 TF_RESC_TYPE_SRAM_ENCAP_16B,
596 TF_RSVD_SRAM_ENCAP_16B,
599 TF_RM_CHECK_SRAM_ALLOC(query,
601 TF_RESC_TYPE_SRAM_ENCAP_64B,
602 TF_RSVD_SRAM_ENCAP_64B,
605 TF_RM_CHECK_SRAM_ALLOC(query,
607 TF_RESC_TYPE_SRAM_SP_SMAC,
608 TF_RSVD_SRAM_SP_SMAC,
611 TF_RM_CHECK_SRAM_ALLOC(query,
613 TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
614 TF_RSVD_SRAM_SP_SMAC_IPV4,
617 TF_RM_CHECK_SRAM_ALLOC(query,
619 TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
620 TF_RSVD_SRAM_SP_SMAC_IPV6,
623 TF_RM_CHECK_SRAM_ALLOC(query,
625 TF_RESC_TYPE_SRAM_COUNTER_64B,
626 TF_RSVD_SRAM_COUNTER_64B,
629 TF_RM_CHECK_SRAM_ALLOC(query,
631 TF_RESC_TYPE_SRAM_NAT_SPORT,
632 TF_RSVD_SRAM_NAT_SPORT,
635 TF_RM_CHECK_SRAM_ALLOC(query,
637 TF_RESC_TYPE_SRAM_NAT_DPORT,
638 TF_RSVD_SRAM_NAT_DPORT,
641 TF_RM_CHECK_SRAM_ALLOC(query,
643 TF_RESC_TYPE_SRAM_NAT_S_IPV4,
644 TF_RSVD_SRAM_NAT_S_IPV4,
647 TF_RM_CHECK_SRAM_ALLOC(query,
649 TF_RESC_TYPE_SRAM_NAT_D_IPV4,
650 TF_RSVD_SRAM_NAT_D_IPV4,
 /* Any bit set means a static reservation exceeds the firmware offer. */
653 if (*error_flag != 0)
660 * Internal function to mark pool entries used.
 *
 * Marks as allocated (via ba_alloc_index) every pool index that lies
 * OUTSIDE the reserved span [rsv_begin, rsv_end], leaving only the
 * reserved span free for this session. When 'count' is 0 no resources
 * were requested, so the entire pool is marked used.
663 tf_rm_reserve_range(uint32_t count,
667 struct bitalloc *pool)
671 /* If no resources has been requested we mark everything
675 for (i = 0; i < max; i++)
676 ba_alloc_index(pool, i);
678 /* Support 2 main modes
679 * Reserved range starts from bottom up (with
680 * pre-reserved value or not)
681 * - begin = 0 to end xx
682 * - begin = 1 to end xx
684 * Reserved range starts from top down
685 * - begin = yy to end max
688 /* Bottom up check, start from 0 */
689 if (rsv_begin == 0) {
690 for (i = rsv_end + 1; i < max; i++)
691 ba_alloc_index(pool, i);
694 /* Bottom up check, start from 1 or higher OR
697 if (rsv_begin >= 1) {
698 /* Allocate from 0 until start */
699 for (i = 0; i < rsv_begin; i++)
700 ba_alloc_index(pool, i);
702 /* Skip and then do the remaining */
 /* NOTE(review): this loop starts at rsv_end, while the rsv_begin == 0
  * branch above starts at rsv_end + 1 — confirm whether index rsv_end
  * is intended to be marked used in this mode.
  */
703 if (rsv_end < max - 1) {
704 for (i = rsv_end; i < max; i++)
705 ba_alloc_index(pool, i);
712 * Internal function to mark all the l2 ctxt allocated that Truflow
 * does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
716 tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
718 uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
721 /* l2 ctxt rx direction */
 /* 'end' = inclusive last granted index; only updated when entries
  * were granted (stride > 0). Declaration is outside this view.
  */
722 if (tfs->resc.rx.hw_entry[index].stride > 0)
723 end = tfs->resc.rx.hw_entry[index].start +
724 tfs->resc.rx.hw_entry[index].stride - 1;
 /* Fence off everything outside the granted range in the RX pool. */
726 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
727 tfs->resc.rx.hw_entry[index].start,
730 tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
732 /* l2 ctxt tx direction */
733 if (tfs->resc.tx.hw_entry[index].stride > 0)
734 end = tfs->resc.tx.hw_entry[index].start +
735 tfs->resc.tx.hw_entry[index].stride - 1;
737 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
738 tfs->resc.tx.hw_entry[index].start,
741 tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
745 * Internal function to mark all the profile tcam and profile func
746 * resources that Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
749 tf_rm_rsvd_prof(struct tf_session *tfs)
751 uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;
754 /* profile func rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
755 if (tfs->resc.rx.hw_entry[index].stride > 0)
756 end = tfs->resc.rx.hw_entry[index].start +
757 tfs->resc.rx.hw_entry[index].stride - 1;
759 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
760 tfs->resc.rx.hw_entry[index].start,
763 tfs->TF_PROF_FUNC_POOL_NAME_RX);
765 /* profile func tx direction */
766 if (tfs->resc.tx.hw_entry[index].stride > 0)
767 end = tfs->resc.tx.hw_entry[index].start +
768 tfs->resc.tx.hw_entry[index].stride - 1;
770 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
771 tfs->resc.tx.hw_entry[index].start,
774 tfs->TF_PROF_FUNC_POOL_NAME_TX);
 /* Second resource handled by this function: the profile tcam. */
776 index = TF_RESC_TYPE_HW_PROF_TCAM;
778 /* profile tcam rx direction */
779 if (tfs->resc.rx.hw_entry[index].stride > 0)
780 end = tfs->resc.rx.hw_entry[index].start +
781 tfs->resc.rx.hw_entry[index].stride - 1;
783 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
784 tfs->resc.rx.hw_entry[index].start,
787 tfs->TF_PROF_TCAM_POOL_NAME_RX);
789 /* profile tcam tx direction */
790 if (tfs->resc.tx.hw_entry[index].stride > 0)
791 end = tfs->resc.tx.hw_entry[index].start +
792 tfs->resc.tx.hw_entry[index].stride - 1;
794 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
795 tfs->resc.tx.hw_entry[index].start,
798 tfs->TF_PROF_TCAM_POOL_NAME_TX);
802 * Internal function to mark all the em profile id allocated that
803 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
806 tf_rm_rsvd_em_prof(struct tf_session *tfs)
808 uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;
811 /* em prof id rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
812 if (tfs->resc.rx.hw_entry[index].stride > 0)
813 end = tfs->resc.rx.hw_entry[index].start +
814 tfs->resc.rx.hw_entry[index].stride - 1;
816 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
817 tfs->resc.rx.hw_entry[index].start,
820 tfs->TF_EM_PROF_ID_POOL_NAME_RX);
822 /* em prof id tx direction */
823 if (tfs->resc.tx.hw_entry[index].stride > 0)
824 end = tfs->resc.tx.hw_entry[index].start +
825 tfs->resc.tx.hw_entry[index].stride - 1;
827 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
828 tfs->resc.tx.hw_entry[index].start,
831 tfs->TF_EM_PROF_ID_POOL_NAME_TX);
835 * Internal function to mark all the wildcard tcam and profile id
836 * resources that Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
839 tf_rm_rsvd_wc(struct tf_session *tfs)
841 uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;
844 /* wc profile id rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
845 if (tfs->resc.rx.hw_entry[index].stride > 0)
846 end = tfs->resc.rx.hw_entry[index].start +
847 tfs->resc.rx.hw_entry[index].stride - 1;
849 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
850 tfs->resc.rx.hw_entry[index].start,
853 tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);
855 /* wc profile id tx direction */
856 if (tfs->resc.tx.hw_entry[index].stride > 0)
857 end = tfs->resc.tx.hw_entry[index].start +
858 tfs->resc.tx.hw_entry[index].stride - 1;
860 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
861 tfs->resc.tx.hw_entry[index].start,
864 tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);
 /* Second resource handled by this function: the wc tcam itself. */
866 index = TF_RESC_TYPE_HW_WC_TCAM;
868 /* wc tcam rx direction */
869 if (tfs->resc.rx.hw_entry[index].stride > 0)
870 end = tfs->resc.rx.hw_entry[index].start +
871 tfs->resc.rx.hw_entry[index].stride - 1;
873 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
874 tfs->resc.rx.hw_entry[index].start,
877 tfs->TF_WC_TCAM_POOL_NAME_RX);
879 /* wc tcam tx direction */
880 if (tfs->resc.tx.hw_entry[index].stride > 0)
881 end = tfs->resc.tx.hw_entry[index].start +
882 tfs->resc.tx.hw_entry[index].stride - 1;
884 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
885 tfs->resc.tx.hw_entry[index].start,
888 tfs->TF_WC_TCAM_POOL_NAME_TX);
892 * Internal function to mark all the meter resources allocated that
893 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
896 tf_rm_rsvd_meter(struct tf_session *tfs)
898 uint32_t index = TF_RESC_TYPE_HW_METER_PROF;
901 /* meter profiles rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
902 if (tfs->resc.rx.hw_entry[index].stride > 0)
903 end = tfs->resc.rx.hw_entry[index].start +
904 tfs->resc.rx.hw_entry[index].stride - 1;
906 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
907 tfs->resc.rx.hw_entry[index].start,
910 tfs->TF_METER_PROF_POOL_NAME_RX);
912 /* meter profiles tx direction */
913 if (tfs->resc.tx.hw_entry[index].stride > 0)
914 end = tfs->resc.tx.hw_entry[index].start +
915 tfs->resc.tx.hw_entry[index].stride - 1;
917 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
918 tfs->resc.tx.hw_entry[index].start,
921 tfs->TF_METER_PROF_POOL_NAME_TX);
 /* Second resource handled by this function: meter instances. */
923 index = TF_RESC_TYPE_HW_METER_INST;
925 /* meter rx direction */
926 if (tfs->resc.rx.hw_entry[index].stride > 0)
927 end = tfs->resc.rx.hw_entry[index].start +
928 tfs->resc.rx.hw_entry[index].stride - 1;
930 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
931 tfs->resc.rx.hw_entry[index].start,
934 tfs->TF_METER_INST_POOL_NAME_RX);
936 /* meter tx direction */
937 if (tfs->resc.tx.hw_entry[index].stride > 0)
938 end = tfs->resc.tx.hw_entry[index].start +
939 tfs->resc.tx.hw_entry[index].stride - 1;
941 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
942 tfs->resc.tx.hw_entry[index].start,
945 tfs->TF_METER_INST_POOL_NAME_TX);
949 * Internal function to mark all the mirror resources allocated that
950 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
953 tf_rm_rsvd_mirror(struct tf_session *tfs)
955 uint32_t index = TF_RESC_TYPE_HW_MIRROR;
958 /* mirror rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
959 if (tfs->resc.rx.hw_entry[index].stride > 0)
960 end = tfs->resc.rx.hw_entry[index].start +
961 tfs->resc.rx.hw_entry[index].stride - 1;
963 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
964 tfs->resc.rx.hw_entry[index].start,
967 tfs->TF_MIRROR_POOL_NAME_RX);
969 /* mirror tx direction */
970 if (tfs->resc.tx.hw_entry[index].stride > 0)
971 end = tfs->resc.tx.hw_entry[index].start +
972 tfs->resc.tx.hw_entry[index].stride - 1;
974 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
975 tfs->resc.tx.hw_entry[index].start,
978 tfs->TF_MIRROR_POOL_NAME_TX);
982 * Internal function to mark all the upar resources allocated that
983 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
986 tf_rm_rsvd_upar(struct tf_session *tfs)
988 uint32_t index = TF_RESC_TYPE_HW_UPAR;
991 /* upar rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
992 if (tfs->resc.rx.hw_entry[index].stride > 0)
993 end = tfs->resc.rx.hw_entry[index].start +
994 tfs->resc.rx.hw_entry[index].stride - 1;
996 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
997 tfs->resc.rx.hw_entry[index].start,
1000 tfs->TF_UPAR_POOL_NAME_RX);
1002 /* upar tx direction */
1003 if (tfs->resc.tx.hw_entry[index].stride > 0)
1004 end = tfs->resc.tx.hw_entry[index].start +
1005 tfs->resc.tx.hw_entry[index].stride - 1;
1007 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1008 tfs->resc.tx.hw_entry[index].start,
1011 tfs->TF_UPAR_POOL_NAME_TX);
1015 * Internal function to mark all the sp tcam resources allocated that
1016 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1019 tf_rm_rsvd_sp_tcam(struct tf_session *tfs)
1021 uint32_t index = TF_RESC_TYPE_HW_SP_TCAM;
1024 /* sp tcam rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1025 if (tfs->resc.rx.hw_entry[index].stride > 0)
1026 end = tfs->resc.rx.hw_entry[index].start +
1027 tfs->resc.rx.hw_entry[index].stride - 1;
1029 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1030 tfs->resc.rx.hw_entry[index].start,
1033 tfs->TF_SP_TCAM_POOL_NAME_RX);
1035 /* sp tcam tx direction */
1036 if (tfs->resc.tx.hw_entry[index].stride > 0)
1037 end = tfs->resc.tx.hw_entry[index].start +
1038 tfs->resc.tx.hw_entry[index].stride - 1;
1040 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1041 tfs->resc.tx.hw_entry[index].start,
1044 tfs->TF_SP_TCAM_POOL_NAME_TX);
1048 * Internal function to mark all the l2 func resources allocated that
1049 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1052 tf_rm_rsvd_l2_func(struct tf_session *tfs)
1054 uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
1057 /* l2 func rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1058 if (tfs->resc.rx.hw_entry[index].stride > 0)
1059 end = tfs->resc.rx.hw_entry[index].start +
1060 tfs->resc.rx.hw_entry[index].stride - 1;
1062 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1063 tfs->resc.rx.hw_entry[index].start,
1066 tfs->TF_L2_FUNC_POOL_NAME_RX);
1068 /* l2 func tx direction */
1069 if (tfs->resc.tx.hw_entry[index].stride > 0)
1070 end = tfs->resc.tx.hw_entry[index].start +
1071 tfs->resc.tx.hw_entry[index].stride - 1;
1073 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1074 tfs->resc.tx.hw_entry[index].start,
1077 tfs->TF_L2_FUNC_POOL_NAME_TX);
1081 * Internal function to mark all the fkb resources allocated that
1082 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1085 tf_rm_rsvd_fkb(struct tf_session *tfs)
1087 uint32_t index = TF_RESC_TYPE_HW_FKB;
1090 /* fkb rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1091 if (tfs->resc.rx.hw_entry[index].stride > 0)
1092 end = tfs->resc.rx.hw_entry[index].start +
1093 tfs->resc.rx.hw_entry[index].stride - 1;
1095 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1096 tfs->resc.rx.hw_entry[index].start,
1099 tfs->TF_FKB_POOL_NAME_RX);
1101 /* fkb tx direction */
1102 if (tfs->resc.tx.hw_entry[index].stride > 0)
1103 end = tfs->resc.tx.hw_entry[index].start +
1104 tfs->resc.tx.hw_entry[index].stride - 1;
1106 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1107 tfs->resc.tx.hw_entry[index].start,
1110 tfs->TF_FKB_POOL_NAME_TX);
1114 * Internal function to mark all the tbld scope resources allocated
1115 * that Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1118 tf_rm_rsvd_tbl_scope(struct tf_session *tfs)
1120 uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;
1123 /* tbl scope rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1124 if (tfs->resc.rx.hw_entry[index].stride > 0)
1125 end = tfs->resc.rx.hw_entry[index].start +
1126 tfs->resc.rx.hw_entry[index].stride - 1;
1128 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1129 tfs->resc.rx.hw_entry[index].start,
1132 tfs->TF_TBL_SCOPE_POOL_NAME_RX);
1134 /* tbl scope tx direction */
1135 if (tfs->resc.tx.hw_entry[index].stride > 0)
1136 end = tfs->resc.tx.hw_entry[index].start +
1137 tfs->resc.tx.hw_entry[index].stride - 1;
1139 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1140 tfs->resc.tx.hw_entry[index].start,
1143 tfs->TF_TBL_SCOPE_POOL_NAME_TX);
1147 * Internal function to mark all the l2 epoch resources allocated that
1148 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1151 tf_rm_rsvd_epoch(struct tf_session *tfs)
1153 uint32_t index = TF_RESC_TYPE_HW_EPOCH0;
1156 /* epoch0 rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1157 if (tfs->resc.rx.hw_entry[index].stride > 0)
1158 end = tfs->resc.rx.hw_entry[index].start +
1159 tfs->resc.rx.hw_entry[index].stride - 1;
1161 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1162 tfs->resc.rx.hw_entry[index].start,
1165 tfs->TF_EPOCH0_POOL_NAME_RX);
1167 /* epoch0 tx direction */
1168 if (tfs->resc.tx.hw_entry[index].stride > 0)
1169 end = tfs->resc.tx.hw_entry[index].start +
1170 tfs->resc.tx.hw_entry[index].stride - 1;
1172 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1173 tfs->resc.tx.hw_entry[index].start,
1176 tfs->TF_EPOCH0_POOL_NAME_TX);
 /* Second resource handled by this function: epoch1. */
1178 index = TF_RESC_TYPE_HW_EPOCH1;
1180 /* epoch1 rx direction */
1181 if (tfs->resc.rx.hw_entry[index].stride > 0)
1182 end = tfs->resc.rx.hw_entry[index].start +
1183 tfs->resc.rx.hw_entry[index].stride - 1;
1185 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1186 tfs->resc.rx.hw_entry[index].start,
1189 tfs->TF_EPOCH1_POOL_NAME_RX);
1191 /* epoch1 tx direction */
1192 if (tfs->resc.tx.hw_entry[index].stride > 0)
1193 end = tfs->resc.tx.hw_entry[index].start +
1194 tfs->resc.tx.hw_entry[index].stride - 1;
1196 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1197 tfs->resc.tx.hw_entry[index].start,
1200 tfs->TF_EPOCH1_POOL_NAME_TX);
1204 * Internal function to mark all the metadata resources allocated that
1205 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1208 tf_rm_rsvd_metadata(struct tf_session *tfs)
1210 uint32_t index = TF_RESC_TYPE_HW_METADATA;
1213 /* metadata rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1214 if (tfs->resc.rx.hw_entry[index].stride > 0)
1215 end = tfs->resc.rx.hw_entry[index].start +
1216 tfs->resc.rx.hw_entry[index].stride - 1;
1218 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1219 tfs->resc.rx.hw_entry[index].start,
1222 tfs->TF_METADATA_POOL_NAME_RX);
1224 /* metadata tx direction */
1225 if (tfs->resc.tx.hw_entry[index].stride > 0)
1226 end = tfs->resc.tx.hw_entry[index].start +
1227 tfs->resc.tx.hw_entry[index].stride - 1;
1229 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1230 tfs->resc.tx.hw_entry[index].start,
1233 tfs->TF_METADATA_POOL_NAME_TX);
1237 * Internal function to mark all the ct state resources allocated that
1238 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1241 tf_rm_rsvd_ct_state(struct tf_session *tfs)
1243 uint32_t index = TF_RESC_TYPE_HW_CT_STATE;
1246 /* ct state rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1247 if (tfs->resc.rx.hw_entry[index].stride > 0)
1248 end = tfs->resc.rx.hw_entry[index].start +
1249 tfs->resc.rx.hw_entry[index].stride - 1;
1251 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1252 tfs->resc.rx.hw_entry[index].start,
1255 tfs->TF_CT_STATE_POOL_NAME_RX);
1257 /* ct state tx direction */
1258 if (tfs->resc.tx.hw_entry[index].stride > 0)
1259 end = tfs->resc.tx.hw_entry[index].start +
1260 tfs->resc.tx.hw_entry[index].stride - 1;
1262 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1263 tfs->resc.tx.hw_entry[index].start,
1266 tfs->TF_CT_STATE_POOL_NAME_TX);
1270 * Internal function to mark all the range resources allocated that
1271 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1274 tf_rm_rsvd_range(struct tf_session *tfs)
1276 uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;
1279 /* range profile rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1280 if (tfs->resc.rx.hw_entry[index].stride > 0)
1281 end = tfs->resc.rx.hw_entry[index].start +
1282 tfs->resc.rx.hw_entry[index].stride - 1;
1284 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1285 tfs->resc.rx.hw_entry[index].start,
1288 tfs->TF_RANGE_PROF_POOL_NAME_RX);
1290 /* range profile tx direction */
1291 if (tfs->resc.tx.hw_entry[index].stride > 0)
1292 end = tfs->resc.tx.hw_entry[index].start +
1293 tfs->resc.tx.hw_entry[index].stride - 1;
1295 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1296 tfs->resc.tx.hw_entry[index].start,
1299 tfs->TF_RANGE_PROF_POOL_NAME_TX);
 /* Second resource handled by this function: range entries. */
1301 index = TF_RESC_TYPE_HW_RANGE_ENTRY;
1303 /* range entry rx direction */
1304 if (tfs->resc.rx.hw_entry[index].stride > 0)
1305 end = tfs->resc.rx.hw_entry[index].start +
1306 tfs->resc.rx.hw_entry[index].stride - 1;
1308 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1309 tfs->resc.rx.hw_entry[index].start,
1312 tfs->TF_RANGE_ENTRY_POOL_NAME_RX);
1314 /* range entry tx direction */
1315 if (tfs->resc.tx.hw_entry[index].stride > 0)
1316 end = tfs->resc.tx.hw_entry[index].start +
1317 tfs->resc.tx.hw_entry[index].stride - 1;
1319 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1320 tfs->resc.tx.hw_entry[index].start,
1323 tfs->TF_RANGE_ENTRY_POOL_NAME_TX);
1327 * Internal function to mark all the lag resources allocated that
1328 * Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.hw_entry[]).
1331 tf_rm_rsvd_lag_entry(struct tf_session *tfs)
1333 uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;
1336 /* lag entry rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1337 if (tfs->resc.rx.hw_entry[index].stride > 0)
1338 end = tfs->resc.rx.hw_entry[index].start +
1339 tfs->resc.rx.hw_entry[index].stride - 1;
1341 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1342 tfs->resc.rx.hw_entry[index].start,
1345 tfs->TF_LAG_ENTRY_POOL_NAME_RX);
1347 /* lag entry tx direction */
1348 if (tfs->resc.tx.hw_entry[index].stride > 0)
1349 end = tfs->resc.tx.hw_entry[index].start +
1350 tfs->resc.tx.hw_entry[index].stride - 1;
1352 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1353 tfs->resc.tx.hw_entry[index].start,
1356 tfs->TF_LAG_ENTRY_POOL_NAME_TX);
1360 * Internal function to mark all the full action resources allocated
1361 * that Truflow does not own.
 *
 * [in] tfs - Pointer to TF session holding the firmware-granted
 * start/stride per direction (resc.rx/tx.sram_entry[]).
1364 tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
1366 uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
1369 /* full action rx direction */
 /* 'end' = inclusive last granted index; only set when stride > 0. */
1370 if (tfs->resc.rx.sram_entry[index].stride > 0)
1371 end = tfs->resc.rx.sram_entry[index].start +
1372 tfs->resc.rx.sram_entry[index].stride - 1;
 /* Unlike the HW pools above, the SRAM reservation passes the static
  * *_BEGIN_IDX_* define as the range start rather than the
  * firmware-reported start.
  */
1374 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1375 TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
1377 TF_RSVD_SRAM_FULL_ACTION_RX,
1378 tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);
1380 /* full action tx direction */
1381 if (tfs->resc.tx.sram_entry[index].stride > 0)
1382 end = tfs->resc.tx.sram_entry[index].start +
1383 tfs->resc.tx.sram_entry[index].stride - 1;
1385 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1386 TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
1388 TF_RSVD_SRAM_FULL_ACTION_TX,
1389 tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
1393 * Internal function to mark all the multicast group resources
1394 * allocated that Truflow does not own.
1397 tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
1399 uint32_t index = TF_RESC_TYPE_SRAM_MCG;
/* RX only: reserve the AFM-owned part of the MCG SRAM pool. */
1402 /* multicast group rx direction */
1403 if (tfs->resc.rx.sram_entry[index].stride > 0)
1404 end = tfs->resc.rx.sram_entry[index].start +
1405 tfs->resc.rx.sram_entry[index].stride - 1;
1407 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1408 TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
1410 TF_RSVD_SRAM_MCG_RX,
1411 tfs->TF_SRAM_MCG_POOL_NAME_RX);
/* No TX half by design (see comment below). */
1413 /* Multicast Group on TX is not supported */
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
1417 * Internal function to mark all the encap resources allocated that
1418 * Truflow does not own.
1421 tf_rm_rsvd_sram_encap(struct tf_session *tfs)
1423 uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
/* Encap 8B: both directions. */
1426 /* encap 8b rx direction */
1427 if (tfs->resc.rx.sram_entry[index].stride > 0)
1428 end = tfs->resc.rx.sram_entry[index].start +
1429 tfs->resc.rx.sram_entry[index].stride - 1;
1431 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1432 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
1434 TF_RSVD_SRAM_ENCAP_8B_RX,
1435 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);
1437 /* encap 8b tx direction */
1438 if (tfs->resc.tx.sram_entry[index].stride > 0)
1439 end = tfs->resc.tx.sram_entry[index].start +
1440 tfs->resc.tx.sram_entry[index].stride - 1;
1442 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1443 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
1445 TF_RSVD_SRAM_ENCAP_8B_TX,
1446 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);
/* Encap 16B: both directions. */
1448 index = TF_RESC_TYPE_SRAM_ENCAP_16B;
1450 /* encap 16b rx direction */
1451 if (tfs->resc.rx.sram_entry[index].stride > 0)
1452 end = tfs->resc.rx.sram_entry[index].start +
1453 tfs->resc.rx.sram_entry[index].stride - 1;
1455 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1456 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
1458 TF_RSVD_SRAM_ENCAP_16B_RX,
1459 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);
1461 /* encap 16b tx direction */
1462 if (tfs->resc.tx.sram_entry[index].stride > 0)
1463 end = tfs->resc.tx.sram_entry[index].start +
1464 tfs->resc.tx.sram_entry[index].stride - 1;
1466 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1467 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
1469 TF_RSVD_SRAM_ENCAP_16B_TX,
1470 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);
/* Encap 64B: TX only (see comment below). */
1472 index = TF_RESC_TYPE_SRAM_ENCAP_64B;
1474 /* Encap 64B not supported on RX */
1476 /* Encap 64b tx direction */
1477 if (tfs->resc.tx.sram_entry[index].stride > 0)
1478 end = tfs->resc.tx.sram_entry[index].start +
1479 tfs->resc.tx.sram_entry[index].stride - 1;
1481 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1482 TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
1484 TF_RSVD_SRAM_ENCAP_64B_TX,
1485 tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
1489 * Internal function to mark all the sp resources allocated that
1490 * Truflow does not own.
1493 tf_rm_rsvd_sram_sp(struct tf_session *tfs)
1495 uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
/* SP SMAC: both directions. */
1498 /* sp smac rx direction */
1499 if (tfs->resc.rx.sram_entry[index].stride > 0)
1500 end = tfs->resc.rx.sram_entry[index].start +
1501 tfs->resc.rx.sram_entry[index].stride - 1;
1503 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1504 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
1506 TF_RSVD_SRAM_SP_SMAC_RX,
1507 tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);
1509 /* sp smac tx direction */
1510 if (tfs->resc.tx.sram_entry[index].stride > 0)
1511 end = tfs->resc.tx.sram_entry[index].start +
1512 tfs->resc.tx.sram_entry[index].stride - 1;
1514 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1515 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
1517 TF_RSVD_SRAM_SP_SMAC_TX,
1518 tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);
/* SP SMAC IPv4: TX only. */
1520 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
1522 /* SP SMAC IPv4 not supported on RX */
1524 /* sp smac ipv4 tx direction */
1525 if (tfs->resc.tx.sram_entry[index].stride > 0)
1526 end = tfs->resc.tx.sram_entry[index].start +
1527 tfs->resc.tx.sram_entry[index].stride - 1;
1529 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1530 TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
1532 TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
1533 tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);
/* SP SMAC IPv6: TX only. */
1535 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
1537 /* SP SMAC IPv6 not supported on RX */
1539 /* sp smac ipv6 tx direction */
1540 if (tfs->resc.tx.sram_entry[index].stride > 0)
1541 end = tfs->resc.tx.sram_entry[index].start +
1542 tfs->resc.tx.sram_entry[index].stride - 1;
1544 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1545 TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
1547 TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
1548 tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
1552 * Internal function to mark all the stat resources allocated that
1553 * Truflow does not own.
1556 tf_rm_rsvd_sram_stats(struct tf_session *tfs)
1558 uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
/* 64B counters: both directions. */
1561 /* counter 64b rx direction */
1562 if (tfs->resc.rx.sram_entry[index].stride > 0)
1563 end = tfs->resc.rx.sram_entry[index].start +
1564 tfs->resc.rx.sram_entry[index].stride - 1;
1566 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1567 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
1569 TF_RSVD_SRAM_COUNTER_64B_RX,
1570 tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);
1572 /* counter 64b tx direction */
1573 if (tfs->resc.tx.sram_entry[index].stride > 0)
1574 end = tfs->resc.tx.sram_entry[index].start +
1575 tfs->resc.tx.sram_entry[index].stride - 1;
1577 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1578 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
1580 TF_RSVD_SRAM_COUNTER_64B_TX,
1581 tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
/* NOTE(review): excerpt has elided lines; code kept byte-identical.
 * Covers the four NAT SRAM types (SPORT, DPORT, S_IPV4, D_IPV4), each
 * reserved in both RX and TX directions.
 */
1585 * Internal function to mark all the nat resources allocated that
1586 * Truflow does not own.
1589 tf_rm_rsvd_sram_nat(struct tf_session *tfs)
1591 uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
1594 /* nat source port rx direction */
1595 if (tfs->resc.rx.sram_entry[index].stride > 0)
1596 end = tfs->resc.rx.sram_entry[index].start +
1597 tfs->resc.rx.sram_entry[index].stride - 1;
1599 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1600 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
1602 TF_RSVD_SRAM_NAT_SPORT_RX,
1603 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);
1605 /* nat source port tx direction */
1606 if (tfs->resc.tx.sram_entry[index].stride > 0)
1607 end = tfs->resc.tx.sram_entry[index].start +
1608 tfs->resc.tx.sram_entry[index].stride - 1;
1610 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1611 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
1613 TF_RSVD_SRAM_NAT_SPORT_TX,
1614 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);
1616 index = TF_RESC_TYPE_SRAM_NAT_DPORT;
1618 /* nat destination port rx direction */
1619 if (tfs->resc.rx.sram_entry[index].stride > 0)
1620 end = tfs->resc.rx.sram_entry[index].start +
1621 tfs->resc.rx.sram_entry[index].stride - 1;
1623 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1624 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
1626 TF_RSVD_SRAM_NAT_DPORT_RX,
1627 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);
1629 /* nat destination port tx direction */
1630 if (tfs->resc.tx.sram_entry[index].stride > 0)
1631 end = tfs->resc.tx.sram_entry[index].start +
1632 tfs->resc.tx.sram_entry[index].stride - 1;
1634 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1635 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
1637 TF_RSVD_SRAM_NAT_DPORT_TX,
1638 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);
1640 index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
1642 /* nat source port ipv4 rx direction */
1643 if (tfs->resc.rx.sram_entry[index].stride > 0)
1644 end = tfs->resc.rx.sram_entry[index].start +
1645 tfs->resc.rx.sram_entry[index].stride - 1;
1647 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1648 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
1650 TF_RSVD_SRAM_NAT_S_IPV4_RX,
1651 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);
1653 /* nat source ipv4 port tx direction */
1654 if (tfs->resc.tx.sram_entry[index].stride > 0)
1655 end = tfs->resc.tx.sram_entry[index].start +
1656 tfs->resc.tx.sram_entry[index].stride - 1;
1658 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1659 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
1661 TF_RSVD_SRAM_NAT_S_IPV4_TX,
1662 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);
1664 index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
1666 /* nat destination port ipv4 rx direction */
1667 if (tfs->resc.rx.sram_entry[index].stride > 0)
1668 end = tfs->resc.rx.sram_entry[index].start +
1669 tfs->resc.rx.sram_entry[index].stride - 1;
1671 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1672 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
1674 TF_RSVD_SRAM_NAT_D_IPV4_RX,
1675 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);
1677 /* nat destination ipv4 port tx direction */
1678 if (tfs->resc.tx.sram_entry[index].stride > 0)
1679 end = tfs->resc.tx.sram_entry[index].start +
1680 tfs->resc.tx.sram_entry[index].stride - 1;
1682 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1683 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
1685 TF_RSVD_SRAM_NAT_D_IPV4_TX,
1686 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
/* NOTE(review): excerpt has elided lines (locals, log arguments, return
 * path not visible); code kept byte-identical.
 */
1690 * Internal function used to validate the HW allocated resources
1691 * against the requested values.
1694 tf_rm_hw_alloc_validate(enum tf_dir dir,
1695 struct tf_rm_hw_alloc *hw_alloc,
1696 struct tf_rm_entry *hw_entry)
/* Compare every HW type's allocated stride with the requested count;
 * any mismatch is logged as an allocation failure.
 */
1701 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
1702 if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
1704 "%s, Alloc failed id:%d expect:%d got:%d\n",
1707 hw_alloc->hw_num[i],
1708 hw_entry[i].stride);
/* NOTE(review): excerpt has elided lines; code kept byte-identical.
 * The "dir __rte_unused" annotation looks suspect: the log format below
 * starts with "%s," and its first argument line is elided here, which
 * suggests dir may actually be used — TODO confirm against full source.
 */
1717 * Internal function used to validate the SRAM allocated resources
1718 * against the requested values.
1721 tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused,
1722 struct tf_rm_sram_alloc *sram_alloc,
1723 struct tf_rm_entry *sram_entry)
/* Compare every SRAM type's allocated stride with the requested count;
 * any mismatch is logged as an allocation failure.
 */
1728 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
1729 if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
1731 "%s, Alloc failed idx:%d expect:%d got:%d\n",
1734 sram_alloc->sram_num[i],
1735 sram_entry[i].stride);
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
1744 * Internal function used to mark all the HW resources allocated that
1745 * Truflow does not own.
1748 tf_rm_reserve_hw(struct tf *tfp)
1750 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1753 * There is no direct AFM resource allocation as it is carved
1754 * statically at AFM boot time. Thus the bit allocators work
1755 * on the full HW resource amount and we just mark everything
1756 * used except the resources that Truflow took ownership off.
/* Dispatch to the per-type reservation helpers, one per HW type. */
1758 tf_rm_rsvd_l2_ctxt(tfs);
1759 tf_rm_rsvd_prof(tfs);
1760 tf_rm_rsvd_em_prof(tfs);
1762 tf_rm_rsvd_mirror(tfs);
1763 tf_rm_rsvd_meter(tfs);
1764 tf_rm_rsvd_upar(tfs);
1765 tf_rm_rsvd_sp_tcam(tfs);
1766 tf_rm_rsvd_l2_func(tfs);
1767 tf_rm_rsvd_fkb(tfs);
1768 tf_rm_rsvd_tbl_scope(tfs);
1769 tf_rm_rsvd_epoch(tfs);
1770 tf_rm_rsvd_metadata(tfs);
1771 tf_rm_rsvd_ct_state(tfs);
1772 tf_rm_rsvd_range(tfs);
1773 tf_rm_rsvd_lag_entry(tfs);
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
1777 * Internal function used to mark all the SRAM resources allocated
1778 * that Truflow does not own.
1781 tf_rm_reserve_sram(struct tf *tfp)
1783 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1786 * There is no direct AFM resource allocation as it is carved
1787 * statically at AFM boot time. Thus the bit allocators work
1788 * on the full HW resource amount and we just mark everything
1789 * used except the resources that Truflow took ownership off.
/* Dispatch to the per-type reservation helpers, one per SRAM type. */
1791 tf_rm_rsvd_sram_full_action(tfs);
1792 tf_rm_rsvd_sram_mcg(tfs);
1793 tf_rm_rsvd_sram_encap(tfs);
1794 tf_rm_rsvd_sram_sp(tfs);
1795 tf_rm_rsvd_sram_stats(tfs);
1796 tf_rm_rsvd_sram_nat(tfs);
/* NOTE(review): excerpt has elided lines (error-handling branches, rc
 * declarations, some call arguments); code kept byte-identical.
 *
 * Flow: qcaps query -> static-definition check -> copy max counts into
 * the alloc request -> firmware alloc -> post-alloc validation.
 */
1800 * Internal function used to allocate and validate all HW resources.
1803 tf_rm_allocate_validate_hw(struct tf *tfp,
1808 struct tf_rm_hw_query hw_query;
1809 struct tf_rm_hw_alloc hw_alloc;
1810 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1811 struct tf_rm_entry *hw_entries;
1812 uint32_t error_flag;
/* Select the per-direction slice of the session HW resource DB. */
1814 if (dir == TF_DIR_RX)
1815 hw_entries = tfs->resc.rx.hw_entry;
1817 hw_entries = tfs->resc.tx.hw_entry;
1819 /* Query for Session HW Resources */
1820 rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
1824 "%s, HW qcaps message send failed\n",
1829 rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
1833 "%s, HW QCAPS validation failed, error_flag:0x%x\n",
1836 tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
1840 /* Post process HW capability */
1841 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
1842 hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
1844 /* Allocate Session HW Resources */
1845 rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
1849 "%s, HW alloc message send failed\n",
1854 /* Perform HW allocation validation as its possible the
1855 * resource availability changed between qcaps and alloc
1857 rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
1861 "%s, HW Resource validation failed\n",
/* NOTE(review): excerpt has elided lines (error-handling branches, rc
 * declarations, some call arguments); code kept byte-identical.
 * Mirrors tf_rm_allocate_validate_hw but for SRAM resources.
 */
1873 * Internal function used to allocate and validate all SRAM resources.
1876 * Pointer to TF handle
1879 * Receive or transmit direction
1883 * -1 - Internal error
1886 tf_rm_allocate_validate_sram(struct tf *tfp,
1891 struct tf_rm_sram_query sram_query;
1892 struct tf_rm_sram_alloc sram_alloc;
1893 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1894 struct tf_rm_entry *sram_entries;
1895 uint32_t error_flag;
/* Select the per-direction slice of the session SRAM resource DB. */
1897 if (dir == TF_DIR_RX)
1898 sram_entries = tfs->resc.rx.sram_entry;
1900 sram_entries = tfs->resc.tx.sram_entry;
1902 /* Query for Session SRAM Resources */
1903 rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
1907 "%s, SRAM qcaps message send failed\n",
1912 rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
1916 "%s, SRAM QCAPS validation failed, error_flag:%x\n",
1919 tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
1923 /* Post process SRAM capability */
1924 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
1925 sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
1927 /* Allocate Session SRAM Resources */
1928 rc = tf_msg_session_sram_resc_alloc(tfp,
1935 "%s, SRAM alloc message send failed\n",
1940 /* Perform SRAM allocation validation as its possible the
1941 * resource availability changed between qcaps and alloc
1943 rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
1947 "%s, SRAM Resource allocation validation failed\n",
/* NOTE(review): excerpt has elided lines (pool-name suffix arguments,
 * error branches, rc/return handling); code kept byte-identical.
 *
 * Pattern per HW type: look up the direction-specific pool, count free
 * elements, and if the pool is fully free (free count equals the
 * allocated stride) prune that type from flush_entries by zeroing its
 * start/stride.
 */
1959 * Helper function used to prune a HW resource array to only hold
1960 * elements that needs to be flushed.
1966 * Receive or transmit direction
1969 * Master HW Resource database
1971 * [in/out] flush_entries
1972 * Pruned HW Resource database of entries to be flushed. This
1973 * array should be passed in as a complete copy of the master HW
1974 * Resource database. The outgoing result will be a pruned version
1975 * based on the result of the requested checking
1978 * 0 - Success, no flush required
1979 * 1 - Success, flush required
1980 * -1 - Internal error
1983 tf_rm_hw_to_flush(struct tf_session *tfs,
1985 struct tf_rm_entry *hw_entries,
1986 struct tf_rm_entry *flush_entries)
1991 struct bitalloc *pool;
1993 /* Check all the hw resource pools and check for left over
1994 * elements. Any found will result in the complete pool of a
1995 * type to get invalidated.
1998 TF_RM_GET_POOLS(tfs, dir, &pool,
1999 TF_L2_CTXT_TCAM_POOL_NAME,
2003 free_cnt = ba_free_count(pool);
2004 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {
2005 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;
2006 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;
2011 TF_RM_GET_POOLS(tfs, dir, &pool,
2012 TF_PROF_FUNC_POOL_NAME,
2016 free_cnt = ba_free_count(pool);
2017 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {
2018 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;
2019 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;
2024 TF_RM_GET_POOLS(tfs, dir, &pool,
2025 TF_PROF_TCAM_POOL_NAME,
2029 free_cnt = ba_free_count(pool);
2030 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {
2031 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;
2032 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;
2037 TF_RM_GET_POOLS(tfs, dir, &pool,
2038 TF_EM_PROF_ID_POOL_NAME,
2042 free_cnt = ba_free_count(pool);
2043 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) {
2044 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;
2045 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;
/* EM records have no pool; always pruned from the flush set. */
2050 flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;
2051 flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;
2053 TF_RM_GET_POOLS(tfs, dir, &pool,
2054 TF_WC_TCAM_PROF_ID_POOL_NAME,
2058 free_cnt = ba_free_count(pool);
2059 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {
2060 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;
2061 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;
2066 TF_RM_GET_POOLS(tfs, dir, &pool,
2067 TF_WC_TCAM_POOL_NAME,
2071 free_cnt = ba_free_count(pool);
2072 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {
2073 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;
2074 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;
2079 TF_RM_GET_POOLS(tfs, dir, &pool,
2080 TF_METER_PROF_POOL_NAME,
2084 free_cnt = ba_free_count(pool);
2085 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {
2086 flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;
2087 flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;
2092 TF_RM_GET_POOLS(tfs, dir, &pool,
2093 TF_METER_INST_POOL_NAME,
2097 free_cnt = ba_free_count(pool);
2098 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {
2099 flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;
2100 flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;
2105 TF_RM_GET_POOLS(tfs, dir, &pool,
2106 TF_MIRROR_POOL_NAME,
2110 free_cnt = ba_free_count(pool);
2111 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) {
2112 flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;
2113 flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;
2118 TF_RM_GET_POOLS(tfs, dir, &pool,
2123 free_cnt = ba_free_count(pool);
2124 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {
2125 flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;
2126 flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;
2131 TF_RM_GET_POOLS(tfs, dir, &pool,
2132 TF_SP_TCAM_POOL_NAME,
2136 free_cnt = ba_free_count(pool);
2137 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {
2138 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;
2139 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;
2144 TF_RM_GET_POOLS(tfs, dir, &pool,
2145 TF_L2_FUNC_POOL_NAME,
2149 free_cnt = ba_free_count(pool);
2150 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {
2151 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;
2152 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;
2157 TF_RM_GET_POOLS(tfs, dir, &pool,
2162 free_cnt = ba_free_count(pool);
2163 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {
2164 flush_entries[TF_RESC_TYPE_HW_FKB].start = 0;
2165 flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;
2170 TF_RM_GET_POOLS(tfs, dir, &pool,
2171 TF_TBL_SCOPE_POOL_NAME,
2175 free_cnt = ba_free_count(pool);
2176 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {
2177 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
2178 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
/* TBL_SCOPE is the only type that logs when elements remain in use. */
2180 PMD_DRV_LOG(ERR, "%s: TBL_SCOPE free_cnt:%d, entries:%d\n",
2183 hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
2187 TF_RM_GET_POOLS(tfs, dir, &pool,
2188 TF_EPOCH0_POOL_NAME,
2192 free_cnt = ba_free_count(pool);
2193 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {
2194 flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;
2195 flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;
2200 TF_RM_GET_POOLS(tfs, dir, &pool,
2201 TF_EPOCH1_POOL_NAME,
2205 free_cnt = ba_free_count(pool);
2206 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {
2207 flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;
2208 flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;
2213 TF_RM_GET_POOLS(tfs, dir, &pool,
2214 TF_METADATA_POOL_NAME,
2218 free_cnt = ba_free_count(pool);
2219 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {
2220 flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;
2221 flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;
2226 TF_RM_GET_POOLS(tfs, dir, &pool,
2227 TF_CT_STATE_POOL_NAME,
2231 free_cnt = ba_free_count(pool);
2232 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {
2233 flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;
2234 flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;
2239 TF_RM_GET_POOLS(tfs, dir, &pool,
2240 TF_RANGE_PROF_POOL_NAME,
2244 free_cnt = ba_free_count(pool);
2245 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {
2246 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;
2247 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;
2252 TF_RM_GET_POOLS(tfs, dir, &pool,
2253 TF_RANGE_ENTRY_POOL_NAME,
2257 free_cnt = ba_free_count(pool);
2258 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {
2259 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;
2260 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;
2265 TF_RM_GET_POOLS(tfs, dir, &pool,
2266 TF_LAG_ENTRY_POOL_NAME,
2270 free_cnt = ba_free_count(pool);
2271 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {
2272 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;
2273 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;
/* NOTE(review): excerpt has elided lines (pool-name suffix arguments,
 * error branches, rc/return handling); code kept byte-identical.
 *
 * Same pruning pattern as tf_rm_hw_to_flush but over SRAM pools.
 * Direction-restricted types (MCG RX-only; ENCAP_64B, SP_SMAC_IPV4,
 * SP_SMAC_IPV6 TX-only) are always pruned in the unsupported direction.
 */
2282 * Helper function used to prune a SRAM resource array to only hold
2283 * elements that needs to be flushed.
2289 * Receive or transmit direction
2292 * Master SRAM Resource data base
2294 * [in/out] flush_entries
2295 * Pruned SRAM Resource database of entries to be flushed. This
2296 * array should be passed in as a complete copy of the master SRAM
2297 * Resource database. The outgoing result will be a pruned version
2298 * based on the result of the requested checking
2301 * 0 - Success, no flush required
2302 * 1 - Success, flush required
2303 * -1 - Internal error
2306 tf_rm_sram_to_flush(struct tf_session *tfs,
2308 struct tf_rm_entry *sram_entries,
2309 struct tf_rm_entry *flush_entries)
2314 struct bitalloc *pool;
2316 /* Check all the sram resource pools and check for left over
2317 * elements. Any found will result in the complete pool of a
2318 * type to get invalidated.
2321 TF_RM_GET_POOLS(tfs, dir, &pool,
2322 TF_SRAM_FULL_ACTION_POOL_NAME,
2326 free_cnt = ba_free_count(pool);
2327 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
2328 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
2329 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
2334 /* Only pools for RX direction */
2335 if (dir == TF_DIR_RX) {
2336 TF_RM_GET_POOLS_RX(tfs, &pool,
2337 TF_SRAM_MCG_POOL_NAME);
2340 free_cnt = ba_free_count(pool);
2341 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
2342 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2343 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2348 /* Always prune TX direction */
2349 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2350 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2353 TF_RM_GET_POOLS(tfs, dir, &pool,
2354 TF_SRAM_ENCAP_8B_POOL_NAME,
2358 free_cnt = ba_free_count(pool);
2359 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
2360 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
2361 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
2366 TF_RM_GET_POOLS(tfs, dir, &pool,
2367 TF_SRAM_ENCAP_16B_POOL_NAME,
2371 free_cnt = ba_free_count(pool);
2372 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
2373 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
2374 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
2379 /* Only pools for TX direction */
2380 if (dir == TF_DIR_TX) {
2381 TF_RM_GET_POOLS_TX(tfs, &pool,
2382 TF_SRAM_ENCAP_64B_POOL_NAME);
2385 free_cnt = ba_free_count(pool);
2387 sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
2388 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2389 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2394 /* Always prune RX direction */
2395 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2396 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2399 TF_RM_GET_POOLS(tfs, dir, &pool,
2400 TF_SRAM_SP_SMAC_POOL_NAME,
2404 free_cnt = ba_free_count(pool);
2405 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
2406 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
2407 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
2412 /* Only pools for TX direction */
2413 if (dir == TF_DIR_TX) {
2414 TF_RM_GET_POOLS_TX(tfs, &pool,
2415 TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
2418 free_cnt = ba_free_count(pool);
2420 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
2421 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2422 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
2428 /* Always prune RX direction */
2429 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2430 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
2433 /* Only pools for TX direction */
2434 if (dir == TF_DIR_TX) {
2435 TF_RM_GET_POOLS_TX(tfs, &pool,
2436 TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
2439 free_cnt = ba_free_count(pool);
2441 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
2442 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2443 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
2449 /* Always prune RX direction */
2450 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2451 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
2454 TF_RM_GET_POOLS(tfs, dir, &pool,
2455 TF_SRAM_STATS_64B_POOL_NAME,
2459 free_cnt = ba_free_count(pool);
2460 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
2461 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
2462 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
2467 TF_RM_GET_POOLS(tfs, dir, &pool,
2468 TF_SRAM_NAT_SPORT_POOL_NAME,
2472 free_cnt = ba_free_count(pool);
2473 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
2474 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
2475 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
2480 TF_RM_GET_POOLS(tfs, dir, &pool,
2481 TF_SRAM_NAT_DPORT_POOL_NAME,
2485 free_cnt = ba_free_count(pool);
2486 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
2487 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
2488 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
2493 TF_RM_GET_POOLS(tfs, dir, &pool,
2494 TF_SRAM_NAT_S_IPV4_POOL_NAME,
2498 free_cnt = ba_free_count(pool);
2499 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
2500 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
2501 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
2506 TF_RM_GET_POOLS(tfs, dir, &pool,
2507 TF_SRAM_NAT_D_IPV4_POOL_NAME,
2511 free_cnt = ba_free_count(pool);
2512 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
2513 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
2514 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
2523 * Helper function used to generate an error log for the HW types that
2524 * needs to be flushed. The types should have been cleaned up ahead of
2525 * invoking tf_close_session.
2528 * HW Resource database holding elements to be flushed
2531 tf_rm_log_hw_flush(enum tf_dir dir,
2532 struct tf_rm_entry *hw_entries)
2536 /* Walk the hw flush array and log the types that wasn't
/* A non-zero stride marks a type that still holds elements. */
2539 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
2540 if (hw_entries[i].stride != 0)
2542 "%s: %s was not cleaned up\n",
2544 tf_hcapi_hw_2_str(i));
/* NOTE(review): excerpt has elided lines; code kept byte-identical. */
2549 * Helper function used to generate an error log for the SRAM types
2550 * that needs to be flushed. The types should have been cleaned up
2551 * ahead of invoking tf_close_session.
2554 * SRAM Resource database holding elements to be flushed
2557 tf_rm_log_sram_flush(enum tf_dir dir,
2558 struct tf_rm_entry *sram_entries)
2562 /* Walk the sram flush array and log the types that wasn't
/* A non-zero stride marks a type that still holds elements. */
2565 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
2566 if (sram_entries[i].stride != 0)
2568 "%s: %s was not cleaned up\n",
2570 tf_hcapi_sram_2_str(i));
2575 tf_rm_init(struct tf *tfp __rte_unused)
2577 struct tf_session *tfs =
2578 (struct tf_session *)(tfp->session->core_data);
2580 /* This version is host specific and should be checked against
2581 * when attaching as there is no guarantee that a secondary
2582 * would run from same image version.
2584 tfs->ver.major = TF_SESSION_VER_MAJOR;
2585 tfs->ver.minor = TF_SESSION_VER_MINOR;
2586 tfs->ver.update = TF_SESSION_VER_UPDATE;
2588 tfs->session_id.id = 0;
2591 /* Initialization of Table Scopes */
2592 /* ll_init(&tfs->tbl_scope_ll); */
2594 /* Initialization of HW and SRAM resource DB */
2595 memset(&tfs->resc, 0, sizeof(struct tf_rm_db));
2597 /* Initialization of HW Resource Pools */
2598 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2599 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2600 ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
2601 ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
2602 ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
2603 ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
2604 ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
2605 ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
2607 /* TBD, how do we want to handle EM records ?*/
2608 /* EM Records should not be controlled by way of a pool */
2610 ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
2611 ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
2612 ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
2613 ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
2614 ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
2615 ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
2616 ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
2617 ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
2618 ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
2619 ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
2620 ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
2621 ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
2623 ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
2624 ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
2626 ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
2627 ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
2629 ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
2630 ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
2631 ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
2632 ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
2633 ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
2634 ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
2635 ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
2636 ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
2637 ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
2638 ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
2639 ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
2640 ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
2641 ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
2642 ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);
2643 ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);
2644 ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);
2645 ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);
2646 ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);
2648 /* Initialization of SRAM Resource Pools
2649 * These pools are set to the TFLIB defined MAX sizes not
2650 * AFM's HW max as to limit the memory consumption
2652 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
2653 TF_RSVD_SRAM_FULL_ACTION_RX);
2654 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
2655 TF_RSVD_SRAM_FULL_ACTION_TX);
2656 /* Only Multicast Group on RX is supported */
2657 ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
2658 TF_RSVD_SRAM_MCG_RX);
2659 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
2660 TF_RSVD_SRAM_ENCAP_8B_RX);
2661 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
2662 TF_RSVD_SRAM_ENCAP_8B_TX);
2663 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
2664 TF_RSVD_SRAM_ENCAP_16B_RX);
2665 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
2666 TF_RSVD_SRAM_ENCAP_16B_TX);
2667 /* Only Encap 64B on TX is supported */
2668 ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
2669 TF_RSVD_SRAM_ENCAP_64B_TX);
2670 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
2671 TF_RSVD_SRAM_SP_SMAC_RX);
2672 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
2673 TF_RSVD_SRAM_SP_SMAC_TX);
2674 /* Only SP SMAC IPv4 on TX is supported */
2675 ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
2676 TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
2677 /* Only SP SMAC IPv6 on TX is supported */
2678 ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
2679 TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
2680 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
2681 TF_RSVD_SRAM_COUNTER_64B_RX);
2682 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
2683 TF_RSVD_SRAM_COUNTER_64B_TX);
2684 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
2685 TF_RSVD_SRAM_NAT_SPORT_RX);
2686 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
2687 TF_RSVD_SRAM_NAT_SPORT_TX);
2688 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
2689 TF_RSVD_SRAM_NAT_DPORT_RX);
2690 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
2691 TF_RSVD_SRAM_NAT_DPORT_TX);
2692 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
2693 TF_RSVD_SRAM_NAT_S_IPV4_RX);
2694 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
2695 TF_RSVD_SRAM_NAT_S_IPV4_TX);
2696 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
2697 TF_RSVD_SRAM_NAT_D_IPV4_RX);
2698 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
2699 TF_RSVD_SRAM_NAT_D_IPV4_TX);
2701 /* Initialization of pools local to TF Core */
2702 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2703 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
/* tf_rm_allocate_validate() - Allocate and validate the session's HW
 * and SRAM resources against what firmware granted, for both
 * directions, then reserve the validated amounts out of the
 * per-session allocation pools.
 *
 * NOTE(review): the return type, rc error checks and closing brace are
 * not visible in this excerpt; only the visible flow is documented.
 */
2707 tf_rm_allocate_validate(struct tf *tfp)
/* Walk both directions: TF_DIR_RX then TF_DIR_TX. */
2712 for (i = 0; i < TF_DIR_MAX; i++) {
/* HW resource allocation + validation for direction i. */
2713 rc = tf_rm_allocate_validate_hw(tfp, i);
/* SRAM resource allocation + validation for direction i. */
2716 rc = tf_rm_allocate_validate_sram(tfp, i);
2721 /* With both HW and SRAM allocated and validated we can
2722 * 'scrub' the reservation on the pools.
2724 tf_rm_reserve_hw(tfp);
2725 tf_rm_reserve_sram(tfp);
/* tf_rm_close() - Release all session resource-manager state.
 *
 * For each direction this checks for HW/SRAM entries the session did
 * not free individually, logs and flushes any stragglers to firmware,
 * then frees the full HW and SRAM reservations. A lingering-resource
 * condition records -ENOTEMPTY as the eventual close status but does
 * not abort the cleanup sequence.
 *
 * NOTE(review): many interior lines (braces, log arguments, returns)
 * are elided in this excerpt; only the visible statements were
 * modified, and only the mislabeled SRAM-flush log string was changed.
 */
2731 tf_rm_close(struct tf *tfp)
2736 struct tf_rm_entry *hw_entries;
2737 struct tf_rm_entry *hw_flush_entries;
2738 struct tf_rm_entry *sram_entries;
2739 struct tf_rm_entry *sram_flush_entries;
/* NOTE(review): tfs is dereferenced below (tfs->resc), so the
 * __rte_unused marker appears unnecessary — harmless, but confirm
 * before removing.
 */
2740 struct tf_session *tfs __rte_unused =
2741 (struct tf_session *)(tfp->session->core_data);
/* Working copy of the resource DB; the flush helpers prune it down
 * to only the entries that still need flushing.
 */
2743 struct tf_rm_db flush_resc = tfs->resc;
2745 /* On close it is assumed that the session has already cleaned
2746 * up all its resources, individually, while destroying its
2747 * flows. No checking is performed thus the behavior is as
2750 * Session RM will signal FW to release session resources. FW
2751 * will perform invalidation of all the allocated entries
2752 * (assures any outstanding resources has been cleared, then
2753 * free the FW RM instance.
2755 * Session will then be freed by tf_close_session() thus there
2756 * is no need to clean each resource pool as the whole session
2760 for (i = 0; i < TF_DIR_MAX; i++) {
/* Select the per-direction entry arrays. */
2761 if (i == TF_DIR_RX) {
2762 hw_entries = tfs->resc.rx.hw_entry;
2763 hw_flush_entries = flush_resc.rx.hw_entry;
2764 sram_entries = tfs->resc.rx.sram_entry;
2765 sram_flush_entries = flush_resc.rx.sram_entry;
2767 hw_entries = tfs->resc.tx.hw_entry;
2768 hw_flush_entries = flush_resc.tx.hw_entry;
2769 sram_entries = tfs->resc.tx.sram_entry;
2770 sram_flush_entries = flush_resc.tx.sram_entry;
2773 /* Check for any not previously freed HW resources and
2774 * flush if required.
2776 rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);
2778 rc_close = -ENOTEMPTY;
2781 "%s, lingering HW resources\n",
2784 /* Log the entries to be flushed */
2785 tf_rm_log_hw_flush(i, hw_flush_entries);
2786 rc = tf_msg_session_hw_resc_flush(tfp,
2793 "%s, HW flush failed\n",
2798 /* Check for any not previously freed SRAM resources
2799 * and flush if required.
2801 rc = tf_rm_sram_to_flush(tfs,
2804 sram_flush_entries);
2806 rc_close = -ENOTEMPTY;
2809 "%s, lingering SRAM resources\n",
2812 /* Log the entries to be flushed */
2813 tf_rm_log_sram_flush(i, sram_flush_entries);
2815 rc = tf_msg_session_sram_resc_flush(tfp,
2817 sram_flush_entries);
/* Fixed copy/paste error: this is the SRAM flush failure path,
 * but the message previously said "HW flush failed".
 */
2822 "%s, SRAM flush failed\n",
/* Release the full HW and SRAM reservations back to firmware. */
2827 rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
2832 "%s, HW free failed\n",
2836 rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
2841 "%s, SRAM free failed\n",
2849 #if (TF_SHADOW == 1)
/* tf_rm_shadow_db_init() - Initialize shadow-copy DB support for the
 * session. Compiled only when TF_SHADOW is enabled at build time.
 * NOTE(review): the function body is elided in this excerpt.
 */
2851 tf_rm_shadow_db_init(struct tf_session *tfs)
2857 #endif /* TF_SHADOW */
/* tf_rm_lookup_tcam_type_pool() - Map a TCAM table type to the
 * session's per-direction bitalloc pool.
 *
 * Returns the pool through *pool. rc starts as -EOPNOTSUPP so any
 * type not matched below (and the explicitly listed VEB/SP/CT_RULE
 * cases) reports "not supported"; a -1 result indicates the
 * TF_RM_GET_POOLS name lookup itself failed.
 */
2860 tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
2862 enum tf_tcam_tbl_type type,
2863 struct bitalloc **pool)
2865 int rc = -EOPNOTSUPP;
2870 case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
2871 TF_RM_GET_POOLS(tfs, dir, pool,
2872 TF_L2_CTXT_TCAM_POOL_NAME,
2875 case TF_TCAM_TBL_TYPE_PROF_TCAM:
2876 TF_RM_GET_POOLS(tfs, dir, pool,
2877 TF_PROF_TCAM_POOL_NAME,
2880 case TF_TCAM_TBL_TYPE_WC_TCAM:
2881 TF_RM_GET_POOLS(tfs, dir, pool,
2882 TF_WC_TCAM_POOL_NAME,
/* Not supported: fall through to leave rc at -EOPNOTSUPP. */
2885 case TF_TCAM_TBL_TYPE_VEB_TCAM:
2886 case TF_TCAM_TBL_TYPE_SP_TCAM:
2887 case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
2892 if (rc == -EOPNOTSUPP) {
2894 "dir:%d, Tcam type not supported, type:%d\n",
2898 } else if (rc == -1) {
/* NOTE(review): "%s:," looks malformed (stray colon/comma) and is
 * inconsistent with the "dir:%d" form above — verify the format
 * string against the argument elided from this excerpt.
 */
2900 "%s:, Tcam type lookup failed, type:%d\n",
/* tf_rm_lookup_tbl_type_pool() - Map an index-table type to the
 * session's per-direction bitalloc pool.
 *
 * Direction-restricted types (MCG is RX-only; Encap 64B and the
 * SP SMAC IPv4/IPv6 variants are TX-only) bail out early for the
 * unsupported direction. rc starts as -EOPNOTSUPP so unhandled
 * types report "not supported"; -1 indicates the pool name lookup
 * itself failed.
 */
2910 tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
2912 enum tf_tbl_type type,
2913 struct bitalloc **pool)
2915 int rc = -EOPNOTSUPP;
2920 case TF_TBL_TYPE_FULL_ACT_RECORD:
2921 TF_RM_GET_POOLS(tfs, dir, pool,
2922 TF_SRAM_FULL_ACTION_POOL_NAME,
2925 case TF_TBL_TYPE_MCAST_GROUPS:
2926 /* No pools for TX direction, so bail out */
2927 if (dir == TF_DIR_TX)
2929 TF_RM_GET_POOLS_RX(tfs, pool,
2930 TF_SRAM_MCG_POOL_NAME);
2933 case TF_TBL_TYPE_ACT_ENCAP_8B:
2934 TF_RM_GET_POOLS(tfs, dir, pool,
2935 TF_SRAM_ENCAP_8B_POOL_NAME,
2938 case TF_TBL_TYPE_ACT_ENCAP_16B:
2939 TF_RM_GET_POOLS(tfs, dir, pool,
2940 TF_SRAM_ENCAP_16B_POOL_NAME,
2943 case TF_TBL_TYPE_ACT_ENCAP_64B:
2944 /* No pools for RX direction, so bail out */
2945 if (dir == TF_DIR_RX)
2947 TF_RM_GET_POOLS_TX(tfs, pool,
2948 TF_SRAM_ENCAP_64B_POOL_NAME);
2951 case TF_TBL_TYPE_ACT_SP_SMAC:
2952 TF_RM_GET_POOLS(tfs, dir, pool,
2953 TF_SRAM_SP_SMAC_POOL_NAME,
2956 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
/* Fixed comment: this type is TX-only (the guard bails on RX and
 * a TX pool is fetched); the comment previously said "TX".
 */
2957 /* No pools for RX direction, so bail out */
2958 if (dir == TF_DIR_RX)
2960 TF_RM_GET_POOLS_TX(tfs, pool,
2961 TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
2964 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
/* Fixed comment: likewise TX-only; comment previously said "TX". */
2965 /* No pools for RX direction, so bail out */
2966 if (dir == TF_DIR_RX)
2968 TF_RM_GET_POOLS_TX(tfs, pool,
2969 TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
2972 case TF_TBL_TYPE_ACT_STATS_64:
2973 TF_RM_GET_POOLS(tfs, dir, pool,
2974 TF_SRAM_STATS_64B_POOL_NAME,
2977 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
2978 TF_RM_GET_POOLS(tfs, dir, pool,
2979 TF_SRAM_NAT_SPORT_POOL_NAME,
2982 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
2983 TF_RM_GET_POOLS(tfs, dir, pool,
2984 TF_SRAM_NAT_S_IPV4_POOL_NAME,
2987 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
2988 TF_RM_GET_POOLS(tfs, dir, pool,
2989 TF_SRAM_NAT_D_IPV4_POOL_NAME,
2992 case TF_TBL_TYPE_METER_PROF:
2993 TF_RM_GET_POOLS(tfs, dir, pool,
2994 TF_METER_PROF_POOL_NAME,
2997 case TF_TBL_TYPE_METER_INST:
2998 TF_RM_GET_POOLS(tfs, dir, pool,
2999 TF_METER_INST_POOL_NAME,
3002 case TF_TBL_TYPE_MIRROR_CONFIG:
3003 TF_RM_GET_POOLS(tfs, dir, pool,
3004 TF_MIRROR_POOL_NAME,
3007 case TF_TBL_TYPE_UPAR:
3008 TF_RM_GET_POOLS(tfs, dir, pool,
3012 case TF_TBL_TYPE_EPOCH0:
3013 TF_RM_GET_POOLS(tfs, dir, pool,
3014 TF_EPOCH0_POOL_NAME,
3017 case TF_TBL_TYPE_EPOCH1:
3018 TF_RM_GET_POOLS(tfs, dir, pool,
3019 TF_EPOCH1_POOL_NAME,
3022 case TF_TBL_TYPE_METADATA:
3023 TF_RM_GET_POOLS(tfs, dir, pool,
3024 TF_METADATA_POOL_NAME,
3027 case TF_TBL_TYPE_CT_STATE:
3028 TF_RM_GET_POOLS(tfs, dir, pool,
3029 TF_CT_STATE_POOL_NAME,
3032 case TF_TBL_TYPE_RANGE_PROF:
3033 TF_RM_GET_POOLS(tfs, dir, pool,
3034 TF_RANGE_PROF_POOL_NAME,
3037 case TF_TBL_TYPE_RANGE_ENTRY:
3038 TF_RM_GET_POOLS(tfs, dir, pool,
3039 TF_RANGE_ENTRY_POOL_NAME,
3042 case TF_TBL_TYPE_LAG:
3043 TF_RM_GET_POOLS(tfs, dir, pool,
3044 TF_LAG_ENTRY_POOL_NAME,
3047 /* Not yet supported */
3048 case TF_TBL_TYPE_ACT_ENCAP_32B:
3049 case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3050 case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3051 case TF_TBL_TYPE_VNIC_SVIF:
3053 /* No bitalloc pools for these types */
3054 case TF_TBL_TYPE_EXT:
3059 if (rc == -EOPNOTSUPP) {
3061 "dir:%d, Table type not supported, type:%d\n",
3065 } else if (rc == -1) {
3067 "dir:%d, Table type lookup failed, type:%d\n",
/* tf_rm_convert_tbl_type() - Convert a TruFlow table type into the
 * corresponding HCAPI resource type index.
 *
 * Action/encap/NAT/stats types map into the SRAM resource type space
 * (TF_RESC_TYPE_SRAM_*); meter/mirror/epoch/range/LAG and similar map
 * into the HW resource type space (TF_RESC_TYPE_HW_*). The result is
 * written through *hcapi_type. Types listed at the bottom have no
 * mapping (not yet supported, or no pools, e.g. TF_TBL_TYPE_EXT).
 *
 * NOTE(review): break statements, the default case and the return are
 * elided in this excerpt.
 */
3077 tf_rm_convert_tbl_type(enum tf_tbl_type type,
3078 uint32_t *hcapi_type)
3083 case TF_TBL_TYPE_FULL_ACT_RECORD:
3084 *hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
3086 case TF_TBL_TYPE_MCAST_GROUPS:
3087 *hcapi_type = TF_RESC_TYPE_SRAM_MCG;
3089 case TF_TBL_TYPE_ACT_ENCAP_8B:
3090 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
3092 case TF_TBL_TYPE_ACT_ENCAP_16B:
3093 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
3095 case TF_TBL_TYPE_ACT_ENCAP_64B:
3096 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
3098 case TF_TBL_TYPE_ACT_SP_SMAC:
3099 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
3101 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3102 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
3104 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3105 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
3107 case TF_TBL_TYPE_ACT_STATS_64:
3108 *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
3110 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3111 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
3113 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3114 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
3116 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3117 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
3119 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3120 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
3122 case TF_TBL_TYPE_METER_PROF:
3123 *hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
3125 case TF_TBL_TYPE_METER_INST:
3126 *hcapi_type = TF_RESC_TYPE_HW_METER_INST;
3128 case TF_TBL_TYPE_MIRROR_CONFIG:
3129 *hcapi_type = TF_RESC_TYPE_HW_MIRROR;
3131 case TF_TBL_TYPE_UPAR:
3132 *hcapi_type = TF_RESC_TYPE_HW_UPAR;
3134 case TF_TBL_TYPE_EPOCH0:
3135 *hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
3137 case TF_TBL_TYPE_EPOCH1:
3138 *hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
3140 case TF_TBL_TYPE_METADATA:
3141 *hcapi_type = TF_RESC_TYPE_HW_METADATA;
3143 case TF_TBL_TYPE_CT_STATE:
3144 *hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
3146 case TF_TBL_TYPE_RANGE_PROF:
3147 *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
3149 case TF_TBL_TYPE_RANGE_ENTRY:
3150 *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
3152 case TF_TBL_TYPE_LAG:
3153 *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
3155 /* Not yet supported */
3156 case TF_TBL_TYPE_ACT_ENCAP_32B:
3157 case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3158 case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3159 case TF_TBL_TYPE_VNIC_SVIF:
3160 case TF_TBL_TYPE_EXT: /* No pools for this type */
3170 tf_rm_convert_index(struct tf_session *tfs,
3172 enum tf_tbl_type type,
3173 enum tf_rm_convert_type c_type,
3175 uint32_t *convert_index)
3178 struct tf_rm_resc *resc;
3179 uint32_t hcapi_type;
3180 uint32_t base_index;
3182 if (dir == TF_DIR_RX)
3183 resc = &tfs->resc.rx;
3184 else if (dir == TF_DIR_TX)
3185 resc = &tfs->resc.tx;
3189 rc = tf_rm_convert_tbl_type(type, &hcapi_type);
3194 case TF_TBL_TYPE_FULL_ACT_RECORD:
3195 case TF_TBL_TYPE_MCAST_GROUPS:
3196 case TF_TBL_TYPE_ACT_ENCAP_8B:
3197 case TF_TBL_TYPE_ACT_ENCAP_16B:
3198 case TF_TBL_TYPE_ACT_ENCAP_32B:
3199 case TF_TBL_TYPE_ACT_ENCAP_64B:
3200 case TF_TBL_TYPE_ACT_SP_SMAC:
3201 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3202 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3203 case TF_TBL_TYPE_ACT_STATS_64:
3204 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3205 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3206 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3207 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3208 base_index = resc->sram_entry[hcapi_type].start;
3210 case TF_TBL_TYPE_MIRROR_CONFIG:
3211 case TF_TBL_TYPE_METER_PROF:
3212 case TF_TBL_TYPE_METER_INST:
3213 case TF_TBL_TYPE_UPAR:
3214 case TF_TBL_TYPE_EPOCH0:
3215 case TF_TBL_TYPE_EPOCH1:
3216 case TF_TBL_TYPE_METADATA:
3217 case TF_TBL_TYPE_CT_STATE:
3218 case TF_TBL_TYPE_RANGE_PROF:
3219 case TF_TBL_TYPE_RANGE_ENTRY:
3220 case TF_TBL_TYPE_LAG:
3221 base_index = resc->hw_entry[hcapi_type].start;
3223 /* Not yet supported */
3224 case TF_TBL_TYPE_VNIC_SVIF:
3225 case TF_TBL_TYPE_EXT: /* No pools for this type */
3231 case TF_RM_CONVERT_RM_BASE:
3232 *convert_index = index - base_index;
3234 case TF_RM_CONVERT_ADD_BASE:
3235 *convert_index = index + base_index;