1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
12 #include "tf_session.h"
13 #include "tf_resources.h"
18 * Internal macro to perform HW resource allocation check between what
19 * firmware reports vs what was statically requested.
22 * struct tf_rm_hw_query *hquery - Pointer to the hw query result
23 * enum tf_dir dir - Direction to process
24 * enum tf_resource_type_hw hcapi_type - HCAPI type, the index element
25 * in the hw query structure
26 * define def_value - Define value to check against
27 * uint32_t *eflag - Result of the check
29 #define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do { \
30 if ((dir) == TF_DIR_RX) { \
31 if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
32 *(eflag) |= 1 << (hcapi_type); \
34 if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
35 *(eflag) |= 1 << (hcapi_type); \
40 * Internal macro to perform SRAM resource allocation check between what
41 * firmware reports vs what was statically requested.
44 * struct tf_rm_sram_query *squery - Pointer to the sram query result
45 * enum tf_dir dir - Direction to process
46 * enum tf_resource_type_sram hcapi_type - HCAPI type, the index element
47 * in the sram query structure
48 * define def_value - Define value to check against
49 * uint32_t *eflag - Result of the check
51 #define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
52 if ((dir) == TF_DIR_RX) { \
53 if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
54 *(eflag) |= 1 << (hcapi_type); \
56 if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
57 *(eflag) |= 1 << (hcapi_type); \
62 * Internal macro to convert a reserved resource define name to be
66 * enum tf_dir dir - Direction to process
67 * string type - Type name to append RX or TX to
68 * string dtype - Direction specific type
72 #define TF_RESC_RSVD(dir, type, dtype) do { \
73 if ((dir) == TF_DIR_RX) \
74 (dtype) = type ## _RX; \
76 (dtype) = type ## _TX; \
80 *tf_dir_2_str(enum tf_dir dir)
88 return "Invalid direction";
93 *tf_ident_2_str(enum tf_identifier_type id_type)
96 case TF_IDENT_TYPE_L2_CTXT:
97 return "l2_ctxt_remap";
98 case TF_IDENT_TYPE_PROF_FUNC:
100 case TF_IDENT_TYPE_WC_PROF:
102 case TF_IDENT_TYPE_EM_PROF:
104 case TF_IDENT_TYPE_L2_FUNC:
107 return "Invalid identifier";
112 *tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type)
115 case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
116 return "l2_ctxt_tcam";
117 case TF_TCAM_TBL_TYPE_PROF_TCAM:
119 case TF_TCAM_TBL_TYPE_WC_TCAM:
121 case TF_TCAM_TBL_TYPE_VEB_TCAM:
123 case TF_TCAM_TBL_TYPE_SP_TCAM:
125 case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
126 return "ct_rule_tcam";
128 return "Invalid tcam table type";
133 *tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)
136 case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
137 return "L2 ctxt tcam";
138 case TF_RESC_TYPE_HW_PROF_FUNC:
139 return "Profile Func";
140 case TF_RESC_TYPE_HW_PROF_TCAM:
141 return "Profile tcam";
142 case TF_RESC_TYPE_HW_EM_PROF_ID:
143 return "EM profile id";
144 case TF_RESC_TYPE_HW_EM_REC:
146 case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
147 return "WC tcam profile id";
148 case TF_RESC_TYPE_HW_WC_TCAM:
150 case TF_RESC_TYPE_HW_METER_PROF:
151 return "Meter profile";
152 case TF_RESC_TYPE_HW_METER_INST:
153 return "Meter instance";
154 case TF_RESC_TYPE_HW_MIRROR:
156 case TF_RESC_TYPE_HW_UPAR:
158 case TF_RESC_TYPE_HW_SP_TCAM:
159 return "Source properties tcam";
160 case TF_RESC_TYPE_HW_L2_FUNC:
161 return "L2 Function";
162 case TF_RESC_TYPE_HW_FKB:
164 case TF_RESC_TYPE_HW_TBL_SCOPE:
165 return "Table scope";
166 case TF_RESC_TYPE_HW_EPOCH0:
168 case TF_RESC_TYPE_HW_EPOCH1:
170 case TF_RESC_TYPE_HW_METADATA:
172 case TF_RESC_TYPE_HW_CT_STATE:
173 return "Connection tracking state";
174 case TF_RESC_TYPE_HW_RANGE_PROF:
175 return "Range profile";
176 case TF_RESC_TYPE_HW_RANGE_ENTRY:
177 return "Range entry";
178 case TF_RESC_TYPE_HW_LAG_ENTRY:
181 return "Invalid identifier";
186 *tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
189 case TF_RESC_TYPE_SRAM_FULL_ACTION:
190 return "Full action";
191 case TF_RESC_TYPE_SRAM_MCG:
193 case TF_RESC_TYPE_SRAM_ENCAP_8B:
195 case TF_RESC_TYPE_SRAM_ENCAP_16B:
197 case TF_RESC_TYPE_SRAM_ENCAP_64B:
199 case TF_RESC_TYPE_SRAM_SP_SMAC:
200 return "Source properties SMAC";
201 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
202 return "Source properties SMAC IPv4";
203 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
204 return "Source properties IPv6";
205 case TF_RESC_TYPE_SRAM_COUNTER_64B:
206 return "Counter 64B";
207 case TF_RESC_TYPE_SRAM_NAT_SPORT:
208 return "NAT source port";
209 case TF_RESC_TYPE_SRAM_NAT_DPORT:
210 return "NAT destination port";
211 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
212 return "NAT source IPv4";
213 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
214 return "NAT destination IPv4";
216 return "Invalid identifier";
221 * Helper function to perform a HW HCAPI resource type lookup against
222 * the reserved value of the same static type.
225 * -EOPNOTSUPP - Reserved resource type not supported
226 * Value - Integer value of the reserved value for the requested type
229 tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)
231 uint32_t value = -EOPNOTSUPP;
234 case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
235 TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);
237 case TF_RESC_TYPE_HW_PROF_FUNC:
238 TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);
240 case TF_RESC_TYPE_HW_PROF_TCAM:
241 TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);
243 case TF_RESC_TYPE_HW_EM_PROF_ID:
244 TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value);
246 case TF_RESC_TYPE_HW_EM_REC:
247 TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);
249 case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
250 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);
252 case TF_RESC_TYPE_HW_WC_TCAM:
253 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);
255 case TF_RESC_TYPE_HW_METER_PROF:
256 TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);
258 case TF_RESC_TYPE_HW_METER_INST:
259 TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);
261 case TF_RESC_TYPE_HW_MIRROR:
262 TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);
264 case TF_RESC_TYPE_HW_UPAR:
265 TF_RESC_RSVD(dir, TF_RSVD_UPAR, value);
267 case TF_RESC_TYPE_HW_SP_TCAM:
268 TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);
270 case TF_RESC_TYPE_HW_L2_FUNC:
271 TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);
273 case TF_RESC_TYPE_HW_FKB:
274 TF_RESC_RSVD(dir, TF_RSVD_FKB, value);
276 case TF_RESC_TYPE_HW_TBL_SCOPE:
277 TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);
279 case TF_RESC_TYPE_HW_EPOCH0:
280 TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);
282 case TF_RESC_TYPE_HW_EPOCH1:
283 TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);
285 case TF_RESC_TYPE_HW_METADATA:
286 TF_RESC_RSVD(dir, TF_RSVD_METADATA, value);
288 case TF_RESC_TYPE_HW_CT_STATE:
289 TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);
291 case TF_RESC_TYPE_HW_RANGE_PROF:
292 TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);
294 case TF_RESC_TYPE_HW_RANGE_ENTRY:
295 TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);
297 case TF_RESC_TYPE_HW_LAG_ENTRY:
298 TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);
308 * Helper function to perform a SRAM HCAPI resource type lookup
309 * against the reserved value of the same static type.
312 * -EOPNOTSUPP - Reserved resource type not supported
313 * Value - Integer value of the reserved value for the requested type
316 tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
318 uint32_t value = -EOPNOTSUPP;
321 case TF_RESC_TYPE_SRAM_FULL_ACTION:
322 TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
324 case TF_RESC_TYPE_SRAM_MCG:
325 TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
327 case TF_RESC_TYPE_SRAM_ENCAP_8B:
328 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
330 case TF_RESC_TYPE_SRAM_ENCAP_16B:
331 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
333 case TF_RESC_TYPE_SRAM_ENCAP_64B:
334 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
336 case TF_RESC_TYPE_SRAM_SP_SMAC:
337 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
339 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
340 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
342 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
343 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
345 case TF_RESC_TYPE_SRAM_COUNTER_64B:
346 TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
348 case TF_RESC_TYPE_SRAM_NAT_SPORT:
349 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
351 case TF_RESC_TYPE_SRAM_NAT_DPORT:
352 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
354 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
355 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
357 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
358 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
368 * Helper function to print all the HW resource qcaps errors reported
372 * Receive or transmit direction
375 * Pointer to the hw error flags created at time of the query check
378 tf_rm_print_hw_qcaps_error(enum tf_dir dir,
379 struct tf_rm_hw_query *hw_query,
380 uint32_t *error_flag)
384 PMD_DRV_LOG(ERR, "QCAPS errors HW\n");
385 PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
386 PMD_DRV_LOG(ERR, " Elements:\n");
388 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
389 if (*error_flag & 1 << i)
390 PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
391 tf_hcapi_hw_2_str(i),
392 hw_query->hw_query[i].max,
393 tf_rm_rsvd_hw_value(dir, i));
398 * Helper function to print all the SRAM resource qcaps errors
399 * reported in the error_flag.
402 * Receive or transmit direction
405 * Pointer to the sram error flags created at time of the query check
408 tf_rm_print_sram_qcaps_error(enum tf_dir dir,
409 struct tf_rm_sram_query *sram_query,
410 uint32_t *error_flag)
414 PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n");
415 PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
416 PMD_DRV_LOG(ERR, " Elements:\n");
418 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
419 if (*error_flag & 1 << i)
420 PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
421 tf_hcapi_sram_2_str(i),
422 sram_query->sram_query[i].max,
423 tf_rm_rsvd_sram_value(dir, i));
428 * Performs a HW resource check between what firmware capability
429 * reports and what the core expects is available.
431 * Firmware performs the resource carving at AFM init time and the
432 * resource capability is reported in the TruFlow qcaps msg.
435 * Pointer to HW Query data structure. Query holds what the firmware
436 * offers of the HW resources.
439 * Receive or transmit direction
441 * [in/out] error_flag
442 * Pointer to a bit array indicating the error of a single HCAPI
443 * resource type. When a bit is set to 1, the HCAPI resource type
444 * failed static allocation.
448 * -ENOMEM - Failure on one of the allocated resources. Check the
449 * error_flag for what types are flagged errored.
452 tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
454 uint32_t *error_flag)
458 TF_RM_CHECK_HW_ALLOC(query,
460 TF_RESC_TYPE_HW_L2_CTXT_TCAM,
461 TF_RSVD_L2_CTXT_TCAM,
464 TF_RM_CHECK_HW_ALLOC(query,
466 TF_RESC_TYPE_HW_PROF_FUNC,
470 TF_RM_CHECK_HW_ALLOC(query,
472 TF_RESC_TYPE_HW_PROF_TCAM,
476 TF_RM_CHECK_HW_ALLOC(query,
478 TF_RESC_TYPE_HW_EM_PROF_ID,
482 TF_RM_CHECK_HW_ALLOC(query,
484 TF_RESC_TYPE_HW_EM_REC,
488 TF_RM_CHECK_HW_ALLOC(query,
490 TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,
491 TF_RSVD_WC_TCAM_PROF_ID,
494 TF_RM_CHECK_HW_ALLOC(query,
496 TF_RESC_TYPE_HW_WC_TCAM,
500 TF_RM_CHECK_HW_ALLOC(query,
502 TF_RESC_TYPE_HW_METER_PROF,
506 TF_RM_CHECK_HW_ALLOC(query,
508 TF_RESC_TYPE_HW_METER_INST,
512 TF_RM_CHECK_HW_ALLOC(query,
514 TF_RESC_TYPE_HW_MIRROR,
518 TF_RM_CHECK_HW_ALLOC(query,
520 TF_RESC_TYPE_HW_UPAR,
524 TF_RM_CHECK_HW_ALLOC(query,
526 TF_RESC_TYPE_HW_SP_TCAM,
530 TF_RM_CHECK_HW_ALLOC(query,
532 TF_RESC_TYPE_HW_L2_FUNC,
536 TF_RM_CHECK_HW_ALLOC(query,
542 TF_RM_CHECK_HW_ALLOC(query,
544 TF_RESC_TYPE_HW_TBL_SCOPE,
548 TF_RM_CHECK_HW_ALLOC(query,
550 TF_RESC_TYPE_HW_EPOCH0,
554 TF_RM_CHECK_HW_ALLOC(query,
556 TF_RESC_TYPE_HW_EPOCH1,
560 TF_RM_CHECK_HW_ALLOC(query,
562 TF_RESC_TYPE_HW_METADATA,
566 TF_RM_CHECK_HW_ALLOC(query,
568 TF_RESC_TYPE_HW_CT_STATE,
572 TF_RM_CHECK_HW_ALLOC(query,
574 TF_RESC_TYPE_HW_RANGE_PROF,
578 TF_RM_CHECK_HW_ALLOC(query,
580 TF_RESC_TYPE_HW_RANGE_ENTRY,
584 TF_RM_CHECK_HW_ALLOC(query,
586 TF_RESC_TYPE_HW_LAG_ENTRY,
590 if (*error_flag != 0)
597 * Performs a SRAM resource check between what firmware capability
598 * reports and what the core expects is available.
600 * Firmware performs the resource carving at AFM init time and the
601 * resource capability is reported in the TruFlow qcaps msg.
604 * Pointer to SRAM Query data structure. Query holds what the
605 * firmware offers of the SRAM resources.
608 * Receive or transmit direction
610 * [in/out] error_flag
611 * Pointer to a bit array indicating the error of a single HCAPI
612 * resource type. When a bit is set to 1, the HCAPI resource type
613 * failed static allocation.
617 * -ENOMEM - Failure on one of the allocated resources. Check the
618 * error_flag for what types are flagged errored.
621 tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
623 uint32_t *error_flag)
627 TF_RM_CHECK_SRAM_ALLOC(query,
629 TF_RESC_TYPE_SRAM_FULL_ACTION,
630 TF_RSVD_SRAM_FULL_ACTION,
633 TF_RM_CHECK_SRAM_ALLOC(query,
635 TF_RESC_TYPE_SRAM_MCG,
639 TF_RM_CHECK_SRAM_ALLOC(query,
641 TF_RESC_TYPE_SRAM_ENCAP_8B,
642 TF_RSVD_SRAM_ENCAP_8B,
645 TF_RM_CHECK_SRAM_ALLOC(query,
647 TF_RESC_TYPE_SRAM_ENCAP_16B,
648 TF_RSVD_SRAM_ENCAP_16B,
651 TF_RM_CHECK_SRAM_ALLOC(query,
653 TF_RESC_TYPE_SRAM_ENCAP_64B,
654 TF_RSVD_SRAM_ENCAP_64B,
657 TF_RM_CHECK_SRAM_ALLOC(query,
659 TF_RESC_TYPE_SRAM_SP_SMAC,
660 TF_RSVD_SRAM_SP_SMAC,
663 TF_RM_CHECK_SRAM_ALLOC(query,
665 TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
666 TF_RSVD_SRAM_SP_SMAC_IPV4,
669 TF_RM_CHECK_SRAM_ALLOC(query,
671 TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
672 TF_RSVD_SRAM_SP_SMAC_IPV6,
675 TF_RM_CHECK_SRAM_ALLOC(query,
677 TF_RESC_TYPE_SRAM_COUNTER_64B,
678 TF_RSVD_SRAM_COUNTER_64B,
681 TF_RM_CHECK_SRAM_ALLOC(query,
683 TF_RESC_TYPE_SRAM_NAT_SPORT,
684 TF_RSVD_SRAM_NAT_SPORT,
687 TF_RM_CHECK_SRAM_ALLOC(query,
689 TF_RESC_TYPE_SRAM_NAT_DPORT,
690 TF_RSVD_SRAM_NAT_DPORT,
693 TF_RM_CHECK_SRAM_ALLOC(query,
695 TF_RESC_TYPE_SRAM_NAT_S_IPV4,
696 TF_RSVD_SRAM_NAT_S_IPV4,
699 TF_RM_CHECK_SRAM_ALLOC(query,
701 TF_RESC_TYPE_SRAM_NAT_D_IPV4,
702 TF_RSVD_SRAM_NAT_D_IPV4,
705 if (*error_flag != 0)
712 * Internal function to mark pool entries used.
715 tf_rm_reserve_range(uint32_t count,
719 struct bitalloc *pool)
723 /* If no resources have been requested we mark everything
727 for (i = 0; i < max; i++)
728 ba_alloc_index(pool, i);
730 /* Support 2 main modes
731 * Reserved range starts from bottom up (with
732 * pre-reserved value or not)
733 * - begin = 0 to end xx
734 * - begin = 1 to end xx
736 * Reserved range starts from top down
737 * - begin = yy to end max
740 /* Bottom up check, start from 0 */
741 if (rsv_begin == 0) {
742 for (i = rsv_end + 1; i < max; i++)
743 ba_alloc_index(pool, i);
746 /* Bottom up check, start from 1 or higher OR
749 if (rsv_begin >= 1) {
750 /* Allocate from 0 until start */
751 for (i = 0; i < rsv_begin; i++)
752 ba_alloc_index(pool, i);
754 /* Skip and then do the remaining */
755 if (rsv_end < max - 1) {
756 for (i = rsv_end; i < max; i++)
757 ba_alloc_index(pool, i);
764 * Internal function to mark all the l2 ctxt allocated that Truflow
768 tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
770 uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
773 /* l2 ctxt rx direction */
774 if (tfs->resc.rx.hw_entry[index].stride > 0)
775 end = tfs->resc.rx.hw_entry[index].start +
776 tfs->resc.rx.hw_entry[index].stride - 1;
778 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
779 tfs->resc.rx.hw_entry[index].start,
782 tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
784 /* l2 ctxt tx direction */
785 if (tfs->resc.tx.hw_entry[index].stride > 0)
786 end = tfs->resc.tx.hw_entry[index].start +
787 tfs->resc.tx.hw_entry[index].stride - 1;
789 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
790 tfs->resc.tx.hw_entry[index].start,
793 tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
797 * Internal function to mark all the profile tcam and profile func
798 * resources that Truflow does not own.
801 tf_rm_rsvd_prof(struct tf_session *tfs)
803 uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;
806 /* profile func rx direction */
807 if (tfs->resc.rx.hw_entry[index].stride > 0)
808 end = tfs->resc.rx.hw_entry[index].start +
809 tfs->resc.rx.hw_entry[index].stride - 1;
811 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
812 tfs->resc.rx.hw_entry[index].start,
815 tfs->TF_PROF_FUNC_POOL_NAME_RX);
817 /* profile func tx direction */
818 if (tfs->resc.tx.hw_entry[index].stride > 0)
819 end = tfs->resc.tx.hw_entry[index].start +
820 tfs->resc.tx.hw_entry[index].stride - 1;
822 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
823 tfs->resc.tx.hw_entry[index].start,
826 tfs->TF_PROF_FUNC_POOL_NAME_TX);
828 index = TF_RESC_TYPE_HW_PROF_TCAM;
830 /* profile tcam rx direction */
831 if (tfs->resc.rx.hw_entry[index].stride > 0)
832 end = tfs->resc.rx.hw_entry[index].start +
833 tfs->resc.rx.hw_entry[index].stride - 1;
835 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
836 tfs->resc.rx.hw_entry[index].start,
839 tfs->TF_PROF_TCAM_POOL_NAME_RX);
841 /* profile tcam tx direction */
842 if (tfs->resc.tx.hw_entry[index].stride > 0)
843 end = tfs->resc.tx.hw_entry[index].start +
844 tfs->resc.tx.hw_entry[index].stride - 1;
846 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
847 tfs->resc.tx.hw_entry[index].start,
850 tfs->TF_PROF_TCAM_POOL_NAME_TX);
854 * Internal function to mark all the em profile id allocated that
855 * Truflow does not own.
858 tf_rm_rsvd_em_prof(struct tf_session *tfs)
860 uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;
863 /* em prof id rx direction */
864 if (tfs->resc.rx.hw_entry[index].stride > 0)
865 end = tfs->resc.rx.hw_entry[index].start +
866 tfs->resc.rx.hw_entry[index].stride - 1;
868 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
869 tfs->resc.rx.hw_entry[index].start,
872 tfs->TF_EM_PROF_ID_POOL_NAME_RX);
874 /* em prof id tx direction */
875 if (tfs->resc.tx.hw_entry[index].stride > 0)
876 end = tfs->resc.tx.hw_entry[index].start +
877 tfs->resc.tx.hw_entry[index].stride - 1;
879 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
880 tfs->resc.tx.hw_entry[index].start,
883 tfs->TF_EM_PROF_ID_POOL_NAME_TX);
887 * Internal function to mark all the wildcard tcam and profile id
888 * resources that Truflow does not own.
891 tf_rm_rsvd_wc(struct tf_session *tfs)
893 uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;
896 /* wc profile id rx direction */
897 if (tfs->resc.rx.hw_entry[index].stride > 0)
898 end = tfs->resc.rx.hw_entry[index].start +
899 tfs->resc.rx.hw_entry[index].stride - 1;
901 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
902 tfs->resc.rx.hw_entry[index].start,
905 tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);
907 /* wc profile id tx direction */
908 if (tfs->resc.tx.hw_entry[index].stride > 0)
909 end = tfs->resc.tx.hw_entry[index].start +
910 tfs->resc.tx.hw_entry[index].stride - 1;
912 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
913 tfs->resc.tx.hw_entry[index].start,
916 tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);
918 index = TF_RESC_TYPE_HW_WC_TCAM;
920 /* wc tcam rx direction */
921 if (tfs->resc.rx.hw_entry[index].stride > 0)
922 end = tfs->resc.rx.hw_entry[index].start +
923 tfs->resc.rx.hw_entry[index].stride - 1;
925 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
926 tfs->resc.rx.hw_entry[index].start,
929 tfs->TF_WC_TCAM_POOL_NAME_RX);
931 /* wc tcam tx direction */
932 if (tfs->resc.tx.hw_entry[index].stride > 0)
933 end = tfs->resc.tx.hw_entry[index].start +
934 tfs->resc.tx.hw_entry[index].stride - 1;
936 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
937 tfs->resc.tx.hw_entry[index].start,
940 tfs->TF_WC_TCAM_POOL_NAME_TX);
944 * Internal function to mark all the meter resources allocated that
945 * Truflow does not own.
948 tf_rm_rsvd_meter(struct tf_session *tfs)
950 uint32_t index = TF_RESC_TYPE_HW_METER_PROF;
953 /* meter profiles rx direction */
954 if (tfs->resc.rx.hw_entry[index].stride > 0)
955 end = tfs->resc.rx.hw_entry[index].start +
956 tfs->resc.rx.hw_entry[index].stride - 1;
958 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
959 tfs->resc.rx.hw_entry[index].start,
962 tfs->TF_METER_PROF_POOL_NAME_RX);
964 /* meter profiles tx direction */
965 if (tfs->resc.tx.hw_entry[index].stride > 0)
966 end = tfs->resc.tx.hw_entry[index].start +
967 tfs->resc.tx.hw_entry[index].stride - 1;
969 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
970 tfs->resc.tx.hw_entry[index].start,
973 tfs->TF_METER_PROF_POOL_NAME_TX);
975 index = TF_RESC_TYPE_HW_METER_INST;
977 /* meter rx direction */
978 if (tfs->resc.rx.hw_entry[index].stride > 0)
979 end = tfs->resc.rx.hw_entry[index].start +
980 tfs->resc.rx.hw_entry[index].stride - 1;
982 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
983 tfs->resc.rx.hw_entry[index].start,
986 tfs->TF_METER_INST_POOL_NAME_RX);
988 /* meter tx direction */
989 if (tfs->resc.tx.hw_entry[index].stride > 0)
990 end = tfs->resc.tx.hw_entry[index].start +
991 tfs->resc.tx.hw_entry[index].stride - 1;
993 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
994 tfs->resc.tx.hw_entry[index].start,
997 tfs->TF_METER_INST_POOL_NAME_TX);
1001 * Internal function to mark all the mirror resources allocated that
1002 * Truflow does not own.
1005 tf_rm_rsvd_mirror(struct tf_session *tfs)
1007 uint32_t index = TF_RESC_TYPE_HW_MIRROR;
1010 /* mirror rx direction */
1011 if (tfs->resc.rx.hw_entry[index].stride > 0)
1012 end = tfs->resc.rx.hw_entry[index].start +
1013 tfs->resc.rx.hw_entry[index].stride - 1;
1015 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1016 tfs->resc.rx.hw_entry[index].start,
1019 tfs->TF_MIRROR_POOL_NAME_RX);
1021 /* mirror tx direction */
1022 if (tfs->resc.tx.hw_entry[index].stride > 0)
1023 end = tfs->resc.tx.hw_entry[index].start +
1024 tfs->resc.tx.hw_entry[index].stride - 1;
1026 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1027 tfs->resc.tx.hw_entry[index].start,
1030 tfs->TF_MIRROR_POOL_NAME_TX);
1034 * Internal function to mark all the upar resources allocated that
1035 * Truflow does not own.
1038 tf_rm_rsvd_upar(struct tf_session *tfs)
1040 uint32_t index = TF_RESC_TYPE_HW_UPAR;
1043 /* upar rx direction */
1044 if (tfs->resc.rx.hw_entry[index].stride > 0)
1045 end = tfs->resc.rx.hw_entry[index].start +
1046 tfs->resc.rx.hw_entry[index].stride - 1;
1048 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1049 tfs->resc.rx.hw_entry[index].start,
1052 tfs->TF_UPAR_POOL_NAME_RX);
1054 /* upar tx direction */
1055 if (tfs->resc.tx.hw_entry[index].stride > 0)
1056 end = tfs->resc.tx.hw_entry[index].start +
1057 tfs->resc.tx.hw_entry[index].stride - 1;
1059 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1060 tfs->resc.tx.hw_entry[index].start,
1063 tfs->TF_UPAR_POOL_NAME_TX);
1067 * Internal function to mark all the sp tcam resources allocated that
1068 * Truflow does not own.
1071 tf_rm_rsvd_sp_tcam(struct tf_session *tfs)
1073 uint32_t index = TF_RESC_TYPE_HW_SP_TCAM;
1076 /* sp tcam rx direction */
1077 if (tfs->resc.rx.hw_entry[index].stride > 0)
1078 end = tfs->resc.rx.hw_entry[index].start +
1079 tfs->resc.rx.hw_entry[index].stride - 1;
1081 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1082 tfs->resc.rx.hw_entry[index].start,
1085 tfs->TF_SP_TCAM_POOL_NAME_RX);
1087 /* sp tcam tx direction */
1088 if (tfs->resc.tx.hw_entry[index].stride > 0)
1089 end = tfs->resc.tx.hw_entry[index].start +
1090 tfs->resc.tx.hw_entry[index].stride - 1;
1092 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1093 tfs->resc.tx.hw_entry[index].start,
1096 tfs->TF_SP_TCAM_POOL_NAME_TX);
1100 * Internal function to mark all the l2 func resources allocated that
1101 * Truflow does not own.
1104 tf_rm_rsvd_l2_func(struct tf_session *tfs)
1106 uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
1109 /* l2 func rx direction */
1110 if (tfs->resc.rx.hw_entry[index].stride > 0)
1111 end = tfs->resc.rx.hw_entry[index].start +
1112 tfs->resc.rx.hw_entry[index].stride - 1;
1114 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1115 tfs->resc.rx.hw_entry[index].start,
1118 tfs->TF_L2_FUNC_POOL_NAME_RX);
1120 /* l2 func tx direction */
1121 if (tfs->resc.tx.hw_entry[index].stride > 0)
1122 end = tfs->resc.tx.hw_entry[index].start +
1123 tfs->resc.tx.hw_entry[index].stride - 1;
1125 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1126 tfs->resc.tx.hw_entry[index].start,
1129 tfs->TF_L2_FUNC_POOL_NAME_TX);
1133 * Internal function to mark all the fkb resources allocated that
1134 * Truflow does not own.
1137 tf_rm_rsvd_fkb(struct tf_session *tfs)
1139 uint32_t index = TF_RESC_TYPE_HW_FKB;
1142 /* fkb rx direction */
1143 if (tfs->resc.rx.hw_entry[index].stride > 0)
1144 end = tfs->resc.rx.hw_entry[index].start +
1145 tfs->resc.rx.hw_entry[index].stride - 1;
1147 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1148 tfs->resc.rx.hw_entry[index].start,
1151 tfs->TF_FKB_POOL_NAME_RX);
1153 /* fkb tx direction */
1154 if (tfs->resc.tx.hw_entry[index].stride > 0)
1155 end = tfs->resc.tx.hw_entry[index].start +
1156 tfs->resc.tx.hw_entry[index].stride - 1;
1158 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1159 tfs->resc.tx.hw_entry[index].start,
1162 tfs->TF_FKB_POOL_NAME_TX);
1166 * Internal function to mark all the tbl scope resources allocated
1167 * that Truflow does not own.
1170 tf_rm_rsvd_tbl_scope(struct tf_session *tfs)
1172 uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;
1175 /* tbl scope rx direction */
1176 if (tfs->resc.rx.hw_entry[index].stride > 0)
1177 end = tfs->resc.rx.hw_entry[index].start +
1178 tfs->resc.rx.hw_entry[index].stride - 1;
1180 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1181 tfs->resc.rx.hw_entry[index].start,
1184 tfs->TF_TBL_SCOPE_POOL_NAME_RX);
1186 /* tbl scope tx direction */
1187 if (tfs->resc.tx.hw_entry[index].stride > 0)
1188 end = tfs->resc.tx.hw_entry[index].start +
1189 tfs->resc.tx.hw_entry[index].stride - 1;
1191 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1192 tfs->resc.tx.hw_entry[index].start,
1195 tfs->TF_TBL_SCOPE_POOL_NAME_TX);
1199 * Internal function to mark all the epoch resources allocated that
1200 * Truflow does not own.
1203 tf_rm_rsvd_epoch(struct tf_session *tfs)
1205 uint32_t index = TF_RESC_TYPE_HW_EPOCH0;
1208 /* epoch0 rx direction */
1209 if (tfs->resc.rx.hw_entry[index].stride > 0)
1210 end = tfs->resc.rx.hw_entry[index].start +
1211 tfs->resc.rx.hw_entry[index].stride - 1;
1213 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1214 tfs->resc.rx.hw_entry[index].start,
1217 tfs->TF_EPOCH0_POOL_NAME_RX);
1219 /* epoch0 tx direction */
1220 if (tfs->resc.tx.hw_entry[index].stride > 0)
1221 end = tfs->resc.tx.hw_entry[index].start +
1222 tfs->resc.tx.hw_entry[index].stride - 1;
1224 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1225 tfs->resc.tx.hw_entry[index].start,
1228 tfs->TF_EPOCH0_POOL_NAME_TX);
1230 index = TF_RESC_TYPE_HW_EPOCH1;
1232 /* epoch1 rx direction */
1233 if (tfs->resc.rx.hw_entry[index].stride > 0)
1234 end = tfs->resc.rx.hw_entry[index].start +
1235 tfs->resc.rx.hw_entry[index].stride - 1;
1237 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1238 tfs->resc.rx.hw_entry[index].start,
1241 tfs->TF_EPOCH1_POOL_NAME_RX);
1243 /* epoch1 tx direction */
1244 if (tfs->resc.tx.hw_entry[index].stride > 0)
1245 end = tfs->resc.tx.hw_entry[index].start +
1246 tfs->resc.tx.hw_entry[index].stride - 1;
1248 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1249 tfs->resc.tx.hw_entry[index].start,
1252 tfs->TF_EPOCH1_POOL_NAME_TX);
1256 * Internal function to mark all the metadata resources allocated that
1257 * Truflow does not own.
1260 tf_rm_rsvd_metadata(struct tf_session *tfs)
1262 uint32_t index = TF_RESC_TYPE_HW_METADATA;
1265 /* metadata rx direction */
1266 if (tfs->resc.rx.hw_entry[index].stride > 0)
1267 end = tfs->resc.rx.hw_entry[index].start +
1268 tfs->resc.rx.hw_entry[index].stride - 1;
1270 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1271 tfs->resc.rx.hw_entry[index].start,
1274 tfs->TF_METADATA_POOL_NAME_RX);
1276 /* metadata tx direction */
1277 if (tfs->resc.tx.hw_entry[index].stride > 0)
1278 end = tfs->resc.tx.hw_entry[index].start +
1279 tfs->resc.tx.hw_entry[index].stride - 1;
1281 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1282 tfs->resc.tx.hw_entry[index].start,
1285 tfs->TF_METADATA_POOL_NAME_TX);
1289 * Internal function to mark all the ct state resources allocated that
1290 * Truflow does not own.
1293 tf_rm_rsvd_ct_state(struct tf_session *tfs)
1295 uint32_t index = TF_RESC_TYPE_HW_CT_STATE;
1298 /* ct state rx direction */
1299 if (tfs->resc.rx.hw_entry[index].stride > 0)
1300 end = tfs->resc.rx.hw_entry[index].start +
1301 tfs->resc.rx.hw_entry[index].stride - 1;
1303 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1304 tfs->resc.rx.hw_entry[index].start,
1307 tfs->TF_CT_STATE_POOL_NAME_RX);
1309 /* ct state tx direction */
1310 if (tfs->resc.tx.hw_entry[index].stride > 0)
1311 end = tfs->resc.tx.hw_entry[index].start +
1312 tfs->resc.tx.hw_entry[index].stride - 1;
1314 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1315 tfs->resc.tx.hw_entry[index].start,
1318 tfs->TF_CT_STATE_POOL_NAME_TX);
1322 * Internal function to mark all the range resources allocated that
1323 * Truflow does not own.
1326 tf_rm_rsvd_range(struct tf_session *tfs)
1328 uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;
1331 /* range profile rx direction */
1332 if (tfs->resc.rx.hw_entry[index].stride > 0)
1333 end = tfs->resc.rx.hw_entry[index].start +
1334 tfs->resc.rx.hw_entry[index].stride - 1;
1336 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1337 tfs->resc.rx.hw_entry[index].start,
1340 tfs->TF_RANGE_PROF_POOL_NAME_RX);
1342 /* range profile tx direction */
1343 if (tfs->resc.tx.hw_entry[index].stride > 0)
1344 end = tfs->resc.tx.hw_entry[index].start +
1345 tfs->resc.tx.hw_entry[index].stride - 1;
1347 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1348 tfs->resc.tx.hw_entry[index].start,
1351 tfs->TF_RANGE_PROF_POOL_NAME_TX);
1353 index = TF_RESC_TYPE_HW_RANGE_ENTRY;
1355 /* range entry rx direction */
1356 if (tfs->resc.rx.hw_entry[index].stride > 0)
1357 end = tfs->resc.rx.hw_entry[index].start +
1358 tfs->resc.rx.hw_entry[index].stride - 1;
1360 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1361 tfs->resc.rx.hw_entry[index].start,
1364 tfs->TF_RANGE_ENTRY_POOL_NAME_RX);
1366 /* range entry tx direction */
1367 if (tfs->resc.tx.hw_entry[index].stride > 0)
1368 end = tfs->resc.tx.hw_entry[index].start +
1369 tfs->resc.tx.hw_entry[index].stride - 1;
1371 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1372 tfs->resc.tx.hw_entry[index].start,
1375 tfs->TF_RANGE_ENTRY_POOL_NAME_TX);
1379 * Internal function to mark all the lag resources allocated that
1380 * Truflow does not own.
1383 tf_rm_rsvd_lag_entry(struct tf_session *tfs)
1385 uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;
1388 /* lag entry rx direction */
1389 if (tfs->resc.rx.hw_entry[index].stride > 0)
1390 end = tfs->resc.rx.hw_entry[index].start +
1391 tfs->resc.rx.hw_entry[index].stride - 1;
1393 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
1394 tfs->resc.rx.hw_entry[index].start,
1397 tfs->TF_LAG_ENTRY_POOL_NAME_RX);
1399 /* lag entry tx direction */
1400 if (tfs->resc.tx.hw_entry[index].stride > 0)
1401 end = tfs->resc.tx.hw_entry[index].start +
1402 tfs->resc.tx.hw_entry[index].stride - 1;
1404 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
1405 tfs->resc.tx.hw_entry[index].start,
1408 tfs->TF_LAG_ENTRY_POOL_NAME_TX);
1412  * Internal function to mark all the full action resources allocated
1413  * that Truflow does not own.
/* NOTE(review): elided listing -- specifier line, braces, the 'end'
 * declaration and some tf_rm_reserve_range() arguments are on
 * omitted source lines.
 */
1416 tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
1418 uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
1421 /* full action rx direction */
1422 if (tfs->resc.rx.sram_entry[index].stride > 0)
1423 end = tfs->resc.rx.sram_entry[index].start +
1424 tfs->resc.rx.sram_entry[index].stride - 1;
/* Reservation starts at the static begin index, not the session start */
1426 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1427 TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
1429 TF_RSVD_SRAM_FULL_ACTION_RX,
1430 tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);
1432 /* full action tx direction */
1433 if (tfs->resc.tx.sram_entry[index].stride > 0)
1434 end = tfs->resc.tx.sram_entry[index].start +
1435 tfs->resc.tx.sram_entry[index].stride - 1;
1437 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1438 TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
1440 TF_RSVD_SRAM_FULL_ACTION_TX,
1441 tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
1445  * Internal function to mark all the multicast group resources
1446  * allocated that Truflow does not own.
/* NOTE(review): elided listing -- specifier line, braces, 'end'
 * declaration and some call arguments are on omitted lines.
 */
1449 tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
1451 uint32_t index = TF_RESC_TYPE_SRAM_MCG;
1454 /* multicast group rx direction */
1455 if (tfs->resc.rx.sram_entry[index].stride > 0)
1456 end = tfs->resc.rx.sram_entry[index].start +
1457 tfs->resc.rx.sram_entry[index].stride - 1;
1459 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1460 TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
1462 TF_RSVD_SRAM_MCG_RX,
1463 tfs->TF_SRAM_MCG_POOL_NAME_RX);
/* No TX reservation: the MCG pool exists only for RX (see ba_init calls) */
1465 /* Multicast Group on TX is not supported */
1469  * Internal function to mark all the encap resources allocated that
1470  * Truflow does not own.
/* Handles all three encap record widths (8B, 16B, 64B) by stepping
 * 'index' through the SRAM type enum.
 * NOTE(review): elided listing -- specifier line, braces, 'end'
 * declaration and some call arguments are on omitted lines.
 */
1473 tf_rm_rsvd_sram_encap(struct tf_session *tfs)
1475 uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
1478 /* encap 8b rx direction */
1479 if (tfs->resc.rx.sram_entry[index].stride > 0)
1480 end = tfs->resc.rx.sram_entry[index].start +
1481 tfs->resc.rx.sram_entry[index].stride - 1;
1483 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1484 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
1486 TF_RSVD_SRAM_ENCAP_8B_RX,
1487 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);
1489 /* encap 8b tx direction */
1490 if (tfs->resc.tx.sram_entry[index].stride > 0)
1491 end = tfs->resc.tx.sram_entry[index].start +
1492 tfs->resc.tx.sram_entry[index].stride - 1;
1494 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1495 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
1497 TF_RSVD_SRAM_ENCAP_8B_TX,
1498 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);
1500 index = TF_RESC_TYPE_SRAM_ENCAP_16B;
1502 /* encap 16b rx direction */
1503 if (tfs->resc.rx.sram_entry[index].stride > 0)
1504 end = tfs->resc.rx.sram_entry[index].start +
1505 tfs->resc.rx.sram_entry[index].stride - 1;
1507 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1508 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
1510 TF_RSVD_SRAM_ENCAP_16B_RX,
1511 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);
1513 /* encap 16b tx direction */
1514 if (tfs->resc.tx.sram_entry[index].stride > 0)
1515 end = tfs->resc.tx.sram_entry[index].start +
1516 tfs->resc.tx.sram_entry[index].stride - 1;
1518 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1519 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
1521 TF_RSVD_SRAM_ENCAP_16B_TX,
1522 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);
1524 index = TF_RESC_TYPE_SRAM_ENCAP_64B;
/* 64B encap records only exist in the TX direction (RX pool is never
 * initialized), so only the TX half is reserved here.
 */
1526 /* Encap 64B not supported on RX */
1528 /* Encap 64b tx direction */
1529 if (tfs->resc.tx.sram_entry[index].stride > 0)
1530 end = tfs->resc.tx.sram_entry[index].start +
1531 tfs->resc.tx.sram_entry[index].stride - 1;
1533 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1534 TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
1536 TF_RSVD_SRAM_ENCAP_64B_TX,
1537 tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
1541  * Internal function to mark all the sp resources allocated that
1542  * Truflow does not own.
/* Covers SP SMAC, SP SMAC IPv4 and SP SMAC IPv6 record types; the
 * IPv4/IPv6 variants are TX-only.
 * NOTE(review): elided listing -- specifier line, braces, 'end'
 * declaration and some call arguments are on omitted lines.
 */
1545 tf_rm_rsvd_sram_sp(struct tf_session *tfs)
1547 uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
1550 /* sp smac rx direction */
1551 if (tfs->resc.rx.sram_entry[index].stride > 0)
1552 end = tfs->resc.rx.sram_entry[index].start +
1553 tfs->resc.rx.sram_entry[index].stride - 1;
1555 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1556 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
1558 TF_RSVD_SRAM_SP_SMAC_RX,
1559 tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);
1561 /* sp smac tx direction */
1562 if (tfs->resc.tx.sram_entry[index].stride > 0)
1563 end = tfs->resc.tx.sram_entry[index].start +
1564 tfs->resc.tx.sram_entry[index].stride - 1;
1566 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1567 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
1569 TF_RSVD_SRAM_SP_SMAC_TX,
1570 tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);
1572 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
1574 /* SP SMAC IPv4 not supported on RX */
1576 /* sp smac ipv4 tx direction */
1577 if (tfs->resc.tx.sram_entry[index].stride > 0)
1578 end = tfs->resc.tx.sram_entry[index].start +
1579 tfs->resc.tx.sram_entry[index].stride - 1;
1581 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1582 TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
1584 TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
1585 tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);
1587 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
1589 /* SP SMAC IPv6 not supported on RX */
1591 /* sp smac ipv6 tx direction */
1592 if (tfs->resc.tx.sram_entry[index].stride > 0)
1593 end = tfs->resc.tx.sram_entry[index].start +
1594 tfs->resc.tx.sram_entry[index].stride - 1;
1596 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1597 TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
1599 TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
1600 tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
1604  * Internal function to mark all the stat resources allocated that
1605  * Truflow does not own.
/* NOTE(review): elided listing -- specifier line, braces, 'end'
 * declaration and some call arguments are on omitted lines.
 */
1608 tf_rm_rsvd_sram_stats(struct tf_session *tfs)
1610 uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
1613 /* counter 64b rx direction */
1614 if (tfs->resc.rx.sram_entry[index].stride > 0)
1615 end = tfs->resc.rx.sram_entry[index].start +
1616 tfs->resc.rx.sram_entry[index].stride - 1;
1618 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1619 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
1621 TF_RSVD_SRAM_COUNTER_64B_RX,
1622 tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);
1624 /* counter 64b tx direction */
1625 if (tfs->resc.tx.sram_entry[index].stride > 0)
1626 end = tfs->resc.tx.sram_entry[index].start +
1627 tfs->resc.tx.sram_entry[index].stride - 1;
1629 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1630 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
1632 TF_RSVD_SRAM_COUNTER_64B_TX,
1633 tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
1637  * Internal function to mark all the nat resources allocated that
1638  * Truflow does not own.
/* Covers the four NAT record types (source port, destination port,
 * source IPv4, destination IPv4), RX and TX for each.
 * NOTE(review): elided listing -- specifier line, braces, 'end'
 * declaration and some call arguments are on omitted lines.
 */
1641 tf_rm_rsvd_sram_nat(struct tf_session *tfs)
1643 uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
1646 /* nat source port rx direction */
1647 if (tfs->resc.rx.sram_entry[index].stride > 0)
1648 end = tfs->resc.rx.sram_entry[index].start +
1649 tfs->resc.rx.sram_entry[index].stride - 1;
1651 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1652 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
1654 TF_RSVD_SRAM_NAT_SPORT_RX,
1655 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);
1657 /* nat source port tx direction */
1658 if (tfs->resc.tx.sram_entry[index].stride > 0)
1659 end = tfs->resc.tx.sram_entry[index].start +
1660 tfs->resc.tx.sram_entry[index].stride - 1;
1662 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1663 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
1665 TF_RSVD_SRAM_NAT_SPORT_TX,
1666 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);
1668 index = TF_RESC_TYPE_SRAM_NAT_DPORT;
1670 /* nat destination port rx direction */
1671 if (tfs->resc.rx.sram_entry[index].stride > 0)
1672 end = tfs->resc.rx.sram_entry[index].start +
1673 tfs->resc.rx.sram_entry[index].stride - 1;
1675 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1676 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
1678 TF_RSVD_SRAM_NAT_DPORT_RX,
1679 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);
1681 /* nat destination port tx direction */
1682 if (tfs->resc.tx.sram_entry[index].stride > 0)
1683 end = tfs->resc.tx.sram_entry[index].start +
1684 tfs->resc.tx.sram_entry[index].stride - 1;
1686 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1687 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
1689 TF_RSVD_SRAM_NAT_DPORT_TX,
1690 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);
1692 index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
1694 /* nat source port ipv4 rx direction */
1695 if (tfs->resc.rx.sram_entry[index].stride > 0)
1696 end = tfs->resc.rx.sram_entry[index].start +
1697 tfs->resc.rx.sram_entry[index].stride - 1;
1699 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1700 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
1702 TF_RSVD_SRAM_NAT_S_IPV4_RX,
1703 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);
1705 /* nat source ipv4 port tx direction */
1706 if (tfs->resc.tx.sram_entry[index].stride > 0)
1707 end = tfs->resc.tx.sram_entry[index].start +
1708 tfs->resc.tx.sram_entry[index].stride - 1;
1710 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1711 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
1713 TF_RSVD_SRAM_NAT_S_IPV4_TX,
1714 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);
1716 index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
1718 /* nat destination port ipv4 rx direction */
1719 if (tfs->resc.rx.sram_entry[index].stride > 0)
1720 end = tfs->resc.rx.sram_entry[index].start +
1721 tfs->resc.rx.sram_entry[index].stride - 1;
1723 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1724 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
1726 TF_RSVD_SRAM_NAT_D_IPV4_RX,
1727 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);
1729 /* nat destination ipv4 port tx direction */
1730 if (tfs->resc.tx.sram_entry[index].stride > 0)
1731 end = tfs->resc.tx.sram_entry[index].start +
1732 tfs->resc.tx.sram_entry[index].stride - 1;
1734 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1735 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
1737 TF_RSVD_SRAM_NAT_D_IPV4_TX,
1738 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
1742  * Internal function used to validate the HW allocated resources
1743  * against the requested values.
/* Compares the firmware-allocated stride per HW type against the
 * requested count; a mismatch is logged and (presumably, on omitted
 * lines) reflected in the return code -- TODO confirm.
 * NOTE(review): elided listing -- return type, braces, loop-variable
 * declaration and the PMD_DRV_LOG()/return lines are omitted here.
 */
1746 tf_rm_hw_alloc_validate(enum tf_dir dir,
1747 struct tf_rm_hw_alloc *hw_alloc,
1748 struct tf_rm_entry *hw_entry)
1753 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
1754 if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
1756 "%s, Alloc failed id:%d expect:%d got:%d\n",
1759 hw_alloc->hw_num[i],
1760 hw_entry[i].stride);
1769  * Internal function used to validate the SRAM allocated resources
1770  * against the requested values.
/* SRAM counterpart of tf_rm_hw_alloc_validate(): checks each SRAM
 * type's allocated stride against the requested count.
 * NOTE(review): elided listing -- return type, braces, loop-variable
 * declaration and the PMD_DRV_LOG()/return lines are omitted here.
 */
1773 tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused,
1774 struct tf_rm_sram_alloc *sram_alloc,
1775 struct tf_rm_entry *sram_entry)
1780 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
1781 if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
1783 "%s, Alloc failed idx:%d expect:%d got:%d\n",
1786 sram_alloc->sram_num[i],
1787 sram_entry[i].stride);
1796  * Internal function used to mark all the HW resources allocated that
1797  * Truflow does not own.
/* Dispatcher: invokes every per-type HW reservation helper so the bit
 * allocators reflect what AFM (not Truflow) owns.
 * NOTE(review): elided listing -- return type, braces and possibly
 * further helper calls are on omitted lines.
 */
1800 tf_rm_reserve_hw(struct tf *tfp)
1802 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1805  * There is no direct AFM resource allocation as it is carved
1806  * statically at AFM boot time. Thus the bit allocators work
1807  * on the full HW resource amount and we just mark everything
1808  * used except the resources that Truflow took ownership off.
1810 tf_rm_rsvd_l2_ctxt(tfs);
1811 tf_rm_rsvd_prof(tfs);
1812 tf_rm_rsvd_em_prof(tfs);
1814 tf_rm_rsvd_mirror(tfs);
1815 tf_rm_rsvd_meter(tfs);
1816 tf_rm_rsvd_upar(tfs);
1817 tf_rm_rsvd_sp_tcam(tfs);
1818 tf_rm_rsvd_l2_func(tfs);
1819 tf_rm_rsvd_fkb(tfs);
1820 tf_rm_rsvd_tbl_scope(tfs);
1821 tf_rm_rsvd_epoch(tfs);
1822 tf_rm_rsvd_metadata(tfs);
1823 tf_rm_rsvd_ct_state(tfs);
1824 tf_rm_rsvd_range(tfs);
1825 tf_rm_rsvd_lag_entry(tfs);
1829  * Internal function used to mark all the SRAM resources allocated
1830  * that Truflow does not own.
/* SRAM counterpart of tf_rm_reserve_hw(): runs every per-type SRAM
 * reservation helper.
 * NOTE(review): elided listing -- return type and braces are on
 * omitted lines.
 */
1833 tf_rm_reserve_sram(struct tf *tfp)
1835 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1838  * There is no direct AFM resource allocation as it is carved
1839  * statically at AFM boot time. Thus the bit allocators work
1840  * on the full HW resource amount and we just mark everything
1841  * used except the resources that Truflow took ownership off.
1843 tf_rm_rsvd_sram_full_action(tfs);
1844 tf_rm_rsvd_sram_mcg(tfs);
1845 tf_rm_rsvd_sram_encap(tfs);
1846 tf_rm_rsvd_sram_sp(tfs);
1847 tf_rm_rsvd_sram_stats(tfs);
1848 tf_rm_rsvd_sram_nat(tfs);
1852  * Internal function used to allocate and validate all HW resources.
/* Flow: qcaps query -> static-capability check -> copy caps into the
 * alloc request -> alloc message -> post-alloc validation. Results
 * land in the session's rx/tx hw_entry table chosen by 'dir'.
 * NOTE(review): elided listing -- return type, braces, 'rc'/'i'
 * declarations, error-path bodies and the final return are omitted.
 */
1855 tf_rm_allocate_validate_hw(struct tf *tfp,
1860 struct tf_rm_hw_query hw_query;
1861 struct tf_rm_hw_alloc hw_alloc;
1862 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1863 struct tf_rm_entry *hw_entries;
1864 uint32_t error_flag;
1866 if (dir == TF_DIR_RX)
1867 hw_entries = tfs->resc.rx.hw_entry;
1869 hw_entries = tfs->resc.tx.hw_entry;
1871 /* Query for Session HW Resources */
1872 rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
1876 "%s, HW qcaps message send failed\n",
1881 rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
1885 "%s, HW QCAPS validation failed, error_flag:0x%x\n",
1888 tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
1892 /* Post process HW capability */
1893 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
1894 hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
1896 /* Allocate Session HW Resources */
1897 rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
1901 "%s, HW alloc message send failed\n",
1906 /* Perform HW allocation validation as its possible the
1907  * resource availability changed between qcaps and alloc
1909 rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
1913 "%s, HW Resource validation failed\n",
1925  * Internal function used to allocate and validate all SRAM resources.
1928  * Pointer to TF handle
1931  * Receive or transmit direction
1935  * -1 - Internal error
/* SRAM counterpart of tf_rm_allocate_validate_hw(): qcaps -> static
 * check -> alloc -> validation against the session sram_entry table.
 * NOTE(review): elided listing -- return type, braces, 'rc'/'i'
 * declarations, error-path bodies, some alloc-call arguments and the
 * final return are omitted.
 */
1938 tf_rm_allocate_validate_sram(struct tf *tfp,
1943 struct tf_rm_sram_query sram_query;
1944 struct tf_rm_sram_alloc sram_alloc;
1945 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1946 struct tf_rm_entry *sram_entries;
1947 uint32_t error_flag;
1949 if (dir == TF_DIR_RX)
1950 sram_entries = tfs->resc.rx.sram_entry;
1952 sram_entries = tfs->resc.tx.sram_entry;
1954 /* Query for Session SRAM Resources */
1955 rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
1959 "%s, SRAM qcaps message send failed\n",
1964 rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
1968 "%s, SRAM QCAPS validation failed, error_flag:%x\n",
1971 tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
1975 /* Post process SRAM capability */
1976 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
1977 sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
1979 /* Allocate Session SRAM Resources */
1980 rc = tf_msg_session_sram_resc_alloc(tfp,
1987 "%s, SRAM alloc message send failed\n",
1992 /* Perform SRAM allocation validation as its possible the
1993  * resource availability changed between qcaps and alloc
1995 rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
1999 "%s, SRAM Resource allocation validation failed\n",
2011  * Helper function used to prune a HW resource array to only hold
2012  * elements that needs to be flushed.
2018  * Receive or transmit direction
2021  * Master HW Resource database
2023  * [in/out] flush_entries
2024  * Pruned HW Resource database of entries to be flushed. This
2025  * array should be passed in as a complete copy of the master HW
2026  * Resource database. The outgoing result will be a pruned version
2027  * based on the result of the requested checking
2030  * 0 - Success, no flush required
2031  * 1 - Success, flush required
2032  * -1 - Internal error
/* Per pool: when the pool's free count equals the owned stride the
 * pool is fully unused, so its flush entry is zeroed (nothing to
 * flush). The else branches (presumably marking "flush required")
 * are on omitted lines -- TODO confirm.
 * NOTE(review): elided listing -- return type, braces, 'free_cnt'/
 * 'flush_rc' declarations, TF_RM_GET_POOLS error handling and else
 * bodies are omitted throughout.
 */
2035 tf_rm_hw_to_flush(struct tf_session *tfs,
2037 struct tf_rm_entry *hw_entries,
2038 struct tf_rm_entry *flush_entries)
2043 struct bitalloc *pool;
2045 /* Check all the hw resource pools and check for left over
2046  * elements. Any found will result in the complete pool of a
2047  * type to get invalidated.
2050 TF_RM_GET_POOLS(tfs, dir, &pool,
2051 TF_L2_CTXT_TCAM_POOL_NAME,
2055 free_cnt = ba_free_count(pool);
2056 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {
2057 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;
2058 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;
2063 TF_RM_GET_POOLS(tfs, dir, &pool,
2064 TF_PROF_FUNC_POOL_NAME,
2068 free_cnt = ba_free_count(pool);
2069 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {
2070 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;
2071 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;
2076 TF_RM_GET_POOLS(tfs, dir, &pool,
2077 TF_PROF_TCAM_POOL_NAME,
2081 free_cnt = ba_free_count(pool);
2082 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {
2083 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;
2084 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;
2089 TF_RM_GET_POOLS(tfs, dir, &pool,
2090 TF_EM_PROF_ID_POOL_NAME,
2094 free_cnt = ba_free_count(pool);
2095 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) {
2096 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;
2097 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;
/* EM records are not pool-managed; always pruned from the flush set */
2102 flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;
2103 flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;
2105 TF_RM_GET_POOLS(tfs, dir, &pool,
2106 TF_WC_TCAM_PROF_ID_POOL_NAME,
2110 free_cnt = ba_free_count(pool);
2111 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {
2112 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;
2113 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;
2118 TF_RM_GET_POOLS(tfs, dir, &pool,
2119 TF_WC_TCAM_POOL_NAME,
2123 free_cnt = ba_free_count(pool);
2124 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {
2125 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;
2126 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;
2131 TF_RM_GET_POOLS(tfs, dir, &pool,
2132 TF_METER_PROF_POOL_NAME,
2136 free_cnt = ba_free_count(pool);
2137 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {
2138 flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;
2139 flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;
2144 TF_RM_GET_POOLS(tfs, dir, &pool,
2145 TF_METER_INST_POOL_NAME,
2149 free_cnt = ba_free_count(pool);
2150 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {
2151 flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;
2152 flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;
2157 TF_RM_GET_POOLS(tfs, dir, &pool,
2158 TF_MIRROR_POOL_NAME,
2162 free_cnt = ba_free_count(pool);
2163 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) {
2164 flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;
2165 flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;
2170 TF_RM_GET_POOLS(tfs, dir, &pool,
2175 free_cnt = ba_free_count(pool);
2176 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {
2177 flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;
2178 flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;
2183 TF_RM_GET_POOLS(tfs, dir, &pool,
2184 TF_SP_TCAM_POOL_NAME,
2188 free_cnt = ba_free_count(pool);
2189 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {
2190 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;
2191 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;
2196 TF_RM_GET_POOLS(tfs, dir, &pool,
2197 TF_L2_FUNC_POOL_NAME,
2201 free_cnt = ba_free_count(pool);
2202 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {
2203 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;
2204 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;
2209 TF_RM_GET_POOLS(tfs, dir, &pool,
2214 free_cnt = ba_free_count(pool);
2215 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {
2216 flush_entries[TF_RESC_TYPE_HW_FKB].start = 0;
2217 flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;
2222 TF_RM_GET_POOLS(tfs, dir, &pool,
2223 TF_TBL_SCOPE_POOL_NAME,
2227 free_cnt = ba_free_count(pool);
2228 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {
2229 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
2230 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
/* TBL_SCOPE is the only type that logs when entries are left over */
2232 PMD_DRV_LOG(ERR, "%s: TBL_SCOPE free_cnt:%d, entries:%d\n",
2235 hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
2239 TF_RM_GET_POOLS(tfs, dir, &pool,
2240 TF_EPOCH0_POOL_NAME,
2244 free_cnt = ba_free_count(pool);
2245 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {
2246 flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;
2247 flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;
2252 TF_RM_GET_POOLS(tfs, dir, &pool,
2253 TF_EPOCH1_POOL_NAME,
2257 free_cnt = ba_free_count(pool);
2258 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {
2259 flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;
2260 flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;
2265 TF_RM_GET_POOLS(tfs, dir, &pool,
2266 TF_METADATA_POOL_NAME,
2270 free_cnt = ba_free_count(pool);
2271 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {
2272 flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;
2273 flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;
2278 TF_RM_GET_POOLS(tfs, dir, &pool,
2279 TF_CT_STATE_POOL_NAME,
2283 free_cnt = ba_free_count(pool);
2284 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {
2285 flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;
2286 flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;
2291 TF_RM_GET_POOLS(tfs, dir, &pool,
2292 TF_RANGE_PROF_POOL_NAME,
2296 free_cnt = ba_free_count(pool);
2297 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {
2298 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;
2299 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;
2304 TF_RM_GET_POOLS(tfs, dir, &pool,
2305 TF_RANGE_ENTRY_POOL_NAME,
2309 free_cnt = ba_free_count(pool);
2310 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {
2311 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;
2312 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;
2317 TF_RM_GET_POOLS(tfs, dir, &pool,
2318 TF_LAG_ENTRY_POOL_NAME,
2322 free_cnt = ba_free_count(pool);
2323 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {
2324 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;
2325 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;
2334  * Helper function used to prune a SRAM resource array to only hold
2335  * elements that needs to be flushed.
2341  * Receive or transmit direction
2344  * Master SRAM Resource data base
2346  * [in/out] flush_entries
2347  * Pruned SRAM Resource database of entries to be flushed. This
2348  * array should be passed in as a complete copy of the master SRAM
2349  * Resource database. The outgoing result will be a pruned version
2350  * based on the result of the requested checking
2353  * 0 - Success, no flush required
2354  * 1 - Success, flush required
2355  * -1 - Internal error
/* SRAM counterpart of tf_rm_hw_to_flush(): a fully free pool gets its
 * flush entry zeroed. Direction-restricted types (MCG RX-only;
 * ENCAP_64B / SP_SMAC_IPV4 / SP_SMAC_IPV6 TX-only) are checked only
 * in their supported direction and unconditionally pruned otherwise.
 * NOTE(review): elided listing -- return type, braces, 'free_cnt'/
 * 'flush_rc' declarations, TF_RM_GET_POOLS error handling and else
 * bodies are omitted throughout.
 */
2358 tf_rm_sram_to_flush(struct tf_session *tfs,
2360 struct tf_rm_entry *sram_entries,
2361 struct tf_rm_entry *flush_entries)
2366 struct bitalloc *pool;
2368 /* Check all the sram resource pools and check for left over
2369  * elements. Any found will result in the complete pool of a
2370  * type to get invalidated.
2373 TF_RM_GET_POOLS(tfs, dir, &pool,
2374 TF_SRAM_FULL_ACTION_POOL_NAME,
2378 free_cnt = ba_free_count(pool);
2379 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
2380 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
2381 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
2386 /* Only pools for RX direction */
2387 if (dir == TF_DIR_RX) {
2388 TF_RM_GET_POOLS_RX(tfs, &pool,
2389 TF_SRAM_MCG_POOL_NAME);
2392 free_cnt = ba_free_count(pool);
2393 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
2394 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2395 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2400 /* Always prune TX direction */
2401 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2402 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2405 TF_RM_GET_POOLS(tfs, dir, &pool,
2406 TF_SRAM_ENCAP_8B_POOL_NAME,
2410 free_cnt = ba_free_count(pool);
2411 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
2412 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
2413 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
2418 TF_RM_GET_POOLS(tfs, dir, &pool,
2419 TF_SRAM_ENCAP_16B_POOL_NAME,
2423 free_cnt = ba_free_count(pool);
2424 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
2425 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
2426 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
2431 /* Only pools for TX direction */
2432 if (dir == TF_DIR_TX) {
2433 TF_RM_GET_POOLS_TX(tfs, &pool,
2434 TF_SRAM_ENCAP_64B_POOL_NAME);
2437 free_cnt = ba_free_count(pool);
2439 sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
2440 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2441 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2446 /* Always prune RX direction */
2447 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2448 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2451 TF_RM_GET_POOLS(tfs, dir, &pool,
2452 TF_SRAM_SP_SMAC_POOL_NAME,
2456 free_cnt = ba_free_count(pool);
2457 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
2458 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
2459 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
2464 /* Only pools for TX direction */
2465 if (dir == TF_DIR_TX) {
2466 TF_RM_GET_POOLS_TX(tfs, &pool,
2467 TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
2470 free_cnt = ba_free_count(pool);
2472 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
2473 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2474 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
2480 /* Always prune RX direction */
2481 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2482 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
2485 /* Only pools for TX direction */
2486 if (dir == TF_DIR_TX) {
2487 TF_RM_GET_POOLS_TX(tfs, &pool,
2488 TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
2491 free_cnt = ba_free_count(pool);
2493 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
2494 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2495 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
2501 /* Always prune RX direction */
2502 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2503 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
2506 TF_RM_GET_POOLS(tfs, dir, &pool,
2507 TF_SRAM_STATS_64B_POOL_NAME,
2511 free_cnt = ba_free_count(pool);
2512 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
2513 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
2514 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
2519 TF_RM_GET_POOLS(tfs, dir, &pool,
2520 TF_SRAM_NAT_SPORT_POOL_NAME,
2524 free_cnt = ba_free_count(pool);
2525 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
2526 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
2527 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
2532 TF_RM_GET_POOLS(tfs, dir, &pool,
2533 TF_SRAM_NAT_DPORT_POOL_NAME,
2537 free_cnt = ba_free_count(pool);
2538 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
2539 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
2540 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
2545 TF_RM_GET_POOLS(tfs, dir, &pool,
2546 TF_SRAM_NAT_S_IPV4_POOL_NAME,
2550 free_cnt = ba_free_count(pool);
2551 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
2552 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
2553 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
2558 TF_RM_GET_POOLS(tfs, dir, &pool,
2559 TF_SRAM_NAT_D_IPV4_POOL_NAME,
2563 free_cnt = ba_free_count(pool);
2564 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
2565 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
2566 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
2575  * Helper function used to generate an error log for the HW types that
2576  * needs to be flushed. The types should have been cleaned up ahead of
2577  * invoking tf_close_session.
2580  * HW Resource database holding elements to be flushed
/* Any non-zero stride in the (pruned) flush array means the caller
 * left entries of that type allocated; each such type is logged.
 * NOTE(review): elided listing -- return type, braces, loop-variable
 * declaration and the PMD_DRV_LOG() invocation line are omitted.
 */
2583 tf_rm_log_hw_flush(enum tf_dir dir,
2584 struct tf_rm_entry *hw_entries)
2588 /* Walk the hw flush array and log the types that wasn't
2591 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
2592 if (hw_entries[i].stride != 0)
2594 "%s: %s was not cleaned up\n",
2596 tf_hcapi_hw_2_str(i));
2601  * Helper function used to generate an error log for the SRAM types
2602  * that needs to be flushed. The types should have been cleaned up
2603  * ahead of invoking tf_close_session.
2606  * SRAM Resource database holding elements to be flushed
/* SRAM counterpart of tf_rm_log_hw_flush(): logs every SRAM type
 * with a non-zero stride remaining in the flush array.
 * NOTE(review): elided listing -- return type, braces, loop-variable
 * declaration and the PMD_DRV_LOG() invocation line are omitted.
 */
2609 tf_rm_log_sram_flush(enum tf_dir dir,
2610 struct tf_rm_entry *sram_entries)
2614 /* Walk the sram flush array and log the types that wasn't
2617 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
2618 if (sram_entries[i].stride != 0)
2620 "%s: %s was not cleaned up\n",
2622 tf_hcapi_sram_2_str(i));
2627 tf_rm_init(struct tf *tfp __rte_unused)
2629 struct tf_session *tfs =
2630 (struct tf_session *)(tfp->session->core_data);
2632 /* This version is host specific and should be checked against
2633 * when attaching as there is no guarantee that a secondary
2634 * would run from same image version.
2636 tfs->ver.major = TF_SESSION_VER_MAJOR;
2637 tfs->ver.minor = TF_SESSION_VER_MINOR;
2638 tfs->ver.update = TF_SESSION_VER_UPDATE;
2640 tfs->session_id.id = 0;
2643 /* Initialization of Table Scopes */
2644 /* ll_init(&tfs->tbl_scope_ll); */
2646 /* Initialization of HW and SRAM resource DB */
2647 memset(&tfs->resc, 0, sizeof(struct tf_rm_db));
2649 /* Initialization of HW Resource Pools */
2650 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2651 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2652 ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
2653 ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
2654 ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
2655 ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
2656 ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
2657 ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
2659 /* TBD, how do we want to handle EM records ?*/
2660 /* EM Records should not be controlled by way of a pool */
2662 ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
2663 ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
2664 ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
2665 ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
2666 ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
2667 ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
2668 ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
2669 ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
2670 ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
2671 ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
2672 ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
2673 ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
2675 ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
2676 ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
2678 ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
2679 ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
2681 ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
2682 ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
2683 ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
2684 ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
2685 ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
2686 ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
2687 ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
2688 ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
2689 ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
2690 ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
2691 ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
2692 ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
2693 ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
2694 ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);
2695 ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);
2696 ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);
2697 ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);
2698 ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);
2700 /* Initialization of SRAM Resource Pools
2701 * These pools are set to the TFLIB defined MAX sizes not
2702 * AFM's HW max as to limit the memory consumption
2704 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
2705 TF_RSVD_SRAM_FULL_ACTION_RX);
2706 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
2707 TF_RSVD_SRAM_FULL_ACTION_TX);
2708 /* Only Multicast Group on RX is supported */
2709 ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
2710 TF_RSVD_SRAM_MCG_RX);
2711 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
2712 TF_RSVD_SRAM_ENCAP_8B_RX);
2713 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
2714 TF_RSVD_SRAM_ENCAP_8B_TX);
2715 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
2716 TF_RSVD_SRAM_ENCAP_16B_RX);
2717 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
2718 TF_RSVD_SRAM_ENCAP_16B_TX);
2719 /* Only Encap 64B on TX is supported */
2720 ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
2721 TF_RSVD_SRAM_ENCAP_64B_TX);
2722 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
2723 TF_RSVD_SRAM_SP_SMAC_RX);
2724 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
2725 TF_RSVD_SRAM_SP_SMAC_TX);
2726 /* Only SP SMAC IPv4 on TX is supported */
2727 ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
2728 TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
2729 /* Only SP SMAC IPv6 on TX is supported */
2730 ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
2731 TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
2732 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
2733 TF_RSVD_SRAM_COUNTER_64B_RX);
2734 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
2735 TF_RSVD_SRAM_COUNTER_64B_TX);
2736 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
2737 TF_RSVD_SRAM_NAT_SPORT_RX);
2738 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
2739 TF_RSVD_SRAM_NAT_SPORT_TX);
2740 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
2741 TF_RSVD_SRAM_NAT_DPORT_RX);
2742 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
2743 TF_RSVD_SRAM_NAT_DPORT_TX);
2744 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
2745 TF_RSVD_SRAM_NAT_S_IPV4_RX);
2746 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
2747 TF_RSVD_SRAM_NAT_S_IPV4_TX);
2748 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
2749 TF_RSVD_SRAM_NAT_D_IPV4_RX);
2750 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
2751 TF_RSVD_SRAM_NAT_D_IPV4_TX);
2753 /* Initialization of pools local to TF Core */
2754 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2755 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
/**
 * tf_rm_allocate_validate - Allocate and validate HW and SRAM resources
 * for both directions (RX and TX), then reserve the pools.
 *
 * [in] tfp
 *   Pointer to TF handle
 *
 * For each direction the HW resources are allocated/validated first,
 * then the SRAM resources.
 *
 * NOTE(review): this view of the file is elided — the error checks
 * between the numbered statements (and the function's return) are not
 * visible here; confirm against the full source before modifying.
 */
2759 tf_rm_allocate_validate(struct tf *tfp)
2764 for (i = 0; i < TF_DIR_MAX; i++) {
2765 rc = tf_rm_allocate_validate_hw(tfp, i);
2768 rc = tf_rm_allocate_validate_sram(tfp, i);
2773 /* With both HW and SRAM allocated and validated we can
2774 * 'scrub' the reservation on the pools.
/* Reservation scrub: mark the statically reserved entries as
 * in-use in the just-initialized pools.
 */
2776 tf_rm_reserve_hw(tfp);
2777 tf_rm_reserve_sram(tfp);
/**
 * tf_rm_close - Release all session RM resources back to firmware.
 *
 * [in] tfp
 *   Pointer to TF handle
 *
 * For each direction (RX, TX) this:
 *   1. Computes which HW entries were not returned to their pools and
 *      flushes them via tf_msg_session_hw_resc_flush().
 *   2. Does the same for SRAM entries via
 *      tf_msg_session_sram_resc_flush().
 *   3. Frees the full HW and SRAM reservations with
 *      tf_msg_session_hw_resc_free() / tf_msg_session_sram_resc_free().
 *
 * Lingering (un-freed) resources are logged and cause the close result
 * to be -ENOTEMPTY, but processing continues so everything is still
 * released.
 *
 * NOTE(review): this view of the file is elided — several error-check
 * and logging lines between the numbered statements are not visible.
 */
2783 tf_rm_close(struct tf *tfp)
2788 struct tf_rm_entry *hw_entries;
2789 struct tf_rm_entry *hw_flush_entries;
2790 struct tf_rm_entry *sram_entries;
2791 struct tf_rm_entry *sram_flush_entries;
2792 struct tf_session *tfs __rte_unused =
2793 (struct tf_session *)(tfp->session->core_data);
/* Working copy of the session's resource DB; the flush helpers
 * adjust these entries to describe only what must be flushed.
 */
2795 struct tf_rm_db flush_resc = tfs->resc;
2797 /* On close it is assumed that the session has already cleaned
2798 * up all its resources, individually, while destroying its
2799 * flows. No checking is performed thus the behavior is as
2802 * Session RM will signal FW to release session resources. FW
2803 * will perform invalidation of all the allocated entries
2804 * (assures any outstanding resources has been cleared, then
2805 * free the FW RM instance.
2807 * Session will then be freed by tf_close_session() thus there
2808 * is no need to clean each resource pool as the whole session
2812 for (i = 0; i < TF_DIR_MAX; i++) {
/* Select the per-direction entry arrays. */
2813 if (i == TF_DIR_RX) {
2814 hw_entries = tfs->resc.rx.hw_entry;
2815 hw_flush_entries = flush_resc.rx.hw_entry;
2816 sram_entries = tfs->resc.rx.sram_entry;
2817 sram_flush_entries = flush_resc.rx.sram_entry;
2819 hw_entries = tfs->resc.tx.hw_entry;
2820 hw_flush_entries = flush_resc.tx.hw_entry;
2821 sram_entries = tfs->resc.tx.sram_entry;
2822 sram_flush_entries = flush_resc.tx.sram_entry;
2825 /* Check for any not previously freed HW resources and
2826 * flush if required.
2828 rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries)
2830 rc_close = -ENOTEMPTY;
2833 "%s, lingering HW resources\n",
2836 /* Log the entries to be flushed */
2837 tf_rm_log_hw_flush(i, hw_flush_entries);
2838 rc = tf_msg_session_hw_resc_flush(tfp,
2845 "%s, HW flush failed\n",
2850 /* Check for any not previously freed SRAM resources
2851 * and flush if required.
2853 rc = tf_rm_sram_to_flush(tfs,
2856 sram_flush_entries);
2858 rc_close = -ENOTEMPTY;
2861 "%s, lingering SRAM resources\n",
2864 /* Log the entries to be flushed */
2865 tf_rm_log_sram_flush(i, sram_flush_entries);
2867 rc = tf_msg_session_sram_resc_flush(tfp,
2869 sram_flush_entries);
/* NOTE(review): this is the SRAM flush path but the message
 * below says "HW flush failed" — looks like a copy/paste of
 * the HW branch; should likely read "SRAM flush failed".
 * Cannot change a runtime string in a doc-only pass.
 */
2874 "%s, HW flush failed\n",
/* Unconditionally return the full reservations to FW. */
2879 rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
2884 "%s, HW free failed\n",
2888 rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
2893 "%s, SRAM free failed\n",
/**
 * tf_rm_shadow_db_init - Initialize the shadow resource DB for the
 * session. Compiled only when shadow-copy support is enabled
 * (TF_SHADOW == 1).
 *
 * [in] tfs
 *   Pointer to TF session
 *
 * NOTE(review): the function body is not visible in this elided view;
 * only the conditional-compilation shell is shown.
 */
2901 #if (TF_SHADOW == 1)
2903 tf_rm_shadow_db_init(struct tf_session *tfs)
2909 #endif /* TF_SHADOW */
/**
 * tf_rm_lookup_tcam_type_pool - Resolve a TCAM table type to its
 * direction-specific bitalloc pool within the session.
 *
 * [in] tfs
 *   Pointer to TF session
 * [in] dir
 *   Direction (RX/TX) selecting which pool variant to return
 * [in] type
 *   TCAM table type to look up
 * [out] pool
 *   Returned pointer to the session's bitalloc pool for this type/dir
 *
 * Supported: L2 context, profile and wildcard TCAMs (via
 * TF_RM_GET_POOLS). VEB, SP and CT-rule TCAMs fall through as
 * unsupported. Returns -EOPNOTSUPP for unsupported types; a lookup
 * failure (rc == -1) is logged separately.
 *
 * NOTE(review): elided view — the switch statement, break statements
 * and return are not visible between the numbered lines.
 */
2912 tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
2914 enum tf_tcam_tbl_type type,
2915 struct bitalloc **pool)
/* Default: type not handled by any case below. */
2917 int rc = -EOPNOTSUPP;
2922 case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
2923 TF_RM_GET_POOLS(tfs, dir, pool,
2924 TF_L2_CTXT_TCAM_POOL_NAME,
2927 case TF_TCAM_TBL_TYPE_PROF_TCAM:
2928 TF_RM_GET_POOLS(tfs, dir, pool,
2929 TF_PROF_TCAM_POOL_NAME,
2932 case TF_TCAM_TBL_TYPE_WC_TCAM:
2933 TF_RM_GET_POOLS(tfs, dir, pool,
2934 TF_WC_TCAM_POOL_NAME,
/* Not yet supported TCAM types — fall through to default rc. */
2937 case TF_TCAM_TBL_TYPE_VEB_TCAM:
2938 case TF_TCAM_TBL_TYPE_SP_TCAM:
2939 case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
2944 if (rc == -EOPNOTSUPP) {
2946 "dir:%d, Tcam type not supported, type:%d\n",
2950 } else if (rc == -1) {
/* NOTE(review): "%s:," prefix is inconsistent with the
 * "dir:%d," style used in the message above — presumably a
 * leftover; runtime strings left untouched in a doc-only pass.
 */
2952 "%s:, Tcam type lookup failed, type:%d\n",
/**
 * tf_rm_lookup_tbl_type_pool - Resolve an index-table type to its
 * direction-specific bitalloc pool within the session.
 *
 * [in] tfs
 *   Pointer to TF session
 * [in] dir
 *   Direction (RX/TX) selecting which pool variant to return
 * [in] type
 *   Table type to look up
 * [out] pool
 *   Returned pointer to the session's bitalloc pool for this type/dir
 *
 * Most types resolve via TF_RM_GET_POOLS for either direction.
 * Direction-restricted types bail out early: MCAST_GROUPS exists only
 * on RX; ENCAP_64B, SP_SMAC_IPV4 and SP_SMAC_IPV6 only on TX.
 * Unhandled types leave rc at -EOPNOTSUPP; a lookup failure
 * (rc == -1) is logged separately.
 *
 * NOTE(review): elided view — the switch statement, break statements,
 * early "goto/return" lines for the bail-outs and the final return are
 * not visible between the numbered lines.
 */
2962 tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
2964 enum tf_tbl_type type,
2965 struct bitalloc **pool)
/* Default: type not handled by any case below. */
2967 int rc = -EOPNOTSUPP;
2972 case TF_TBL_TYPE_FULL_ACT_RECORD:
2973 TF_RM_GET_POOLS(tfs, dir, pool,
2974 TF_SRAM_FULL_ACTION_POOL_NAME,
2977 case TF_TBL_TYPE_MCAST_GROUPS:
2978 /* No pools for TX direction, so bail out */
2979 if (dir == TF_DIR_TX)
2981 TF_RM_GET_POOLS_RX(tfs, pool,
2982 TF_SRAM_MCG_POOL_NAME);
2985 case TF_TBL_TYPE_ACT_ENCAP_8B:
2986 TF_RM_GET_POOLS(tfs, dir, pool,
2987 TF_SRAM_ENCAP_8B_POOL_NAME,
2990 case TF_TBL_TYPE_ACT_ENCAP_16B:
2991 TF_RM_GET_POOLS(tfs, dir, pool,
2992 TF_SRAM_ENCAP_16B_POOL_NAME,
2995 case TF_TBL_TYPE_ACT_ENCAP_64B:
2996 /* No pools for RX direction, so bail out */
2997 if (dir == TF_DIR_RX)
2999 TF_RM_GET_POOLS_TX(tfs, pool,
3000 TF_SRAM_ENCAP_64B_POOL_NAME);
3003 case TF_TBL_TYPE_ACT_SP_SMAC:
3004 TF_RM_GET_POOLS(tfs, dir, pool,
3005 TF_SRAM_SP_SMAC_POOL_NAME,
3008 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
/* NOTE(review): comment says "No pools for TX" but the guard
 * below bails on RX and the pool fetched is the TX one — the
 * comment text appears wrong (should read RX); code left as-is.
 */
3009 /* No pools for TX direction, so bail out */
3010 if (dir == TF_DIR_RX)
3012 TF_RM_GET_POOLS_TX(tfs, pool,
3013 TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
3016 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
/* NOTE(review): same mismatch as the IPV4 case above — the
 * guard and pool are TX-only, so "No pools for TX" should
 * presumably read "No pools for RX".
 */
3017 /* No pools for TX direction, so bail out */
3018 if (dir == TF_DIR_RX)
3020 TF_RM_GET_POOLS_TX(tfs, pool,
3021 TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
3024 case TF_TBL_TYPE_ACT_STATS_64:
3025 TF_RM_GET_POOLS(tfs, dir, pool,
3026 TF_SRAM_STATS_64B_POOL_NAME,
3029 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3030 TF_RM_GET_POOLS(tfs, dir, pool,
3031 TF_SRAM_NAT_SPORT_POOL_NAME,
3034 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3035 TF_RM_GET_POOLS(tfs, dir, pool,
3036 TF_SRAM_NAT_S_IPV4_POOL_NAME,
3039 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3040 TF_RM_GET_POOLS(tfs, dir, pool,
3041 TF_SRAM_NAT_D_IPV4_POOL_NAME,
3044 case TF_TBL_TYPE_METER_PROF:
3045 TF_RM_GET_POOLS(tfs, dir, pool,
3046 TF_METER_PROF_POOL_NAME,
3049 case TF_TBL_TYPE_METER_INST:
3050 TF_RM_GET_POOLS(tfs, dir, pool,
3051 TF_METER_INST_POOL_NAME,
3054 case TF_TBL_TYPE_MIRROR_CONFIG:
3055 TF_RM_GET_POOLS(tfs, dir, pool,
3056 TF_MIRROR_POOL_NAME,
3059 case TF_TBL_TYPE_UPAR:
3060 TF_RM_GET_POOLS(tfs, dir, pool,
3064 case TF_TBL_TYPE_EPOCH0:
3065 TF_RM_GET_POOLS(tfs, dir, pool,
3066 TF_EPOCH0_POOL_NAME,
3069 case TF_TBL_TYPE_EPOCH1:
3070 TF_RM_GET_POOLS(tfs, dir, pool,
3071 TF_EPOCH1_POOL_NAME,
3074 case TF_TBL_TYPE_METADATA:
3075 TF_RM_GET_POOLS(tfs, dir, pool,
3076 TF_METADATA_POOL_NAME,
3079 case TF_TBL_TYPE_CT_STATE:
3080 TF_RM_GET_POOLS(tfs, dir, pool,
3081 TF_CT_STATE_POOL_NAME,
3084 case TF_TBL_TYPE_RANGE_PROF:
3085 TF_RM_GET_POOLS(tfs, dir, pool,
3086 TF_RANGE_PROF_POOL_NAME,
3089 case TF_TBL_TYPE_RANGE_ENTRY:
3090 TF_RM_GET_POOLS(tfs, dir, pool,
3091 TF_RANGE_ENTRY_POOL_NAME,
3094 case TF_TBL_TYPE_LAG:
3095 TF_RM_GET_POOLS(tfs, dir, pool,
3096 TF_LAG_ENTRY_POOL_NAME,
3099 /* Not yet supported */
3100 case TF_TBL_TYPE_ACT_ENCAP_32B:
3101 case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3102 case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3103 case TF_TBL_TYPE_VNIC_SVIF:
3105 /* No bitalloc pools for these types */
3106 case TF_TBL_TYPE_EXT:
3107 case TF_TBL_TYPE_EXT_0:
3112 if (rc == -EOPNOTSUPP) {
3114 "dir:%d, Table type not supported, type:%d\n",
3118 } else if (rc == -1) {
3120 "dir:%d, Table type lookup failed, type:%d\n",
/**
 * tf_rm_convert_tbl_type - Map a TF table type to its HCAPI resource
 * type index (TF_RESC_TYPE_SRAM_* for SRAM-backed tables,
 * TF_RESC_TYPE_HW_* for HW-backed tables).
 *
 * [in] type
 *   TF table type to convert
 * [out] hcapi_type
 *   Returned HCAPI resource type index
 *
 * Unsupported and pool-less types (ENCAP_32B, IPV6 modify, VNIC_SVIF,
 * EXT, EXT_0) fall through without assigning *hcapi_type.
 *
 * NOTE(review): elided view — the switch statement, break statements,
 * default handling and return are not visible between the numbered
 * lines.
 */
3130 tf_rm_convert_tbl_type(enum tf_tbl_type type,
3131 uint32_t *hcapi_type)
/* SRAM-backed table types. */
3136 case TF_TBL_TYPE_FULL_ACT_RECORD:
3137 *hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
3139 case TF_TBL_TYPE_MCAST_GROUPS:
3140 *hcapi_type = TF_RESC_TYPE_SRAM_MCG;
3142 case TF_TBL_TYPE_ACT_ENCAP_8B:
3143 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
3145 case TF_TBL_TYPE_ACT_ENCAP_16B:
3146 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
3148 case TF_TBL_TYPE_ACT_ENCAP_64B:
3149 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
3151 case TF_TBL_TYPE_ACT_SP_SMAC:
3152 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
3154 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3155 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
3157 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3158 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
3160 case TF_TBL_TYPE_ACT_STATS_64:
3161 *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
3163 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3164 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
3166 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3167 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
3169 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3170 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
3172 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3173 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
/* HW-backed table types. */
3175 case TF_TBL_TYPE_METER_PROF:
3176 *hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
3178 case TF_TBL_TYPE_METER_INST:
3179 *hcapi_type = TF_RESC_TYPE_HW_METER_INST;
3181 case TF_TBL_TYPE_MIRROR_CONFIG:
3182 *hcapi_type = TF_RESC_TYPE_HW_MIRROR;
3184 case TF_TBL_TYPE_UPAR:
3185 *hcapi_type = TF_RESC_TYPE_HW_UPAR;
3187 case TF_TBL_TYPE_EPOCH0:
3188 *hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
3190 case TF_TBL_TYPE_EPOCH1:
3191 *hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
3193 case TF_TBL_TYPE_METADATA:
3194 *hcapi_type = TF_RESC_TYPE_HW_METADATA;
3196 case TF_TBL_TYPE_CT_STATE:
3197 *hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
3199 case TF_TBL_TYPE_RANGE_PROF:
3200 *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
3202 case TF_TBL_TYPE_RANGE_ENTRY:
3203 *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
3205 case TF_TBL_TYPE_LAG:
3206 *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
3208 /* Not yet supported */
3209 case TF_TBL_TYPE_ACT_ENCAP_32B:
3210 case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3211 case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3212 case TF_TBL_TYPE_VNIC_SVIF:
3213 case TF_TBL_TYPE_EXT: /* No pools for this type */
3214 case TF_TBL_TYPE_EXT_0: /* No pools for this type */
3224 tf_rm_convert_index(struct tf_session *tfs,
3226 enum tf_tbl_type type,
3227 enum tf_rm_convert_type c_type,
3229 uint32_t *convert_index)
3232 struct tf_rm_resc *resc;
3233 uint32_t hcapi_type;
3234 uint32_t base_index;
3236 if (dir == TF_DIR_RX)
3237 resc = &tfs->resc.rx;
3238 else if (dir == TF_DIR_TX)
3239 resc = &tfs->resc.tx;
3243 rc = tf_rm_convert_tbl_type(type, &hcapi_type);
3248 case TF_TBL_TYPE_FULL_ACT_RECORD:
3249 case TF_TBL_TYPE_MCAST_GROUPS:
3250 case TF_TBL_TYPE_ACT_ENCAP_8B:
3251 case TF_TBL_TYPE_ACT_ENCAP_16B:
3252 case TF_TBL_TYPE_ACT_ENCAP_32B:
3253 case TF_TBL_TYPE_ACT_ENCAP_64B:
3254 case TF_TBL_TYPE_ACT_SP_SMAC:
3255 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3256 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3257 case TF_TBL_TYPE_ACT_STATS_64:
3258 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3259 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3260 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3261 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3262 base_index = resc->sram_entry[hcapi_type].start;
3264 case TF_TBL_TYPE_MIRROR_CONFIG:
3265 case TF_TBL_TYPE_METER_PROF:
3266 case TF_TBL_TYPE_METER_INST:
3267 case TF_TBL_TYPE_UPAR:
3268 case TF_TBL_TYPE_EPOCH0:
3269 case TF_TBL_TYPE_EPOCH1:
3270 case TF_TBL_TYPE_METADATA:
3271 case TF_TBL_TYPE_CT_STATE:
3272 case TF_TBL_TYPE_RANGE_PROF:
3273 case TF_TBL_TYPE_RANGE_ENTRY:
3274 case TF_TBL_TYPE_LAG:
3275 base_index = resc->hw_entry[hcapi_type].start;
3277 /* Not yet supported */
3278 case TF_TBL_TYPE_VNIC_SVIF:
3279 case TF_TBL_TYPE_EXT: /* No pools for this type */
3280 case TF_TBL_TYPE_EXT_0: /* No pools for this type */
3286 case TF_RM_CONVERT_RM_BASE:
3287 *convert_index = index - base_index;
3289 case TF_RM_CONVERT_ADD_BASE:
3290 *convert_index = index + base_index;