1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
13 #include "tf_session.h"
14 #include "tf_resources.h"
20 * Internal macro to perform HW resource allocation check between what
21 * firmware reports vs what was statically requested.
24 * struct tf_rm_hw_query *hquery - Pointer to the hw query result
25 * enum tf_dir dir - Direction to process
26 * enum tf_resource_type_hw hcapi_type - HCAPI type, the index element
27 * in the hw query structure
28 * define def_value - Define value to check against
29 * uint32_t *eflag - Result of the check
/* Compare the firmware-reported max for one HW HCAPI type against the
 * statically reserved RX/TX define (def_value ## _RX / _TX) and set the
 * type's bit in *eflag on mismatch. Wrapped in do/while(0) so the macro
 * is safe as a single statement (e.g. in un-braced if bodies).
 */
#define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do {  \
	if ((dir) == TF_DIR_RX) {					      \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
			*(eflag) |= 1 << (hcapi_type);			      \
	} else {							      \
		if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
			*(eflag) |= 1 << (hcapi_type);			      \
	}								      \
} while (0)
 * Internal macro to perform SRAM resource allocation check between what
 * firmware reports vs what was statically requested.
 *
 * struct tf_rm_sram_query *squery - Pointer to the sram query result
 * enum tf_dir dir - Direction to process
 * enum tf_resource_type_sram hcapi_type - HCAPI type, the index element
 * in the sram query structure
 * define def_value - Define value to check against
 * uint32_t *eflag - Result of the check
/* Compare the firmware-reported max for one SRAM HCAPI type against the
 * statically reserved RX/TX define (def_value ## _RX / _TX) and set the
 * type's bit in *eflag on mismatch. do/while(0) keeps it statement-safe.
 */
#define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
	if ((dir) == TF_DIR_RX) {					       \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
			*(eflag) |= 1 << (hcapi_type);			       \
	} else {							       \
		if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
			*(eflag) |= 1 << (hcapi_type);			       \
	}								       \
} while (0)
 * Internal macro to convert a reserved resource define name into a
 * direction specific (RX or TX) value.
 *
 * enum tf_dir dir - Direction to process
 * string type - Type name to append RX or TX to
 * string dtype - Direction specific type
/* Resolve a reserved-resource define into its direction specific value:
 * dtype gets type ## _RX for the RX direction, type ## _TX otherwise.
 * do/while(0) keeps the macro statement-safe.
 */
#define TF_RESC_RSVD(dir, type, dtype) do {	\
	if ((dir) == TF_DIR_RX)			\
		(dtype) = type ## _RX;		\
	else					\
		(dtype) = type ## _TX;		\
} while (0)
82 *tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)
85 case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
86 return "L2 ctxt tcam";
87 case TF_RESC_TYPE_HW_PROF_FUNC:
88 return "Profile Func";
89 case TF_RESC_TYPE_HW_PROF_TCAM:
90 return "Profile tcam";
91 case TF_RESC_TYPE_HW_EM_PROF_ID:
92 return "EM profile id";
93 case TF_RESC_TYPE_HW_EM_REC:
95 case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
96 return "WC tcam profile id";
97 case TF_RESC_TYPE_HW_WC_TCAM:
99 case TF_RESC_TYPE_HW_METER_PROF:
100 return "Meter profile";
101 case TF_RESC_TYPE_HW_METER_INST:
102 return "Meter instance";
103 case TF_RESC_TYPE_HW_MIRROR:
105 case TF_RESC_TYPE_HW_UPAR:
107 case TF_RESC_TYPE_HW_SP_TCAM:
108 return "Source properties tcam";
109 case TF_RESC_TYPE_HW_L2_FUNC:
110 return "L2 Function";
111 case TF_RESC_TYPE_HW_FKB:
113 case TF_RESC_TYPE_HW_TBL_SCOPE:
114 return "Table scope";
115 case TF_RESC_TYPE_HW_EPOCH0:
117 case TF_RESC_TYPE_HW_EPOCH1:
119 case TF_RESC_TYPE_HW_METADATA:
121 case TF_RESC_TYPE_HW_CT_STATE:
122 return "Connection tracking state";
123 case TF_RESC_TYPE_HW_RANGE_PROF:
124 return "Range profile";
125 case TF_RESC_TYPE_HW_RANGE_ENTRY:
126 return "Range entry";
127 case TF_RESC_TYPE_HW_LAG_ENTRY:
130 return "Invalid identifier";
135 *tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
138 case TF_RESC_TYPE_SRAM_FULL_ACTION:
139 return "Full action";
140 case TF_RESC_TYPE_SRAM_MCG:
142 case TF_RESC_TYPE_SRAM_ENCAP_8B:
144 case TF_RESC_TYPE_SRAM_ENCAP_16B:
146 case TF_RESC_TYPE_SRAM_ENCAP_64B:
148 case TF_RESC_TYPE_SRAM_SP_SMAC:
149 return "Source properties SMAC";
150 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
151 return "Source properties SMAC IPv4";
152 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
153 return "Source properties IPv6";
154 case TF_RESC_TYPE_SRAM_COUNTER_64B:
155 return "Counter 64B";
156 case TF_RESC_TYPE_SRAM_NAT_SPORT:
157 return "NAT source port";
158 case TF_RESC_TYPE_SRAM_NAT_DPORT:
159 return "NAT destination port";
160 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
161 return "NAT source IPv4";
162 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
163 return "NAT destination IPv4";
165 return "Invalid identifier";
170 * Helper function to perform a HW HCAPI resource type lookup against
171 * the reserved value of the same static type.
174 * -EOPNOTSUPP - Reserved resource type not supported
175 * Value - Integer value of the reserved value for the requested type
178 tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)
180 uint32_t value = -EOPNOTSUPP;
183 case TF_RESC_TYPE_HW_L2_CTXT_TCAM:
184 TF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);
186 case TF_RESC_TYPE_HW_PROF_FUNC:
187 TF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);
189 case TF_RESC_TYPE_HW_PROF_TCAM:
190 TF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);
192 case TF_RESC_TYPE_HW_EM_PROF_ID:
193 TF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, value);
195 case TF_RESC_TYPE_HW_EM_REC:
196 TF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);
198 case TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:
199 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);
201 case TF_RESC_TYPE_HW_WC_TCAM:
202 TF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);
204 case TF_RESC_TYPE_HW_METER_PROF:
205 TF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);
207 case TF_RESC_TYPE_HW_METER_INST:
208 TF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);
210 case TF_RESC_TYPE_HW_MIRROR:
211 TF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);
213 case TF_RESC_TYPE_HW_UPAR:
214 TF_RESC_RSVD(dir, TF_RSVD_UPAR, value);
216 case TF_RESC_TYPE_HW_SP_TCAM:
217 TF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);
219 case TF_RESC_TYPE_HW_L2_FUNC:
220 TF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);
222 case TF_RESC_TYPE_HW_FKB:
223 TF_RESC_RSVD(dir, TF_RSVD_FKB, value);
225 case TF_RESC_TYPE_HW_TBL_SCOPE:
226 TF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);
228 case TF_RESC_TYPE_HW_EPOCH0:
229 TF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);
231 case TF_RESC_TYPE_HW_EPOCH1:
232 TF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);
234 case TF_RESC_TYPE_HW_METADATA:
235 TF_RESC_RSVD(dir, TF_RSVD_METADATA, value);
237 case TF_RESC_TYPE_HW_CT_STATE:
238 TF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);
240 case TF_RESC_TYPE_HW_RANGE_PROF:
241 TF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);
243 case TF_RESC_TYPE_HW_RANGE_ENTRY:
244 TF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);
246 case TF_RESC_TYPE_HW_LAG_ENTRY:
247 TF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);
257 * Helper function to perform a SRAM HCAPI resource type lookup
258 * against the reserved value of the same static type.
261 * -EOPNOTSUPP - Reserved resource type not supported
262 * Value - Integer value of the reserved value for the requested type
265 tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
267 uint32_t value = -EOPNOTSUPP;
270 case TF_RESC_TYPE_SRAM_FULL_ACTION:
271 TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
273 case TF_RESC_TYPE_SRAM_MCG:
274 TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
276 case TF_RESC_TYPE_SRAM_ENCAP_8B:
277 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
279 case TF_RESC_TYPE_SRAM_ENCAP_16B:
280 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
282 case TF_RESC_TYPE_SRAM_ENCAP_64B:
283 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
285 case TF_RESC_TYPE_SRAM_SP_SMAC:
286 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
288 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
289 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
291 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
292 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
294 case TF_RESC_TYPE_SRAM_COUNTER_64B:
295 TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
297 case TF_RESC_TYPE_SRAM_NAT_SPORT:
298 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
300 case TF_RESC_TYPE_SRAM_NAT_DPORT:
301 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
303 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
304 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
306 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
307 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
317 * Helper function to print all the HW resource qcaps errors reported
321 * Receive or transmit direction
324 * Pointer to the hw error flags created at time of the query check
327 tf_rm_print_hw_qcaps_error(enum tf_dir dir,
328 struct tf_rm_hw_query *hw_query,
329 uint32_t *error_flag)
333 TFP_DRV_LOG(ERR, "QCAPS errors HW\n");
334 TFP_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
335 TFP_DRV_LOG(ERR, " Elements:\n");
337 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
338 if (*error_flag & 1 << i)
339 TFP_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
340 tf_hcapi_hw_2_str(i),
341 hw_query->hw_query[i].max,
342 tf_rm_rsvd_hw_value(dir, i));
347 * Helper function to print all the SRAM resource qcaps errors
348 * reported in the error_flag.
351 * Receive or transmit direction
354 * Pointer to the sram error flags created at time of the query check
357 tf_rm_print_sram_qcaps_error(enum tf_dir dir,
358 struct tf_rm_sram_query *sram_query,
359 uint32_t *error_flag)
363 TFP_DRV_LOG(ERR, "QCAPS errors SRAM\n");
364 TFP_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
365 TFP_DRV_LOG(ERR, " Elements:\n");
367 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
368 if (*error_flag & 1 << i)
369 TFP_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
370 tf_hcapi_sram_2_str(i),
371 sram_query->sram_query[i].max,
372 tf_rm_rsvd_sram_value(dir, i));
377 * Performs a HW resource check between what firmware capability
378 * reports and what the core expects is available.
380 * Firmware performs the resource carving at AFM init time and the
381 * resource capability is reported in the TruFlow qcaps msg.
384 * Pointer to HW Query data structure. Query holds what the firmware
385 * offers of the HW resources.
388 * Receive or transmit direction
390 * [in/out] error_flag
391 * Pointer to a bit array indicating the error of a single HCAPI
392 * resource type. When a bit is set to 1, the HCAPI resource type
393 * failed static allocation.
397 * -ENOMEM - Failure on one of the allocated resources. Check the
398 * error_flag for what types are flagged errored.
401 tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
403 uint32_t *error_flag)
407 TF_RM_CHECK_HW_ALLOC(query,
409 TF_RESC_TYPE_HW_L2_CTXT_TCAM,
410 TF_RSVD_L2_CTXT_TCAM,
413 TF_RM_CHECK_HW_ALLOC(query,
415 TF_RESC_TYPE_HW_PROF_FUNC,
419 TF_RM_CHECK_HW_ALLOC(query,
421 TF_RESC_TYPE_HW_PROF_TCAM,
425 TF_RM_CHECK_HW_ALLOC(query,
427 TF_RESC_TYPE_HW_EM_PROF_ID,
431 TF_RM_CHECK_HW_ALLOC(query,
433 TF_RESC_TYPE_HW_EM_REC,
437 TF_RM_CHECK_HW_ALLOC(query,
439 TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,
440 TF_RSVD_WC_TCAM_PROF_ID,
443 TF_RM_CHECK_HW_ALLOC(query,
445 TF_RESC_TYPE_HW_WC_TCAM,
449 TF_RM_CHECK_HW_ALLOC(query,
451 TF_RESC_TYPE_HW_METER_PROF,
455 TF_RM_CHECK_HW_ALLOC(query,
457 TF_RESC_TYPE_HW_METER_INST,
461 TF_RM_CHECK_HW_ALLOC(query,
463 TF_RESC_TYPE_HW_MIRROR,
467 TF_RM_CHECK_HW_ALLOC(query,
469 TF_RESC_TYPE_HW_UPAR,
473 TF_RM_CHECK_HW_ALLOC(query,
475 TF_RESC_TYPE_HW_SP_TCAM,
479 TF_RM_CHECK_HW_ALLOC(query,
481 TF_RESC_TYPE_HW_L2_FUNC,
485 TF_RM_CHECK_HW_ALLOC(query,
491 TF_RM_CHECK_HW_ALLOC(query,
493 TF_RESC_TYPE_HW_TBL_SCOPE,
497 TF_RM_CHECK_HW_ALLOC(query,
499 TF_RESC_TYPE_HW_EPOCH0,
503 TF_RM_CHECK_HW_ALLOC(query,
505 TF_RESC_TYPE_HW_EPOCH1,
509 TF_RM_CHECK_HW_ALLOC(query,
511 TF_RESC_TYPE_HW_METADATA,
515 TF_RM_CHECK_HW_ALLOC(query,
517 TF_RESC_TYPE_HW_CT_STATE,
521 TF_RM_CHECK_HW_ALLOC(query,
523 TF_RESC_TYPE_HW_RANGE_PROF,
527 TF_RM_CHECK_HW_ALLOC(query,
529 TF_RESC_TYPE_HW_RANGE_ENTRY,
533 TF_RM_CHECK_HW_ALLOC(query,
535 TF_RESC_TYPE_HW_LAG_ENTRY,
539 if (*error_flag != 0)
546 * Performs a SRAM resource check between what firmware capability
547 * reports and what the core expects is available.
549 * Firmware performs the resource carving at AFM init time and the
550 * resource capability is reported in the TruFlow qcaps msg.
553 * Pointer to SRAM Query data structure. Query holds what the
554 * firmware offers of the SRAM resources.
557 * Receive or transmit direction
559 * [in/out] error_flag
560 * Pointer to a bit array indicating the error of a single HCAPI
561 * resource type. When a bit is set to 1, the HCAPI resource type
562 * failed static allocation.
566 * -ENOMEM - Failure on one of the allocated resources. Check the
567 * error_flag for what types are flagged errored.
570 tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
572 uint32_t *error_flag)
576 TF_RM_CHECK_SRAM_ALLOC(query,
578 TF_RESC_TYPE_SRAM_FULL_ACTION,
579 TF_RSVD_SRAM_FULL_ACTION,
582 TF_RM_CHECK_SRAM_ALLOC(query,
584 TF_RESC_TYPE_SRAM_MCG,
588 TF_RM_CHECK_SRAM_ALLOC(query,
590 TF_RESC_TYPE_SRAM_ENCAP_8B,
591 TF_RSVD_SRAM_ENCAP_8B,
594 TF_RM_CHECK_SRAM_ALLOC(query,
596 TF_RESC_TYPE_SRAM_ENCAP_16B,
597 TF_RSVD_SRAM_ENCAP_16B,
600 TF_RM_CHECK_SRAM_ALLOC(query,
602 TF_RESC_TYPE_SRAM_ENCAP_64B,
603 TF_RSVD_SRAM_ENCAP_64B,
606 TF_RM_CHECK_SRAM_ALLOC(query,
608 TF_RESC_TYPE_SRAM_SP_SMAC,
609 TF_RSVD_SRAM_SP_SMAC,
612 TF_RM_CHECK_SRAM_ALLOC(query,
614 TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
615 TF_RSVD_SRAM_SP_SMAC_IPV4,
618 TF_RM_CHECK_SRAM_ALLOC(query,
620 TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
621 TF_RSVD_SRAM_SP_SMAC_IPV6,
624 TF_RM_CHECK_SRAM_ALLOC(query,
626 TF_RESC_TYPE_SRAM_COUNTER_64B,
627 TF_RSVD_SRAM_COUNTER_64B,
630 TF_RM_CHECK_SRAM_ALLOC(query,
632 TF_RESC_TYPE_SRAM_NAT_SPORT,
633 TF_RSVD_SRAM_NAT_SPORT,
636 TF_RM_CHECK_SRAM_ALLOC(query,
638 TF_RESC_TYPE_SRAM_NAT_DPORT,
639 TF_RSVD_SRAM_NAT_DPORT,
642 TF_RM_CHECK_SRAM_ALLOC(query,
644 TF_RESC_TYPE_SRAM_NAT_S_IPV4,
645 TF_RSVD_SRAM_NAT_S_IPV4,
648 TF_RM_CHECK_SRAM_ALLOC(query,
650 TF_RESC_TYPE_SRAM_NAT_D_IPV4,
651 TF_RSVD_SRAM_NAT_D_IPV4,
654 if (*error_flag != 0)
661 * Internal function to mark pool entries used.
/**
 * Internal function to mark pool entries used.
 *
 * [in] count
 *   Number of entries Truflow was granted (0 => none requested)
 * [in] rsv_begin
 *   First index of the reserved (Truflow-owned) range
 * [in] rsv_end
 *   Last index of the reserved range
 * [in] max
 *   Total number of entries in the pool
 * [in/out] pool
 *   Bit allocator pool; entries NOT owned by Truflow get ba_alloc'd
 *   so they can never be handed out.
 */
static void
tf_rm_reserve_range(uint32_t count,
		    uint32_t rsv_begin,
		    uint32_t rsv_end,
		    uint32_t max,
		    struct bitalloc *pool)
{
	uint32_t i;

	/* If no resources has been requested we mark everything
	 * 'used'
	 */
	if (count == 0) {
		for (i = 0; i < max; i++)
			ba_alloc_index(pool, i);
	} else {
		/* Support 2 main modes
		 * Reserved range starts from bottom up (with
		 * pre-reserved value or not)
		 * - begin = 0 to end xx
		 * - begin = 1 to end xx
		 *
		 * Reserved range starts from top down
		 * - begin = yy to end max
		 */

		/* Bottom up check, start from 0 */
		if (rsv_begin == 0) {
			for (i = rsv_end + 1; i < max; i++)
				ba_alloc_index(pool, i);
		}

		/* Bottom up check, start from 1 or higher OR
		 * Top Down
		 */
		if (rsv_begin >= 1) {
			/* Allocate from 0 until start */
			for (i = 0; i < rsv_begin; i++)
				ba_alloc_index(pool, i);

			/* Skip and then do the remaining */
			if (rsv_end < max - 1) {
				/* NOTE(review): starts at rsv_end, not
				 * rsv_end + 1 as in the rsv_begin == 0
				 * branch — possible off-by-one marking
				 * the last reserved entry used; confirm
				 * against pool semantics.
				 */
				for (i = rsv_end; i < max; i++)
					ba_alloc_index(pool, i);
			}
		}
	}
}
713 * Internal function to mark all the l2 ctxt allocated that Truflow
717 tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
719 uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
722 /* l2 ctxt rx direction */
723 if (tfs->resc.rx.hw_entry[index].stride > 0)
724 end = tfs->resc.rx.hw_entry[index].start +
725 tfs->resc.rx.hw_entry[index].stride - 1;
727 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
728 tfs->resc.rx.hw_entry[index].start,
731 tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
733 /* l2 ctxt tx direction */
734 if (tfs->resc.tx.hw_entry[index].stride > 0)
735 end = tfs->resc.tx.hw_entry[index].start +
736 tfs->resc.tx.hw_entry[index].stride - 1;
738 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
739 tfs->resc.tx.hw_entry[index].start,
742 tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
746 * Internal function to mark all the profile tcam and profile func
747 * resources that Truflow does not own.
/* Reserve-marks the profile func and profile tcam entries Truflow does
 * not own, RX then TX, via tf_rm_reserve_range().
 * NOTE(review): extract is truncated — prologue, 'end' local, trailing
 * tf_rm_reserve_range() args and closing brace are not visible; code
 * below kept byte-identical.
 */
tf_rm_rsvd_prof(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;
	/* profile func rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_PROF_FUNC_POOL_NAME_RX);
	/* profile func tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_PROF_FUNC_POOL_NAME_TX);
	/* Switch to the second resource handled by this function */
	index = TF_RESC_TYPE_HW_PROF_TCAM;
	/* profile tcam rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_PROF_TCAM_POOL_NAME_RX);
	/* profile tcam tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_PROF_TCAM_POOL_NAME_TX);
803 * Internal function to mark all the em profile id allocated that
804 * Truflow does not own.
/* Reserve-marks the EM profile id entries Truflow does not own, RX then
 * TX. NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_em_prof(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;
	/* em prof id rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_EM_PROF_ID_POOL_NAME_RX);
	/* em prof id tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_EM_PROF_ID_POOL_NAME_TX);
836 * Internal function to mark all the wildcard tcam and profile id
837 * resources that Truflow does not own.
/* Reserve-marks the wildcard tcam profile id and wildcard tcam entries
 * Truflow does not own, RX then TX for each.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_wc(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;
	/* wc profile id rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);
	/* wc profile id tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);
	/* Switch to the second resource handled by this function */
	index = TF_RESC_TYPE_HW_WC_TCAM;
	/* wc tcam rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_WC_TCAM_POOL_NAME_RX);
	/* wc tcam tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_WC_TCAM_POOL_NAME_TX);
893 * Internal function to mark all the meter resources allocated that
894 * Truflow does not own.
/* Reserve-marks the meter profile and meter instance entries Truflow
 * does not own, RX then TX for each.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_meter(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_METER_PROF;
	/* meter profiles rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_METER_PROF_POOL_NAME_RX);
	/* meter profiles tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_METER_PROF_POOL_NAME_TX);
	/* Switch to the second resource handled by this function */
	index = TF_RESC_TYPE_HW_METER_INST;
	/* meter rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_METER_INST_POOL_NAME_RX);
	/* meter tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_METER_INST_POOL_NAME_TX);
950 * Internal function to mark all the mirror resources allocated that
951 * Truflow does not own.
/* Reserve-marks the mirror entries Truflow does not own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_mirror(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_MIRROR;
	/* mirror rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_MIRROR_POOL_NAME_RX);
	/* mirror tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_MIRROR_POOL_NAME_TX);
983 * Internal function to mark all the upar resources allocated that
984 * Truflow does not own.
/* Reserve-marks the UPAR entries Truflow does not own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_upar(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_UPAR;
	/* upar rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_UPAR_POOL_NAME_RX);
	/* upar tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_UPAR_POOL_NAME_TX);
1016 * Internal function to mark all the sp tcam resources allocated that
1017 * Truflow does not own.
/* Reserve-marks the source-properties tcam entries Truflow does not
 * own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_sp_tcam(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_SP_TCAM;
	/* sp tcam rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_SP_TCAM_POOL_NAME_RX);
	/* sp tcam tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_SP_TCAM_POOL_NAME_TX);
1049 * Internal function to mark all the l2 func resources allocated that
1050 * Truflow does not own.
/* Reserve-marks the l2 func entries Truflow does not own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_l2_func(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
	/* l2 func rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_L2_FUNC_POOL_NAME_RX);
	/* l2 func tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_L2_FUNC_POOL_NAME_TX);
1082 * Internal function to mark all the fkb resources allocated that
1083 * Truflow does not own.
/* Reserve-marks the FKB entries Truflow does not own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_fkb(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_FKB;
	/* fkb rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_FKB_POOL_NAME_RX);
	/* fkb tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_FKB_POOL_NAME_TX);
 * Internal function to mark all the tbl scope resources allocated
 * that Truflow does not own.
/* Reserve-marks the table scope entries Truflow does not own, RX then
 * TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_tbl_scope(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;
	/* tbl scope rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_TBL_SCOPE_POOL_NAME_RX);
	/* tbl scope tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_TBL_SCOPE_POOL_NAME_TX);
1148 * Internal function to mark all the l2 epoch resources allocated that
1149 * Truflow does not own.
/* Reserve-marks the epoch0 and epoch1 entries Truflow does not own,
 * RX then TX for each.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_epoch(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_EPOCH0;
	/* epoch0 rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_EPOCH0_POOL_NAME_RX);
	/* epoch0 tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_EPOCH0_POOL_NAME_TX);
	/* Switch to the second resource handled by this function */
	index = TF_RESC_TYPE_HW_EPOCH1;
	/* epoch1 rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_EPOCH1_POOL_NAME_RX);
	/* epoch1 tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_EPOCH1_POOL_NAME_TX);
1205 * Internal function to mark all the metadata resources allocated that
1206 * Truflow does not own.
/* Reserve-marks the metadata entries Truflow does not own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_metadata(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_METADATA;
	/* metadata rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_METADATA_POOL_NAME_RX);
	/* metadata tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_METADATA_POOL_NAME_TX);
1238 * Internal function to mark all the ct state resources allocated that
1239 * Truflow does not own.
/* Reserve-marks the connection tracking state entries Truflow does
 * not own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_ct_state(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_CT_STATE;
	/* ct state rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_CT_STATE_POOL_NAME_RX);
	/* ct state tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_CT_STATE_POOL_NAME_TX);
1271 * Internal function to mark all the range resources allocated that
1272 * Truflow does not own.
/* Reserve-marks the range profile and range entry resources Truflow
 * does not own, RX then TX for each.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_range(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;
	/* range profile rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_RANGE_PROF_POOL_NAME_RX);
	/* range profile tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_RANGE_PROF_POOL_NAME_TX);
	/* Switch to the second resource handled by this function */
	index = TF_RESC_TYPE_HW_RANGE_ENTRY;
	/* range entry rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_RANGE_ENTRY_POOL_NAME_RX);
	/* range entry tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_RANGE_ENTRY_POOL_NAME_TX);
1328 * Internal function to mark all the lag resources allocated that
1329 * Truflow does not own.
/* Reserve-marks the LAG entries Truflow does not own, RX then TX.
 * NOTE(review): extract is truncated — prologue, 'end' local and
 * trailing tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_lag_entry(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;
	/* lag entry rx direction */
	if (tfs->resc.rx.hw_entry[index].stride > 0)
		end = tfs->resc.rx.hw_entry[index].start +
		      tfs->resc.rx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
			    tfs->resc.rx.hw_entry[index].start,
			    tfs->TF_LAG_ENTRY_POOL_NAME_RX);
	/* lag entry tx direction */
	if (tfs->resc.tx.hw_entry[index].stride > 0)
		end = tfs->resc.tx.hw_entry[index].start +
		      tfs->resc.tx.hw_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
			    tfs->resc.tx.hw_entry[index].start,
			    tfs->TF_LAG_ENTRY_POOL_NAME_TX);
1361 * Internal function to mark all the full action resources allocated
1362 * that Truflow does not own.
/* Reserve-marks the SRAM full action entries Truflow does not own,
 * RX then TX. Unlike the HW pools, the reserve begin index comes from
 * the TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_* defines, not the session.
 * NOTE(review): extract is truncated — prologue, 'end' local and some
 * tf_rm_reserve_range() args not visible; code kept as-is.
 */
tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
	uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
	/* full action rx direction */
	if (tfs->resc.rx.sram_entry[index].stride > 0)
		end = tfs->resc.rx.sram_entry[index].start +
		      tfs->resc.rx.sram_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
			    TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
			    TF_RSVD_SRAM_FULL_ACTION_RX,
			    tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);
	/* full action tx direction */
	if (tfs->resc.tx.sram_entry[index].stride > 0)
		end = tfs->resc.tx.sram_entry[index].start +
		      tfs->resc.tx.sram_entry[index].stride - 1;
	tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
			    TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
			    TF_RSVD_SRAM_FULL_ACTION_TX,
			    tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
1394 * Internal function to mark all the multicast group resources
1395 * allocated that Truflow does not own.
/* NOTE(review): extraction dropped interior lines (braces, 'end' decl,
 * one call argument). RX is handled; TX is intentionally absent.
 */
1398 tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
1400 uint32_t index = TF_RESC_TYPE_SRAM_MCG;
1403 /* multicast group rx direction */
1404 if (tfs->resc.rx.sram_entry[index].stride > 0)
1405 end = tfs->resc.rx.sram_entry[index].start +
1406 tfs->resc.rx.sram_entry[index].stride - 1;
1408 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1409 TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
1411 TF_RSVD_SRAM_MCG_RX,
1412 tfs->TF_SRAM_MCG_POOL_NAME_RX);
1414 /* Multicast Group on TX is not supported */
1418 * Internal function to mark all the encap resources allocated that
1419 * Truflow does not own.
/* Handles three encap record widths in sequence: 8B (RX+TX), 16B
 * (RX+TX) and 64B (TX only). NOTE(review): interior lines are missing
 * from this extraction (braces, 'end' decl, one call argument each).
 */
1422 tf_rm_rsvd_sram_encap(struct tf_session *tfs)
1424 uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
1427 /* encap 8b rx direction */
1428 if (tfs->resc.rx.sram_entry[index].stride > 0)
1429 end = tfs->resc.rx.sram_entry[index].start +
1430 tfs->resc.rx.sram_entry[index].stride - 1;
1432 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1433 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
1435 TF_RSVD_SRAM_ENCAP_8B_RX,
1436 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);
1438 /* encap 8b tx direction */
1439 if (tfs->resc.tx.sram_entry[index].stride > 0)
1440 end = tfs->resc.tx.sram_entry[index].start +
1441 tfs->resc.tx.sram_entry[index].stride - 1;
1443 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1444 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
1446 TF_RSVD_SRAM_ENCAP_8B_TX,
1447 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);
/* Switch to the 16B encap record type; same RX/TX pattern as above */
1449 index = TF_RESC_TYPE_SRAM_ENCAP_16B;
1451 /* encap 16b rx direction */
1452 if (tfs->resc.rx.sram_entry[index].stride > 0)
1453 end = tfs->resc.rx.sram_entry[index].start +
1454 tfs->resc.rx.sram_entry[index].stride - 1;
1456 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1457 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
1459 TF_RSVD_SRAM_ENCAP_16B_RX,
1460 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);
1462 /* encap 16b tx direction */
1463 if (tfs->resc.tx.sram_entry[index].stride > 0)
1464 end = tfs->resc.tx.sram_entry[index].start +
1465 tfs->resc.tx.sram_entry[index].stride - 1;
1467 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1468 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
1470 TF_RSVD_SRAM_ENCAP_16B_TX,
1471 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);
1473 index = TF_RESC_TYPE_SRAM_ENCAP_64B;
1475 /* Encap 64B not supported on RX */
1477 /* Encap 64b tx direction */
1478 if (tfs->resc.tx.sram_entry[index].stride > 0)
1479 end = tfs->resc.tx.sram_entry[index].start +
1480 tfs->resc.tx.sram_entry[index].stride - 1;
1482 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1483 TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
1485 TF_RSVD_SRAM_ENCAP_64B_TX,
1486 tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
1490 * Internal function to mark all the sp resources allocated that
1491 * Truflow does not own.
/* Source-property (SP) SMAC records: plain SMAC (RX+TX), SMAC+IPv4
 * (TX only) and SMAC+IPv6 (TX only). NOTE(review): extraction dropped
 * interior lines (braces, 'end' decl, one call argument each).
 */
1494 tf_rm_rsvd_sram_sp(struct tf_session *tfs)
1496 uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
1499 /* sp smac rx direction */
1500 if (tfs->resc.rx.sram_entry[index].stride > 0)
1501 end = tfs->resc.rx.sram_entry[index].start +
1502 tfs->resc.rx.sram_entry[index].stride - 1;
1504 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1505 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
1507 TF_RSVD_SRAM_SP_SMAC_RX,
1508 tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);
1510 /* sp smac tx direction */
1511 if (tfs->resc.tx.sram_entry[index].stride > 0)
1512 end = tfs->resc.tx.sram_entry[index].start +
1513 tfs->resc.tx.sram_entry[index].stride - 1;
1515 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1516 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
1518 TF_RSVD_SRAM_SP_SMAC_TX,
1519 tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);
1521 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
1523 /* SP SMAC IPv4 not supported on RX */
1525 /* sp smac ipv4 tx direction */
1526 if (tfs->resc.tx.sram_entry[index].stride > 0)
1527 end = tfs->resc.tx.sram_entry[index].start +
1528 tfs->resc.tx.sram_entry[index].stride - 1;
1530 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1531 TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
1533 TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
1534 tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);
1536 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
1538 /* SP SMAC IPv6 not supported on RX */
1540 /* sp smac ipv6 tx direction */
1541 if (tfs->resc.tx.sram_entry[index].stride > 0)
1542 end = tfs->resc.tx.sram_entry[index].start +
1543 tfs->resc.tx.sram_entry[index].stride - 1;
1545 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1546 TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
1548 TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
1549 tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
1553 * Internal function to mark all the stat resources allocated that
1554 * Truflow does not own.
/* 64B counter records, RX then TX. NOTE(review): extraction dropped
 * interior lines (braces, 'end' decl, one call argument each).
 */
1557 tf_rm_rsvd_sram_stats(struct tf_session *tfs)
1559 uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
1562 /* counter 64b rx direction */
1563 if (tfs->resc.rx.sram_entry[index].stride > 0)
1564 end = tfs->resc.rx.sram_entry[index].start +
1565 tfs->resc.rx.sram_entry[index].stride - 1;
1567 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1568 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
1570 TF_RSVD_SRAM_COUNTER_64B_RX,
1571 tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);
1573 /* counter 64b tx direction */
1574 if (tfs->resc.tx.sram_entry[index].stride > 0)
1575 end = tfs->resc.tx.sram_entry[index].start +
1576 tfs->resc.tx.sram_entry[index].stride - 1;
1578 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1579 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
1581 TF_RSVD_SRAM_COUNTER_64B_TX,
1582 tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
1586 * Internal function to mark all the nat resources allocated that
1587 * Truflow does not own.
/* Covers the four NAT record types in sequence, each for RX then TX:
 * source port, destination port, source IPv4, destination IPv4.
 * NOTE(review): extraction dropped interior lines (braces, 'end' decl,
 * one call argument each).
 */
1590 tf_rm_rsvd_sram_nat(struct tf_session *tfs)
1592 uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
1595 /* nat source port rx direction */
1596 if (tfs->resc.rx.sram_entry[index].stride > 0)
1597 end = tfs->resc.rx.sram_entry[index].start +
1598 tfs->resc.rx.sram_entry[index].stride - 1;
1600 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1601 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
1603 TF_RSVD_SRAM_NAT_SPORT_RX,
1604 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);
1606 /* nat source port tx direction */
1607 if (tfs->resc.tx.sram_entry[index].stride > 0)
1608 end = tfs->resc.tx.sram_entry[index].start +
1609 tfs->resc.tx.sram_entry[index].stride - 1;
1611 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1612 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
1614 TF_RSVD_SRAM_NAT_SPORT_TX,
1615 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);
1617 index = TF_RESC_TYPE_SRAM_NAT_DPORT;
1619 /* nat destination port rx direction */
1620 if (tfs->resc.rx.sram_entry[index].stride > 0)
1621 end = tfs->resc.rx.sram_entry[index].start +
1622 tfs->resc.rx.sram_entry[index].stride - 1;
1624 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1625 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
1627 TF_RSVD_SRAM_NAT_DPORT_RX,
1628 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);
1630 /* nat destination port tx direction */
1631 if (tfs->resc.tx.sram_entry[index].stride > 0)
1632 end = tfs->resc.tx.sram_entry[index].start +
1633 tfs->resc.tx.sram_entry[index].stride - 1;
1635 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1636 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
1638 TF_RSVD_SRAM_NAT_DPORT_TX,
1639 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);
1641 index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
1643 /* nat source port ipv4 rx direction */
1644 if (tfs->resc.rx.sram_entry[index].stride > 0)
1645 end = tfs->resc.rx.sram_entry[index].start +
1646 tfs->resc.rx.sram_entry[index].stride - 1;
1648 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1649 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
1651 TF_RSVD_SRAM_NAT_S_IPV4_RX,
1652 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);
1654 /* nat source ipv4 port tx direction */
1655 if (tfs->resc.tx.sram_entry[index].stride > 0)
1656 end = tfs->resc.tx.sram_entry[index].start +
1657 tfs->resc.tx.sram_entry[index].stride - 1;
1659 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1660 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
1662 TF_RSVD_SRAM_NAT_S_IPV4_TX,
1663 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);
1665 index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
1667 /* nat destination port ipv4 rx direction */
1668 if (tfs->resc.rx.sram_entry[index].stride > 0)
1669 end = tfs->resc.rx.sram_entry[index].start +
1670 tfs->resc.rx.sram_entry[index].stride - 1;
1672 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
1673 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
1675 TF_RSVD_SRAM_NAT_D_IPV4_RX,
1676 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);
1678 /* nat destination ipv4 port tx direction */
1679 if (tfs->resc.tx.sram_entry[index].stride > 0)
1680 end = tfs->resc.tx.sram_entry[index].start +
1681 tfs->resc.tx.sram_entry[index].stride - 1;
1683 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
1684 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
1686 TF_RSVD_SRAM_NAT_D_IPV4_TX,
1687 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
1691 * Internal function used to validate the HW allocated resources
1692 * against the requested values.
/* For every HW resource type, compare the firmware-granted count
 * (hw_entry[i].stride, the "got" in the log) against the requested
 * count (hw_alloc->hw_num[i], the "expect") and log any mismatch.
 * NOTE(review): interior lines (braces, locals, log arguments, return)
 * were dropped by the extraction.
 */
1695 tf_rm_hw_alloc_validate(enum tf_dir dir,
1696 struct tf_rm_hw_alloc *hw_alloc,
1697 struct tf_rm_entry *hw_entry)
1702 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
1703 if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
1705 "%s, Alloc failed id:%d expect:%d got:%d\n",
1708 hw_alloc->hw_num[i],
1709 hw_entry[i].stride);
1718 * Internal function used to validate the SRAM allocated resources
1719 * against the requested values.
/* SRAM counterpart of tf_rm_hw_alloc_validate(): granted stride vs
 * requested sram_num per type, with a mismatch log.
 * NOTE(review): 'dir' is marked __rte_unused here while the HW variant
 * is not; the hidden log arguments likely use dir -- confirm and drop
 * the attribute if so.
 */
1722 tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused,
1723 struct tf_rm_sram_alloc *sram_alloc,
1724 struct tf_rm_entry *sram_entry)
1729 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
1730 if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
1732 "%s, Alloc failed idx:%d expect:%d got:%d\n",
1735 sram_alloc->sram_num[i],
1736 sram_entry[i].stride);
1745 * Internal function used to mark all the HW resources allocated that
1746 * Truflow does not own.
/* Dispatcher: invokes one tf_rm_rsvd_* helper per HW resource type so
 * each bit-allocator pool ends up with only Truflow-owned entries free.
 */
1749 tf_rm_reserve_hw(struct tf *tfp)
1751 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1754 * There is no direct AFM resource allocation as it is carved
1755 * statically at AFM boot time. Thus the bit allocators work
1756 * on the full HW resource amount and we just mark everything
1757 * used except the resources that Truflow took ownership off.
1759 tf_rm_rsvd_l2_ctxt(tfs);
1760 tf_rm_rsvd_prof(tfs);
1761 tf_rm_rsvd_em_prof(tfs);
1763 tf_rm_rsvd_mirror(tfs);
1764 tf_rm_rsvd_meter(tfs);
1765 tf_rm_rsvd_upar(tfs);
1766 tf_rm_rsvd_sp_tcam(tfs);
1767 tf_rm_rsvd_l2_func(tfs);
1768 tf_rm_rsvd_fkb(tfs);
1769 tf_rm_rsvd_tbl_scope(tfs);
1770 tf_rm_rsvd_epoch(tfs);
1771 tf_rm_rsvd_metadata(tfs);
1772 tf_rm_rsvd_ct_state(tfs);
1773 tf_rm_rsvd_range(tfs);
1774 tf_rm_rsvd_lag_entry(tfs);
1778 * Internal function used to mark all the SRAM resources allocated
1779 * that Truflow does not own.
/* SRAM counterpart of tf_rm_reserve_hw(): one helper per SRAM record
 * family (full action, mcg, encap, sp, stats, nat).
 */
1782 tf_rm_reserve_sram(struct tf *tfp)
1784 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1787 * There is no direct AFM resource allocation as it is carved
1788 * statically at AFM boot time. Thus the bit allocators work
1789 * on the full HW resource amount and we just mark everything
1790 * used except the resources that Truflow took ownership off.
1792 tf_rm_rsvd_sram_full_action(tfs);
1793 tf_rm_rsvd_sram_mcg(tfs);
1794 tf_rm_rsvd_sram_encap(tfs);
1795 tf_rm_rsvd_sram_sp(tfs);
1796 tf_rm_rsvd_sram_stats(tfs);
1797 tf_rm_rsvd_sram_nat(tfs);
1801 * Internal function used to allocate and validate all HW resources.
/* Flow: qcaps query -> static-capability check -> request max of each
 * reported type -> alloc message -> re-validate what was granted.
 * NOTE(review): the extraction dropped the error-return lines after
 * each failure log; also the split log string at "validation failed,"
 * lacks a separating space before "error_flag" in the output.
 */
1804 tf_rm_allocate_validate_hw(struct tf *tfp,
1809 struct tf_rm_hw_query hw_query;
1810 struct tf_rm_hw_alloc hw_alloc;
1811 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1812 struct tf_rm_entry *hw_entries;
1813 uint32_t error_flag;
/* Select the per-direction master HW resource table to fill in */
1815 if (dir == TF_DIR_RX)
1816 hw_entries = tfs->resc.rx.hw_entry;
1818 hw_entries = tfs->resc.tx.hw_entry;
1820 /* Query for Session HW Resources */
1821 rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
1825 "%s, HW qcaps message send failed, rc:%s\n",
1831 rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
1835 "%s, HW QCAPS validation failed,"
1836 "error_flag:0x%x, rc:%s\n",
1840 tf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);
1844 /* Post process HW capability */
/* Request everything firmware says is available for this session */
1845 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
1846 hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
1848 /* Allocate Session HW Resources */
1849 rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
1853 "%s, HW alloc message send failed, rc:%s\n",
1859 /* Perform HW allocation validation as its possible the
1860 * resource availability changed between qcaps and alloc
1862 rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
1866 "%s, HW Resource validation failed, rc:%s\n",
1880 * Internal function used to allocate and validate all SRAM resources.
1883 * Pointer to TF handle
1886 * Receive or transmit direction
1890 * -1 - Internal error
/* Same qcaps -> check -> alloc -> validate sequence as the HW variant,
 * operating on the SRAM resource tables. NOTE(review): error-return
 * lines after each log were dropped by the extraction; the split log
 * string before "error_flag:%x" is missing a separating space.
 */
1893 tf_rm_allocate_validate_sram(struct tf *tfp,
1898 struct tf_rm_sram_query sram_query;
1899 struct tf_rm_sram_alloc sram_alloc;
1900 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1901 struct tf_rm_entry *sram_entries;
1902 uint32_t error_flag;
1904 if (dir == TF_DIR_RX)
1905 sram_entries = tfs->resc.rx.sram_entry;
1907 sram_entries = tfs->resc.tx.sram_entry;
1909 /* Query for Session SRAM Resources */
1910 rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
1914 "%s, SRAM qcaps message send failed, rc:%s\n",
1920 rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
1924 "%s, SRAM QCAPS validation failed,"
1925 "error_flag:%x, rc:%s\n",
1929 tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
1933 /* Post process SRAM capability */
1934 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
1935 sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
1937 /* Allocate Session SRAM Resources */
1938 rc = tf_msg_session_sram_resc_alloc(tfp,
1945 "%s, SRAM alloc message send failed, rc:%s\n",
1951 /* Perform SRAM allocation validation as its possible the
1952 * resource availability changed between qcaps and alloc
1954 rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
1958 "%s, SRAM Resource allocation validation failed,"
1973 * Helper function used to prune a HW resource array to only hold
1974 * elements that needs to be flushed.
1980 * Receive or transmit direction
1983 * Master HW Resource database
1985 * [in/out] flush_entries
1986 * Pruned HW Resource database of entries to be flushed. This
1987 * array should be passed in as a complete copy of the master HW
1988 * Resource database. The outgoing result will be a pruned version
1989 * based on the result of the requested checking
1992 * 0 - Success, no flush required
1993 * 1 - Success, flush required
1994 * -1 - Internal error
/* Per-type pattern: look up the direction's bit-allocator pool; when
 * the free count equals the granted stride the type is fully returned
 * and its flush entry is zeroed (nothing to flush). The else branches
 * (presumably marking a flush as required) and the return statement
 * were dropped by the extraction -- confirm against the full file.
 */
1997 tf_rm_hw_to_flush(struct tf_session *tfs,
1999 struct tf_rm_entry *hw_entries,
2000 struct tf_rm_entry *flush_entries)
2005 struct bitalloc *pool;
2007 /* Check all the hw resource pools and check for left over
2008 * elements. Any found will result in the complete pool of a
2009 * type to get invalidated.
2012 TF_RM_GET_POOLS(tfs, dir, &pool,
2013 TF_L2_CTXT_TCAM_POOL_NAME,
2017 free_cnt = ba_free_count(pool);
2018 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {
2019 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;
2020 flush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;
2025 TF_RM_GET_POOLS(tfs, dir, &pool,
2026 TF_PROF_FUNC_POOL_NAME,
2030 free_cnt = ba_free_count(pool);
2031 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {
2032 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;
2033 flush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;
2038 TF_RM_GET_POOLS(tfs, dir, &pool,
2039 TF_PROF_TCAM_POOL_NAME,
2043 free_cnt = ba_free_count(pool);
2044 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {
2045 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;
2046 flush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;
2051 TF_RM_GET_POOLS(tfs, dir, &pool,
2052 TF_EM_PROF_ID_POOL_NAME,
2056 free_cnt = ba_free_count(pool);
2057 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) {
2058 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;
2059 flush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;
/* EM records are not managed by a pool (see tf_rm_init), so their
 * flush entry is unconditionally pruned.
 */
2064 flush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;
2065 flush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;
2067 TF_RM_GET_POOLS(tfs, dir, &pool,
2068 TF_WC_TCAM_PROF_ID_POOL_NAME,
2072 free_cnt = ba_free_count(pool);
2073 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {
2074 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;
2075 flush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;
2080 TF_RM_GET_POOLS(tfs, dir, &pool,
2081 TF_WC_TCAM_POOL_NAME,
2085 free_cnt = ba_free_count(pool);
2086 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {
2087 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;
2088 flush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;
2093 TF_RM_GET_POOLS(tfs, dir, &pool,
2094 TF_METER_PROF_POOL_NAME,
2098 free_cnt = ba_free_count(pool);
2099 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {
2100 flush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;
2101 flush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;
2106 TF_RM_GET_POOLS(tfs, dir, &pool,
2107 TF_METER_INST_POOL_NAME,
2111 free_cnt = ba_free_count(pool);
2112 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {
2113 flush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;
2114 flush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;
2119 TF_RM_GET_POOLS(tfs, dir, &pool,
2120 TF_MIRROR_POOL_NAME,
2124 free_cnt = ba_free_count(pool);
2125 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) {
2126 flush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;
2127 flush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;
2132 TF_RM_GET_POOLS(tfs, dir, &pool,
2137 free_cnt = ba_free_count(pool);
2138 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {
2139 flush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;
2140 flush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;
2145 TF_RM_GET_POOLS(tfs, dir, &pool,
2146 TF_SP_TCAM_POOL_NAME,
2150 free_cnt = ba_free_count(pool);
2151 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {
2152 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;
2153 flush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;
2158 TF_RM_GET_POOLS(tfs, dir, &pool,
2159 TF_L2_FUNC_POOL_NAME,
2163 free_cnt = ba_free_count(pool);
2164 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {
2165 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;
2166 flush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;
2171 TF_RM_GET_POOLS(tfs, dir, &pool,
2176 free_cnt = ba_free_count(pool);
2177 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {
2178 flush_entries[TF_RESC_TYPE_HW_FKB].start = 0;
2179 flush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;
2184 TF_RM_GET_POOLS(tfs, dir, &pool,
2185 TF_TBL_SCOPE_POOL_NAME,
2189 free_cnt = ba_free_count(pool);
2190 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {
2191 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;
2192 flush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;
/* TBL_SCOPE is the only type here that logs diagnostics on leftovers */
2194 TFP_DRV_LOG(ERR, "%s, TBL_SCOPE free_cnt:%d, entries:%d\n",
2197 hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);
2201 TF_RM_GET_POOLS(tfs, dir, &pool,
2202 TF_EPOCH0_POOL_NAME,
2206 free_cnt = ba_free_count(pool);
2207 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {
2208 flush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;
2209 flush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;
2214 TF_RM_GET_POOLS(tfs, dir, &pool,
2215 TF_EPOCH1_POOL_NAME,
2219 free_cnt = ba_free_count(pool);
2220 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {
2221 flush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;
2222 flush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;
2227 TF_RM_GET_POOLS(tfs, dir, &pool,
2228 TF_METADATA_POOL_NAME,
2232 free_cnt = ba_free_count(pool);
2233 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {
2234 flush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;
2235 flush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;
2240 TF_RM_GET_POOLS(tfs, dir, &pool,
2241 TF_CT_STATE_POOL_NAME,
2245 free_cnt = ba_free_count(pool);
2246 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {
2247 flush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;
2248 flush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;
2253 TF_RM_GET_POOLS(tfs, dir, &pool,
2254 TF_RANGE_PROF_POOL_NAME,
2258 free_cnt = ba_free_count(pool);
2259 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {
2260 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;
2261 flush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;
2266 TF_RM_GET_POOLS(tfs, dir, &pool,
2267 TF_RANGE_ENTRY_POOL_NAME,
2271 free_cnt = ba_free_count(pool);
2272 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {
2273 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;
2274 flush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;
2279 TF_RM_GET_POOLS(tfs, dir, &pool,
2280 TF_LAG_ENTRY_POOL_NAME,
2284 free_cnt = ba_free_count(pool);
2285 if (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {
2286 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;
2287 flush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;
2296 * Helper function used to prune a SRAM resource array to only hold
2297 * elements that needs to be flushed.
2303 * Receive or transmit direction
2306 * Master SRAM Resource data base
2308 * [in/out] flush_entries
2309 * Pruned SRAM Resource database of entries to be flushed. This
2310 * array should be passed in as a complete copy of the master SRAM
2311 * Resource database. The outgoing result will be a pruned version
2312 * based on the result of the requested checking
2315 * 0 - Success, no flush required
2316 * 1 - Success, flush required
2317 * -1 - Internal error
/* SRAM counterpart of tf_rm_hw_to_flush(). Single-direction types
 * (MCG: RX only; ENCAP_64B / SP_SMAC_IPV4 / SP_SMAC_IPV6: TX only)
 * check their pool only for the supported direction and always prune
 * the unsupported one. NOTE(review): else branches and the return were
 * dropped by the extraction -- confirm against the full file.
 */
2320 tf_rm_sram_to_flush(struct tf_session *tfs,
2322 struct tf_rm_entry *sram_entries,
2323 struct tf_rm_entry *flush_entries)
2328 struct bitalloc *pool;
2330 /* Check all the sram resource pools and check for left over
2331 * elements. Any found will result in the complete pool of a
2332 * type to get invalidated.
2335 TF_RM_GET_POOLS(tfs, dir, &pool,
2336 TF_SRAM_FULL_ACTION_POOL_NAME,
2340 free_cnt = ba_free_count(pool);
2341 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
2342 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
2343 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
2348 /* Only pools for RX direction */
2349 if (dir == TF_DIR_RX) {
2350 TF_RM_GET_POOLS_RX(tfs, &pool,
2351 TF_SRAM_MCG_POOL_NAME);
2354 free_cnt = ba_free_count(pool);
2355 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
2356 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2357 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2362 /* Always prune TX direction */
2363 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
2364 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
2367 TF_RM_GET_POOLS(tfs, dir, &pool,
2368 TF_SRAM_ENCAP_8B_POOL_NAME,
2372 free_cnt = ba_free_count(pool);
2373 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
2374 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
2375 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
2380 TF_RM_GET_POOLS(tfs, dir, &pool,
2381 TF_SRAM_ENCAP_16B_POOL_NAME,
2385 free_cnt = ba_free_count(pool);
2386 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
2387 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
2388 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
2393 /* Only pools for TX direction */
2394 if (dir == TF_DIR_TX) {
2395 TF_RM_GET_POOLS_TX(tfs, &pool,
2396 TF_SRAM_ENCAP_64B_POOL_NAME);
2399 free_cnt = ba_free_count(pool);
2401 sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
2402 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2403 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2408 /* Always prune RX direction */
2409 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
2410 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
2413 TF_RM_GET_POOLS(tfs, dir, &pool,
2414 TF_SRAM_SP_SMAC_POOL_NAME,
2418 free_cnt = ba_free_count(pool);
2419 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
2420 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
2421 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
2426 /* Only pools for TX direction */
2427 if (dir == TF_DIR_TX) {
2428 TF_RM_GET_POOLS_TX(tfs, &pool,
2429 TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
2432 free_cnt = ba_free_count(pool);
2434 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
2435 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2436 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
2442 /* Always prune RX direction */
2443 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
2444 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
2447 /* Only pools for TX direction */
2448 if (dir == TF_DIR_TX) {
2449 TF_RM_GET_POOLS_TX(tfs, &pool,
2450 TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
2453 free_cnt = ba_free_count(pool);
2455 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
2456 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2457 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
2463 /* Always prune RX direction */
2464 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
2465 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
2468 TF_RM_GET_POOLS(tfs, dir, &pool,
2469 TF_SRAM_STATS_64B_POOL_NAME,
2473 free_cnt = ba_free_count(pool);
2474 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
2475 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
2476 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
2481 TF_RM_GET_POOLS(tfs, dir, &pool,
2482 TF_SRAM_NAT_SPORT_POOL_NAME,
2486 free_cnt = ba_free_count(pool);
2487 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
2488 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
2489 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
2494 TF_RM_GET_POOLS(tfs, dir, &pool,
2495 TF_SRAM_NAT_DPORT_POOL_NAME,
2499 free_cnt = ba_free_count(pool);
2500 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
2501 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
2502 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
2507 TF_RM_GET_POOLS(tfs, dir, &pool,
2508 TF_SRAM_NAT_S_IPV4_POOL_NAME,
2512 free_cnt = ba_free_count(pool);
2513 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
2514 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
2515 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
2520 TF_RM_GET_POOLS(tfs, dir, &pool,
2521 TF_SRAM_NAT_D_IPV4_POOL_NAME,
2525 free_cnt = ba_free_count(pool);
2526 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
2527 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
2528 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
2537 * Helper function used to generate an error log for the HW types that
2538 * needs to be flushed. The types should have been cleaned up ahead of
2539 * invoking tf_close_session.
2542 * HW Resource database holding elements to be flushed
/* Any type with a non-zero stride in the (already pruned) flush array
 * still had outstanding entries at session close -- log each one.
 */
2545 tf_rm_log_hw_flush(enum tf_dir dir,
2546 struct tf_rm_entry *hw_entries)
2550 /* Walk the hw flush array and log the types that wasn't
2553 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
2554 if (hw_entries[i].stride != 0)
2556 "%s, %s was not cleaned up\n",
2558 tf_hcapi_hw_2_str(i));
2563 * Helper function used to generate an error log for the SRAM types
2564 * that needs to be flushed. The types should have been cleaned up
2565 * ahead of invoking tf_close_session.
2568 * SRAM Resource database holding elements to be flushed
/* SRAM counterpart of tf_rm_log_hw_flush(): non-zero stride means the
 * type still had outstanding entries at session close.
 */
2571 tf_rm_log_sram_flush(enum tf_dir dir,
2572 struct tf_rm_entry *sram_entries)
2576 /* Walk the sram flush array and log the types that wasn't
2579 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
2580 if (sram_entries[i].stride != 0)
2582 "%s, %s was not cleaned up\n",
2584 tf_hcapi_sram_2_str(i));
2589 tf_rm_init(struct tf *tfp __rte_unused)
2591 struct tf_session *tfs =
2592 (struct tf_session *)(tfp->session->core_data);
2594 /* This version is host specific and should be checked against
2595 * when attaching as there is no guarantee that a secondary
2596 * would run from same image version.
2598 tfs->ver.major = TF_SESSION_VER_MAJOR;
2599 tfs->ver.minor = TF_SESSION_VER_MINOR;
2600 tfs->ver.update = TF_SESSION_VER_UPDATE;
2602 tfs->session_id.id = 0;
2605 /* Initialization of Table Scopes */
2606 /* ll_init(&tfs->tbl_scope_ll); */
2608 /* Initialization of HW and SRAM resource DB */
2609 memset(&tfs->resc, 0, sizeof(struct tf_rm_db));
2611 /* Initialization of HW Resource Pools */
2612 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2613 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
2614 ba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);
2615 ba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);
2616 ba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);
2617 ba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);
2618 ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);
2619 ba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);
2621 /* TBD, how do we want to handle EM records ?*/
2622 /* EM Records should not be controlled by way of a pool */
2624 ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);
2625 ba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);
2626 ba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);
2627 ba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);
2628 ba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);
2629 ba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);
2630 ba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);
2631 ba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);
2632 ba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);
2633 ba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);
2634 ba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);
2635 ba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);
2637 ba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);
2638 ba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);
2640 ba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);
2641 ba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);
2643 ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);
2644 ba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);
2645 ba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, TF_NUM_L2_FUNC);
2646 ba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);
2647 ba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);
2648 ba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);
2649 ba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);
2650 ba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);
2651 ba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);
2652 ba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);
2653 ba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);
2654 ba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);
2655 ba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);
2656 ba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);
2657 ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);
2658 ba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);
2659 ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);
2660 ba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);
2662 /* Initialization of SRAM Resource Pools
2663 * These pools are set to the TFLIB defined MAX sizes not
2664 * AFM's HW max as to limit the memory consumption
2666 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
2667 TF_RSVD_SRAM_FULL_ACTION_RX);
2668 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
2669 TF_RSVD_SRAM_FULL_ACTION_TX);
2670 /* Only Multicast Group on RX is supported */
2671 ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
2672 TF_RSVD_SRAM_MCG_RX);
2673 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
2674 TF_RSVD_SRAM_ENCAP_8B_RX);
2675 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
2676 TF_RSVD_SRAM_ENCAP_8B_TX);
2677 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
2678 TF_RSVD_SRAM_ENCAP_16B_RX);
2679 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
2680 TF_RSVD_SRAM_ENCAP_16B_TX);
2681 /* Only Encap 64B on TX is supported */
2682 ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
2683 TF_RSVD_SRAM_ENCAP_64B_TX);
2684 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
2685 TF_RSVD_SRAM_SP_SMAC_RX);
2686 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
2687 TF_RSVD_SRAM_SP_SMAC_TX);
2688 /* Only SP SMAC IPv4 on TX is supported */
2689 ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
2690 TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
2691 /* Only SP SMAC IPv6 on TX is supported */
2692 ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
2693 TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
2694 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
2695 TF_RSVD_SRAM_COUNTER_64B_RX);
2696 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
2697 TF_RSVD_SRAM_COUNTER_64B_TX);
2698 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
2699 TF_RSVD_SRAM_NAT_SPORT_RX);
2700 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
2701 TF_RSVD_SRAM_NAT_SPORT_TX);
2702 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
2703 TF_RSVD_SRAM_NAT_DPORT_RX);
2704 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
2705 TF_RSVD_SRAM_NAT_DPORT_TX);
2706 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
2707 TF_RSVD_SRAM_NAT_S_IPV4_RX);
2708 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
2709 TF_RSVD_SRAM_NAT_S_IPV4_TX);
2710 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
2711 TF_RSVD_SRAM_NAT_D_IPV4_RX);
2712 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
2713 TF_RSVD_SRAM_NAT_D_IPV4_TX);
2715 /* Initialization of pools local to TF Core */
2716 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
2717 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
/* Allocates and validates all session resources with firmware.
 *
 * For each direction (RX then TX) the HW resources are allocated and
 * validated first, followed by the SRAM resources.  Once both succeed
 * for every direction, the validated allocations are reserved
 * ('scrubbed') on the local session pools.
 *
 * NOTE(review): the per-call error checks and the function's return
 * statement are elided in this extract — confirm against the full file.
 */
2721 tf_rm_allocate_validate(struct tf *tfp)
2726 	for (i = 0; i < TF_DIR_MAX; i++) {
2727 		rc = tf_rm_allocate_validate_hw(tfp, i);
2730 		rc = tf_rm_allocate_validate_sram(tfp, i);
2735 	/* With both HW and SRAM allocated and validated we can
2736 	 * 'scrub' the reservation on the pools.
2738 	tf_rm_reserve_hw(tfp);
2739 	tf_rm_reserve_sram(tfp);
/* Releases all session RM resources back to firmware on session close.
 *
 * For each direction the function determines which HW and SRAM entries
 * were not previously freed by the session (tf_rm_hw_to_flush /
 * tf_rm_sram_to_flush filling the flush_resc working copy), logs them,
 * and explicitly flushes them with tf_msg_session_*_resc_flush().
 * It then returns the full HW and SRAM reservations to firmware with
 * tf_msg_session_*_resc_free().  Lingering resources are reported by
 * setting rc_close to -ENOTEMPTY but do not abort the close — every
 * direction is still processed and freed.
 */
2745 tf_rm_close(struct tf *tfp)
2750 	struct tf_rm_entry *hw_entries;
2751 	struct tf_rm_entry *hw_flush_entries;
2752 	struct tf_rm_entry *sram_entries;
2753 	struct tf_rm_entry *sram_flush_entries;
2754 	struct tf_session *tfs __rte_unused =
2755 		(struct tf_session *)(tfp->session->core_data);
	/* Working copy of the reservation DB; to_flush helpers trim it
	 * down to only the entries that still need an explicit flush.
	 */
2757 	struct tf_rm_db flush_resc = tfs->resc;
2759 	/* On close it is assumed that the session has already cleaned
2760 	 * up all its resources, individually, while destroying its
2761 	 * flows. No checking is performed thus the behavior is as
2764 	 * Session RM will signal FW to release session resources. FW
2765 	 * will perform invalidation of all the allocated entries
2766 	 * (assures any outstanding resources has been cleared, then
2767 	 * free the FW RM instance.
2769 	 * Session will then be freed by tf_close_session() thus there
2770 	 * is no need to clean each resource pool as the whole session
2774 	for (i = 0; i < TF_DIR_MAX; i++) {
		/* Select the per-direction entry arrays. */
2775 		if (i == TF_DIR_RX) {
2776 			hw_entries = tfs->resc.rx.hw_entry;
2777 			hw_flush_entries = flush_resc.rx.hw_entry;
2778 			sram_entries = tfs->resc.rx.sram_entry;
2779 			sram_flush_entries = flush_resc.rx.sram_entry;
2781 			hw_entries = tfs->resc.tx.hw_entry;
2782 			hw_flush_entries = flush_resc.tx.hw_entry;
2783 			sram_entries = tfs->resc.tx.sram_entry;
2784 			sram_flush_entries = flush_resc.tx.sram_entry;
2787 		/* Check for any not previously freed HW resources and
2788 		 * flush if required.
2790 		rc = tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);
2792 			rc_close = -ENOTEMPTY;
2795 				    "%s, lingering HW resources, rc:%s\n",
2799 			/* Log the entries to be flushed */
2800 			tf_rm_log_hw_flush(i, hw_flush_entries);
2801 			rc = tf_msg_session_hw_resc_flush(tfp,
2808 					    "%s, HW flush failed, rc:%s\n",
2814 		/* Check for any not previously freed SRAM resources
2815 		 * and flush if required.
2817 		rc = tf_rm_sram_to_flush(tfs,
2820 					 sram_flush_entries);
2822 			rc_close = -ENOTEMPTY;
2825 				    "%s, lingering SRAM resources, rc:%s\n",
2829 			/* Log the entries to be flushed */
2830 			tf_rm_log_sram_flush(i, sram_flush_entries);
2832 			rc = tf_msg_session_sram_resc_flush(tfp,
2834 							    sram_flush_entries);
			/* NOTE(review): this is the SRAM flush error path but
			 * the message says "HW flush failed" — looks like a
			 * copy-paste from the HW path above; should read
			 * "SRAM flush failed". Flagged only, as a doc-only
			 * change may not alter runtime strings.
			 */
2839 					    "%s, HW flush failed, rc:%s\n",
		/* Return the full per-direction reservations to FW. */
2845 		rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
2850 			    "%s, HW free failed, rc:%s\n",
2855 		rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
2860 			    "%s, SRAM free failed, rc:%s\n",
/* Shadow DB initialization — compiled only when shadow-copy support
 * (TF_SHADOW) is enabled at build time.  Body elided in this extract.
 */
2869 #if (TF_SHADOW == 1)
2871 tf_rm_shadow_db_init(struct tf_session *tfs)
2877 #endif /* TF_SHADOW */
/* Looks up the bitalloc pool for a given TCAM table type and direction.
 *
 * On success *pool points at the session's pool for that type/dir.
 * rc stays -EOPNOTSUPP for types with no pool support (VEB, SP,
 * CT_RULE); TF_RM_GET_POOLS sets it to -1 on a failed lookup.  Both
 * error cases are logged separately below.
 */
2880 tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,
2882 			    enum tf_tcam_tbl_type type,
2883 			    struct bitalloc **pool)
2885 	int rc = -EOPNOTSUPP;
2890 	case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:
2891 		TF_RM_GET_POOLS(tfs, dir, pool,
2892 				TF_L2_CTXT_TCAM_POOL_NAME,
2895 	case TF_TCAM_TBL_TYPE_PROF_TCAM:
2896 		TF_RM_GET_POOLS(tfs, dir, pool,
2897 				TF_PROF_TCAM_POOL_NAME,
2900 	case TF_TCAM_TBL_TYPE_WC_TCAM:
2901 		TF_RM_GET_POOLS(tfs, dir, pool,
2902 				TF_WC_TCAM_POOL_NAME,
	/* Not supported: fall through with rc == -EOPNOTSUPP. */
2905 	case TF_TCAM_TBL_TYPE_VEB_TCAM:
2906 	case TF_TCAM_TBL_TYPE_SP_TCAM:
2907 	case TF_TCAM_TBL_TYPE_CT_RULE_TCAM:
2912 	if (rc == -EOPNOTSUPP) {
2914 			    "%s, Tcam type not supported, type:%d\n",
2918 	} else if (rc == -1) {
2920 			    "%s, Tcam type lookup failed, type:%d\n",
/* Looks up the bitalloc pool for a given index-table type and direction.
 *
 * SRAM-backed action types map to TF_SRAM_* pools; HW-backed types
 * (meter, mirror, UPAR, epoch, etc.) map to their HW pools.  Types that
 * exist in only one direction bail out early for the other direction,
 * leaving rc at -EOPNOTSUPP.  TF_RM_GET_POOLS sets rc to -1 on a failed
 * lookup; both error cases are logged separately below.
 */
2930 tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,
2932 			   enum tf_tbl_type type,
2933 			   struct bitalloc **pool)
2935 	int rc = -EOPNOTSUPP;
2940 	case TF_TBL_TYPE_FULL_ACT_RECORD:
2941 		TF_RM_GET_POOLS(tfs, dir, pool,
2942 				TF_SRAM_FULL_ACTION_POOL_NAME,
2945 	case TF_TBL_TYPE_MCAST_GROUPS:
2946 		/* No pools for TX direction, so bail out */
2947 		if (dir == TF_DIR_TX)
2949 		TF_RM_GET_POOLS_RX(tfs, pool,
2950 				   TF_SRAM_MCG_POOL_NAME);
2953 	case TF_TBL_TYPE_ACT_ENCAP_8B:
2954 		TF_RM_GET_POOLS(tfs, dir, pool,
2955 				TF_SRAM_ENCAP_8B_POOL_NAME,
2958 	case TF_TBL_TYPE_ACT_ENCAP_16B:
2959 		TF_RM_GET_POOLS(tfs, dir, pool,
2960 				TF_SRAM_ENCAP_16B_POOL_NAME,
2963 	case TF_TBL_TYPE_ACT_ENCAP_64B:
2964 		/* No pools for RX direction, so bail out */
2965 		if (dir == TF_DIR_RX)
2967 		TF_RM_GET_POOLS_TX(tfs, pool,
2968 				   TF_SRAM_ENCAP_64B_POOL_NAME);
2971 	case TF_TBL_TYPE_ACT_SP_SMAC:
2972 		TF_RM_GET_POOLS(tfs, dir, pool,
2973 				TF_SRAM_SP_SMAC_POOL_NAME,
2976 	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
2977 		/* No pools for RX direction, so bail out */
2978 		if (dir == TF_DIR_RX)
2980 		TF_RM_GET_POOLS_TX(tfs, pool,
2981 				   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
2984 	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
2985 		/* No pools for RX direction, so bail out */
2986 		if (dir == TF_DIR_RX)
2988 		TF_RM_GET_POOLS_TX(tfs, pool,
2989 				   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
2992 	case TF_TBL_TYPE_ACT_STATS_64:
2993 		TF_RM_GET_POOLS(tfs, dir, pool,
2994 				TF_SRAM_STATS_64B_POOL_NAME,
2997 	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
2998 		TF_RM_GET_POOLS(tfs, dir, pool,
2999 				TF_SRAM_NAT_SPORT_POOL_NAME,
3002 	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3003 		TF_RM_GET_POOLS(tfs, dir, pool,
3004 				TF_SRAM_NAT_S_IPV4_POOL_NAME,
3007 	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3008 		TF_RM_GET_POOLS(tfs, dir, pool,
3009 				TF_SRAM_NAT_D_IPV4_POOL_NAME,
3012 	case TF_TBL_TYPE_METER_PROF:
3013 		TF_RM_GET_POOLS(tfs, dir, pool,
3014 				TF_METER_PROF_POOL_NAME,
3017 	case TF_TBL_TYPE_METER_INST:
3018 		TF_RM_GET_POOLS(tfs, dir, pool,
3019 				TF_METER_INST_POOL_NAME,
3022 	case TF_TBL_TYPE_MIRROR_CONFIG:
3023 		TF_RM_GET_POOLS(tfs, dir, pool,
3024 				TF_MIRROR_POOL_NAME,
3027 	case TF_TBL_TYPE_UPAR:
3028 		TF_RM_GET_POOLS(tfs, dir, pool,
3032 	case TF_TBL_TYPE_EPOCH0:
3033 		TF_RM_GET_POOLS(tfs, dir, pool,
3034 				TF_EPOCH0_POOL_NAME,
3037 	case TF_TBL_TYPE_EPOCH1:
3038 		TF_RM_GET_POOLS(tfs, dir, pool,
3039 				TF_EPOCH1_POOL_NAME,
3042 	case TF_TBL_TYPE_METADATA:
3043 		TF_RM_GET_POOLS(tfs, dir, pool,
3044 				TF_METADATA_POOL_NAME,
3047 	case TF_TBL_TYPE_CT_STATE:
3048 		TF_RM_GET_POOLS(tfs, dir, pool,
3049 				TF_CT_STATE_POOL_NAME,
3052 	case TF_TBL_TYPE_RANGE_PROF:
3053 		TF_RM_GET_POOLS(tfs, dir, pool,
3054 				TF_RANGE_PROF_POOL_NAME,
3057 	case TF_TBL_TYPE_RANGE_ENTRY:
3058 		TF_RM_GET_POOLS(tfs, dir, pool,
3059 				TF_RANGE_ENTRY_POOL_NAME,
3062 	case TF_TBL_TYPE_LAG:
3063 		TF_RM_GET_POOLS(tfs, dir, pool,
3064 				TF_LAG_ENTRY_POOL_NAME,
3067 	/* Not yet supported */
3068 	case TF_TBL_TYPE_ACT_ENCAP_32B:
3069 	case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3070 	case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3071 	case TF_TBL_TYPE_VNIC_SVIF:
3073 	/* No bitalloc pools for these types */
3074 	case TF_TBL_TYPE_EXT:
3079 	if (rc == -EOPNOTSUPP) {
3081 			    "%s, Table type not supported, type:%d\n",
3085 	} else if (rc == -1) {
3087 			    "%s, Table type lookup failed, type:%d\n",
/* Converts a TF table type into its HCAPI resource type.
 *
 * Action/SRAM-backed table types map to TF_RESC_TYPE_SRAM_* values;
 * the remaining table types map to TF_RESC_TYPE_HW_* values.  The
 * result is returned through *hcapi_type.  Types listed at the bottom
 * have no mapping; their handling (and the function's return paths)
 * are elided in this extract.
 */
3097 tf_rm_convert_tbl_type(enum tf_tbl_type type,
3098 		       uint32_t *hcapi_type)
3103 	case TF_TBL_TYPE_FULL_ACT_RECORD:
3104 		*hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
3106 	case TF_TBL_TYPE_MCAST_GROUPS:
3107 		*hcapi_type = TF_RESC_TYPE_SRAM_MCG;
3109 	case TF_TBL_TYPE_ACT_ENCAP_8B:
3110 		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
3112 	case TF_TBL_TYPE_ACT_ENCAP_16B:
3113 		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
3115 	case TF_TBL_TYPE_ACT_ENCAP_64B:
3116 		*hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
3118 	case TF_TBL_TYPE_ACT_SP_SMAC:
3119 		*hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
3121 	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3122 		*hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
3124 	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3125 		*hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
3127 	case TF_TBL_TYPE_ACT_STATS_64:
3128 		*hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
3130 	case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3131 		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
3133 	case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3134 		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
3136 	case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3137 		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
3139 	case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3140 		*hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
	/* HW-backed table types from here down. */
3142 	case TF_TBL_TYPE_METER_PROF:
3143 		*hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
3145 	case TF_TBL_TYPE_METER_INST:
3146 		*hcapi_type = TF_RESC_TYPE_HW_METER_INST;
3148 	case TF_TBL_TYPE_MIRROR_CONFIG:
3149 		*hcapi_type = TF_RESC_TYPE_HW_MIRROR;
3151 	case TF_TBL_TYPE_UPAR:
3152 		*hcapi_type = TF_RESC_TYPE_HW_UPAR;
3154 	case TF_TBL_TYPE_EPOCH0:
3155 		*hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
3157 	case TF_TBL_TYPE_EPOCH1:
3158 		*hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
3160 	case TF_TBL_TYPE_METADATA:
3161 		*hcapi_type = TF_RESC_TYPE_HW_METADATA;
3163 	case TF_TBL_TYPE_CT_STATE:
3164 		*hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
3166 	case TF_TBL_TYPE_RANGE_PROF:
3167 		*hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
3169 	case TF_TBL_TYPE_RANGE_ENTRY:
3170 		*hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
3172 	case TF_TBL_TYPE_LAG:
3173 		*hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
3175 	/* Not yet supported */
3176 	case TF_TBL_TYPE_ACT_ENCAP_32B:
3177 	case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
3178 	case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
3179 	case TF_TBL_TYPE_VNIC_SVIF:
3180 	case TF_TBL_TYPE_EXT: /* No pools for this type */
3190 tf_rm_convert_index(struct tf_session *tfs,
3192 enum tf_tbl_type type,
3193 enum tf_rm_convert_type c_type,
3195 uint32_t *convert_index)
3198 struct tf_rm_resc *resc;
3199 uint32_t hcapi_type;
3200 uint32_t base_index;
3202 if (dir == TF_DIR_RX)
3203 resc = &tfs->resc.rx;
3204 else if (dir == TF_DIR_TX)
3205 resc = &tfs->resc.tx;
3209 rc = tf_rm_convert_tbl_type(type, &hcapi_type);
3214 case TF_TBL_TYPE_FULL_ACT_RECORD:
3215 case TF_TBL_TYPE_MCAST_GROUPS:
3216 case TF_TBL_TYPE_ACT_ENCAP_8B:
3217 case TF_TBL_TYPE_ACT_ENCAP_16B:
3218 case TF_TBL_TYPE_ACT_ENCAP_32B:
3219 case TF_TBL_TYPE_ACT_ENCAP_64B:
3220 case TF_TBL_TYPE_ACT_SP_SMAC:
3221 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
3222 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
3223 case TF_TBL_TYPE_ACT_STATS_64:
3224 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
3225 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
3226 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
3227 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
3228 base_index = resc->sram_entry[hcapi_type].start;
3230 case TF_TBL_TYPE_MIRROR_CONFIG:
3231 case TF_TBL_TYPE_METER_PROF:
3232 case TF_TBL_TYPE_METER_INST:
3233 case TF_TBL_TYPE_UPAR:
3234 case TF_TBL_TYPE_EPOCH0:
3235 case TF_TBL_TYPE_EPOCH1:
3236 case TF_TBL_TYPE_METADATA:
3237 case TF_TBL_TYPE_CT_STATE:
3238 case TF_TBL_TYPE_RANGE_PROF:
3239 case TF_TBL_TYPE_RANGE_ENTRY:
3240 case TF_TBL_TYPE_LAG:
3241 base_index = resc->hw_entry[hcapi_type].start;
3243 /* Not yet supported */
3244 case TF_TBL_TYPE_VNIC_SVIF:
3245 case TF_TBL_TYPE_EXT: /* No pools for this type */
3251 case TF_RM_CONVERT_RM_BASE:
3252 *convert_index = index - base_index;
3254 case TF_RM_CONVERT_ADD_BASE:
3255 *convert_index = index + base_index;