1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
8 #include <rte_common.h>
12 #include "tf_session.h"
13 #include "tf_resources.h"
18 * Internal macro to perform HW resource allocation check between what
19 * firmware reports and what was statically requested.
22 * struct tf_rm_hw_query *hquery - Pointer to the hw query result
23 * enum tf_dir dir - Direction to process
24 * enum tf_resource_type_hw hcapi_type - HCAPI type, the index element
25 * in the hw query structure
26 * define def_value - Define value to check against
27 * uint32_t *eflag - Result of the check
29 #define TF_RM_CHECK_HW_ALLOC(hquery, dir, hcapi_type, def_value, eflag) do { \
30 if ((dir) == TF_DIR_RX) { \
31 if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _RX) \
32 *(eflag) |= 1 << (hcapi_type); \
34 if ((hquery)->hw_query[(hcapi_type)].max != def_value ## _TX) \
35 *(eflag) |= 1 << (hcapi_type); \
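/* Illustrative usage sketch, not part of the original source. The macro
 * compares the firmware-reported max for one HCAPI type against the
 * statically reserved define and sets a per-type error bit; the define is
 * assumed to have _RX/_TX variants (e.g. TF_RSVD_RANGE_ENTRY_RX):
 *
 *   uint32_t error_flag = 0;
 *
 *   TF_RM_CHECK_HW_ALLOC(&hw_query,
 *                        TF_DIR_RX,
 *                        TF_RESC_TYPE_HW_RANGE_ENTRY,
 *                        TF_RSVD_RANGE_ENTRY,
 *                        &error_flag);
 *   if (error_flag != 0)
 *       return -ENOMEM;
 */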
40 * Internal macro to perform SRAM resource allocation check between what
41 * firmware reports and what was statically requested.
44 * struct tf_rm_sram_query *squery - Pointer to the sram query result
45 * enum tf_dir dir - Direction to process
46 * enum tf_resource_type_sram hcapi_type - HCAPI type, the index element
47 * in the sram query structure
48 * define def_value - Define value to check against
49 * uint32_t *eflag - Result of the check
51 #define TF_RM_CHECK_SRAM_ALLOC(squery, dir, hcapi_type, def_value, eflag) do { \
52 if ((dir) == TF_DIR_RX) { \
53 if ((squery)->sram_query[(hcapi_type)].max != def_value ## _RX)\
54 *(eflag) |= 1 << (hcapi_type); \
56 if ((squery)->sram_query[(hcapi_type)].max != def_value ## _TX)\
57 *(eflag) |= 1 << (hcapi_type); \
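/* Illustrative usage sketch, not part of the original source. The SRAM
 * variant behaves the same way but indexes sram_query[] and appends _RX
 * or _TX to the reserved define based on direction:
 *
 *   uint32_t error_flag = 0;
 *
 *   TF_RM_CHECK_SRAM_ALLOC(&sram_query,
 *                          TF_DIR_TX,
 *                          TF_RESC_TYPE_SRAM_FULL_ACTION,
 *                          TF_RSVD_SRAM_FULL_ACTION,
 *                          &error_flag);
 */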
62 * Internal macro to convert a reserved resource define name to be direction specific.
66 * enum tf_dir dir - Direction to process
67 * string type - Type name to append RX or TX to
68 * string dtype - Direction specific type
72 #define TF_RESC_RSVD(dir, type, dtype) do { \
73 if ((dir) == TF_DIR_RX) \
74 (dtype) = type ## _RX; \
76 (dtype) = type ## _TX; \
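/* Illustrative expansion sketch, not part of the original source. The
 * token pasting selects the direction specific reserved define:
 *
 *   uint32_t value;
 *   TF_RESC_RSVD(TF_DIR_RX, TF_RSVD_SRAM_MCG, value);
 *
 * expands (for the RX case) to: value = TF_RSVD_SRAM_MCG_RX;
 */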
80 *tf_dir_2_str(enum tf_dir dir)
88 return "Invalid direction";
93 *tf_ident_2_str(enum tf_identifier_type id_type)
96 case TF_IDENT_TYPE_L2_CTXT:
97 return "l2_ctxt_remap";
98 case TF_IDENT_TYPE_PROF_FUNC:
100 case TF_IDENT_TYPE_WC_PROF:
102 case TF_IDENT_TYPE_EM_PROF:
104 case TF_IDENT_TYPE_L2_FUNC:
109 return "Invalid identifier";
113 *tf_hcapi_sram_2_str(enum tf_resource_type_sram sram_type)
116 case TF_RESC_TYPE_SRAM_FULL_ACTION:
117 return "Full action";
118 case TF_RESC_TYPE_SRAM_MCG:
120 case TF_RESC_TYPE_SRAM_ENCAP_8B:
122 case TF_RESC_TYPE_SRAM_ENCAP_16B:
124 case TF_RESC_TYPE_SRAM_ENCAP_64B:
126 case TF_RESC_TYPE_SRAM_SP_SMAC:
127 return "Source properties SMAC";
128 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
129 return "Source properties SMAC IPv4";
130 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
131 return "Source properties IPv6";
132 case TF_RESC_TYPE_SRAM_COUNTER_64B:
133 return "Counter 64B";
134 case TF_RESC_TYPE_SRAM_NAT_SPORT:
135 return "NAT source port";
136 case TF_RESC_TYPE_SRAM_NAT_DPORT:
137 return "NAT destination port";
138 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
139 return "NAT source IPv4";
140 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
141 return "NAT destination IPv4";
143 return "Invalid identifier";
148 * Helper function to perform a SRAM HCAPI resource type lookup
149 * against the reserved value of the same static type.
152 * -EOPNOTSUPP - Reserved resource type not supported
153 * Value - Integer value of the reserved value for the requested type
156 tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)
158 uint32_t value = -EOPNOTSUPP;
161 case TF_RESC_TYPE_SRAM_FULL_ACTION:
162 TF_RESC_RSVD(dir, TF_RSVD_SRAM_FULL_ACTION, value);
164 case TF_RESC_TYPE_SRAM_MCG:
165 TF_RESC_RSVD(dir, TF_RSVD_SRAM_MCG, value);
167 case TF_RESC_TYPE_SRAM_ENCAP_8B:
168 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_8B, value);
170 case TF_RESC_TYPE_SRAM_ENCAP_16B:
171 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_16B, value);
173 case TF_RESC_TYPE_SRAM_ENCAP_64B:
174 TF_RESC_RSVD(dir, TF_RSVD_SRAM_ENCAP_64B, value);
176 case TF_RESC_TYPE_SRAM_SP_SMAC:
177 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC, value);
179 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV4:
180 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV4, value);
182 case TF_RESC_TYPE_SRAM_SP_SMAC_IPV6:
183 TF_RESC_RSVD(dir, TF_RSVD_SRAM_SP_SMAC_IPV6, value);
185 case TF_RESC_TYPE_SRAM_COUNTER_64B:
186 TF_RESC_RSVD(dir, TF_RSVD_SRAM_COUNTER_64B, value);
188 case TF_RESC_TYPE_SRAM_NAT_SPORT:
189 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_SPORT, value);
191 case TF_RESC_TYPE_SRAM_NAT_DPORT:
192 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_DPORT, value);
194 case TF_RESC_TYPE_SRAM_NAT_S_IPV4:
195 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_S_IPV4, value);
197 case TF_RESC_TYPE_SRAM_NAT_D_IPV4:
198 TF_RESC_RSVD(dir, TF_RSVD_SRAM_NAT_D_IPV4, value);
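/* Illustrative usage sketch, not part of the original source. The lookup
 * is used when reporting qcaps mismatches, e.g.:
 *
 *   uint32_t rsvd;
 *
 *   rsvd = tf_rm_rsvd_sram_value(TF_DIR_RX, TF_RESC_TYPE_SRAM_MCG);
 *
 * which, for RX, resolves to TF_RSVD_SRAM_MCG_RX. A type without a
 * reserved value returns the initialised -EOPNOTSUPP.
 */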
208 * Helper function to print all the SRAM resource qcaps errors
209 * reported in the error_flag.
212 * Receive or transmit direction
215 * Pointer to the sram error flags created at time of the query check
218 tf_rm_print_sram_qcaps_error(enum tf_dir dir,
219 struct tf_rm_sram_query *sram_query,
220 uint32_t *error_flag)
224 PMD_DRV_LOG(ERR, "QCAPS errors SRAM\n");
225 PMD_DRV_LOG(ERR, " Direction: %s\n", tf_dir_2_str(dir));
226 PMD_DRV_LOG(ERR, " Elements:\n");
228 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
229 if (*error_flag & 1 << i)
230 PMD_DRV_LOG(ERR, " %s, %d elem available, req:%d\n",
231 tf_hcapi_sram_2_str(i),
232 sram_query->sram_query[i].max,
233 tf_rm_rsvd_sram_value(dir, i));
238 * Performs a HW resource check between what firmware capability
239 * reports and what the core expects to be available.
241 * Firmware performs the resource carving at AFM init time and the
242 * resource capability is reported in the TruFlow qcaps msg.
245 * Pointer to HW Query data structure. Query holds what the firmware
246 * offers of the HW resources.
249 * Receive or transmit direction
251 * [in/out] error_flag
252 * Pointer to a bit array indicating the error of a single HCAPI
253 * resource type. When a bit is set to 1, the HCAPI resource type
254 * failed static allocation.
258 * -ENOMEM - Failure on one of the allocated resources. Check the
259 * error_flag to see which types are flagged as errored.
262 tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,
264 uint32_t *error_flag)
267 TF_RM_CHECK_HW_ALLOC(query,
269 TF_RESC_TYPE_HW_RANGE_ENTRY,
273 if (*error_flag != 0)
280 * Performs a SRAM resource check between what firmware capability
281 * reports and what the core expects to be available.
283 * Firmware performs the resource carving at AFM init time and the
284 * resource capability is reported in the TruFlow qcaps msg.
287 * Pointer to SRAM Query data structure. Query holds what the
288 * firmware offers of the SRAM resources.
291 * Receive or transmit direction
293 * [in/out] error_flag
294 * Pointer to a bit array indicating the error of a single HCAPI
295 * resource type. When a bit is set to 1, the HCAPI resource type
296 * failed static allocation.
300 * -ENOMEM - Failure on one of the allocated resources. Check the
301 * error_flag to see which types are flagged as errored.
304 tf_rm_check_sram_qcaps_static(struct tf_rm_sram_query *query,
306 uint32_t *error_flag)
310 TF_RM_CHECK_SRAM_ALLOC(query,
312 TF_RESC_TYPE_SRAM_FULL_ACTION,
313 TF_RSVD_SRAM_FULL_ACTION,
316 TF_RM_CHECK_SRAM_ALLOC(query,
318 TF_RESC_TYPE_SRAM_MCG,
322 TF_RM_CHECK_SRAM_ALLOC(query,
324 TF_RESC_TYPE_SRAM_ENCAP_8B,
325 TF_RSVD_SRAM_ENCAP_8B,
328 TF_RM_CHECK_SRAM_ALLOC(query,
330 TF_RESC_TYPE_SRAM_ENCAP_16B,
331 TF_RSVD_SRAM_ENCAP_16B,
334 TF_RM_CHECK_SRAM_ALLOC(query,
336 TF_RESC_TYPE_SRAM_ENCAP_64B,
337 TF_RSVD_SRAM_ENCAP_64B,
340 TF_RM_CHECK_SRAM_ALLOC(query,
342 TF_RESC_TYPE_SRAM_SP_SMAC,
343 TF_RSVD_SRAM_SP_SMAC,
346 TF_RM_CHECK_SRAM_ALLOC(query,
348 TF_RESC_TYPE_SRAM_SP_SMAC_IPV4,
349 TF_RSVD_SRAM_SP_SMAC_IPV4,
352 TF_RM_CHECK_SRAM_ALLOC(query,
354 TF_RESC_TYPE_SRAM_SP_SMAC_IPV6,
355 TF_RSVD_SRAM_SP_SMAC_IPV6,
358 TF_RM_CHECK_SRAM_ALLOC(query,
360 TF_RESC_TYPE_SRAM_COUNTER_64B,
361 TF_RSVD_SRAM_COUNTER_64B,
364 TF_RM_CHECK_SRAM_ALLOC(query,
366 TF_RESC_TYPE_SRAM_NAT_SPORT,
367 TF_RSVD_SRAM_NAT_SPORT,
370 TF_RM_CHECK_SRAM_ALLOC(query,
372 TF_RESC_TYPE_SRAM_NAT_DPORT,
373 TF_RSVD_SRAM_NAT_DPORT,
376 TF_RM_CHECK_SRAM_ALLOC(query,
378 TF_RESC_TYPE_SRAM_NAT_S_IPV4,
379 TF_RSVD_SRAM_NAT_S_IPV4,
382 TF_RM_CHECK_SRAM_ALLOC(query,
384 TF_RESC_TYPE_SRAM_NAT_D_IPV4,
385 TF_RSVD_SRAM_NAT_D_IPV4,
388 if (*error_flag != 0)
395 * Internal function to mark pool entries used.
398 tf_rm_reserve_range(uint32_t count,
402 struct bitalloc *pool)
406 /* If no resources have been requested we mark everything
410 for (i = 0; i < max; i++)
411 ba_alloc_index(pool, i);
413 /* Support 2 main modes
414 * Reserved range starts from bottom up (with
415 * pre-reserved value or not)
416 * - begin = 0 to end xx
417 * - begin = 1 to end xx
419 * Reserved range starts from top down
420 * - begin = yy to end max
423 /* Bottom up check, start from 0 */
424 if (rsv_begin == 0) {
425 for (i = rsv_end + 1; i < max; i++)
426 ba_alloc_index(pool, i);
429 /* Bottom up check, start from 1 or higher OR
432 if (rsv_begin >= 1) {
433 /* Allocate from 0 until start */
434 for (i = 0; i < rsv_begin; i++)
435 ba_alloc_index(pool, i);
437 /* Skip and then do the remaining */
438 if (rsv_end < max - 1) {
439 for (i = rsv_end; i < max; i++)
440 ba_alloc_index(pool, i);
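/* Illustrative sketch of the reservation modes, not part of the original
 * source; the index values are hypothetical:
 *
 * - count == 0: nothing was granted to TruFlow, so every index in
 *   [0, max) is marked used.
 * - rsv_begin == 0, rsv_end == 9, max == 32: indices 10..31 are marked
 *   used, leaving the bottom-up reserved window free for allocation.
 * - rsv_begin >= 1: indices 0..rsv_begin-1 are marked used first, then
 *   indices rsv_end..max-1 when the reserved range does not reach the
 *   top of the pool.
 */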
447 * Internal function to mark all the l2 ctxt allocated that Truflow does not own.
451 tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)
453 uint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;
456 /* l2 ctxt rx direction */
457 if (tfs->resc.rx.hw_entry[index].stride > 0)
458 end = tfs->resc.rx.hw_entry[index].start +
459 tfs->resc.rx.hw_entry[index].stride - 1;
461 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
462 tfs->resc.rx.hw_entry[index].start,
465 tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);
467 /* l2 ctxt tx direction */
468 if (tfs->resc.tx.hw_entry[index].stride > 0)
469 end = tfs->resc.tx.hw_entry[index].start +
470 tfs->resc.tx.hw_entry[index].stride - 1;
472 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
473 tfs->resc.tx.hw_entry[index].start,
476 tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);
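/* Illustrative sketch of the start/stride bookkeeping above, not part of
 * the original source; the values are hypothetical. If firmware granted
 * start = 64 and stride = 16 for the RX L2 CTXT TCAM, then
 * end = 64 + 16 - 1 = 79, and [64, 79] is passed to tf_rm_reserve_range()
 * as the reserved window so the indices outside it are marked used in the
 * session pool.
 */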
480 * Internal function to mark all the l2 func resources allocated that
481 * Truflow does not own.
484 tf_rm_rsvd_l2_func(struct tf_session *tfs)
486 uint32_t index = TF_RESC_TYPE_HW_L2_FUNC;
489 /* l2 func rx direction */
490 if (tfs->resc.rx.hw_entry[index].stride > 0)
491 end = tfs->resc.rx.hw_entry[index].start +
492 tfs->resc.rx.hw_entry[index].stride - 1;
494 tf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,
495 tfs->resc.rx.hw_entry[index].start,
498 tfs->TF_L2_FUNC_POOL_NAME_RX);
500 /* l2 func tx direction */
501 if (tfs->resc.tx.hw_entry[index].stride > 0)
502 end = tfs->resc.tx.hw_entry[index].start +
503 tfs->resc.tx.hw_entry[index].stride - 1;
505 tf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,
506 tfs->resc.tx.hw_entry[index].start,
509 tfs->TF_L2_FUNC_POOL_NAME_TX);
513 * Internal function to mark all the full action resources allocated
514 * that Truflow does not own.
517 tf_rm_rsvd_sram_full_action(struct tf_session *tfs)
519 uint32_t index = TF_RESC_TYPE_SRAM_FULL_ACTION;
522 /* full action rx direction */
523 if (tfs->resc.rx.sram_entry[index].stride > 0)
524 end = tfs->resc.rx.sram_entry[index].start +
525 tfs->resc.rx.sram_entry[index].stride - 1;
527 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
528 TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_RX,
530 TF_RSVD_SRAM_FULL_ACTION_RX,
531 tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX);
533 /* full action tx direction */
534 if (tfs->resc.tx.sram_entry[index].stride > 0)
535 end = tfs->resc.tx.sram_entry[index].start +
536 tfs->resc.tx.sram_entry[index].stride - 1;
538 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
539 TF_RSVD_SRAM_FULL_ACTION_BEGIN_IDX_TX,
541 TF_RSVD_SRAM_FULL_ACTION_TX,
542 tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX);
546 * Internal function to mark all the multicast group resources
547 * allocated that Truflow does not own.
550 tf_rm_rsvd_sram_mcg(struct tf_session *tfs)
552 uint32_t index = TF_RESC_TYPE_SRAM_MCG;
555 /* multicast group rx direction */
556 if (tfs->resc.rx.sram_entry[index].stride > 0)
557 end = tfs->resc.rx.sram_entry[index].start +
558 tfs->resc.rx.sram_entry[index].stride - 1;
560 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
561 TF_RSVD_SRAM_MCG_BEGIN_IDX_RX,
564 tfs->TF_SRAM_MCG_POOL_NAME_RX);
566 /* Multicast Group on TX is not supported */
570 * Internal function to mark all the encap resources allocated that
571 * Truflow does not own.
574 tf_rm_rsvd_sram_encap(struct tf_session *tfs)
576 uint32_t index = TF_RESC_TYPE_SRAM_ENCAP_8B;
579 /* encap 8b rx direction */
580 if (tfs->resc.rx.sram_entry[index].stride > 0)
581 end = tfs->resc.rx.sram_entry[index].start +
582 tfs->resc.rx.sram_entry[index].stride - 1;
584 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
585 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_RX,
587 TF_RSVD_SRAM_ENCAP_8B_RX,
588 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX);
590 /* encap 8b tx direction */
591 if (tfs->resc.tx.sram_entry[index].stride > 0)
592 end = tfs->resc.tx.sram_entry[index].start +
593 tfs->resc.tx.sram_entry[index].stride - 1;
595 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
596 TF_RSVD_SRAM_ENCAP_8B_BEGIN_IDX_TX,
598 TF_RSVD_SRAM_ENCAP_8B_TX,
599 tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX);
601 index = TF_RESC_TYPE_SRAM_ENCAP_16B;
603 /* encap 16b rx direction */
604 if (tfs->resc.rx.sram_entry[index].stride > 0)
605 end = tfs->resc.rx.sram_entry[index].start +
606 tfs->resc.rx.sram_entry[index].stride - 1;
608 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
609 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_RX,
611 TF_RSVD_SRAM_ENCAP_16B_RX,
612 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX);
614 /* encap 16b tx direction */
615 if (tfs->resc.tx.sram_entry[index].stride > 0)
616 end = tfs->resc.tx.sram_entry[index].start +
617 tfs->resc.tx.sram_entry[index].stride - 1;
619 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
620 TF_RSVD_SRAM_ENCAP_16B_BEGIN_IDX_TX,
622 TF_RSVD_SRAM_ENCAP_16B_TX,
623 tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX);
625 index = TF_RESC_TYPE_SRAM_ENCAP_64B;
627 /* Encap 64B not supported on RX */
629 /* Encap 64b tx direction */
630 if (tfs->resc.tx.sram_entry[index].stride > 0)
631 end = tfs->resc.tx.sram_entry[index].start +
632 tfs->resc.tx.sram_entry[index].stride - 1;
634 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
635 TF_RSVD_SRAM_ENCAP_64B_BEGIN_IDX_TX,
637 TF_RSVD_SRAM_ENCAP_64B_TX,
638 tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX);
642 * Internal function to mark all the sp resources allocated that
643 * Truflow does not own.
646 tf_rm_rsvd_sram_sp(struct tf_session *tfs)
648 uint32_t index = TF_RESC_TYPE_SRAM_SP_SMAC;
651 /* sp smac rx direction */
652 if (tfs->resc.rx.sram_entry[index].stride > 0)
653 end = tfs->resc.rx.sram_entry[index].start +
654 tfs->resc.rx.sram_entry[index].stride - 1;
656 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
657 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_RX,
659 TF_RSVD_SRAM_SP_SMAC_RX,
660 tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX);
662 /* sp smac tx direction */
663 if (tfs->resc.tx.sram_entry[index].stride > 0)
664 end = tfs->resc.tx.sram_entry[index].start +
665 tfs->resc.tx.sram_entry[index].stride - 1;
667 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
668 TF_RSVD_SRAM_SP_SMAC_BEGIN_IDX_TX,
670 TF_RSVD_SRAM_SP_SMAC_TX,
671 tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX);
673 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
675 /* SP SMAC IPv4 not supported on RX */
677 /* sp smac ipv4 tx direction */
678 if (tfs->resc.tx.sram_entry[index].stride > 0)
679 end = tfs->resc.tx.sram_entry[index].start +
680 tfs->resc.tx.sram_entry[index].stride - 1;
682 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
683 TF_RSVD_SRAM_SP_SMAC_IPV4_BEGIN_IDX_TX,
685 TF_RSVD_SRAM_SP_SMAC_IPV4_TX,
686 tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX);
688 index = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
690 /* SP SMAC IPv6 not supported on RX */
692 /* sp smac ipv6 tx direction */
693 if (tfs->resc.tx.sram_entry[index].stride > 0)
694 end = tfs->resc.tx.sram_entry[index].start +
695 tfs->resc.tx.sram_entry[index].stride - 1;
697 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
698 TF_RSVD_SRAM_SP_SMAC_IPV6_BEGIN_IDX_TX,
700 TF_RSVD_SRAM_SP_SMAC_IPV6_TX,
701 tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX);
705 * Internal function to mark all the stat resources allocated that
706 * Truflow does not own.
709 tf_rm_rsvd_sram_stats(struct tf_session *tfs)
711 uint32_t index = TF_RESC_TYPE_SRAM_COUNTER_64B;
714 /* counter 64b rx direction */
715 if (tfs->resc.rx.sram_entry[index].stride > 0)
716 end = tfs->resc.rx.sram_entry[index].start +
717 tfs->resc.rx.sram_entry[index].stride - 1;
719 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
720 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_RX,
722 TF_RSVD_SRAM_COUNTER_64B_RX,
723 tfs->TF_SRAM_STATS_64B_POOL_NAME_RX);
725 /* counter 64b tx direction */
726 if (tfs->resc.tx.sram_entry[index].stride > 0)
727 end = tfs->resc.tx.sram_entry[index].start +
728 tfs->resc.tx.sram_entry[index].stride - 1;
730 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
731 TF_RSVD_SRAM_COUNTER_64B_BEGIN_IDX_TX,
733 TF_RSVD_SRAM_COUNTER_64B_TX,
734 tfs->TF_SRAM_STATS_64B_POOL_NAME_TX);
738 * Internal function to mark all the nat resources allocated that
739 * Truflow does not own.
742 tf_rm_rsvd_sram_nat(struct tf_session *tfs)
744 uint32_t index = TF_RESC_TYPE_SRAM_NAT_SPORT;
747 /* nat source port rx direction */
748 if (tfs->resc.rx.sram_entry[index].stride > 0)
749 end = tfs->resc.rx.sram_entry[index].start +
750 tfs->resc.rx.sram_entry[index].stride - 1;
752 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
753 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_RX,
755 TF_RSVD_SRAM_NAT_SPORT_RX,
756 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX);
758 /* nat source port tx direction */
759 if (tfs->resc.tx.sram_entry[index].stride > 0)
760 end = tfs->resc.tx.sram_entry[index].start +
761 tfs->resc.tx.sram_entry[index].stride - 1;
763 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
764 TF_RSVD_SRAM_NAT_SPORT_BEGIN_IDX_TX,
766 TF_RSVD_SRAM_NAT_SPORT_TX,
767 tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX);
769 index = TF_RESC_TYPE_SRAM_NAT_DPORT;
771 /* nat destination port rx direction */
772 if (tfs->resc.rx.sram_entry[index].stride > 0)
773 end = tfs->resc.rx.sram_entry[index].start +
774 tfs->resc.rx.sram_entry[index].stride - 1;
776 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
777 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_RX,
779 TF_RSVD_SRAM_NAT_DPORT_RX,
780 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX);
782 /* nat destination port tx direction */
783 if (tfs->resc.tx.sram_entry[index].stride > 0)
784 end = tfs->resc.tx.sram_entry[index].start +
785 tfs->resc.tx.sram_entry[index].stride - 1;
787 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
788 TF_RSVD_SRAM_NAT_DPORT_BEGIN_IDX_TX,
790 TF_RSVD_SRAM_NAT_DPORT_TX,
791 tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX);
793 index = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
795 /* nat source port ipv4 rx direction */
796 if (tfs->resc.rx.sram_entry[index].stride > 0)
797 end = tfs->resc.rx.sram_entry[index].start +
798 tfs->resc.rx.sram_entry[index].stride - 1;
800 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
801 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_RX,
803 TF_RSVD_SRAM_NAT_S_IPV4_RX,
804 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX);
806 /* nat source ipv4 port tx direction */
807 if (tfs->resc.tx.sram_entry[index].stride > 0)
808 end = tfs->resc.tx.sram_entry[index].start +
809 tfs->resc.tx.sram_entry[index].stride - 1;
811 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
812 TF_RSVD_SRAM_NAT_S_IPV4_BEGIN_IDX_TX,
814 TF_RSVD_SRAM_NAT_S_IPV4_TX,
815 tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX);
817 index = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
819 /* nat destination port ipv4 rx direction */
820 if (tfs->resc.rx.sram_entry[index].stride > 0)
821 end = tfs->resc.rx.sram_entry[index].start +
822 tfs->resc.rx.sram_entry[index].stride - 1;
824 tf_rm_reserve_range(tfs->resc.rx.sram_entry[index].stride,
825 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_RX,
827 TF_RSVD_SRAM_NAT_D_IPV4_RX,
828 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX);
830 /* nat destination ipv4 port tx direction */
831 if (tfs->resc.tx.sram_entry[index].stride > 0)
832 end = tfs->resc.tx.sram_entry[index].start +
833 tfs->resc.tx.sram_entry[index].stride - 1;
835 tf_rm_reserve_range(tfs->resc.tx.sram_entry[index].stride,
836 TF_RSVD_SRAM_NAT_D_IPV4_BEGIN_IDX_TX,
838 TF_RSVD_SRAM_NAT_D_IPV4_TX,
839 tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX);
843 * Internal function used to validate the HW allocated resources
844 * against the requested values.
847 tf_rm_hw_alloc_validate(enum tf_dir dir,
848 struct tf_rm_hw_alloc *hw_alloc,
849 struct tf_rm_entry *hw_entry)
854 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {
855 if (hw_entry[i].stride != hw_alloc->hw_num[i]) {
857 "%s, Alloc failed id:%d expect:%d got:%d\n",
870 * Internal function used to validate the SRAM allocated resources
871 * against the requested values.
874 tf_rm_sram_alloc_validate(enum tf_dir dir __rte_unused,
875 struct tf_rm_sram_alloc *sram_alloc,
876 struct tf_rm_entry *sram_entry)
881 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
882 if (sram_entry[i].stride != sram_alloc->sram_num[i]) {
884 "%s, Alloc failed idx:%d expect:%d got:%d\n",
887 sram_alloc->sram_num[i],
888 sram_entry[i].stride);
897 * Internal function used to mark all the HW resources allocated that
898 * Truflow does not own.
901 tf_rm_reserve_hw(struct tf *tfp)
903 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
906 * There is no direct AFM resource allocation as it is carved
907 * statically at AFM boot time. Thus the bit allocators work
908 * on the full HW resource amount and we just mark everything
909 * used except the resources that Truflow took ownership of.
911 tf_rm_rsvd_l2_ctxt(tfs);
912 tf_rm_rsvd_l2_func(tfs);
916 * Internal function used to mark all the SRAM resources allocated
917 * that Truflow does not own.
920 tf_rm_reserve_sram(struct tf *tfp)
922 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
925 * There is no direct AFM resource allocation as it is carved
926 * statically at AFM boot time. Thus the bit allocators work
927 * on the full SRAM resource amount and we just mark everything
928 * used except the resources that Truflow took ownership of.
930 tf_rm_rsvd_sram_full_action(tfs);
931 tf_rm_rsvd_sram_mcg(tfs);
932 tf_rm_rsvd_sram_encap(tfs);
933 tf_rm_rsvd_sram_sp(tfs);
934 tf_rm_rsvd_sram_stats(tfs);
935 tf_rm_rsvd_sram_nat(tfs);
939 * Internal function used to allocate and validate all HW resources.
942 tf_rm_allocate_validate_hw(struct tf *tfp,
947 struct tf_rm_hw_query hw_query;
948 struct tf_rm_hw_alloc hw_alloc;
949 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
950 struct tf_rm_entry *hw_entries;
953 if (dir == TF_DIR_RX)
954 hw_entries = tfs->resc.rx.hw_entry;
956 hw_entries = tfs->resc.tx.hw_entry;
958 /* Query for Session HW Resources */
959 rc = tf_msg_session_hw_resc_qcaps(tfp, dir, &hw_query);
963 "%s, HW qcaps message send failed\n",
968 rc = tf_rm_check_hw_qcaps_static(&hw_query, dir, &error_flag);
972 "%s, HW QCAPS validation failed, error_flag:0x%x\n",
978 /* Post process HW capability */
979 for (i = 0; i < TF_RESC_TYPE_HW_MAX; i++)
980 hw_alloc.hw_num[i] = hw_query.hw_query[i].max;
982 /* Allocate Session HW Resources */
983 rc = tf_msg_session_hw_resc_alloc(tfp, dir, &hw_alloc, hw_entries);
987 "%s, HW alloc message send failed\n",
992 /* Perform HW allocation validation as it's possible the
993 * resource availability changed between qcaps and alloc
995 rc = tf_rm_hw_alloc_validate(dir, &hw_alloc, hw_entries);
999 "%s, HW Resource validation failed\n",
1011 * Internal function used to allocate and validate all SRAM resources.
1014 * Pointer to TF handle
1017 * Receive or transmit direction
1021 * -1 - Internal error
1024 tf_rm_allocate_validate_sram(struct tf *tfp,
1029 struct tf_rm_sram_query sram_query;
1030 struct tf_rm_sram_alloc sram_alloc;
1031 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
1032 struct tf_rm_entry *sram_entries;
1033 uint32_t error_flag;
1035 if (dir == TF_DIR_RX)
1036 sram_entries = tfs->resc.rx.sram_entry;
1038 sram_entries = tfs->resc.tx.sram_entry;
1040 /* Query for Session SRAM Resources */
1041 rc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);
1045 "%s, SRAM qcaps message send failed\n",
1050 rc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);
1054 "%s, SRAM QCAPS validation failed, error_flag:%x\n",
1057 tf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);
1061 /* Post process SRAM capability */
1062 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)
1063 sram_alloc.sram_num[i] = sram_query.sram_query[i].max;
1065 /* Allocate Session SRAM Resources */
1066 rc = tf_msg_session_sram_resc_alloc(tfp,
1073 "%s, SRAM alloc message send failed\n",
1078 /* Perform SRAM allocation validation as it's possible the
1079 * resource availability changed between qcaps and alloc
1081 rc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);
1085 "%s, SRAM Resource allocation validation failed\n",
1097 * Helper function used to prune a SRAM resource array to only hold
1098 * elements that need to be flushed.
1104 * Receive or transmit direction
1107 * Master SRAM Resource database
1109 * [in/out] flush_entries
1110 * Pruned SRAM Resource database of entries to be flushed. This
1111 * array should be passed in as a complete copy of the master SRAM
1112 * Resource database. The outgoing result will be a pruned version
1113 * based on the result of the requested checking
1116 * 0 - Success, no flush required
1117 * 1 - Success, flush required
1118 * -1 - Internal error
1121 tf_rm_sram_to_flush(struct tf_session *tfs,
1123 struct tf_rm_entry *sram_entries,
1124 struct tf_rm_entry *flush_entries)
1129 struct bitalloc *pool;
1131 /* Check all the sram resource pools for left over
1132 * elements. Any found will result in the complete pool of a
1133 * type being invalidated.
1136 TF_RM_GET_POOLS(tfs, dir, &pool,
1137 TF_SRAM_FULL_ACTION_POOL_NAME,
1141 free_cnt = ba_free_count(pool);
1142 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride) {
1143 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].start = 0;
1144 flush_entries[TF_RESC_TYPE_SRAM_FULL_ACTION].stride = 0;
1149 /* Only pools for RX direction */
1150 if (dir == TF_DIR_RX) {
1151 TF_RM_GET_POOLS_RX(tfs, &pool,
1152 TF_SRAM_MCG_POOL_NAME);
1155 free_cnt = ba_free_count(pool);
1156 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_MCG].stride) {
1157 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
1158 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
1163 /* Always prune TX direction */
1164 flush_entries[TF_RESC_TYPE_SRAM_MCG].start = 0;
1165 flush_entries[TF_RESC_TYPE_SRAM_MCG].stride = 0;
1168 TF_RM_GET_POOLS(tfs, dir, &pool,
1169 TF_SRAM_ENCAP_8B_POOL_NAME,
1173 free_cnt = ba_free_count(pool);
1174 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride) {
1175 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].start = 0;
1176 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_8B].stride = 0;
1181 TF_RM_GET_POOLS(tfs, dir, &pool,
1182 TF_SRAM_ENCAP_16B_POOL_NAME,
1186 free_cnt = ba_free_count(pool);
1187 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride) {
1188 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].start = 0;
1189 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_16B].stride = 0;
1194 /* Only pools for TX direction */
1195 if (dir == TF_DIR_TX) {
1196 TF_RM_GET_POOLS_TX(tfs, &pool,
1197 TF_SRAM_ENCAP_64B_POOL_NAME);
1200 free_cnt = ba_free_count(pool);
1202 sram_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride) {
1203 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
1204 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
1209 /* Always prune RX direction */
1210 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].start = 0;
1211 flush_entries[TF_RESC_TYPE_SRAM_ENCAP_64B].stride = 0;
1214 TF_RM_GET_POOLS(tfs, dir, &pool,
1215 TF_SRAM_SP_SMAC_POOL_NAME,
1219 free_cnt = ba_free_count(pool);
1220 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride) {
1221 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].start = 0;
1222 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC].stride = 0;
1227 /* Only pools for TX direction */
1228 if (dir == TF_DIR_TX) {
1229 TF_RM_GET_POOLS_TX(tfs, &pool,
1230 TF_SRAM_SP_SMAC_IPV4_POOL_NAME);
1233 free_cnt = ba_free_count(pool);
1235 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride) {
1236 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
1237 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride =
1243 /* Always prune RX direction */
1244 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].start = 0;
1245 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV4].stride = 0;
1248 /* Only pools for TX direction */
1249 if (dir == TF_DIR_TX) {
1250 TF_RM_GET_POOLS_TX(tfs, &pool,
1251 TF_SRAM_SP_SMAC_IPV6_POOL_NAME);
1254 free_cnt = ba_free_count(pool);
1256 sram_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride) {
1257 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
1258 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride =
1264 /* Always prune RX direction */
1265 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].start = 0;
1266 flush_entries[TF_RESC_TYPE_SRAM_SP_SMAC_IPV6].stride = 0;
1269 TF_RM_GET_POOLS(tfs, dir, &pool,
1270 TF_SRAM_STATS_64B_POOL_NAME,
1274 free_cnt = ba_free_count(pool);
1275 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride) {
1276 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].start = 0;
1277 flush_entries[TF_RESC_TYPE_SRAM_COUNTER_64B].stride = 0;
1282 TF_RM_GET_POOLS(tfs, dir, &pool,
1283 TF_SRAM_NAT_SPORT_POOL_NAME,
1287 free_cnt = ba_free_count(pool);
1288 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride) {
1289 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].start = 0;
1290 flush_entries[TF_RESC_TYPE_SRAM_NAT_SPORT].stride = 0;
1295 TF_RM_GET_POOLS(tfs, dir, &pool,
1296 TF_SRAM_NAT_DPORT_POOL_NAME,
1300 free_cnt = ba_free_count(pool);
1301 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride) {
1302 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].start = 0;
1303 flush_entries[TF_RESC_TYPE_SRAM_NAT_DPORT].stride = 0;
1308 TF_RM_GET_POOLS(tfs, dir, &pool,
1309 TF_SRAM_NAT_S_IPV4_POOL_NAME,
1313 free_cnt = ba_free_count(pool);
1314 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride) {
1315 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].start = 0;
1316 flush_entries[TF_RESC_TYPE_SRAM_NAT_S_IPV4].stride = 0;
1321 TF_RM_GET_POOLS(tfs, dir, &pool,
1322 TF_SRAM_NAT_D_IPV4_POOL_NAME,
1326 free_cnt = ba_free_count(pool);
1327 if (free_cnt == sram_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride) {
1328 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].start = 0;
1329 flush_entries[TF_RESC_TYPE_SRAM_NAT_D_IPV4].stride = 0;
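/* Illustrative sketch of the prune decision, not part of the original
 * source; the counts are hypothetical. If the full action pool was
 * granted stride = 512 entries and ba_free_count() also reports 512 free
 * entries, everything was returned before close, so the type is pruned
 * from the flush set (start = 0, stride = 0). Any smaller free count
 * leaves the type in flush_entries so firmware invalidates the leftovers.
 */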
1338 * Helper function used to generate an error log for the SRAM types
1339 * that need to be flushed. The types should have been cleaned up
1340 * ahead of invoking tf_close_session.
1343 * SRAM Resource database holding elements to be flushed
1346 tf_rm_log_sram_flush(enum tf_dir dir,
1347 struct tf_rm_entry *sram_entries)
1351 /* Walk the sram flush array and log the types that weren't
1354 for (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++) {
1355 if (sram_entries[i].stride != 0)
1357 "%s: %s was not cleaned up\n",
1359 tf_hcapi_sram_2_str(i));
1364 tf_rm_init(struct tf *tfp __rte_unused)
1366 struct tf_session *tfs =
1367 (struct tf_session *)(tfp->session->core_data);
1369 /* This version is host specific and should be checked against
1370 * when attaching as there is no guarantee that a secondary
1371 * would run from the same image version.
1373 tfs->ver.major = TF_SESSION_VER_MAJOR;
1374 tfs->ver.minor = TF_SESSION_VER_MINOR;
1375 tfs->ver.update = TF_SESSION_VER_UPDATE;
1377 tfs->session_id.id = 0;
1380 /* Initialization of Table Scopes */
1381 /* ll_init(&tfs->tbl_scope_ll); */
1383 /* Initialization of HW and SRAM resource DB */
1384 memset(&tfs->resc, 0, sizeof(struct tf_rm_db));
1386 /* Initialization of HW Resource Pools */
1387 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
1388 ba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
1390 /* Initialization of SRAM Resource Pools
1391 * These pools are set to the TFLIB defined MAX sizes, not
1392 * AFM's HW max, so as to limit the memory consumption
1394 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_RX,
1395 TF_RSVD_SRAM_FULL_ACTION_RX);
1396 ba_init(tfs->TF_SRAM_FULL_ACTION_POOL_NAME_TX,
1397 TF_RSVD_SRAM_FULL_ACTION_TX);
1398 /* Only Multicast Group on RX is supported */
1399 ba_init(tfs->TF_SRAM_MCG_POOL_NAME_RX,
1400 TF_RSVD_SRAM_MCG_RX);
1401 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_RX,
1402 TF_RSVD_SRAM_ENCAP_8B_RX);
1403 ba_init(tfs->TF_SRAM_ENCAP_8B_POOL_NAME_TX,
1404 TF_RSVD_SRAM_ENCAP_8B_TX);
1405 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_RX,
1406 TF_RSVD_SRAM_ENCAP_16B_RX);
1407 ba_init(tfs->TF_SRAM_ENCAP_16B_POOL_NAME_TX,
1408 TF_RSVD_SRAM_ENCAP_16B_TX);
1409 /* Only Encap 64B on TX is supported */
1410 ba_init(tfs->TF_SRAM_ENCAP_64B_POOL_NAME_TX,
1411 TF_RSVD_SRAM_ENCAP_64B_TX);
1412 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_RX,
1413 TF_RSVD_SRAM_SP_SMAC_RX);
1414 ba_init(tfs->TF_SRAM_SP_SMAC_POOL_NAME_TX,
1415 TF_RSVD_SRAM_SP_SMAC_TX);
1416 /* Only SP SMAC IPv4 on TX is supported */
1417 ba_init(tfs->TF_SRAM_SP_SMAC_IPV4_POOL_NAME_TX,
1418 TF_RSVD_SRAM_SP_SMAC_IPV4_TX);
1419 /* Only SP SMAC IPv6 on TX is supported */
1420 ba_init(tfs->TF_SRAM_SP_SMAC_IPV6_POOL_NAME_TX,
1421 TF_RSVD_SRAM_SP_SMAC_IPV6_TX);
1422 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_RX,
1423 TF_RSVD_SRAM_COUNTER_64B_RX);
1424 ba_init(tfs->TF_SRAM_STATS_64B_POOL_NAME_TX,
1425 TF_RSVD_SRAM_COUNTER_64B_TX);
1426 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_RX,
1427 TF_RSVD_SRAM_NAT_SPORT_RX);
1428 ba_init(tfs->TF_SRAM_NAT_SPORT_POOL_NAME_TX,
1429 TF_RSVD_SRAM_NAT_SPORT_TX);
1430 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_RX,
1431 TF_RSVD_SRAM_NAT_DPORT_RX);
1432 ba_init(tfs->TF_SRAM_NAT_DPORT_POOL_NAME_TX,
1433 TF_RSVD_SRAM_NAT_DPORT_TX);
1434 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_RX,
1435 TF_RSVD_SRAM_NAT_S_IPV4_RX);
1436 ba_init(tfs->TF_SRAM_NAT_S_IPV4_POOL_NAME_TX,
1437 TF_RSVD_SRAM_NAT_S_IPV4_TX);
1438 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_RX,
1439 TF_RSVD_SRAM_NAT_D_IPV4_RX);
1440 ba_init(tfs->TF_SRAM_NAT_D_IPV4_POOL_NAME_TX,
1441 TF_RSVD_SRAM_NAT_D_IPV4_TX);
1443 /* Initialization of pools local to TF Core */
1444 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);
1445 ba_init(tfs->TF_L2_CTXT_REMAP_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);
1449 tf_rm_allocate_validate(struct tf *tfp)
1454 for (i = 0; i < TF_DIR_MAX; i++) {
1455 rc = tf_rm_allocate_validate_hw(tfp, i);
1458 rc = tf_rm_allocate_validate_sram(tfp, i);
1463 /* With both HW and SRAM allocated and validated we can
1464 * 'scrub' the reservation on the pools.
1466 tf_rm_reserve_hw(tfp);
1467 tf_rm_reserve_sram(tfp);
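/* Illustrative call-order sketch, not part of the original source; the
 * session-open sequence is an assumption. The pools are expected to be
 * initialised before the qcaps/alloc exchange runs:
 *
 *   tf_rm_init(tfp);
 *   rc = tf_rm_allocate_validate(tfp);
 *
 * after which the reserve step has masked off every pool index that
 * TruFlow does not own.
 */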
1473 tf_rm_close(struct tf *tfp)
1478 struct tf_rm_entry *hw_entries;
1479 struct tf_rm_entry *sram_entries;
1480 struct tf_rm_entry *sram_flush_entries;
1481 struct tf_session *tfs __rte_unused =
1482 (struct tf_session *)(tfp->session->core_data);
1484 struct tf_rm_db flush_resc = tfs->resc;
1486 /* On close it is assumed that the session has already cleaned
1487 * up all its resources, individually, while destroying its
1488 * flows. No checking is performed thus the behavior is as
1491 * Session RM will signal FW to release session resources. FW
1492 * will perform invalidation of all the allocated entries
1493 * (this assures any outstanding resources have been cleared), then
1494 * free the FW RM instance.
1496 * Session will then be freed by tf_close_session() thus there
1497 * is no need to clean each resource pool as the whole session
1501 for (i = 0; i < TF_DIR_MAX; i++) {
1502 if (i == TF_DIR_RX) {
1503 hw_entries = tfs->resc.rx.hw_entry;
1504 sram_entries = tfs->resc.rx.sram_entry;
1505 sram_flush_entries = flush_resc.rx.sram_entry;
1507 hw_entries = tfs->resc.tx.hw_entry;
1508 sram_entries = tfs->resc.tx.sram_entry;
1509 sram_flush_entries = flush_resc.tx.sram_entry;
1512 /* Check for any SRAM resources not previously freed
1513 * and flush if required.
1515 rc = tf_rm_sram_to_flush(tfs,
1518 sram_flush_entries);
1520 rc_close = -ENOTEMPTY;
1523 "%s, lingering SRAM resources\n",
1526 /* Log the entries to be flushed */
1527 tf_rm_log_sram_flush(i, sram_flush_entries);
1529 rc = tf_msg_session_sram_resc_flush(tfp,
1531 sram_flush_entries);
1536 "%s, HW flush failed\n",
1541 rc = tf_msg_session_hw_resc_free(tfp, i, hw_entries);
1546 "%s, HW free failed\n",
1550 rc = tf_msg_session_sram_resc_free(tfp, i, sram_entries);
1555 "%s, SRAM free failed\n",
1564 tf_rm_convert_tbl_type(enum tf_tbl_type type,
1565 uint32_t *hcapi_type)
1570 case TF_TBL_TYPE_FULL_ACT_RECORD:
1571 *hcapi_type = TF_RESC_TYPE_SRAM_FULL_ACTION;
1573 case TF_TBL_TYPE_MCAST_GROUPS:
1574 *hcapi_type = TF_RESC_TYPE_SRAM_MCG;
1576 case TF_TBL_TYPE_ACT_ENCAP_8B:
1577 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_8B;
1579 case TF_TBL_TYPE_ACT_ENCAP_16B:
1580 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_16B;
1582 case TF_TBL_TYPE_ACT_ENCAP_64B:
1583 *hcapi_type = TF_RESC_TYPE_SRAM_ENCAP_64B;
1585 case TF_TBL_TYPE_ACT_SP_SMAC:
1586 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC;
1588 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
1589 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV4;
1591 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
1592 *hcapi_type = TF_RESC_TYPE_SRAM_SP_SMAC_IPV6;
1594 case TF_TBL_TYPE_ACT_STATS_64:
1595 *hcapi_type = TF_RESC_TYPE_SRAM_COUNTER_64B;
1597 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
1598 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_SPORT;
1600 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
1601 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_DPORT;
1603 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
1604 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_S_IPV4;
1606 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
1607 *hcapi_type = TF_RESC_TYPE_SRAM_NAT_D_IPV4;
1609 case TF_TBL_TYPE_METER_PROF:
1610 *hcapi_type = TF_RESC_TYPE_HW_METER_PROF;
1612 case TF_TBL_TYPE_METER_INST:
1613 *hcapi_type = TF_RESC_TYPE_HW_METER_INST;
1615 case TF_TBL_TYPE_MIRROR_CONFIG:
1616 *hcapi_type = TF_RESC_TYPE_HW_MIRROR;
1618 case TF_TBL_TYPE_UPAR:
1619 *hcapi_type = TF_RESC_TYPE_HW_UPAR;
1621 case TF_TBL_TYPE_EPOCH0:
1622 *hcapi_type = TF_RESC_TYPE_HW_EPOCH0;
1624 case TF_TBL_TYPE_EPOCH1:
1625 *hcapi_type = TF_RESC_TYPE_HW_EPOCH1;
1627 case TF_TBL_TYPE_METADATA:
1628 *hcapi_type = TF_RESC_TYPE_HW_METADATA;
1630 case TF_TBL_TYPE_CT_STATE:
1631 *hcapi_type = TF_RESC_TYPE_HW_CT_STATE;
1633 case TF_TBL_TYPE_RANGE_PROF:
1634 *hcapi_type = TF_RESC_TYPE_HW_RANGE_PROF;
1636 case TF_TBL_TYPE_RANGE_ENTRY:
1637 *hcapi_type = TF_RESC_TYPE_HW_RANGE_ENTRY;
1639 case TF_TBL_TYPE_LAG:
1640 *hcapi_type = TF_RESC_TYPE_HW_LAG_ENTRY;
1642 /* Not yet supported */
1643 case TF_TBL_TYPE_ACT_ENCAP_32B:
1644 case TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:
1645 case TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:
1646 case TF_TBL_TYPE_VNIC_SVIF:
1647 case TF_TBL_TYPE_EXT: /* No pools for this type */
1648 case TF_TBL_TYPE_EXT_0: /* No pools for this type */
1658 tf_rm_convert_index(struct tf_session *tfs,
1660 enum tf_tbl_type type,
1661 enum tf_rm_convert_type c_type,
1663 uint32_t *convert_index)
1666 struct tf_rm_resc *resc;
1667 uint32_t hcapi_type;
1668 uint32_t base_index;
1670 if (dir == TF_DIR_RX)
1671 resc = &tfs->resc.rx;
1672 else if (dir == TF_DIR_TX)
1673 resc = &tfs->resc.tx;
1677 rc = tf_rm_convert_tbl_type(type, &hcapi_type);
1682 case TF_TBL_TYPE_FULL_ACT_RECORD:
1683 case TF_TBL_TYPE_MCAST_GROUPS:
1684 case TF_TBL_TYPE_ACT_ENCAP_8B:
1685 case TF_TBL_TYPE_ACT_ENCAP_16B:
1686 case TF_TBL_TYPE_ACT_ENCAP_32B:
1687 case TF_TBL_TYPE_ACT_ENCAP_64B:
1688 case TF_TBL_TYPE_ACT_SP_SMAC:
1689 case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
1690 case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
1691 case TF_TBL_TYPE_ACT_STATS_64:
1692 case TF_TBL_TYPE_ACT_MODIFY_SPORT:
1693 case TF_TBL_TYPE_ACT_MODIFY_DPORT:
1694 case TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:
1695 case TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:
1696 base_index = resc->sram_entry[hcapi_type].start;
1698 case TF_TBL_TYPE_MIRROR_CONFIG:
1699 case TF_TBL_TYPE_METER_PROF:
1700 case TF_TBL_TYPE_METER_INST:
1701 case TF_TBL_TYPE_UPAR:
1702 case TF_TBL_TYPE_EPOCH0:
1703 case TF_TBL_TYPE_EPOCH1:
1704 case TF_TBL_TYPE_METADATA:
1705 case TF_TBL_TYPE_CT_STATE:
1706 case TF_TBL_TYPE_RANGE_PROF:
1707 case TF_TBL_TYPE_RANGE_ENTRY:
1708 case TF_TBL_TYPE_LAG:
1709 base_index = resc->hw_entry[hcapi_type].start;
1711 /* Not yet supported */
1712 case TF_TBL_TYPE_VNIC_SVIF:
1713 case TF_TBL_TYPE_EXT: /* No pools for this type */
1714 case TF_TBL_TYPE_EXT_0: /* No pools for this type */
1720 case TF_RM_CONVERT_RM_BASE:
1721 *convert_index = index - base_index;
1723 case TF_RM_CONVERT_ADD_BASE:
1724 *convert_index = index + base_index;
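/* Illustrative sketch, not part of the original source; the numbers are
 * hypothetical. With a full action record base of start = 8192,
 * TF_RM_CONVERT_RM_BASE maps HCAPI index 8200 to pool offset 8, and
 * TF_RM_CONVERT_ADD_BASE maps pool offset 8 back to HCAPI index 8200.
 */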