+static inline unsigned int
+roc_npa_aura_op_batch_alloc(uint64_t aura_handle, uint64_t *buf,
+ uint64_t *aligned_buf, unsigned int num,
+ const int dis_wait, const int drop,
+ const int partial)
+{
+ unsigned int count, chunk, num_alloc;
+
+ /* The buffer should be 128 byte cache line aligned */
+ if (((uint64_t)aligned_buf & (ROC_ALIGN - 1)) != 0)
+ return 0;
+
+ count = 0;
+ while (num) {
+ chunk = (num > ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS) ?
+ ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS :
+ num;
+
+ if (roc_npa_aura_batch_alloc_issue(aura_handle, aligned_buf,
+ chunk, dis_wait, drop))
+ break;
+
+ num_alloc = roc_npa_aura_batch_alloc_extract(buf, aligned_buf,
+ chunk);
+
+ count += num_alloc;
+ buf += num_alloc;
+ num -= num_alloc;
+
+ if (num_alloc != chunk)
+ break;
+ }
+
+ /* If the requested number of pointers was not allocated and if partial
+ * alloc is not desired, then free allocated pointers.
+ */
+ if (unlikely(num != 0 && !partial)) {
+ roc_npa_aura_op_bulk_free(aura_handle, buf - count, count, 1);
+ count = 0;
+ }
+
+ return count;
+}
+
/* Free a batch of up to ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS pointers to an
 * aura with a single LMTST operation.
 *
 * The descriptor word (FREE0) plus the pointers are staged in the LMT line
 * at 'lmt_addr' and submitted to the aura's BATCH_FREE0 register with a
 * STEORL. 'fabs' sets the FABS bit of the descriptor. Requests larger than
 * the hardware batch limit are silently dropped — callers must chunk
 * (see roc_npa_aura_op_batch_free).
 *
 * NOTE(review): assumes 'lmt_addr'/'lmt_id' refer to a valid, exclusively
 * owned LMT line for the calling core — TODO confirm with callers. A call
 * with num == 0 still submits a descriptor-only LMTST; presumably harmless,
 * but callers here never do that — verify if exposing directly.
 */
static inline void
roc_npa_aura_batch_free(uint64_t aura_handle, uint64_t const *buf,
			unsigned int num, const int fabs, uint64_t lmt_addr,
			uint64_t lmt_id)
{
	uint64_t addr, tar_addr, free0;
	volatile uint64_t *lmt_data;
	unsigned int i;

	/* Hardware limit for a single batch free; larger requests dropped */
	if (num > ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS)
		return;

	lmt_data = (uint64_t *)lmt_addr;

	/* Target register: BATCH_FREE0 within this aura's BAR region */
	addr = roc_npa_aura_handle_to_base(aura_handle) +
	       NPA_LF_AURA_BATCH_FREE0;

	/*
	 * NPA_LF_AURA_BATCH_FREE0
	 *
	 * 63   63  62  33  32       32  31  20  19    0
	 * -----------------------------------------
	 * | FABS | Rsvd | COUNT_EOT | Rsvd | AURA |
	 * -----------------------------------------
	 */
	free0 = roc_npa_aura_handle_to_aura(aura_handle);
	if (fabs)
		free0 |= (0x1UL << 63);
	/* COUNT_EOT: total 64-bit words (num + descriptor) is even, i.e. the
	 * last 128-bit unit is fully used, exactly when num is odd.
	 */
	if (num & 0x1)
		free0 |= (0x1UL << 32);

	/* tar_addr[4:6] is LMTST size-1 in units of 128b */
	tar_addr = addr | ((num >> 1) << 4);

	/* Stage descriptor first, then the pointers, into the LMT line */
	lmt_data[0] = free0;
	for (i = 0; i < num; i++)
		lmt_data[i + 1] = buf[i];

	/* Submit the staged LMT line, then order it before subsequent I/O */
	roc_lmt_submit_steorl(lmt_id, tar_addr);
	plt_io_wmb();
}
+
+static inline void
+roc_npa_aura_op_batch_free(uint64_t aura_handle, uint64_t const *buf,
+ unsigned int num, const int fabs, uint64_t lmt_addr,
+ uint64_t lmt_id)
+{
+ unsigned int chunk;
+
+ while (num) {
+ chunk = (num >= ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS) ?
+ ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS :
+ num;
+
+ roc_npa_aura_batch_free(aura_handle, buf, chunk, fabs, lmt_addr,
+ lmt_id);
+
+ buf += chunk;
+ num -= chunk;
+ }
+}
+