1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_ethdev.h"
/* Allocate a single NPC MCAM counter through the AF mailbox.
 * On success the allocated counter id is returned via *ctr.
 * NOTE(review): intermediate lines (braces, rc checks, return) are not
 * visible in this excerpt; comments cover only the statements shown.
 */
9 flow_mcam_alloc_counter(struct otx2_mbox *mbox, uint16_t *ctr)
11 struct npc_mcam_alloc_counter_req *req;
12 struct npc_mcam_alloc_counter_rsp *rsp;
/* Compose the alloc-counter mailbox message and fire it at the AF */
15 req = otx2_mbox_alloc_msg_npc_mcam_alloc_counter(mbox);
17 otx2_mbox_msg_send(mbox, 0);
18 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
/* Hand the first allocated counter id back to the caller */
20 *ctr = rsp->cntr_list[0];
/* Free a previously allocated MCAM counter (ctr_id) via the AF mailbox.
 * NOTE(review): the line filling req->cntr is not visible in this excerpt.
 */
25 otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id)
27 struct npc_mcam_oper_counter_req *req;
30 req = otx2_mbox_alloc_msg_npc_mcam_free_counter(mbox);
32 otx2_mbox_msg_send(mbox, 0);
/* No payload expected in the response, only the return code */
33 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
/* Read the hit statistics of MCAM counter ctr_id via the AF mailbox.
 * NOTE(review): the out-parameter receiving the count and the line that
 * copies rsp->stat into it are not visible in this excerpt.
 */
39 otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id,
42 struct npc_mcam_oper_counter_req *req;
43 struct npc_mcam_oper_counter_rsp *rsp;
46 req = otx2_mbox_alloc_msg_npc_mcam_counter_stats(mbox);
48 otx2_mbox_msg_send(mbox, 0);
49 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
/* Reset MCAM counter ctr_id to zero via the AF mailbox. */
56 otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id)
58 struct npc_mcam_oper_counter_req *req;
61 req = otx2_mbox_alloc_msg_npc_mcam_clear_counter(mbox);
63 otx2_mbox_msg_send(mbox, 0);
/* Response carries no payload, only the return code */
64 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
/* Return one MCAM entry (index 'entry') back to the AF free pool.
 * NOTE(review): the line filling req->entry is not visible in this excerpt.
 */
70 otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry)
72 struct npc_mcam_free_entry_req *req;
75 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
77 otx2_mbox_msg_send(mbox, 0);
78 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
/* Free all MCAM entries owned by this PF/VF. Uses the same free-entry
 * request as otx2_flow_mcam_free_entry(); the request's "free all" flag
 * is presumably set on a line not visible in this excerpt — verify.
 */
84 otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox)
86 struct npc_mcam_free_entry_req *req;
89 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
91 otx2_mbox_msg_send(mbox, 0);
92 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
/* Copy 'len' bytes from 'data' to 'ptr' in reverse byte order, so the
 * destination holds the most-significant byte first as required when
 * programming MCAM data/mask words.
 */
98 flow_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
102 for (idx = 0; idx < len; idx++)
103 ptr[idx] = data[len - 1 - idx];
/* Validate a requested copy length 'len' against the available space
 * 'size'. Presumably returns len when it fits and an error sentinel
 * otherwise (callers treat a negative result as failure) — body is not
 * visible in this excerpt; verify against the full source.
 */
107 flow_check_copysz(size_t size, size_t len)
/* Return true (non-zero) when the first 'len' bytes of 'mem' are all
 * zero. Used both to detect untouched MCAM key regions and match-any
 * masks. NOTE(review): loop body and returns are not visible here.
 */
115 flow_mem_is_zero(const void *mem, int len)
120 for (i = 0; i < len; i++) {
/* Accumulate one extractor's byte coverage into info->hw_mask for the
 * range of this item's header that xinfo extracts. The per-byte mask
 * assignment inside the loop is on a line not visible in this excerpt.
 */
128 flow_set_hw_mask(struct otx2_flow_item_info *info,
129 struct npc_xtract_info *xinfo,
/* Disabled extractor contributes nothing */
135 if (xinfo->enable == 0)
/* Extraction that starts before this item's header (within the preceding
 * hw_hdr_len bytes) does not apply to this item.
 */
138 if (xinfo->hdr_off < info->hw_hdr_len)
/* One past the last byte of this item covered by the extractor */
141 max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;
/* Clip coverage to the item's length */
143 if (max_off > info->len)
/* First covered byte, relative to the start of this item */
146 offset = xinfo->hdr_off - info->hw_hdr_len;
147 for (j = offset; j < max_off; j++)
/* Build the hardware-supported match mask for layer (lid, lt) on the
 * flow's interface: start from all-zero (nothing matchable) and OR in
 * the coverage of every direct LD extractor and, for flag-driven
 * extractors, every LFL sub-extractor.
 */
152 otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst,
153 struct otx2_flow_item_info *info, int lid, int lt)
155 struct npc_xtract_info *xinfo, *lfinfo;
156 char *hw_mask = info->hw_mask;
161 intf = pst->flow->nix_intf;
162 xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
/* Begin with "nothing supported"; extractors OR their coverage in */
163 memset(hw_mask, 0, info->len);
/* First pass: direct LD extractors */
165 for (i = 0; i < NPC_MAX_LD; i++) {
166 flow_set_hw_mask(info, &xinfo[i], hw_mask);
/* Second pass: extractors driven by layer flags (LF configuration) */
169 for (i = 0; i < NPC_MAX_LD; i++) {
171 if (xinfo[i].flags_enable == 0)
174 lf_cfg = pst->npc->prx_lfcfg[i].i;
176 for (j = 0; j < NPC_MAX_LFL; j++) {
177 lfinfo = pst->npc->prx_fxcfg[intf]
179 flow_set_hw_mask(info, &lfinfo[0], hw_mask);
/* Program pst->mcam_data / pst->mcam_mask for one extractor (xinfo) from
 * the item's spec/mask. The bytes are byte-reversed with
 * flow_prep_mcam_ldata() before being copied at the extractor's key
 * offset. Fails with ENOTSUP when the target key region was already
 * claimed by a previous layer or when the copy size check fails.
 * NOTE(review): several lines (len init, early returns) are not visible
 * in this excerpt.
 */
186 flow_update_extraction_data(struct otx2_parse_state *pst,
187 struct otx2_flow_item_info *info,
188 struct npc_xtract_info *xinfo)
190 uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
191 uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
192 struct npc_xtract_info *x;
198 hdr_off = x->hdr_off;
/* Extraction belonging to a preceding header: nothing for this item */
200 if (hdr_off < info->hw_hdr_len)
206 otx2_npc_dbg("x->hdr_off = %d, len = %d, info->len = %d,"
207 "x->key_off = %d", x->hdr_off, len, info->len,
/* Translate to an offset within this item's own header */
210 hdr_off -= info->hw_hdr_len;
/* Clip the copy to the item's length */
212 if (hdr_off + len > info->len)
213 len = info->len - hdr_off;
215 /* Check for over-write of previous layer */
216 if (!flow_mem_is_zero(pst->mcam_mask + x->key_off,
218 /* Cannot support this data match */
219 rte_flow_error_set(pst->error, ENOTSUP,
220 RTE_FLOW_ERROR_TYPE_ITEM,
222 "Extraction unsupported");
/* Bound the copy by the remaining MCAM key width from key_off */
226 len = flow_check_copysz((OTX2_MAX_MCAM_WIDTH_DWORDS * 8)
230 rte_flow_error_set(pst->error, ENOTSUP,
231 RTE_FLOW_ERROR_TYPE_ITEM,
237 /* Need to reverse complete structure so that dest addr is at
238 * MSB so as to program the MCAM using mcam_data & mcam_mask
241 flow_prep_mcam_ldata(int_info,
242 (const uint8_t *)info->spec + hdr_off,
244 flow_prep_mcam_ldata(int_info_mask,
245 (const uint8_t *)info->mask + hdr_off,
248 otx2_npc_dbg("Spec: ");
249 for (k = 0; k < info->len; k++)
250 otx2_npc_dbg("0x%.2x ",
251 ((const uint8_t *)info->spec)[k]);
253 otx2_npc_dbg("Int_info: ");
254 for (k = 0; k < info->len; k++)
255 otx2_npc_dbg("0x%.2x ", int_info[k]);
/* Commit the reversed spec/mask into the parse state's MCAM key */
257 memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
258 memcpy(pst->mcam_data + x->key_off, int_info, len);
260 otx2_npc_dbg("Parse state mcam data & mask");
261 for (idx = 0; idx < len ; idx++)
262 otx2_npc_dbg("data[%d]: 0x%x, mask[%d]: 0x%x", idx,
263 *(pst->mcam_data + idx + x->key_off), idx,
264 *(pst->mcam_mask + idx + x->key_off));
/* Record a successfully parsed item into the parse state: mark the layer
 * as matched, store its flags, and program MCAM data/mask for every
 * enabled LD extractor (and flag-driven LFL extractor) of (lid, lt).
 * Also appends an entry to the flow's dump data.
 * NOTE(review): early-return paths and the pattern-advance line are not
 * visible in this excerpt.
 */
269 otx2_flow_update_parse_state(struct otx2_parse_state *pst,
270 struct otx2_flow_item_info *info, int lid, int lt,
273 struct npc_lid_lt_xtract_info *xinfo;
274 struct otx2_flow_dump_data *dump;
275 struct npc_xtract_info *lfinfo;
279 otx2_npc_dbg("Parse state function info mask total %s",
280 (const uint8_t *)info->mask);
/* Remember which layers this flow matches on */
282 pst->layer_mask |= lid;
284 pst->flags[lid] = flags;
286 intf = pst->flow->nix_intf;
287 xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
288 otx2_npc_dbg("Is_terminating = %d", xinfo->is_terminating);
/* A terminating layer ends the parse (handling on a line not shown) */
289 if (xinfo->is_terminating)
/* No spec means match-any: nothing to program into the MCAM */
292 if (info->spec == NULL) {
293 otx2_npc_dbg("Info spec NULL");
/* Program each direct LD extractor */
297 for (i = 0; i < NPC_MAX_LD; i++) {
298 rc = flow_update_extraction_data(pst, info, &xinfo->xtract[i]);
/* Then each flag-driven LFL extractor, where enabled */
303 for (i = 0; i < NPC_MAX_LD; i++) {
304 if (xinfo->xtract[i].flags_enable == 0)
307 lf_cfg = pst->npc->prx_lfcfg[i].i;
309 for (j = 0; j < NPC_MAX_LFL; j++) {
310 lfinfo = pst->npc->prx_fxcfg[intf]
312 rc = flow_update_extraction_data(pst, info,
317 if (lfinfo[0].enable)
/* Track this pattern for later flow dump */
324 dump = &pst->flow->dump_data[pst->flow->num_patterns++];
327 /* Next pattern to parse by subsequent layers */
/* Return true when [spec, last] under 'mask' forms a supported range,
 * i.e. spec & mask == last & mask for every byte (non-contiguous ranges
 * are rejected). NOTE(review): the loop header iterating 'len' is not
 * visible in this excerpt.
 */
333 flow_range_is_valid(const char *spec, const char *last, const char *mask,
336 /* Mask must be zero or equal to spec as we do not support
337 * non-contiguous ranges.
341 (spec[len] & mask[len]) != (last[len] & mask[len]))
342 return 0; /* False */
/* Return true when every bit set in 'mask' is also set in 'hw_mask'
 * (mask | hw_mask == hw_mask), i.e. the requested match can be performed
 * by the hardware extractors.
 */
349 flow_mask_is_supported(const char *mask, const char *hw_mask, int len)
352 * If no hw_mask, assume nothing is supported.
/* Without a hw_mask only an all-zero mask (match-any) is acceptable */
356 return flow_mem_is_zero(mask, len);
359 if ((mask[len] | hw_mask[len]) != hw_mask[len])
360 return 0; /* False */
/* Common validation for one rte_flow item: resolve spec/mask/last into
 * 'info', substitute the default mask when none is given, verify the
 * mask is a subset of the hardware-supported mask, and reject
 * non-contiguous ranges. Sets rte_flow_error and returns an error code
 * on failure (error paths partly on lines not visible here).
 */
366 otx2_flow_parse_item_basic(const struct rte_flow_item *item,
367 struct otx2_flow_item_info *info,
368 struct rte_flow_error *error)
370 /* Item must not be NULL */
372 rte_flow_error_set(error, EINVAL,
373 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
377 /* If spec is NULL, both mask and last must be NULL, this
378 * makes it to match ANY value (eq to mask = 0).
379 * Setting either mask or last without spec is an error
381 if (item->spec == NULL) {
382 if (item->last == NULL && item->mask == NULL) {
386 rte_flow_error_set(error, EINVAL,
387 RTE_FLOW_ERROR_TYPE_ITEM, item,
388 "mask or last set without spec");
392 /* We have valid spec */
/* RAW items handle their own spec/mask elsewhere */
393 if (item->type != RTE_FLOW_ITEM_TYPE_RAW)
394 info->spec = item->spec;
396 /* If mask is not set, use default mask, err if default mask is
399 if (item->mask == NULL) {
400 otx2_npc_dbg("Item mask null, using default mask")
401 if (info->def_mask == NULL) {
402 rte_flow_error_set(error, EINVAL,
403 RTE_FLOW_ERROR_TYPE_ITEM, item,
404 "No mask or default mask given");
407 info->mask = info->def_mask;
409 if (item->type != RTE_FLOW_ITEM_TYPE_RAW)
410 info->mask = item->mask;
413 /* mask specified must be subset of hw supported mask
414 * mask | hw_mask == hw_mask
416 if (!flow_mask_is_supported(info->mask, info->hw_mask, info->len)) {
417 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
418 item, "Unsupported field in the mask");
422 /* Now we have spec and mask. OTX2 does not support non-contiguous
423 * range. We should have either:
424 * - spec & mask == last & mask or,
/* An all-zero 'last' is equivalent to no range at all — skip the check */
428 if (item->last != NULL && !flow_mem_is_zero(item->last, info->len)) {
429 if (!flow_range_is_valid(item->spec, item->last, info->mask,
431 rte_flow_error_set(error, EINVAL,
432 RTE_FLOW_ERROR_TYPE_ITEM, item,
433 "Unsupported range for match");
/* Compact the 128-bit MCAM key: keep only the nibbles selected by
 * nibble_mask, packing them contiguously into cdata[]. 'j' counts the
 * packed nibbles; its declaration/increment and the copy of cdata back
 * to data are on lines not visible in this excerpt.
 */
442 otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask)
444 uint64_t cdata[2] = {0ULL, 0ULL}, nibble;
447 for (i = 0; i < NPC_MAX_KEY_NIBBLES; i++) {
448 if (nibble_mask & (1 << i)) {
/* Extract nibble i of data[] and append it at packed position j */
449 nibble = (data[i / 16] >> ((i & 0xf) * 4)) & 0xf;
450 cdata[j / 16] |= (nibble << ((j & 0xf) * 4));
/* Binary-search for the index of the least-significant set bit of slab.
 * The running position accumulator and the right-shifts between the
 * checks are on lines not visible in this excerpt.
 */
460 flow_first_set_bit(uint64_t slab)
464 if ((slab & 0xffffffff) == 0) {
468 if ((slab & 0xffff) == 0) {
472 if ((slab & 0xff) == 0) {
476 if ((slab & 0xf) == 0) {
480 if ((slab & 0x3) == 0) {
484 if ((slab & 0x1) == 0)
/* Move one live MCAM entry from old_ent to new_ent via the AF
 * shift-entry mailbox, then re-position the owning flow node in the
 * priority's flow list so the list stays ordered by mcam_id.
 */
491 flow_shift_lv_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
492 struct otx2_npc_flow_info *flow_info,
493 uint32_t old_ent, uint32_t new_ent)
495 struct npc_mcam_shift_entry_req *req;
496 struct npc_mcam_shift_entry_rsp *rsp;
497 struct otx2_flow_list *list;
498 struct rte_flow *flow_iter;
501 otx2_npc_dbg("Old ent:%u new ent:%u priority:%u", old_ent, new_ent,
504 list = &flow_info->flow_list[flow->priority];
506 /* Old entry is disabled & it's contents are moved to new_entry,
507 * new entry is enabled finally.
509 req = otx2_mbox_alloc_msg_npc_mcam_shift_entry(mbox);
510 req->curr_entry[0] = old_ent;
511 req->new_entry[0] = new_ent;
512 req->shift_count = 1;
514 otx2_mbox_msg_send(mbox, 0);
515 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
519 /* Remove old node from list */
/* NOTE(review): TAILQ_REMOVE inside TAILQ_FOREACH is only safe if the
 * loop exits right after removal — presumably a break follows on a line
 * not visible here; confirm.
 */
520 TAILQ_FOREACH(flow_iter, list, next) {
521 if (flow_iter->mcam_id == old_ent)
522 TAILQ_REMOVE(list, flow_iter, next);
525 /* Insert node with new mcam id at right place */
526 TAILQ_FOREACH(flow_iter, list, next) {
527 if (flow_iter->mcam_id > new_ent)
528 TAILQ_INSERT_BEFORE(flow_iter, flow, next);
533 /* Exchange all required entries with a given priority level */
535 flow_shift_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
536 struct otx2_npc_flow_info *flow_info,
537 struct npc_mcam_alloc_entry_rsp *rsp, int dir, int prio_lvl)
539 struct rte_bitmap *fr_bmp, *fr_bmp_rev, *lv_bmp, *lv_bmp_rev, *bmp;
540 uint32_t e_fr = 0, e_lv = 0, e, e_id = 0, mcam_entries;
541 uint64_t fr_bit_pos = 0, lv_bit_pos = 0, bit_pos = 0;
542 /* Bit position within the slab */
543 uint32_t sl_fr_bit_off = 0, sl_lv_bit_off = 0;
544 /* Overall bit position of the start of slab */
545 /* free & live entry index */
546 int rc_fr = 0, rc_lv = 0, rc = 0, idx = 0;
547 struct otx2_mcam_ents_info *ent_info;
548 /* free & live bitmap slab */
549 uint64_t sl_fr = 0, sl_lv = 0, *sl;
551 fr_bmp = flow_info->free_entries[prio_lvl];
552 fr_bmp_rev = flow_info->free_entries_rev[prio_lvl];
553 lv_bmp = flow_info->live_entries[prio_lvl];
554 lv_bmp_rev = flow_info->live_entries_rev[prio_lvl];
555 ent_info = &flow_info->flow_entry_info[prio_lvl];
556 mcam_entries = flow_info->mcam_entries;
559 /* New entries allocated are always contiguous, but older entries
560 * already in free/live bitmap can be non-contiguous: so return
561 * shifted entries should be in non-contiguous format.
563 while (idx <= rsp->count) {
564 if (!sl_fr && !sl_lv) {
565 /* Lower index elements to be exchanged */
567 rc_fr = rte_bitmap_scan(fr_bmp, &e_fr, &sl_fr);
568 rc_lv = rte_bitmap_scan(lv_bmp, &e_lv, &sl_lv);
569 otx2_npc_dbg("Fwd slab rc fr %u rc lv %u "
570 "e_fr %u e_lv %u", rc_fr, rc_lv,
573 rc_fr = rte_bitmap_scan(fr_bmp_rev,
576 rc_lv = rte_bitmap_scan(lv_bmp_rev,
580 otx2_npc_dbg("Rev slab rc fr %u rc lv %u "
581 "e_fr %u e_lv %u", rc_fr, rc_lv,
587 fr_bit_pos = flow_first_set_bit(sl_fr);
588 e_fr = sl_fr_bit_off + fr_bit_pos;
589 otx2_npc_dbg("Fr_bit_pos 0x%" PRIx64, fr_bit_pos);
595 lv_bit_pos = flow_first_set_bit(sl_lv);
596 e_lv = sl_lv_bit_off + lv_bit_pos;
597 otx2_npc_dbg("Lv_bit_pos 0x%" PRIx64, lv_bit_pos);
602 /* First entry is from free_bmap */
607 bit_pos = fr_bit_pos;
609 e_id = mcam_entries - e - 1;
612 otx2_npc_dbg("Fr e %u e_id %u", e, e_id);
617 bit_pos = lv_bit_pos;
619 e_id = mcam_entries - e - 1;
623 otx2_npc_dbg("Lv e %u e_id %u", e, e_id);
624 if (idx < rsp->count)
626 flow_shift_lv_ent(mbox, flow,
631 rte_bitmap_clear(bmp, e);
632 rte_bitmap_set(bmp, rsp->entry + idx);
633 /* Update entry list, use non-contiguous
636 rsp->entry_list[idx] = e_id;
637 *sl &= ~(1 << bit_pos);
639 /* Update min & max entry identifiers in current
643 ent_info->max_id = rsp->entry + idx;
644 ent_info->min_id = e_id;
646 ent_info->max_id = e_id;
647 ent_info->min_id = rsp->entry;
655 /* Validate if newly allocated entries lie in the correct priority zone
656 * since NPC_MCAM_LOWER_PRIO & NPC_MCAM_HIGHER_PRIO don't ensure zone accuracy.
657 * If not properly aligned, shift entries to do so
660 flow_validate_and_shift_prio_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
661 struct otx2_npc_flow_info *flow_info,
662 struct npc_mcam_alloc_entry_rsp *rsp,
665 int prio_idx = 0, rc = 0, needs_shift = 0, idx, prio = flow->priority;
666 struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
667 int dir = (req_prio == NPC_MCAM_HIGHER_PRIO) ? 1 : -1;
668 uint32_t tot_ent = 0;
670 otx2_npc_dbg("Dir %d, priority = %d", dir, prio);
673 prio_idx = flow_info->flow_max_priority - 1;
675 /* Only live entries needs to be shifted, free entries can just be
676 * moved by bits manipulation.
679 /* For dir = -1(NPC_MCAM_LOWER_PRIO), when shifting,
680 * NPC_MAX_PREALLOC_ENT are exchanged with adjoining higher priority
681 * level entries(lower indexes).
683 * For dir = +1(NPC_MCAM_HIGHER_PRIO), during shift,
684 * NPC_MAX_PREALLOC_ENT are exchanged with adjoining lower priority
685 * level entries(higher indexes) with highest indexes.
688 tot_ent = info[prio_idx].free_ent + info[prio_idx].live_ent;
690 if (dir < 0 && prio_idx != prio &&
691 rsp->entry > info[prio_idx].max_id && tot_ent) {
692 otx2_npc_dbg("Rsp entry %u prio idx %u "
693 "max id %u", rsp->entry, prio_idx,
694 info[prio_idx].max_id);
697 } else if ((dir > 0) && (prio_idx != prio) &&
698 (rsp->entry < info[prio_idx].min_id) && tot_ent) {
699 otx2_npc_dbg("Rsp entry %u prio idx %u "
700 "min id %u", rsp->entry, prio_idx,
701 info[prio_idx].min_id);
705 otx2_npc_dbg("Needs_shift = %d", needs_shift);
708 rc = flow_shift_ent(mbox, flow, flow_info, rsp, dir,
711 for (idx = 0; idx < rsp->count; idx++)
712 rsp->entry_list[idx] = rsp->entry + idx;
714 } while ((prio_idx != prio) && (prio_idx += dir));
/* Find a reference MCAM entry for a priority-relative allocation: search
 * outward from prio_lvl for the nearest level that has live entries.
 * Sets *prio to NPC_MCAM_HIGHER_PRIO or NPC_MCAM_LOWER_PRIO relative to
 * the returned reference entry, or NPC_MCAM_ANY_PRIO when no level has
 * live entries (the value returned in that case is on a line not shown).
 */
720 flow_find_ref_entry(struct otx2_npc_flow_info *flow_info, int *prio,
723 struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
726 while (step < flow_info->flow_max_priority) {
/* Level at a higher index with live entries: allocate above its min id */
727 if (((prio_lvl + step) < flow_info->flow_max_priority) &&
728 info[prio_lvl + step].live_ent) {
729 *prio = NPC_MCAM_HIGHER_PRIO;
730 return info[prio_lvl + step].min_id;
/* Level at a lower index with live entries: allocate below its max id */
733 if (((prio_lvl - step) >= 0) &&
734 info[prio_lvl - step].live_ent) {
735 otx2_npc_dbg("Prio_lvl %u live %u", prio_lvl - step,
736 info[prio_lvl - step].live_ent);
737 *prio = NPC_MCAM_LOWER_PRIO;
738 return info[prio_lvl - step].max_id;
742 *prio = NPC_MCAM_ANY_PRIO;
/* Refill the per-priority preallocated MCAM entry cache: allocate a
 * batch of entries from the AF relative to a reference entry, shift
 * existing entries when the new ones landed in the wrong priority zone,
 * then add all but one of them to the free bitmaps. The remaining entry
 * is returned via *free_ent and marked live.
 */
747 flow_fill_entry_cache(struct otx2_mbox *mbox, struct rte_flow *flow,
748 struct otx2_npc_flow_info *flow_info, uint32_t *free_ent)
750 struct rte_bitmap *free_bmp, *free_bmp_rev, *live_bmp, *live_bmp_rev;
751 struct npc_mcam_alloc_entry_rsp rsp_local;
752 struct npc_mcam_alloc_entry_rsp *rsp_cmd;
753 struct npc_mcam_alloc_entry_req *req;
754 struct npc_mcam_alloc_entry_rsp *rsp;
755 struct otx2_mcam_ents_info *info;
756 uint16_t ref_ent, idx;
759 info = &flow_info->flow_entry_info[flow->priority];
760 free_bmp = flow_info->free_entries[flow->priority];
761 free_bmp_rev = flow_info->free_entries_rev[flow->priority];
762 live_bmp = flow_info->live_entries[flow->priority];
763 live_bmp_rev = flow_info->live_entries_rev[flow->priority];
/* Pick a reference entry and relative priority for the alloc request */
765 ref_ent = flow_find_ref_entry(flow_info, &prio, flow->priority);
767 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
769 req->count = flow_info->flow_prealloc_size;
770 req->priority = prio;
771 req->ref_entry = ref_ent;
773 otx2_npc_dbg("Fill cache ref entry %u prio %u", ref_ent, prio);
775 otx2_mbox_msg_send(mbox, 0);
776 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp_cmd);
/* Work on a copy so the mailbox response buffer can be reused */
781 memcpy(rsp, rsp_cmd, sizeof(*rsp));
783 otx2_npc_dbg("Alloc entry %u count %u , prio = %d", rsp->entry,
786 /* Non-first ent cache fill */
787 if (prio != NPC_MCAM_ANY_PRIO) {
788 flow_validate_and_shift_prio_ent(mbox, flow, flow_info, rsp,
791 /* Copy into response entry list */
792 for (idx = 0; idx < rsp->count; idx++)
793 rsp->entry_list[idx] = rsp->entry + idx;
796 otx2_npc_dbg("Fill entry cache rsp count %u", rsp->count);
797 /* Update free entries, reverse free entries list,
798 * min & max entry ids.
800 for (idx = 0; idx < rsp->count; idx++) {
801 if (unlikely(rsp->entry_list[idx] < info->min_id))
802 info->min_id = rsp->entry_list[idx];
804 if (unlikely(rsp->entry_list[idx] > info->max_id))
805 info->max_id = rsp->entry_list[idx];
807 /* Skip entry to be returned, not to be part of free
810 if (prio == NPC_MCAM_HIGHER_PRIO) {
811 if (unlikely(idx == (rsp->count - 1))) {
812 *free_ent = rsp->entry_list[idx];
816 if (unlikely(!idx)) {
817 *free_ent = rsp->entry_list[idx];
/* Everything else goes into the free bitmaps (forward and reversed) */
822 rte_bitmap_set(free_bmp, rsp->entry_list[idx]);
823 rte_bitmap_set(free_bmp_rev, flow_info->mcam_entries -
824 rsp->entry_list[idx] - 1);
826 otx2_npc_dbg("Final rsp entry %u rsp entry rev %u",
827 rsp->entry_list[idx],
828 flow_info->mcam_entries - rsp->entry_list[idx] - 1);
831 otx2_npc_dbg("Cache free entry %u, rev = %u", *free_ent,
832 flow_info->mcam_entries - *free_ent - 1);
/* The returned entry is immediately accounted as live */
834 rte_bitmap_set(live_bmp, *free_ent);
835 rte_bitmap_set(live_bmp_rev, flow_info->mcam_entries - *free_ent - 1);
/* Obtain a free MCAM entry for the flow's priority: take one from the
 * preallocated free-entry cache when available (moving it from the free
 * bitmaps to the live ones), otherwise replenish the cache via
 * flow_fill_entry_cache(). The entry index is returned on a line not
 * visible in this excerpt.
 */
841 flow_check_preallocated_entry_cache(struct otx2_mbox *mbox,
842 struct rte_flow *flow,
843 struct otx2_npc_flow_info *flow_info)
845 struct rte_bitmap *free, *free_rev, *live, *live_rev;
846 uint32_t pos = 0, free_ent = 0, mcam_entries;
847 struct otx2_mcam_ents_info *info;
851 otx2_npc_dbg("Flow priority %u", flow->priority);
853 info = &flow_info->flow_entry_info[flow->priority];
855 free_rev = flow_info->free_entries_rev[flow->priority];
856 free = flow_info->free_entries[flow->priority];
857 live_rev = flow_info->live_entries_rev[flow->priority];
858 live = flow_info->live_entries[flow->priority];
859 mcam_entries = flow_info->mcam_entries;
/* Fast path: the cache still holds free entries */
861 if (info->free_ent) {
862 rc = rte_bitmap_scan(free, &pos, &slab);
864 /* Get free_ent from free entry bitmap */
865 free_ent = pos + __builtin_ctzll(slab);
866 otx2_npc_dbg("Allocated from cache entry %u", free_ent);
867 /* Remove from free bitmaps and add to live ones */
868 rte_bitmap_clear(free, free_ent);
869 rte_bitmap_set(live, free_ent);
870 rte_bitmap_clear(free_rev,
871 mcam_entries - free_ent - 1);
872 rte_bitmap_set(live_rev,
873 mcam_entries - free_ent - 1);
/* Cache claimed free entries but the bitmap scan found none */
880 otx2_npc_dbg("No free entry:its a mess");
/* Slow path: replenish the cache and take one entry from it */
884 rc = flow_fill_entry_cache(mbox, flow, flow_info, &free_ent);
892 otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, struct otx2_mbox *mbox,
893 struct otx2_parse_state *pst,
894 struct otx2_npc_flow_info *flow_info)
896 int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1);
897 struct npc_mcam_read_base_rule_rsp *base_rule_rsp;
898 struct npc_mcam_write_entry_req *req;
899 struct mcam_entry *base_entry;
900 struct mbox_msghdr *rsp;
906 rc = flow_mcam_alloc_counter(mbox, &ctr);
911 entry = flow_check_preallocated_entry_cache(mbox, flow, flow_info);
913 otx2_err("Prealloc failed");
914 otx2_flow_mcam_free_counter(mbox, ctr);
915 return NPC_MCAM_ALLOC_FAILED;
919 (void)otx2_mbox_alloc_msg_npc_read_base_steer_rule(mbox);
920 rc = otx2_mbox_process_msg(mbox, (void *)&base_rule_rsp);
922 otx2_err("Failed to fetch VF's base MCAM entry");
925 base_entry = &base_rule_rsp->entry_data;
926 for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
927 flow->mcam_data[idx] |= base_entry->kw[idx];
928 flow->mcam_mask[idx] |= base_entry->kw_mask[idx];
932 req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
933 req->set_cntr = use_ctr;
936 otx2_npc_dbg("Alloc & write entry %u", entry);
939 (flow->nix_intf == OTX2_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
940 req->enable_entry = 1;
941 req->entry_data.action = flow->npc_action;
942 req->entry_data.vtag_action = flow->vtag_action;
944 for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
945 req->entry_data.kw[idx] = flow->mcam_data[idx];
946 req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
949 if (flow->nix_intf == OTX2_INTF_RX) {
950 req->entry_data.kw[0] |= flow_info->channel;
951 req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
953 uint16_t pf_func = (flow->npc_action >> 48) & 0xffff;
955 pf_func = htons(pf_func);
956 req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
957 req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
960 otx2_mbox_msg_send(mbox, 0);
961 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
965 flow->mcam_id = entry;