1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_ethdev.h"
/*
 * Release every NPC resource held by this port: all MCAM entries, any
 * MCAM counters attached to flows, and every node on the per-priority
 * flow lists. Live-entry bitmaps are cleared as flows are removed.
 * NOTE(review): this extract is truncated — return type, braces and the
 * declarations of idx/rc/entry_count are not visible here.
 */
9 otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
11 struct otx2_npc_flow_info *npc = &hw->npc_flow;
12 struct otx2_mbox *mbox = hw->mbox;
13 struct otx2_mcam_ents_info *info;
14 struct rte_bitmap *bmap;
15 struct rte_flow *flow;
/* Sum live MCAM entries across all priorities. */
19 for (idx = 0; idx < npc->flow_max_priority; idx++) {
20 info = &npc->flow_entry_info[idx];
21 entry_count += info->live_ent;
27 /* Free all MCAM entries allocated */
28 rc = otx2_flow_mcam_free_all_entries(mbox);
30 /* Free any MCAM counters and delete flow list */
31 for (idx = 0; idx < npc->flow_max_priority; idx++) {
32 while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
/* Release the flow's counter, if one was attached. */
33 if (flow->ctr_id != NPC_COUNTER_NONE)
34 rc |= otx2_flow_mcam_free_counter(mbox,
37 TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
/* Mark this flow's MCAM slot free in the live-entries bitmap. */
39 bmap = npc->live_entries[flow->priority];
40 rte_bitmap_clear(bmap, flow->mcam_id);
42 info = &npc->flow_entry_info[idx];
/*
 * Build the MCAM search key (data + mask) for a parsed flow, copy it
 * into the flow's mcam_data/mcam_mask, then allocate and write a
 * hardware MCAM entry via otx2_flow_mcam_alloc_and_write().
 * NOTE(review): truncated extract — return type, braces, and several
 * statements (bit/index advancement, parts of the inner loops) are
 * missing from this view.
 */
51 flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
52 struct otx2_npc_flow_info *flow_info)
54 /* This is non-LDATA part in search key */
55 uint64_t key_data[2] = {0ULL, 0ULL};
56 uint64_t key_mask[2] = {0ULL, 0ULL};
57 int intf = pst->flow->nix_intf;
58 int key_len, bit = 0, index;
59 int off, idx, data_off = 0;
60 uint8_t lid, mask, data;
65 /* Skip till Layer A data start */
66 while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
67 if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
72 /* Each bit represents 1 nibble */
/* Fold each layer's LT/FLAGS nibbles into the key, per the KEX mask. */
76 for (lid = 0; lid < NPC_MAX_LID; lid++) {
78 off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
79 lt = pst->lt[lid] & 0xf;
80 flags = pst->flags[lid] & 0xff;
/* 3-bit field: which of this layer's nibbles the key supports. */
83 layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);
86 for (idx = 0; idx <= 2 ; idx++) {
87 if (layer_info & (1 << idx)) {
91 data = ((flags >> 4) & 0xf);
99 key_data[index] |= ((uint64_t)data <<
104 key_mask[index] |= ((uint64_t)mask <<
112 otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
113 key_data[0], key_data[1]);
115 /* Copy this into mcam string */
/* Round key length (in nibbles) up to whole bytes. */
116 key_len = (pst->npc->keyx_len[intf] + 7) / 8;
117 otx2_npc_dbg("Key_len = %d", key_len);
118 memcpy(pst->flow->mcam_data, key_data, key_len);
119 memcpy(pst->flow->mcam_mask, key_mask, key_len);
121 otx2_npc_dbg("Final flow data");
122 for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
123 otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
124 idx, pst->flow->mcam_data[idx],
125 idx, pst->flow->mcam_mask[idx]);
129 * Now we have mcam data and mask formatted as
130 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
131 * hole is present if key_len is odd number of nibbles.
132 * mcam data must be split into 64 bits + 48 bits segments
133 * for each back W0, W1.
136 return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
/*
 * Validate rte_flow attributes for this device and record the direction
 * (nix_intf) and priority on the flow. Unsupported attributes are
 * reported through rte_flow_error_set() with ENOTSUP.
 * NOTE(review): truncated extract — the leading NULL-attr guard, the
 * ingress/egress branch conditions and the return statements are not
 * visible here.
 */
140 flow_parse_attr(struct rte_eth_dev *eth_dev,
141 const struct rte_flow_attr *attr,
142 struct rte_flow_error *error,
143 struct rte_flow *flow)
145 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
146 const char *errmsg = NULL;
149 errmsg = "Attribute can't be empty";
150 else if (attr->group)
151 errmsg = "Groups are not supported";
152 else if (attr->priority >= dev->npc_flow.flow_max_priority)
153 errmsg = "Priority should be with in specified range";
/* Exactly one of ingress/egress must be set. */
154 else if ((!attr->egress && !attr->ingress) ||
155 (attr->egress && attr->ingress))
156 errmsg = "Exactly one of ingress or egress must be set";
158 if (errmsg != NULL) {
159 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
165 flow->nix_intf = OTX2_INTF_RX;
167 flow->nix_intf = OTX2_INTF_TX;
169 flow->priority = attr->priority;
/*
 * Scan 'bmap' for the first clear bit in [0, size) and store its index
 * in *pos. Returns 0 when a free group was found, -1 otherwise.
 * NOTE(review): loop body's break statement is elided in this extract.
 */
174 flow_get_free_rss_grp(struct rte_bitmap *bmap,
175 uint32_t size, uint32_t *pos)
177 for (*pos = 0; *pos < size; ++*pos) {
178 if (!rte_bitmap_get(bmap, *pos))
182 return *pos < size ? 0 : -1;
/*
 * Configure hardware RSS for a flow's RSS action: pick a free RSS
 * group, program the key, build/replicate the indirection table, set
 * the hash function, and mark the group used in the bitmap.
 * Outputs the chosen algorithm index (*alg_idx) and group (*rss_grp).
 * NOTE(review): truncated extract — braces, error returns and some
 * assignments (e.g. *rss_grp) are not visible here.
 */
186 flow_configure_rss_action(struct otx2_eth_dev *dev,
187 const struct rte_flow_action_rss *rss,
188 uint8_t *alg_idx, uint32_t *rss_grp,
191 struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
192 uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
193 uint32_t flowkey_cfg, grp_aval, i;
194 uint16_t *ind_tbl = NULL;
195 uint8_t flowkey_algx;
198 rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
199 flow_info->rss_grps, &grp_aval);
200 /* RSS group :0 is not usable for flow rss action */
201 if (rc < 0 || grp_aval == 0)
206 otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
209 /* If queue count passed in the rss action is less than
210 * HW configured reta size, replicate rss action reta
211 * across HW reta table.
213 if (dev->rss_info.rss_size > rss->queue_num) {
/* Tile the user's queue list across the HW reta. */
216 for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
217 memcpy(reta + i * rss->queue_num, rss->queue,
218 sizeof(uint16_t) * rss->queue_num);
/* Copy the remainder when rss_size is not a multiple of queue_num. */
220 i = dev->rss_info.rss_size % rss->queue_num;
222 memcpy(&reta[dev->rss_info.rss_size] - i,
223 rss->queue, i * sizeof(uint16_t))
225 ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
228 rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
230 otx2_err("Failed to init rss table rc = %d", rc);
/* Translate rte_flow RSS types into a NIX flow-key config. */
234 flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);
236 rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
237 *rss_grp, mcam_index);
239 otx2_err("Failed to set rss hash function rc = %d", rc);
243 *alg_idx = flowkey_algx;
/* Reserve the group so later flows cannot claim it. */
245 rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);
/*
 * Walk the action list; for an RSS action, configure hardware RSS and
 * fold the algorithm index and group into flow->npc_action.
 * NOTE(review): truncated extract — braces, rc handling and the
 * npc_action assignment's left-hand side are not visible here.
 */
252 flow_program_rss_action(struct rte_eth_dev *eth_dev,
253 const struct rte_flow_action actions[],
254 struct rte_flow *flow)
256 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
257 const struct rte_flow_action_rss *rss;
262 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
263 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
264 rss = (const struct rte_flow_action_rss *)actions->conf;
266 rc = flow_configure_rss_action(dev,
267 rss, &alg_idx, &rss_grp,
/* Encode alg index and group into the NPC action word. */
273 ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
274 NIX_RSS_ACT_ALG_OFFSET) |
275 ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
276 NIX_RSS_ACT_GRP_OFFSET);
/*
 * If the flow carried an RSS action, release its RSS group back to the
 * group bitmap. Group 0 (and out-of-range groups) are never released.
 * NOTE(review): truncated extract — rss_grp declaration, braces and
 * the return are not visible here.
 */
283 flow_free_rss_action(struct rte_eth_dev *eth_dev,
284 struct rte_flow *flow)
286 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
287 struct otx2_npc_flow_info *npc = &dev->npc_flow;
290 if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
/* Recover the group index encoded in npc_action. */
291 rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
292 NIX_RSS_ACT_GRP_MASK;
293 if (rss_grp == 0 || rss_grp >= npc->rss_grps)
296 rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
/*
 * First parse stage; meta items need no key bits, so this only logs.
 * NOTE(review): return type/value elided in this extract.
 */
304 flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
306 otx2_npc_dbg("Meta Item");
311 * Parse function of each layer:
312 * - Consume one or more patterns that are relevant.
313 * - Update parse_state
314 * - Set parse_state.pattern = last item consumed
315 * - Set appropriate error code/message when returning error.
/* Signature shared by all per-layer parse stages in parse_stage_funcs[]. */
317 typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);
/*
 * Run the rte_flow pattern through the ordered per-layer parse stages,
 * accumulating MCAM key bytes into flow->mcam_data/mcam_mask via the
 * parse state. Fails if items remain after all stages are exhausted.
 * NOTE(review): truncated extract — most of the parse_stage_funcs[]
 * initializer, layer/rc declarations, braces, stage-error handling and
 * the layer++ advance are not visible here.
 */
320 flow_parse_pattern(struct rte_eth_dev *dev,
321 const struct rte_flow_item pattern[],
322 struct rte_flow_error *error,
323 struct rte_flow *flow,
324 struct otx2_parse_state *pst)
326 flow_parse_stage_func_t parse_stage_funcs[] = {
327 flow_parse_meta_items,
328 otx2_flow_parse_higig2_hdr,
338 struct otx2_eth_dev *hw = dev->data->dev_private;
343 if (pattern == NULL) {
344 rte_flow_error_set(error, EINVAL,
345 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
350 memset(pst, 0, sizeof(*pst));
351 pst->npc = &hw->npc_flow;
355 /* Use integral byte offset */
356 key_offset = pst->npc->keyx_len[flow->nix_intf];
357 key_offset = (key_offset + 7) / 8;
359 /* Location where LDATA would begin */
360 pst->mcam_data = (uint8_t *)flow->mcam_data;
361 pst->mcam_mask = (uint8_t *)flow->mcam_mask;
363 while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
364 layer < RTE_DIM(parse_stage_funcs)) {
365 otx2_npc_dbg("Pattern type = %d", pattern->type);
367 /* Skip place-holders */
368 pattern = otx2_flow_skip_void_and_any_items(pattern);
370 pst->pattern = pattern;
371 otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
372 rc = parse_stage_funcs[layer](pst);
379 * Parse stage function sets pst->pattern to
380 * 1 past the last item it consumed.
382 pattern = pst->pattern;
388 /* Skip trailing place-holders */
389 pattern = otx2_flow_skip_void_and_any_items(pattern);
391 /* Are there more items than what we can handle? */
392 if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
393 rte_flow_error_set(error, ENOTSUP,
394 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
395 "unsupported item in the sequence");
/*
 * Parse a complete rule: attributes first, then actions, then pattern.
 * Each sub-parser reports failures through 'error'; 'pst' carries the
 * accumulated MCAM key state for the later program step.
 * NOTE(review): truncated extract — return type, err declaration,
 * early-return checks after each step and the final return are not
 * visible here.
 */
403 flow_parse_rule(struct rte_eth_dev *dev,
404 const struct rte_flow_attr *attr,
405 const struct rte_flow_item pattern[],
406 const struct rte_flow_action actions[],
407 struct rte_flow_error *error,
408 struct rte_flow *flow,
409 struct otx2_parse_state *pst)
413 /* Check attributes */
414 err = flow_parse_attr(dev, attr, error, flow);
419 err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
424 err = flow_parse_pattern(dev, pattern, error, flow, pst);
428 /* Check for overlaps? */
/*
 * rte_flow .validate callback: dry-run the full rule parse against a
 * throwaway flow object without programming any hardware state.
 */
433 otx2_flow_validate(struct rte_eth_dev *dev,
434 const struct rte_flow_attr *attr,
435 const struct rte_flow_item pattern[],
436 const struct rte_flow_action actions[],
437 struct rte_flow_error *error)
439 struct otx2_parse_state parse_state;
440 struct rte_flow flow;
442 memset(&flow, 0, sizeof(flow));
443 return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
/*
 * rte_flow .create callback: allocate a flow, parse the rule, program
 * the NPC MCAM entry and any RSS action, then insert the flow into the
 * per-priority list kept sorted by ascending mcam_id.
 * NOTE(review): truncated extract — rc declaration, NULL check after
 * rte_zmalloc, error-path cleanup (freeing 'flow') and the final
 * return statements are not visible here.
 */
447 static struct rte_flow *
448 otx2_flow_create(struct rte_eth_dev *dev,
449 const struct rte_flow_attr *attr,
450 const struct rte_flow_item pattern[],
451 const struct rte_flow_action actions[],
452 struct rte_flow_error *error)
454 struct otx2_eth_dev *hw = dev->data->dev_private;
455 struct otx2_parse_state parse_state;
456 struct otx2_mbox *mbox = hw->mbox;
457 struct rte_flow *flow, *flow_iter;
458 struct otx2_flow_list *list;
461 flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
463 rte_flow_error_set(error, ENOMEM,
464 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
466 "Memory allocation failed");
469 memset(flow, 0, sizeof(*flow));
471 rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
476 rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
478 rte_flow_error_set(error, EIO,
479 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
481 "Failed to insert filter");
485 rc = flow_program_rss_action(dev, actions, flow);
487 rte_flow_error_set(error, EIO,
488 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
490 "Failed to program rss action");
495 list = &hw->npc_flow.flow_list[flow->priority];
496 /* List in ascending order of mcam entries */
497 TAILQ_FOREACH(flow_iter, list, next) {
498 if (flow_iter->mcam_id > flow->mcam_id) {
499 TAILQ_INSERT_BEFORE(flow_iter, flow, next);
/* No larger mcam_id found: append at the tail. */
504 TAILQ_INSERT_TAIL(list, flow, next);
/*
 * rte_flow .destroy callback: drop any MARK action refcount (restoring
 * the Rx function if it hits zero), free the RSS action's group, free
 * the MCAM entry, unlink the flow and clear its live-entry bit.
 * NOTE(review): truncated extract — match_id/rc declarations, counter
 * freeing, rte_free(flow) and returns are not visible here.
 */
513 otx2_flow_destroy(struct rte_eth_dev *dev,
514 struct rte_flow *flow,
515 struct rte_flow_error *error)
517 struct otx2_eth_dev *hw = dev->data->dev_private;
518 struct otx2_npc_flow_info *npc = &hw->npc_flow;
519 struct otx2_mbox *mbox = hw->mbox;
520 struct rte_bitmap *bmap;
/* Extract the MARK/FLAG id encoded in the Rx action word. */
524 match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
525 NIX_RX_ACT_MATCH_MASK;
527 if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
528 if (rte_atomic32_read(&npc->mark_actions) == 0)
531 /* Clear mark offload flag if there are no more mark actions */
532 if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
533 hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
534 otx2_eth_set_rx_function(dev);
538 rc = flow_free_rss_action(dev, flow);
540 rte_flow_error_set(error, EIO,
541 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
543 "Failed to free rss action");
546 rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
548 rte_flow_error_set(error, EIO,
549 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
551 "Failed to destroy filter");
554 TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);
/* Release the MCAM slot in this priority's live-entries bitmap. */
556 bmap = npc->live_entries[flow->priority];
557 rte_bitmap_clear(bmap, flow->mcam_id);
/*
 * rte_flow .flush callback: tear down every flow on the port via
 * otx2_flow_free_all_resources(), reporting failure as EIO.
 * NOTE(review): truncated extract — rc declaration, braces and
 * return statements are not visible here.
 */
564 otx2_flow_flush(struct rte_eth_dev *dev,
565 struct rte_flow_error *error)
567 struct otx2_eth_dev *hw = dev->data->dev_private;
570 rc = otx2_flow_free_all_resources(hw);
572 otx2_err("Error when deleting NPC MCAM entries "
574 rte_flow_error_set(error, EIO,
575 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
577 "Failed to flush filter");
/*
 * rte_flow .isolate callback: isolation mode is not implemented for
 * this PMD, so always fail with ENOTSUP.
 */
585 otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
586 int enable __rte_unused,
587 struct rte_flow_error *error)
590 * If we support, we need to un-install the default mcam
591 * entry for this port.
594 rte_flow_error_set(error, ENOTSUP,
595 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
597 "Flow isolation not supported");
/*
 * rte_flow .query callback: only the COUNT action is supported. Reads
 * the flow's MCAM counter into query->hits (byte counts are not
 * available), optionally clearing it when query->reset is requested.
 * All failures funnel into a single rte_flow_error_set() exit path.
 * NOTE(review): truncated extract — 'void *data' parameter line, rc
 * declaration, hits_set assignment, the reset check and returns are
 * not visible here.
 */
603 otx2_flow_query(struct rte_eth_dev *dev,
604 struct rte_flow *flow,
605 const struct rte_flow_action *action,
607 struct rte_flow_error *error)
609 struct otx2_eth_dev *hw = dev->data->dev_private;
610 struct rte_flow_query_count *query = data;
611 struct otx2_mbox *mbox = hw->mbox;
612 const char *errmsg = NULL;
613 int errcode = ENOTSUP;
616 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
617 errmsg = "Only COUNT is supported in query";
621 if (flow->ctr_id == NPC_COUNTER_NONE) {
622 errmsg = "Counter is not available";
626 rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
629 errmsg = "Error reading flow counter";
/* Hardware exposes packet hits only; byte count is unavailable. */
633 query->bytes_set = 0;
636 rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
639 errmsg = "Error clearing flow counter";
646 rte_flow_error_set(error, errcode,
647 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
/* rte_flow ops vector exported by this PMD (see rte_flow driver API). */
653 const struct rte_flow_ops otx2_flow_ops = {
654 .validate = otx2_flow_validate,
655 .create = otx2_flow_create,
656 .destroy = otx2_flow_destroy,
657 .flush = otx2_flow_flush,
658 .query = otx2_flow_query,
659 .isolate = otx2_flow_isolate,
/*
 * Derive the supported key length (in bits of key data) from a
 * nibble-mask: counts set bits, 4 key bits per supported nibble.
 * NOTE(review): truncated extract — return type, nib_count declaration
 * and the popcount loop around the clear-lowest-bit step are missing.
 */
663 flow_supp_key_len(uint32_t supp_mask)
/* Clears the lowest set bit each iteration (Kernighan popcount step). */
668 supp_mask &= (supp_mask - 1);
670 return nib_count * 4;
673 /* Refer HRM register:
674 * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
676 * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
678 #define BYTESM1_SHIFT 16
679 #define HDR_OFF_SHIFT 8
/*
 * Decode one KEX config register value into an npc_xtract_info:
 * extract length (BYTESM1 field + 1), header offset, key offset and
 * the enable/flags-enable bits.
 * NOTE(review): the 'uint64_t val' parameter line is elided in this
 * extract.
 */
681 flow_update_kex_info(struct npc_xtract_info *xtract_info,
684 xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
685 xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
686 xtract_info->key_off = val & 0x3f;
687 xtract_info->enable = ((val >> 7) & 0x1);
688 xtract_info->flags_enable = ((val >> 6) & 0x1);
/*
 * Digest the kernel's KEX config response into the driver's npc flow
 * info: per-interface supported-nibble masks and key lengths, key
 * widths, per-LD-flag extract configs, per-LID/LT/LD extract configs,
 * and the two LDATA flag config words.
 * NOTE(review): truncated extract — the 'p' pointer declaration/
 * assignment and several braces are not visible here.
 */
692 flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
693 struct npc_get_kex_cfg_rsp *kex_rsp)
695 volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
697 struct npc_xtract_info *x_info = NULL;
698 int lid, lt, ld, fl, ix;
/* Low 31 bits of the keyx cfg are the supported-nibble mask. */
703 npc->keyx_supp_nmask[NPC_MCAM_RX] =
704 kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
705 npc->keyx_supp_nmask[NPC_MCAM_TX] =
706 kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
707 npc->keyx_len[NPC_MCAM_RX] =
708 flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
709 npc->keyx_len[NPC_MCAM_TX] =
710 flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);
/* Bits 34:32 carry the key width selector. */
712 keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
713 npc->keyw[NPC_MCAM_RX] = keyw;
714 keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
715 npc->keyw[NPC_MCAM_TX] = keyw;
717 /* Update KEX_LD_FLAG */
718 for (ix = 0; ix < NPC_MAX_INTF; ix++) {
719 for (ld = 0; ld < NPC_MAX_LD; ld++) {
720 for (fl = 0; fl < NPC_MAX_LFL; fl++) {
722 &npc->prx_fxcfg[ix][ld][fl].xtract[0];
723 val = kex_rsp->intf_ld_flags[ix][ld][fl];
724 flow_update_kex_info(x_info, val);
729 /* Update LID, LT and LDATA cfg */
731 q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
732 (&kex_rsp->intf_lid_lt_ld);
733 for (ix = 0; ix < NPC_MAX_INTF; ix++) {
734 for (lid = 0; lid < NPC_MAX_LID; lid++) {
735 for (lt = 0; lt < NPC_MAX_LT; lt++) {
736 for (ld = 0; ld < NPC_MAX_LD; ld++) {
737 x_info = &(*p)[ix][lid][lt].xtract[ld];
738 val = (*q)[ix][lid][lt][ld];
739 flow_update_kex_info(x_info, val);
744 /* Update LDATA Flags cfg */
745 npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
746 npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
/*
 * Look up (or create on first use) the process-wide memzone that holds
 * the KEX config shared between drivers on the same device; the
 * refcount starts at zero on first reservation.
 * NOTE(review): truncated extract — the lookup-hit path, NULL checks
 * on 'mz', the idev assignment and the returns are not visible here.
 */
749 static struct otx2_idev_kex_cfg *
750 flow_intra_dev_kex_cfg(void)
752 static const char name[] = "octeontx2_intra_device_kex_conf";
753 struct otx2_idev_kex_cfg *idev;
754 const struct rte_memzone *mz;
756 mz = rte_memzone_lookup(name);
760 /* Request for the first time */
761 mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
762 SOCKET_ID_ANY, 0, OTX2_ALIGN);
765 rte_atomic16_set(&idev->kex_refcnt, 0);
/*
 * Fetch the NPC KEX configuration. The first caller across drivers
 * (refcnt transition to 1) pulls it from the kernel AF via mailbox and
 * caches it in the shared idev memzone; everyone then copies the MKEX
 * profile name and digests the cached config into dev->npc_flow.
 * NOTE(review): truncated extract — return type, rc declaration, the
 * NULL check on 'idev', error-path refcount rollback and the returns
 * are not visible here.
 */
772 flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
774 struct otx2_npc_flow_info *npc = &dev->npc_flow;
775 struct npc_get_kex_cfg_rsp *kex_rsp;
776 struct otx2_mbox *mbox = dev->mbox;
777 char mkex_pfl_name[MKEX_NAME_LEN];
778 struct otx2_idev_kex_cfg *idev;
781 idev = flow_intra_dev_kex_cfg();
785 /* Is kex_cfg read by any another driver? */
786 if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
787 /* Call mailbox to get key & data size */
788 (void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
789 otx2_mbox_msg_send(mbox, 0);
790 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
792 otx2_err("Failed to fetch NPC keyx config");
795 memcpy(&idev->kex_cfg, kex_rsp,
796 sizeof(struct npc_get_kex_cfg_rsp));
799 otx2_mbox_memcpy(mkex_pfl_name,
800 idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);
802 strlcpy((char *)dev->mkex_pfl_name,
803 mkex_pfl_name, sizeof(dev->mkex_pfl_name));
805 flow_process_mkex_cfg(npc, &idev->kex_cfg);
/*
 * One-time NPC flow setup for a port: fetch the KEX config, size the
 * MCAM (entries halve per extra key word), then allocate four bitmaps
 * per priority (free/free_rev/live/live_rev), the per-priority entry
 * info and flow lists, and the RSS group bitmap with group 0 reserved.
 * On any failure, previously allocated arrays are freed (goto-cleanup).
 * NOTE(review): truncated extract — return type, rc/idx/bmap_sz
 * declarations, goto labels, 'mem' advancement between bitmap inits,
 * several error branches and the final return are not visible here.
 */
812 otx2_flow_init(struct otx2_eth_dev *hw)
814 uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
815 struct otx2_npc_flow_info *npc = &hw->npc_flow;
819 rc = flow_fetch_kex_cfg(hw);
821 otx2_err("Failed to fetch NPC keyx config from idev");
825 rte_atomic32_init(&npc->mark_actions);
/* Wider keys consume more MCAM banks, shrinking the entry count. */
827 npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX];
828 /* Free, free_rev, live and live_rev entries */
829 bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
830 mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
831 RTE_CACHE_LINE_SIZE);
833 otx2_err("Bmap alloc failed");
838 npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
839 * sizeof(struct otx2_mcam_ents_info),
841 if (npc->flow_entry_info == NULL) {
842 otx2_err("flow_entry_info alloc failed");
847 npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
848 * sizeof(struct rte_bitmap *),
850 if (npc->free_entries == NULL) {
851 otx2_err("free_entries alloc failed");
856 npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
857 * sizeof(struct rte_bitmap *),
859 if (npc->free_entries_rev == NULL) {
860 otx2_err("free_entries_rev alloc failed");
865 npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
866 * sizeof(struct rte_bitmap *),
868 if (npc->live_entries == NULL) {
869 otx2_err("live_entries alloc failed");
874 npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
875 * sizeof(struct rte_bitmap *),
877 if (npc->live_entries_rev == NULL) {
878 otx2_err("live_entries_rev alloc failed");
883 npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
884 * sizeof(struct otx2_flow_list),
886 if (npc->flow_list == NULL) {
887 otx2_err("flow_list alloc failed");
/* Carve the four bitmaps for each priority out of 'mem'. */
893 for (idx = 0; idx < npc->flow_max_priority; idx++) {
894 TAILQ_INIT(&npc->flow_list[idx]);
896 npc->free_entries[idx] =
897 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
900 npc->free_entries_rev[idx] =
901 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
904 npc->live_entries[idx] =
905 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
908 npc->live_entries_rev[idx] =
909 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
912 npc->flow_entry_info[idx].free_ent = 0;
913 npc->flow_entry_info[idx].live_ent = 0;
914 npc->flow_entry_info[idx].max_id = 0;
915 npc->flow_entry_info[idx].min_id = ~(0);
918 npc->rss_grps = NIX_RSS_GRPS;
920 bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
921 nix_mem = rte_zmalloc(NULL, bmap_sz, RTE_CACHE_LINE_SIZE);
922 if (nix_mem == NULL) {
923 otx2_err("Bmap alloc failed");
928 npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);
930 /* Group 0 will be used for RSS,
931 * 1 -7 will be used for rte_flow RSS action
933 rte_bitmap_set(npc->rss_grp_entries, 0);
/* Error-unwind path: free whatever was allocated before the failure.
 * (NULL guards before rte_free are redundant but harmless.) */
939 rte_free(npc->flow_list);
940 if (npc->live_entries_rev)
941 rte_free(npc->live_entries_rev);
942 if (npc->live_entries)
943 rte_free(npc->live_entries);
944 if (npc->free_entries_rev)
945 rte_free(npc->free_entries_rev);
946 if (npc->free_entries)
947 rte_free(npc->free_entries);
948 if (npc->flow_entry_info)
949 rte_free(npc->flow_entry_info);
/*
 * Teardown counterpart of otx2_flow_init(): free all hardware flow
 * resources, then release the per-priority arrays and flow lists.
 * NOTE(review): truncated extract — return type, rc declaration and
 * the function's ending (final return/brace) fall outside this view;
 * NULL guards before rte_free are redundant but harmless.
 */
956 otx2_flow_fini(struct otx2_eth_dev *hw)
958 struct otx2_npc_flow_info *npc = &hw->npc_flow;
961 rc = otx2_flow_free_all_resources(hw);
963 otx2_err("Error when deleting NPC MCAM entries, counters");
968 rte_free(npc->flow_list);
969 if (npc->live_entries_rev)
970 rte_free(npc->live_entries_rev);
971 if (npc->live_entries)
972 rte_free(npc->live_entries);
973 if (npc->free_entries_rev)
974 rte_free(npc->free_entries_rev);
975 if (npc->free_entries)
976 rte_free(npc->free_entries);
977 if (npc->flow_entry_info)
978 rte_free(npc->flow_entry_info);