1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_ethdev.h"
6 #include "otx2_ethdev_sec.h"
9 enum flow_vtag_cfg_dir { VTAG_TX, VTAG_RX };
12 otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
14 struct otx2_npc_flow_info *npc = &hw->npc_flow;
15 struct otx2_mbox *mbox = hw->mbox;
16 struct otx2_mcam_ents_info *info;
17 struct rte_bitmap *bmap;
18 struct rte_flow *flow;
22 for (idx = 0; idx < npc->flow_max_priority; idx++) {
23 info = &npc->flow_entry_info[idx];
24 entry_count += info->live_ent;
30 /* Free all MCAM entries allocated */
31 rc = otx2_flow_mcam_free_all_entries(mbox);
33 /* Free any MCAM counters and delete flow list */
34 for (idx = 0; idx < npc->flow_max_priority; idx++) {
35 while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
36 if (flow->ctr_id != NPC_COUNTER_NONE)
37 rc |= otx2_flow_mcam_free_counter(mbox,
40 TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
42 bmap = npc->live_entries[flow->priority];
43 rte_bitmap_clear(bmap, flow->mcam_id);
45 info = &npc->flow_entry_info[idx];
54 flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
55 struct otx2_npc_flow_info *flow_info)
57 /* This is the non-LDATA part of the search key */
58 uint64_t key_data[2] = {0ULL, 0ULL};
59 uint64_t key_mask[2] = {0ULL, 0ULL};
60 int intf = pst->flow->nix_intf;
61 int key_len, bit = 0, index;
62 int off, idx, data_off = 0;
63 uint8_t lid, mask, data;
68 /* Skip until Layer A data starts */
69 while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
70 if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
75 /* Each bit represents 1 nibble */
79 for (lid = 0; lid < NPC_MAX_LID; lid++) {
81 off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
82 lt = pst->lt[lid] & 0xf;
83 flags = pst->flags[lid] & 0xff;
86 layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);
89 for (idx = 0; idx <= 2 ; idx++) {
90 if (layer_info & (1 << idx)) {
94 data = ((flags >> 4) & 0xf);
102 key_data[index] |= ((uint64_t)data <<
107 key_mask[index] |= ((uint64_t)mask <<
115 otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
116 key_data[0], key_data[1]);
118 /* Copy this into mcam string */
119 key_len = (pst->npc->keyx_len[intf] + 7) / 8;
120 otx2_npc_dbg("Key_len = %d", key_len);
121 memcpy(pst->flow->mcam_data, key_data, key_len);
122 memcpy(pst->flow->mcam_mask, key_mask, key_len);
124 otx2_npc_dbg("Final flow data");
125 for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
126 otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
127 idx, pst->flow->mcam_data[idx],
128 idx, pst->flow->mcam_mask[idx]);
132 * Now we have mcam data and mask formatted as
133 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
134 * A hole is present if key_len is an odd number of nibbles.
135 * The mcam data must be split into 64-bit + 48-bit segments
136 * for each bank (W0, W1).
139 return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
143 flow_parse_attr(struct rte_eth_dev *eth_dev,
144 const struct rte_flow_attr *attr,
145 struct rte_flow_error *error,
146 struct rte_flow *flow)
148 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
149 const char *errmsg = NULL;
152 errmsg = "Attribute can't be empty";
153 else if (attr->group)
154 errmsg = "Groups are not supported";
155 else if (attr->priority >= dev->npc_flow.flow_max_priority)
156 errmsg = "Priority should be with in specified range";
157 else if ((!attr->egress && !attr->ingress) ||
158 (attr->egress && attr->ingress))
159 errmsg = "Exactly one of ingress or egress must be set";
161 if (errmsg != NULL) {
162 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
168 flow->nix_intf = OTX2_INTF_RX;
170 flow->nix_intf = OTX2_INTF_TX;
172 flow->priority = attr->priority;
177 flow_get_free_rss_grp(struct rte_bitmap *bmap,
178 uint32_t size, uint32_t *pos)
180 for (*pos = 0; *pos < size; ++*pos) {
181 if (!rte_bitmap_get(bmap, *pos))
185 return *pos < size ? 0 : -1;
189 flow_configure_rss_action(struct otx2_eth_dev *dev,
190 const struct rte_flow_action_rss *rss,
191 uint8_t *alg_idx, uint32_t *rss_grp,
194 struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
195 uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
196 uint32_t flowkey_cfg, grp_aval, i;
197 uint16_t *ind_tbl = NULL;
198 uint8_t flowkey_algx;
201 rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
202 flow_info->rss_grps, &grp_aval);
203 /* RSS group 0 is reserved for the default RSS and not usable for the flow RSS action */
204 if (rc < 0 || grp_aval == 0)
209 otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
212 /* If the queue count passed in the RSS action is less than the
213 * HW-configured RETA size, replicate the RSS action's queue list
214 * across the HW RETA table.
216 if (dev->rss_info.rss_size > rss->queue_num) {
219 for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
220 memcpy(reta + i * rss->queue_num, rss->queue,
221 sizeof(uint16_t) * rss->queue_num);
223 i = dev->rss_info.rss_size % rss->queue_num;
225 memcpy(&reta[dev->rss_info.rss_size - i],
226 rss->queue, i * sizeof(uint16_t));
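/* Illustrative example: with rss_size = 64 and queue_num = 6, the
 * 6-entry queue list is copied 64 / 6 = 10 times (60 slots) and the
 * remaining 64 % 6 = 4 slots are filled from the start of the list.
 */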
228 ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
231 rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
233 otx2_err("Failed to init rss table rc = %d", rc);
237 flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);
239 rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
240 *rss_grp, mcam_index);
242 otx2_err("Failed to set rss hash function rc = %d", rc);
246 *alg_idx = flowkey_algx;
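/* Reserve the chosen group so the next flow's RSS action is
 * assigned a different one (see flow_get_free_rss_grp()).
 */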
248 rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);
255 flow_program_rss_action(struct rte_eth_dev *eth_dev,
256 const struct rte_flow_action actions[],
257 struct rte_flow *flow)
259 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
260 const struct rte_flow_action_rss *rss;
265 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
266 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
267 rss = (const struct rte_flow_action_rss *)actions->conf;
269 rc = flow_configure_rss_action(dev,
270 rss, &alg_idx, &rss_grp,
275 flow->npc_action &= (~(0xfULL));
276 flow->npc_action |= NIX_RX_ACTIONOP_RSS;
278 ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
279 NIX_RSS_ACT_ALG_OFFSET) |
280 ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
281 NIX_RSS_ACT_GRP_OFFSET);
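/* Resulting Rx action word: NIX_RX_ACTIONOP_RSS in the low 4 bits,
 * the flow-key algorithm index at NIX_RSS_ACT_ALG_OFFSET and the RSS
 * group at NIX_RSS_ACT_GRP_OFFSET; flow_free_rss_action() decodes the
 * group from the same offsets.
 */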
288 flow_free_rss_action(struct rte_eth_dev *eth_dev,
289 struct rte_flow *flow)
291 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
292 struct otx2_npc_flow_info *npc = &dev->npc_flow;
295 if ((flow->npc_action & 0xFULL) == NIX_RX_ACTIONOP_RSS) {
296 rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
297 NIX_RSS_ACT_GRP_MASK;
298 if (rss_grp == 0 || rss_grp >= npc->rss_grps)
301 rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
308 flow_update_sec_tt(struct rte_eth_dev *eth_dev,
309 const struct rte_flow_action actions[])
313 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
314 if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
315 rc = otx2_eth_sec_update_tag_type(eth_dev);
324 flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
326 otx2_npc_dbg("Meta Item");
331 * Parse function of each layer:
332 * - Consume one or more patterns that are relevant.
333 * - Update parse_state
334 * - Set parse_state.pattern = last item consumed
335 * - Set appropriate error code/message when returning error.
337 typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);
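/* flow_parse_pattern() below walks an array of these handlers in
 * order, invoking one stage per layer until the pattern list ends or
 * the stages are exhausted.
 */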
340 flow_parse_pattern(struct rte_eth_dev *dev,
341 const struct rte_flow_item pattern[],
342 struct rte_flow_error *error,
343 struct rte_flow *flow,
344 struct otx2_parse_state *pst)
346 flow_parse_stage_func_t parse_stage_funcs[] = {
347 flow_parse_meta_items,
348 otx2_flow_parse_higig2_hdr,
358 struct otx2_eth_dev *hw = dev->data->dev_private;
363 if (pattern == NULL) {
364 rte_flow_error_set(error, EINVAL,
365 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
370 memset(pst, 0, sizeof(*pst));
371 pst->npc = &hw->npc_flow;
375 /* Use integral byte offset */
376 key_offset = pst->npc->keyx_len[flow->nix_intf];
377 key_offset = (key_offset + 7) / 8;
379 /* Location where LDATA would begin */
380 pst->mcam_data = (uint8_t *)flow->mcam_data;
381 pst->mcam_mask = (uint8_t *)flow->mcam_mask;
383 while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
384 layer < RTE_DIM(parse_stage_funcs)) {
385 otx2_npc_dbg("Pattern type = %d", pattern->type);
387 /* Skip place-holders */
388 pattern = otx2_flow_skip_void_and_any_items(pattern);
390 pst->pattern = pattern;
391 otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
392 rc = parse_stage_funcs[layer](pst);
399 * Parse stage function sets pst->pattern to
400 * 1 past the last item it consumed.
402 pattern = pst->pattern;
408 /* Skip trailing place-holders */
409 pattern = otx2_flow_skip_void_and_any_items(pattern);
411 /* Are there more items than what we can handle? */
412 if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
413 rte_flow_error_set(error, ENOTSUP,
414 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
415 "unsupported item in the sequence");
423 flow_parse_rule(struct rte_eth_dev *dev,
424 const struct rte_flow_attr *attr,
425 const struct rte_flow_item pattern[],
426 const struct rte_flow_action actions[],
427 struct rte_flow_error *error,
428 struct rte_flow *flow,
429 struct otx2_parse_state *pst)
433 /* Check attributes */
434 err = flow_parse_attr(dev, attr, error, flow);
439 err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
444 err = flow_parse_pattern(dev, pattern, error, flow, pst);
448 /* Check for overlaps? */
453 otx2_flow_validate(struct rte_eth_dev *dev,
454 const struct rte_flow_attr *attr,
455 const struct rte_flow_item pattern[],
456 const struct rte_flow_action actions[],
457 struct rte_flow_error *error)
459 struct otx2_parse_state parse_state;
460 struct rte_flow flow;
462 memset(&flow, 0, sizeof(flow));
463 return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
468 flow_program_vtag_action(struct rte_eth_dev *eth_dev,
469 const struct rte_flow_action actions[],
470 struct rte_flow *flow)
472 uint16_t vlan_id = 0, vlan_ethtype = RTE_ETHER_TYPE_VLAN;
473 struct otx2_eth_dev *dev = eth_dev->data->dev_private;
476 struct nix_tx_vtag_action_s act;
478 struct otx2_mbox *mbox = dev->mbox;
479 struct nix_vtag_config *vtag_cfg;
480 struct nix_vtag_config_rsp *rsp;
481 bool vlan_insert_action = false;
482 uint64_t rx_vtag_action = 0;
483 uint8_t vlan_pcp = 0;
486 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
487 if (actions->type == RTE_FLOW_ACTION_TYPE_OF_POP_VLAN) {
488 if (dev->npc_flow.vtag_actions == 1) {
490 otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
491 vtag_cfg->cfg_type = VTAG_RX;
492 vtag_cfg->rx.strip_vtag = 1;
494 vtag_cfg->rx.capture_vtag = 1;
495 vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
496 vtag_cfg->rx.vtag_type = 0;
498 rc = otx2_mbox_process(mbox);
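/* Build the Rx VTAG0 action per the shifts below: the VTAG-valid
 * flag at bit 15, the VLAN layer id (NPC_LID_LB) at bit 8 and the
 * VTAG0 relative pointer in the low bits.
 */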
503 rx_vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
504 rx_vtag_action |= (NPC_LID_LB << 8);
505 rx_vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR;
506 flow->vtag_action = rx_vtag_action;
507 } else if (actions->type ==
508 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
509 const struct rte_flow_action_of_set_vlan_vid *vtag =
510 (const struct rte_flow_action_of_set_vlan_vid *)
512 vlan_id = rte_be_to_cpu_16(vtag->vlan_vid);
513 if (vlan_id > 0xfff) {
514 otx2_err("Invalid vlan_id for set vlan action");
517 vlan_insert_action = true;
518 } else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) {
519 const struct rte_flow_action_of_push_vlan *ethtype =
520 (const struct rte_flow_action_of_push_vlan *)
522 vlan_ethtype = rte_be_to_cpu_16(ethtype->ethertype);
523 if (vlan_ethtype != RTE_ETHER_TYPE_VLAN &&
524 vlan_ethtype != RTE_ETHER_TYPE_QINQ) {
525 otx2_err("Invalid ethtype specified for push"
529 vlan_insert_action = true;
530 } else if (actions->type ==
531 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
532 const struct rte_flow_action_of_set_vlan_pcp *pcp =
533 (const struct rte_flow_action_of_set_vlan_pcp *)
535 vlan_pcp = pcp->vlan_pcp;
536 if (vlan_pcp > 0x7) {
537 otx2_err("Invalid PCP value for pcp action");
540 vlan_insert_action = true;
544 if (vlan_insert_action) {
545 vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
546 vtag_cfg->cfg_type = VTAG_TX;
547 vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
549 ((vlan_ethtype << 16) | (vlan_pcp << 13) | vlan_id);
550 vtag_cfg->tx.cfg_vtag0 = 1;
551 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
555 tx_vtag_action.reg = 0;
556 tx_vtag_action.act.vtag0_def = rsp->vtag0_idx;
557 if (rsp->vtag0_idx < 0) {
558 otx2_err("Failed to config TX VTAG action");
561 tx_vtag_action.act.vtag0_lid = NPC_LID_LA;
562 tx_vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
563 tx_vtag_action.act.vtag0_relptr =
564 NIX_TX_VTAGACTION_VTAG0_RELPTR;
565 flow->vtag_action = tx_vtag_action.reg;
570 static struct rte_flow *
571 otx2_flow_create(struct rte_eth_dev *dev,
572 const struct rte_flow_attr *attr,
573 const struct rte_flow_item pattern[],
574 const struct rte_flow_action actions[],
575 struct rte_flow_error *error)
577 struct otx2_eth_dev *hw = dev->data->dev_private;
578 struct otx2_parse_state parse_state;
579 struct otx2_mbox *mbox = hw->mbox;
580 struct rte_flow *flow, *flow_iter;
581 struct otx2_flow_list *list;
584 flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
586 rte_flow_error_set(error, ENOMEM,
587 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
589 "Memory allocation failed");
592 memset(flow, 0, sizeof(*flow));
594 rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
599 rc = flow_program_vtag_action(dev, actions, flow);
601 rte_flow_error_set(error, EIO,
602 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
604 "Failed to program vlan action");
608 parse_state.is_vf = otx2_dev_is_vf(hw);
610 rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
612 rte_flow_error_set(error, EIO,
613 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
615 "Failed to insert filter");
619 rc = flow_program_rss_action(dev, actions, flow);
621 rte_flow_error_set(error, EIO,
622 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
624 "Failed to program rss action");
628 if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
629 rc = flow_update_sec_tt(dev, actions);
631 rte_flow_error_set(error, EIO,
632 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
634 "Failed to update tt with sec act");
639 list = &hw->npc_flow.flow_list[flow->priority];
640 /* List in ascending order of mcam entries */
641 TAILQ_FOREACH(flow_iter, list, next) {
642 if (flow_iter->mcam_id > flow->mcam_id) {
643 TAILQ_INSERT_BEFORE(flow_iter, flow, next);
648 TAILQ_INSERT_TAIL(list, flow, next);
657 otx2_flow_destroy(struct rte_eth_dev *dev,
658 struct rte_flow *flow,
659 struct rte_flow_error *error)
661 struct otx2_eth_dev *hw = dev->data->dev_private;
662 struct otx2_npc_flow_info *npc = &hw->npc_flow;
663 struct otx2_mbox *mbox = hw->mbox;
664 struct rte_bitmap *bmap;
668 match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
669 NIX_RX_ACT_MATCH_MASK;
671 if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
672 if (rte_atomic32_read(&npc->mark_actions) == 0)
675 /* Clear mark offload flag if there are no more mark actions */
676 if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
677 hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
678 otx2_eth_set_rx_function(dev);
682 if (flow->nix_intf == OTX2_INTF_RX && flow->vtag_action) {
684 if (npc->vtag_actions == 0) {
685 if (hw->vlan_info.strip_on == 0) {
686 hw->rx_offload_flags &=
687 ~NIX_RX_OFFLOAD_VLAN_STRIP_F;
688 otx2_eth_set_rx_function(dev);
693 rc = flow_free_rss_action(dev, flow);
695 rte_flow_error_set(error, EIO,
696 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
698 "Failed to free rss action");
701 rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
703 rte_flow_error_set(error, EIO,
704 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
706 "Failed to destroy filter");
709 TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);
711 bmap = npc->live_entries[flow->priority];
712 rte_bitmap_clear(bmap, flow->mcam_id);
719 otx2_flow_flush(struct rte_eth_dev *dev,
720 struct rte_flow_error *error)
722 struct otx2_eth_dev *hw = dev->data->dev_private;
725 rc = otx2_flow_free_all_resources(hw);
727 otx2_err("Error when deleting NPC MCAM entries "
729 rte_flow_error_set(error, EIO,
730 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
732 "Failed to flush filter");
740 otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
741 int enable __rte_unused,
742 struct rte_flow_error *error)
745 * If isolation were supported, the default mcam entry for this
746 * port would need to be un-installed.
749 rte_flow_error_set(error, ENOTSUP,
750 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
752 "Flow isolation not supported");
758 otx2_flow_query(struct rte_eth_dev *dev,
759 struct rte_flow *flow,
760 const struct rte_flow_action *action,
762 struct rte_flow_error *error)
764 struct otx2_eth_dev *hw = dev->data->dev_private;
765 struct rte_flow_query_count *query = data;
766 struct otx2_mbox *mbox = hw->mbox;
767 const char *errmsg = NULL;
768 int errcode = ENOTSUP;
771 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
772 errmsg = "Only COUNT is supported in query";
776 if (flow->ctr_id == NPC_COUNTER_NONE) {
777 errmsg = "Counter is not available";
781 rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
784 errmsg = "Error reading flow counter";
788 query->bytes_set = 0;
791 rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
794 errmsg = "Error clearing flow counter";
801 rte_flow_error_set(error, errcode,
802 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
809 otx2_flow_dev_dump(struct rte_eth_dev *dev,
810 struct rte_flow *flow, FILE *file,
811 struct rte_flow_error *error)
813 struct otx2_eth_dev *hw = dev->data->dev_private;
814 struct otx2_flow_list *list;
815 struct rte_flow *flow_iter;
816 uint32_t max_prio, i;
819 rte_flow_error_set(error, EINVAL,
820 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
826 rte_flow_error_set(error, EINVAL,
827 RTE_FLOW_ERROR_TYPE_HANDLE,
833 max_prio = hw->npc_flow.flow_max_priority;
835 for (i = 0; i < max_prio; i++) {
836 list = &hw->npc_flow.flow_list[i];
838 /* List in ascending order of mcam entries */
839 TAILQ_FOREACH(flow_iter, list, next) {
840 otx2_flow_dump(file, hw, flow_iter);
847 const struct rte_flow_ops otx2_flow_ops = {
848 .validate = otx2_flow_validate,
849 .create = otx2_flow_create,
850 .destroy = otx2_flow_destroy,
851 .flush = otx2_flow_flush,
852 .query = otx2_flow_query,
853 .isolate = otx2_flow_isolate,
854 .dev_dump = otx2_flow_dev_dump,
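/* Each bit set in the nibble-enable mask contributes one nibble
 * (4 bits) to the MCAM key; the loop below clears one set bit per
 * iteration (Kernighan's population count).
 */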
858 flow_supp_key_len(uint32_t supp_mask)
863 supp_mask &= (supp_mask - 1);
865 return nib_count * 4;
868 /* Refer HRM register:
869 * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
871 * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
873 #define BYTESM1_SHIFT 16
874 #define HDR_OFF_SHIFT 8
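/* Per the extracts below: BYTESM1 (bytes minus one) at bit 16, the
 * header offset at bit 8, the key offset in the low 6 bits, the
 * enable bit at 7 and the flags-enable bit at 6.
 */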
876 flow_update_kex_info(struct npc_xtract_info *xtract_info,
879 xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
880 xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
881 xtract_info->key_off = val & 0x3f;
882 xtract_info->enable = ((val >> 7) & 0x1);
883 xtract_info->flags_enable = ((val >> 6) & 0x1);
887 flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
888 struct npc_get_kex_cfg_rsp *kex_rsp)
890 volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
892 struct npc_xtract_info *x_info = NULL;
893 int lid, lt, ld, fl, ix;
898 npc->keyx_supp_nmask[NPC_MCAM_RX] =
899 kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
900 npc->keyx_supp_nmask[NPC_MCAM_TX] =
901 kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
902 npc->keyx_len[NPC_MCAM_RX] =
903 flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
904 npc->keyx_len[NPC_MCAM_TX] =
905 flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);
907 keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
908 npc->keyw[NPC_MCAM_RX] = keyw;
909 keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
910 npc->keyw[NPC_MCAM_TX] = keyw;
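/* keyw encodes the MCAM key width; wider keys consume more of the
 * MCAM, so otx2_flow_init() shifts the total entry count right by
 * keyw to get the usable entry count.
 */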
912 /* Update KEX_LD_FLAG */
913 for (ix = 0; ix < NPC_MAX_INTF; ix++) {
914 for (ld = 0; ld < NPC_MAX_LD; ld++) {
915 for (fl = 0; fl < NPC_MAX_LFL; fl++) {
917 &npc->prx_fxcfg[ix][ld][fl].xtract[0];
918 val = kex_rsp->intf_ld_flags[ix][ld][fl];
919 flow_update_kex_info(x_info, val);
924 /* Update LID, LT and LDATA cfg */
926 q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
927 (&kex_rsp->intf_lid_lt_ld);
928 for (ix = 0; ix < NPC_MAX_INTF; ix++) {
929 for (lid = 0; lid < NPC_MAX_LID; lid++) {
930 for (lt = 0; lt < NPC_MAX_LT; lt++) {
931 for (ld = 0; ld < NPC_MAX_LD; ld++) {
932 x_info = &(*p)[ix][lid][lt].xtract[ld];
933 val = (*q)[ix][lid][lt][ld];
934 flow_update_kex_info(x_info, val);
939 /* Update LDATA Flags cfg */
940 npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
941 npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
944 static struct otx2_idev_kex_cfg *
945 flow_intra_dev_kex_cfg(void)
947 static const char name[] = "octeontx2_intra_device_kex_conf";
948 struct otx2_idev_kex_cfg *idev;
949 const struct rte_memzone *mz;
951 mz = rte_memzone_lookup(name);
955 /* First request; reserve the shared memzone */
956 mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
957 SOCKET_ID_ANY, 0, OTX2_ALIGN);
960 rte_atomic16_set(&idev->kex_refcnt, 0);
967 flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
969 struct otx2_npc_flow_info *npc = &dev->npc_flow;
970 struct npc_get_kex_cfg_rsp *kex_rsp;
971 struct otx2_mbox *mbox = dev->mbox;
972 char mkex_pfl_name[MKEX_NAME_LEN];
973 struct otx2_idev_kex_cfg *idev;
976 idev = flow_intra_dev_kex_cfg();
980 /* Is kex_cfg already read by any other driver? */
981 if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
982 /* Call mailbox to get key & data size */
983 (void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
984 otx2_mbox_msg_send(mbox, 0);
985 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
987 otx2_err("Failed to fetch NPC keyx config");
990 memcpy(&idev->kex_cfg, kex_rsp,
991 sizeof(struct npc_get_kex_cfg_rsp));
994 otx2_mbox_memcpy(mkex_pfl_name,
995 idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);
997 strlcpy((char *)dev->mkex_pfl_name,
998 mkex_pfl_name, sizeof(dev->mkex_pfl_name));
1000 flow_process_mkex_cfg(npc, &idev->kex_cfg);
1006 #define OTX2_MCAM_TOT_ENTRIES_96XX (4096)
1007 #define OTX2_MCAM_TOT_ENTRIES_98XX (16384)
1009 static int otx2_mcam_tot_entries(struct otx2_eth_dev *dev)
1011 if (otx2_dev_is_98xx(dev))
1012 return OTX2_MCAM_TOT_ENTRIES_98XX;
1014 return OTX2_MCAM_TOT_ENTRIES_96XX;
1018 otx2_flow_init(struct otx2_eth_dev *hw)
1020 uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
1021 struct otx2_npc_flow_info *npc = &hw->npc_flow;
1022 uint32_t bmap_sz, tot_mcam_entries = 0;
1025 rc = flow_fetch_kex_cfg(hw);
1027 otx2_err("Failed to fetch NPC keyx config from idev");
1031 rte_atomic32_init(&npc->mark_actions);
1032 npc->vtag_actions = 0;
1034 tot_mcam_entries = otx2_mcam_tot_entries(hw);
1035 npc->mcam_entries = tot_mcam_entries >> npc->keyw[NPC_MCAM_RX];
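/* Illustrative: with 4096 total entries and keyw = 1, the line above
 * yields 4096 >> 1 = 2048 usable MCAM entries.
 */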
1036 /* Free, free_rev, live and live_rev entries */
1037 bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
1038 mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
1039 RTE_CACHE_LINE_SIZE);
1041 otx2_err("Bmap alloc failed");
1046 npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
1047 * sizeof(struct otx2_mcam_ents_info),
1049 if (npc->flow_entry_info == NULL) {
1050 otx2_err("flow_entry_info alloc failed");
1055 npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
1056 * sizeof(struct rte_bitmap *),
1058 if (npc->free_entries == NULL) {
1059 otx2_err("free_entries alloc failed");
1064 npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
1065 * sizeof(struct rte_bitmap *),
1067 if (npc->free_entries_rev == NULL) {
1068 otx2_err("free_entries_rev alloc failed");
1073 npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
1074 * sizeof(struct rte_bitmap *),
1076 if (npc->live_entries == NULL) {
1077 otx2_err("live_entries alloc failed");
1082 npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
1083 * sizeof(struct rte_bitmap *),
1085 if (npc->live_entries_rev == NULL) {
1086 otx2_err("live_entries_rev alloc failed");
1091 npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
1092 * sizeof(struct otx2_flow_list),
1094 if (npc->flow_list == NULL) {
1095 otx2_err("flow_list alloc failed");
1101 for (idx = 0; idx < npc->flow_max_priority; idx++) {
1102 TAILQ_INIT(&npc->flow_list[idx]);
1104 npc->free_entries[idx] =
1105 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1108 npc->free_entries_rev[idx] =
1109 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1112 npc->live_entries[idx] =
1113 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1116 npc->live_entries_rev[idx] =
1117 rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
1120 npc->flow_entry_info[idx].free_ent = 0;
1121 npc->flow_entry_info[idx].live_ent = 0;
1122 npc->flow_entry_info[idx].max_id = 0;
1123 npc->flow_entry_info[idx].min_id = ~(0);
1126 npc->rss_grps = NIX_RSS_GRPS;
1128 bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
1129 nix_mem = rte_zmalloc(NULL, bmap_sz, RTE_CACHE_LINE_SIZE);
1130 if (nix_mem == NULL) {
1131 otx2_err("Bmap alloc failed");
1136 npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);
1138 /* Group 0 will be used for the default RSS,
1139 * groups 1-7 will be used for the rte_flow RSS action
1141 rte_bitmap_set(npc->rss_grp_entries, 0);
1147 rte_free(npc->flow_list);
1148 if (npc->live_entries_rev)
1149 rte_free(npc->live_entries_rev);
1150 if (npc->live_entries)
1151 rte_free(npc->live_entries);
1152 if (npc->free_entries_rev)
1153 rte_free(npc->free_entries_rev);
1154 if (npc->free_entries)
1155 rte_free(npc->free_entries);
1156 if (npc->flow_entry_info)
1157 rte_free(npc->flow_entry_info);
1164 otx2_flow_fini(struct otx2_eth_dev *hw)
1166 struct otx2_npc_flow_info *npc = &hw->npc_flow;
1169 rc = otx2_flow_free_all_resources(hw);
1171 otx2_err("Error when deleting NPC MCAM entries, counters");
1176 rte_free(npc->flow_list);
1177 if (npc->live_entries_rev)
1178 rte_free(npc->live_entries_rev);
1179 if (npc->live_entries)
1180 rte_free(npc->live_entries);
1181 if (npc->free_entries_rev)
1182 rte_free(npc->free_entries_rev);
1183 if (npc->free_entries)
1184 rte_free(npc->free_entries);
1185 if (npc->flow_entry_info)
1186 rte_free(npc->flow_entry_info);