1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/*
 * Copy 'len' bytes from 'data' into 'ptr' in reversed byte order, so the
 * destination ends up MSB-first for MCAM programming (see the comment in
 * npc_update_extraction_data()).
 * NOTE(review): return type, braces and the 'idx' declaration are not
 * visible in this extracted view — confirm against the full source.
 */
8 npc_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
12 for (idx = 0; idx < len; idx++)
13 ptr[idx] = data[len - 1 - idx];
/*
 * Validate a copy of 'len' bytes against the available 'size'.
 * Body not visible in this extracted view; callers (see
 * npc_update_extraction_data()) treat a negative result as
 * NPC_ERR_INVALID_SIZE, so presumably it returns the permitted copy
 * length or a negative error — TODO confirm.
 */
17 npc_check_copysz(size_t size, size_t len)
/*
 * Test whether all 'len' bytes of 'mem' are zero (used by
 * npc_mask_is_supported() to accept only a match-any mask when no
 * hardware mask exists).
 * NOTE(review): the loop body and return statements are missing from
 * this extracted view.
 */
25 npc_mem_is_zero(const void *mem, int len)
30 for (i = 0; i < len; i++) {
/*
 * Mark, in the caller-provided hw_mask, the bytes of this item's header
 * that one hardware extractor (xinfo) can match.
 * NOTE(review): interleaved lines (return type, braces, early-return
 * bodies, the loop body writing hw_mask[j]) are missing from this
 * extracted view — confirm against the full source.
 */
38 npc_set_hw_mask(struct npc_parse_item_info *info, struct npc_xtract_info *xinfo,
/* Extractor disabled in hardware: contributes nothing. */
44 if (xinfo->enable == 0)
/* Extract window starts before this item's header: skip. */
47 if (xinfo->hdr_off < info->hw_hdr_len)
/* End of the extract window relative to this item's header start. */
50 max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;
/* Window extends past the item; presumably clamped to info->len on the
 * (missing) following line — TODO confirm.
 */
52 if (max_off > info->len)
55 offset = xinfo->hdr_off - info->hw_hdr_len;
56 for (j = offset; j < max_off; j++)
/*
 * Build info->hw_mask: the set of header bytes hardware can match for
 * this layer id (lid) / layer type (lt), by accumulating every
 * layer-data (LD) extractor and, for flag-enabled LD slots, every
 * layer-flag (LFL) extractor.
 * NOTE(review): declarations of 'intf', 'i', 'j', 'lf_cfg' and the use
 * of lf_cfg are not visible in this extracted view.
 */
61 npc_get_hw_supp_mask(struct npc_parse_state *pst,
62 struct npc_parse_item_info *info, int lid, int lt)
64 struct npc_xtract_info *xinfo, *lfinfo;
65 char *hw_mask = info->hw_mask;
71 xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
/* Start from "nothing supported" and add each extractor's coverage. */
72 memset(hw_mask, 0, info->len);
74 for (i = 0; i < NPC_MAX_LD; i++)
75 npc_set_hw_mask(info, &xinfo[i], hw_mask);
/* Flag-based extraction: only LD slots with flags_enable contribute. */
77 for (i = 0; i < NPC_MAX_LD; i++) {
78 if (xinfo[i].flags_enable == 0)
81 lf_cfg = pst->npc->prx_lfcfg[i].i;
83 for (j = 0; j < NPC_MAX_LFL; j++) {
84 lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
85 npc_set_hw_mask(info, &lfinfo[0], hw_mask);
/*
 * Check that the user-requested 'mask' is a subset of the hardware
 * supported 'hw_mask', i.e. (mask | hw_mask) == hw_mask byte-wise.
 * Returns 0 (false) on the first unsupported byte.
 */
92 npc_mask_is_supported(const char *mask, const char *hw_mask, int len)
95 * If no hw_mask, assume nothing is supported.
/* Without a hw_mask only an all-zero (match-any) mask is acceptable. */
99 return npc_mem_is_zero(mask, len);
/* NOTE(review): the loop header (upstream uses "while (len--)") is
 * missing from this extracted view; without it, mask[len] below would
 * read one past the end — confirm against the full source.
 */
102 if ((mask[len] | hw_mask[len]) != hw_mask[len])
103 return 0; /* False */
/*
 * Validate one rte_flow-style pattern item and populate info->spec and
 * info->mask for later MCAM programming. Rejects ranges (item->last)
 * and any mask wider than what the hardware supports (info->hw_mask).
 * Returns a negative NPC_ERR_* code on failure.
 * NOTE(review): several interleaved lines (NULL check on 'item', some
 * braces/else branches, the final return) are missing from this
 * extracted view.
 */
109 npc_parse_item_basic(const struct roc_npc_item_info *item,
110 struct npc_parse_item_info *info)
112 /* Item must not be NULL */
114 return NPC_ERR_PARAM;
116 /* Don't support ranges */
117 if (item->last != NULL)
118 return NPC_ERR_INVALID_RANGE;
120 /* If spec is NULL, both mask and last must be NULL, this
121 * makes it match ANY value (eq to mask = 0).
122 * Setting either mask or last without spec is an error
124 if (item->spec == NULL) {
125 if (item->last == NULL && item->mask == NULL) {
129 return NPC_ERR_INVALID_SPEC;
132 /* We have valid spec */
/* RAW items carry their pattern elsewhere; don't take spec directly. */
133 if (item->type != ROC_NPC_ITEM_TYPE_RAW)
134 info->spec = item->spec;
136 /* If mask is not set, use default mask, err if default mask is
139 if (item->mask == NULL) {
140 if (info->def_mask == NULL)
141 return NPC_ERR_PARAM;
142 info->mask = info->def_mask;
144 if (item->type != ROC_NPC_ITEM_TYPE_RAW)
145 info->mask = item->mask;
148 /* mask specified must be subset of hw supported mask
149 * mask | hw_mask == hw_mask
151 if (!npc_mask_is_supported(info->mask, info->hw_mask, info->len))
152 return NPC_ERR_INVALID_MASK;
/*
 * Program one extractor's worth of the item's spec/mask into the parse
 * state's MCAM buffers (pst->mcam_data / pst->mcam_mask) at the
 * extractor's key offset, byte-reversed for hardware layout.
 * NOTE(review): lines assigning 'x', 'len', some early returns and
 * intermediate checks are missing from this extracted view.
 */
158 npc_update_extraction_data(struct npc_parse_state *pst,
159 struct npc_parse_item_info *info,
160 struct npc_xtract_info *xinfo)
162 uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
163 uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
164 struct npc_xtract_info *x;
/* Extract longer than the staging buffers: refuse. */
169 if (x->len > NPC_MAX_EXTRACT_DATA_LEN)
170 return NPC_ERR_INVALID_SIZE;
173 hdr_off = x->hdr_off;
/* Extract begins before this item's header region. */
175 if (hdr_off < info->hw_hdr_len)
/* Make hdr_off relative to the start of this item's header. */
181 hdr_off -= info->hw_hdr_len;
/* Extract window lies entirely beyond this item. */
183 if (hdr_off >= info->len)
/* Trim the copy to the bytes this item actually provides. */
186 if (hdr_off + len > info->len)
187 len = info->len - hdr_off;
/* Also bound the copy by the space left in the MCAM key. */
189 len = npc_check_copysz((ROC_NPC_MAX_MCAM_WIDTH_DWORDS * 8) - x->key_off,
192 return NPC_ERR_INVALID_SIZE;
194 /* Need to reverse complete structure so that dest addr is at
195 * MSB so as to program the MCAM using mcam_data & mcam_mask
198 npc_prep_mcam_ldata(int_info, (const uint8_t *)info->spec + hdr_off,
200 npc_prep_mcam_ldata(int_info_mask,
201 (const uint8_t *)info->mask + hdr_off, x->len);
203 memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
204 memcpy(pst->mcam_data + x->key_off, int_info, len);
/*
 * Record that layer 'lid'/'lt' was matched in the parse state, then
 * push the item's spec/mask into the MCAM buffers via every relevant
 * LD and LFL extractor, and append an entry to the flow's dump data.
 * NOTE(review): declarations of 'i', 'j', 'rc', 'intf', 'lf_cfg',
 * several braces/returns and the dump assignment body are missing from
 * this extracted view.
 */
209 npc_update_parse_state(struct npc_parse_state *pst,
210 struct npc_parse_item_info *info, int lid, int lt,
213 struct npc_lid_lt_xtract_info *xinfo;
214 struct roc_npc_flow_dump_data *dump;
215 struct npc_xtract_info *lfinfo;
/* Mark this layer as present and remember its flags. */
219 pst->layer_mask |= lid;
221 pst->flags[lid] = flags;
223 intf = pst->nix_intf;
224 xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
/* A terminating layer type ends further parsing (handling on missing
 * lines) — TODO confirm.
 */
225 if (xinfo->is_terminating)
/* Match-any item (no spec): nothing to program into the key. */
228 if (info->spec == NULL)
231 for (i = 0; i < NPC_MAX_LD; i++) {
232 rc = npc_update_extraction_data(pst, info, &xinfo->xtract[i]);
/* Repeat for flag-based (LFL) extraction on flag-enabled LD slots. */
237 for (i = 0; i < NPC_MAX_LD; i++) {
238 if (xinfo->xtract[i].flags_enable == 0)
241 lf_cfg = pst->npc->prx_lfcfg[i].i;
243 for (j = 0; j < NPC_MAX_LFL; j++) {
244 lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
245 rc = npc_update_extraction_data(pst, info,
250 if (lfinfo[0].enable)
/* Record this pattern for flow dump/debug output. */
257 dump = &pst->flow->dump_data[pst->flow->num_patterns++];
/*
 * Write a baseline MCAM entry at 'mcam_id' over the mbox: zeroed key
 * words plus the flow's action/vtag_action, enabled immediately.
 * RX entries match the channel in KW0 bits [11:0]; TX entries match the
 * pf_func (taken from npc_action bits [19:4], byte-swapped) in KW0 bits
 * [47:32].
 * NOTE(review): the 'mcam_id' parameter line, NULL check on 'req', and
 * some braces/returns are missing from this extracted view. The
 * "_rsq" spelling presumably mirrors the mbox header's declaration —
 * verify.
 */
265 npc_initialise_mcam_entry(struct npc *npc, struct roc_npc_flow *flow,
268 struct npc_mcam_write_entry_req *req;
269 struct npc_mcam_write_entry_rsq *rsp;
272 req = mbox_alloc_msg_npc_mcam_write_entry(npc->mbox);
277 req->entry = mcam_id;
279 req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
280 req->enable_entry = 1;
281 req->entry_data.action = flow->npc_action;
282 req->entry_data.vtag_action = flow->vtag_action;
/* Clear all key words/masks before setting interface-specific match. */
284 for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
285 req->entry_data.kw[idx] = 0x0;
286 req->entry_data.kw_mask[idx] = 0x0;
289 if (flow->nix_intf == NIX_INTF_RX) {
290 req->entry_data.kw[0] |= (uint64_t)npc->channel;
291 req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
293 uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;
295 pf_func = plt_cpu_to_be_16(pf_func);
296 req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
297 req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
300 rc = mbox_process_msg(npc->mbox, (void *)&rsp);
302 plt_err("npc: mcam initialisation write failed");
/*
 * Move the contents of MCAM entry 'old_ent' into 'new_ent' via the
 * kernel's shift-entry mbox request (shift_count = 1).
 * NOTE(review): NULL check on 'req', 'rc' declaration and the return
 * path are missing from this extracted view.
 */
309 npc_shift_mcam_entry(struct mbox *mbox, uint16_t old_ent, uint16_t new_ent)
311 struct npc_mcam_shift_entry_req *req;
312 struct npc_mcam_shift_entry_rsp *rsp;
315 /* Old entry is disabled & it's contents are moved to new_entry,
316 * new entry is enabled finally.
318 req = mbox_alloc_msg_npc_mcam_shift_entry(mbox);
321 req->curr_entry[0] = old_ent;
322 req->new_entry[0] = new_ent;
323 req->shift_count = 1;
325 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Direction of entry movement for npc_slide_mcam_entries(). */
333 SLIDE_ENTRIES_TO_LOWER_INDEX,
334 SLIDE_ENTRIES_TO_HIGHER_INDEX,
/*
 * Ripple-move all flows of one priority level ('prio') toward the free
 * MCAM slot *free_mcam_id, in the direction given by 'dir', updating
 * each flow's mcam_id as it moves. On return, *free_mcam_id holds the
 * slot vacated at the far end of the list.
 * NOTE(review): the loop construct around the per-entry body, 'rc'
 * declaration and error-return lines are missing from this extracted
 * view.
 */
338 npc_slide_mcam_entries(struct mbox *mbox, struct npc *npc, int prio,
339 uint16_t *free_mcam_id, int dir)
341 uint16_t to_mcam_id = 0, from_mcam_id = 0;
342 struct npc_prio_flow_list_head *list;
343 struct npc_prio_flow_entry *curr = 0;
346 list = &npc->prio_flow_list[prio];
/* Walk from the end nearest the free slot, per direction. */
348 to_mcam_id = *free_mcam_id;
349 if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
350 curr = TAILQ_LAST(list, npc_prio_flow_list_head);
351 else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
352 curr = TAILQ_FIRST(list);
355 from_mcam_id = curr->flow->mcam_id;
/* Only move entries that actually lie on the wrong side of the free
 * slot for the requested direction.
 */
356 if ((dir == SLIDE_ENTRIES_TO_HIGHER_INDEX &&
357 from_mcam_id < to_mcam_id) ||
358 (dir == SLIDE_ENTRIES_TO_LOWER_INDEX &&
359 from_mcam_id > to_mcam_id)) {
360 /* Newly allocated entry and the source entry given to
361 * npc_mcam_shift_entry_req will be in disabled state.
362 * Initialise and enable before moving an entry into
365 rc = npc_initialise_mcam_entry(npc, curr->flow,
369 rc = npc_shift_mcam_entry(mbox, from_mcam_id,
/* The vacated slot becomes the target for the next entry. */
373 curr->flow->mcam_id = to_mcam_id;
374 to_mcam_id = from_mcam_id;
377 if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
378 curr = TAILQ_PREV(curr, npc_prio_flow_list_head, next);
379 else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
380 curr = TAILQ_NEXT(curr, next);
/* Report the slot freed at the end of the ripple. */
383 *free_mcam_id = from_mcam_id;
389 * The mcam_alloc request is first made with NPC_MCAM_LOWER_PRIO with the last
390 * entry in the requested priority level as the reference entry. If it fails,
391 * the alloc request is retried with NPC_MCAM_HIGHER_PRIO with the first entry
392 * in the next lower priority level as the reference entry. After obtaining
393 * the free MCAM from kernel, we check if it is at the right user requested
394 * priority level. If not, the flow rules are moved across MCAM entries till
395 * the user requested priority levels are met.
396 * The MCAM sorting algorithm works as below.
397 * For any given free MCAM obtained from the kernel, there are 3 possibilities.
399 * There are entries belonging to higher user priority level (numerically
400 * lesser) in higher mcam indices. In this case, the entries with higher user
401 priority are slid towards lower indices and a free entry is created in the
404 * Assume free entry = 1610, user requested priority = 2 and
405 * max user priority levels = 5 with below entries in respective priority
407 * 0: 1630, 1635, 1641
408 * 1: 1646, 1650, 1651
409 * 2: 1652, 1655, 1660
410 * 3: 1661, 1662, 1663, 1664
411 * 4: 1665, 1667, 1670
413 Entries (1630, 1635, 1641, 1646, 1650, 1651) have to be slid down towards
415 * Shifting sequence will be as below:
416 * 1610 <- 1630 <- 1635 <- 1641 <- 1646 <- 1650 <- 1651
417 Entry 1651 will be freed for writing the new flow. This entry will now
418 * become the head of priority level 2.
421 * There are entries belonging to lower user priority level (numerically
422 * bigger) in lower mcam indices. In this case, the entries with lower user
423 priority are slid towards higher indices and a free entry is created in the
427 * free entry = 1653, user requested priority = 0
428 * 0: 1630, 1635, 1641
429 * 1: 1646, 1650, 1651
430 * 2: 1652, 1655, 1660
431 * 3: 1661, 1662, 1663, 1664
432 * 4: 1665, 1667, 1670
434 Entries (1646, 1650, 1651, 1652) have to be slid up towards higher
436 * Shifting sequence will be as below:
437 * 1646 -> 1650 -> 1651 -> 1652 -> 1653
438 Entry 1646 will be freed for writing the new flow. This entry will now
439 * become the last element in priority level 0.
442 * Free mcam is at the right place, ie, all higher user priority level
443 * mcams lie in lower indices and all lower user priority level mcams lie in
444 * higher mcam indices.
446 * The priority level lists are scanned first for case (1) and if the
447 * condition is found true, case(2) is skipped because they are mutually
448 * exclusive. For example, consider below state.
449 * 0: 1630, 1635, 1641
450 * 1: 1646, 1650, 1651
451 * 2: 1652, 1655, 1660
452 * 3: 1661, 1662, 1663, 1664
453 * 4: 1665, 1667, 1670
454 * free entry = 1610, user requested priority = 2
456 * Case 1: Here the condition is;
457 * "if (requested_prio > prio_idx && free_mcam < tail->flow->mcam_id ){}"
458 * If this condition is true, it means at some higher priority level than
459 * requested priority level, there are entries at lower indices than the given
460 * free mcam. That is, we have found in levels 0,1 there is an mcam X which is
462 * If, for any free entry and user req prio, the above condition is true, then
463 * the below case(2) condition will always be false since the lists are kept
464 * sorted. The case(2) condition is;
465 * "if (requested_prio < prio_idx && free_mcam > head->flow->mcam_id){}"
466 * There can't be entries at lower indices at priority level higher
467 * than the requested priority level. That is, here, at levels 3 & 4 there
468 * cannot be any entry greater than 1610. Because all entries in 3 & 4 must be
469 * greater than X which was found to be greater than 1610 earlier.
/*
 * Relocate the kernel-allocated free MCAM entry (rsp->entry) so that it
 * sits at the user-requested priority position, sliding existing flows
 * as described in the large comment above. Scans levels
 * 0..flow_max_priority-1 forward for case (1) (higher-priority flows at
 * higher indices -> slide them down), then backward for case (2)
 * (lower-priority flows at lower indices -> slide them up); the two
 * cases are mutually exclusive, hence 'do_reverse_scan'.
 * NOTE(review): error-check/early-return lines after each slide call,
 * loop-increment/decrement lines and the final return are missing from
 * this extracted view.
 */
473 npc_sort_mcams_by_user_prio_level(struct mbox *mbox,
474 struct npc_prio_flow_entry *flow_list_entry,
476 struct npc_mcam_alloc_entry_rsp *rsp)
478 int requested_prio = flow_list_entry->flow->priority;
479 struct npc_prio_flow_entry *head, *tail;
480 struct npc_prio_flow_list_head *list;
481 uint16_t free_mcam = rsp->entry;
482 bool do_reverse_scan = true;
483 int prio_idx = 0, rc = 0;
/* Forward scan: case (1). */
485 while (prio_idx <= npc->flow_max_priority - 1) {
486 list = &npc->prio_flow_list[prio_idx];
487 tail = TAILQ_LAST(list, npc_prio_flow_list_head);
489 /* requested priority is lower than current level
490 * ie, numerically req prio is higher
492 if ((requested_prio > prio_idx) && tail) {
493 /* but there are some mcams in current level
494 * at higher indices, ie, at priority lower
497 if (free_mcam < tail->flow->mcam_id) {
498 rc = npc_slide_mcam_entries(
499 mbox, npc, prio_idx, &free_mcam,
500 SLIDE_ENTRIES_TO_LOWER_INDEX);
/* Case (1) hit: case (2) cannot also apply. */
503 do_reverse_scan = false;
/* Reverse scan: case (2), skipped if case (1) already fired. */
509 prio_idx = npc->flow_max_priority - 1;
510 while (prio_idx && do_reverse_scan) {
511 list = &npc->prio_flow_list[prio_idx];
512 head = TAILQ_FIRST(list);
514 /* requested priority is higher than current level
515 * ie, numerically req prio is lower
517 if (requested_prio < prio_idx && head) {
518 /* but free mcam is higher than lowest priority
519 * mcam in current level
521 if (free_mcam > head->flow->mcam_id) {
522 rc = npc_slide_mcam_entries(
523 mbox, npc, prio_idx, &free_mcam,
524 SLIDE_ENTRIES_TO_HIGHER_INDEX);
/* Hand the (possibly relocated) free entry back to the caller. */
531 rsp->entry = free_mcam;
/*
 * Insert 'entry' into its priority level's flow list, keeping the list
 * sorted by ascending mcam_id: walk past smaller ids, then insert
 * before the first larger one, at the tail if all are smaller, or at
 * the head if the list is empty.
 * NOTE(review): the loop construct and branch lines around the three
 * TAILQ_INSERT_* calls are missing from this extracted view.
 */
536 npc_insert_into_flow_list(struct npc *npc, struct npc_prio_flow_entry *entry)
538 struct npc_prio_flow_list_head *list;
539 struct npc_prio_flow_entry *curr;
541 list = &npc->prio_flow_list[entry->flow->priority];
542 curr = TAILQ_FIRST(list);
546 if (entry->flow->mcam_id > curr->flow->mcam_id)
547 curr = TAILQ_NEXT(curr, next);
552 TAILQ_INSERT_BEFORE(curr, entry, next);
554 TAILQ_INSERT_TAIL(list, entry, next);
556 TAILQ_INSERT_HEAD(list, entry, next);
/*
 * Ask the kernel for one MCAM entry relative to 'ref_entry' with the
 * given priority direction ('prio' is NPC_MCAM_LOWER_PRIO /
 * NPC_MCAM_HIGHER_PRIO / NPC_MCAM_ANY_PRIO) and copy the response into
 * caller-owned 'rsp_local' (the mbox response buffer is transient).
 * NOTE(review): the 'ref_entry' parameter line, entry-count setup, NULL
 * check on 'req', error handling after mbox_process_msg and the return
 * are missing from this extracted view; 'rsp' appears unused in the
 * visible lines — confirm against the full source.
 */
561 npc_allocate_mcam_entry(struct mbox *mbox, int prio,
562 struct npc_mcam_alloc_entry_rsp *rsp_local,
565 struct npc_mcam_alloc_entry_rsp *rsp_cmd;
566 struct npc_mcam_alloc_entry_req *req;
567 struct npc_mcam_alloc_entry_rsp *rsp;
570 req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
575 req->priority = prio;
576 req->ref_entry = ref_entry;
578 rc = mbox_process_msg(mbox, (void *)&rsp_cmd);
585 memcpy(rsp_local, rsp_cmd, sizeof(*rsp));
/*
 * Pick a reference MCAM entry and allocation direction for the kernel
 * alloc request. For NPC_MCAM_LOWER_PRIO, scan from the flow's priority
 * upward in urgency (prio_idx down to 0) for a non-empty level and use
 * its head (lowest mcam_id); for NPC_MCAM_HIGHER_PRIO, scan toward
 * numerically larger levels and use the tail (highest mcam_id). Falls
 * through to NPC_MCAM_ANY_PRIO when no reference exists.
 * NOTE(review): the list-empty checks, returns and loop decrement/
 * increment lines are missing from this extracted view.
 */
591 npc_find_mcam_ref_entry(struct roc_npc_flow *flow, struct npc *npc, int *prio,
592 int *ref_entry, int dir)
594 struct npc_prio_flow_entry *head, *tail;
595 struct npc_prio_flow_list_head *list;
596 int prio_idx = flow->priority;
598 if (dir == NPC_MCAM_LOWER_PRIO) {
599 while (prio_idx >= 0) {
600 list = &npc->prio_flow_list[prio_idx];
601 head = TAILQ_FIRST(list);
603 *prio = NPC_MCAM_LOWER_PRIO;
604 *ref_entry = head->flow->mcam_id;
609 } else if (dir == NPC_MCAM_HIGHER_PRIO) {
610 prio_idx = flow->priority;
611 while (prio_idx <= npc->flow_max_priority - 1) {
612 list = &npc->prio_flow_list[prio_idx];
613 tail = TAILQ_LAST(list, npc_prio_flow_list_head);
615 *prio = NPC_MCAM_HIGHER_PRIO;
616 *ref_entry = tail->flow->mcam_id;
/* No usable reference found: let the kernel pick any entry. */
622 *prio = NPC_MCAM_ANY_PRIO;
/*
 * Allocate a free MCAM entry using a reference entry: first try
 * NPC_MCAM_LOWER_PRIO; on failure retry once with NPC_MCAM_HIGHER_PRIO
 * (see the algorithm comment above npc_sort_mcams_by_user_prio_level).
 * NOTE(review): the retry loop/goto structure, the retry_done update,
 * the error message call around the string on the second line and the
 * final return are missing from this extracted view.
 */
627 npc_alloc_mcam_by_ref_entry(struct mbox *mbox, struct roc_npc_flow *flow,
629 struct npc_mcam_alloc_entry_rsp *rsp_local)
631 int prio, ref_entry = 0, rc = 0, dir = NPC_MCAM_LOWER_PRIO;
632 bool retry_done = false;
635 npc_find_mcam_ref_entry(flow, npc, &prio, &ref_entry, dir);
636 rc = npc_allocate_mcam_entry(mbox, prio, rsp_local, ref_entry);
637 if (rc && !retry_done) {
639 "npc: Failed to allocate lower priority entry. Retrying for higher priority");
641 dir = NPC_MCAM_HIGHER_PRIO;
644 } else if (rc && retry_done) {
/*
 * Obtain a free MCAM entry for 'flow' at its user priority level:
 * allocate via reference entry, sort/slide existing entries so the free
 * slot lands at the right position, record the id on the flow and
 * insert it into the priority flow list. Returns the MCAM entry id.
 * NOTE(review): error-handling branches (alloc failure, zmalloc
 * failure, sort failure — including freeing 'new_entry') are missing
 * from this extracted view.
 */
652 npc_get_free_mcam_entry(struct mbox *mbox, struct roc_npc_flow *flow,
655 struct npc_mcam_alloc_entry_rsp rsp_local;
656 struct npc_prio_flow_entry *new_entry;
659 rc = npc_alloc_mcam_by_ref_entry(mbox, flow, npc, &rsp_local);
664 new_entry = plt_zmalloc(sizeof(*new_entry), 0);
668 new_entry->flow = flow;
670 plt_npc_dbg("kernel allocated MCAM entry %d", rsp_local.entry);
672 rc = npc_sort_mcams_by_user_prio_level(mbox, new_entry, npc,
677 plt_npc_dbg("allocated MCAM entry after sorting %d", rsp_local.entry);
678 flow->mcam_id = rsp_local.entry;
679 npc_insert_into_flow_list(npc, new_entry);
681 return rsp_local.entry;
688 npc_delete_prio_list_entry(struct npc *npc, struct roc_npc_flow *flow)
690 struct npc_prio_flow_list_head *list;
691 struct npc_prio_flow_entry *curr;
693 list = &npc->prio_flow_list[flow->priority];
694 curr = TAILQ_FIRST(list);
700 if (flow->mcam_id == curr->flow->mcam_id) {
701 TAILQ_REMOVE(list, curr, next);
705 curr = TAILQ_NEXT(curr, next);