1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Copy 'len' bytes from 'data' into 'ptr' in reverse byte order, so the
 * destination ends up MSB-first as required when programming MCAM
 * key data/mask registers.
 * NOTE(review): return type and braces are elided in this view.
 */
8 npc_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
12 for (idx = 0; idx < len; idx++)
13 ptr[idx] = data[len - 1 - idx];
/* Bounds check before copying into the MCAM key: verifies 'len' bytes
 * fit within the 'size' bytes remaining.
 * NOTE(review): body elided in this view — the caller (see
 * npc_update_extraction_data) treats a negative return as
 * NPC_ERR_INVALID_SIZE and otherwise uses the returned length;
 * confirm against the full source.
 */
17 npc_check_copysz(size_t size, size_t len)
/* Test whether the first 'len' bytes of 'mem' are all zero.
 * NOTE(review): the loop body and return statements are elided in this
 * view; npc_mask_is_supported() relies on a truthy result meaning
 * "all bytes zero" — confirm in full source.
 */
25 npc_mem_is_zero(const void *mem, int len)
30 for (i = 0; i < len; i++) {
/* Accumulate the HW-supported mask bytes contributed by one extraction
 * profile 'xinfo' into the caller-supplied mask buffer, for the byte
 * range [hdr_off - hw_hdr_len, hdr_off + len - hw_hdr_len) of the item,
 * clipped to info->len. Disabled extractors contribute nothing.
 * NOTE(review): local declarations, the early 'return's after the
 * guard conditions, and the store inside the loop are elided in this
 * view — verify details against the full source.
 */
38 npc_set_hw_mask(struct npc_parse_item_info *info, struct npc_xtract_info *xinfo,
44 if (xinfo->enable == 0)
47 if (xinfo->hdr_off < info->hw_hdr_len)
50 max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;
52 if (max_off > info->len)
55 offset = xinfo->hdr_off - info->hw_hdr_len;
56 for (j = offset; j < max_off; j++)
/* Build the full HW-supported mask for a given (lid, lt) layer into
 * info->hw_mask: first zero the buffer, then OR in the per-LD
 * extraction masks from prx_dxcfg, then OR in the flag-based (LFL)
 * extraction masks from prx_fxcfg for every LD with flags enabled.
 * NOTE(review): declarations of 'intf', 'i', 'j', 'lf_cfg' and the use
 * of 'lf_cfg' after it is read are elided in this view — presumably
 * lf_cfg gates/indexes the prx_fxcfg lookup; confirm in full source.
 */
61 npc_get_hw_supp_mask(struct npc_parse_state *pst,
62 struct npc_parse_item_info *info, int lid, int lt)
64 struct npc_xtract_info *xinfo, *lfinfo;
65 char *hw_mask = info->hw_mask;
71 xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
72 memset(hw_mask, 0, info->len);
74 for (i = 0; i < NPC_MAX_LD; i++)
75 npc_set_hw_mask(info, &xinfo[i], hw_mask);
77 for (i = 0; i < NPC_MAX_LD; i++) {
78 if (xinfo[i].flags_enable == 0)
81 lf_cfg = pst->npc->prx_lfcfg[i].i;
83 for (j = 0; j < NPC_MAX_LFL; j++) {
84 lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
85 npc_set_hw_mask(info, &lfinfo[0], hw_mask);
/* Return non-zero when 'mask' is a subset of 'hw_mask', i.e. every byte
 * satisfies (mask[i] | hw_mask[i]) == hw_mask[i]. With a NULL hw_mask,
 * nothing is supported, so only an all-zero mask passes (delegated to
 * npc_mem_is_zero).
 * NOTE(review): the loop that walks 'len' down around the comparison is
 * elided in this view — 'mask[len]' is only valid inside that loop;
 * confirm against full source.
 */
92 npc_mask_is_supported(const char *mask, const char *hw_mask, int len)
95 * If no hw_mask, assume nothing is supported.
99 return npc_mem_is_zero(mask, len);
102 if ((mask[len] | hw_mask[len]) != hw_mask[len])
103 return 0; /* False */
/* Basic validation of one flow pattern item and population of 'info':
 * - item must be non-NULL (NPC_ERR_PARAM)
 * - ranges (item->last) are not supported (NPC_ERR_INVALID_RANGE)
 * - spec == NULL requires mask and last to also be NULL (match-any);
 *   otherwise NPC_ERR_INVALID_SPEC
 * - mask defaults to info->def_mask when item->mask is NULL
 *   (NPC_ERR_PARAM if no default exists)
 * - the resulting mask must be a subset of the HW-supported mask
 *   (NPC_ERR_INVALID_MASK)
 * RAW items skip the spec/mask assignment here — presumably handled by
 * a dedicated RAW path elsewhere; confirm in full source.
 * NOTE(review): several lines (the NULL check itself, a success return
 * in the match-any branch, else-branches) are elided in this view.
 */
109 npc_parse_item_basic(const struct roc_npc_item_info *item,
110 struct npc_parse_item_info *info)
112 /* Item must not be NULL */
114 return NPC_ERR_PARAM;
116 /* Don't support ranges */
117 if (item->last != NULL)
118 return NPC_ERR_INVALID_RANGE;
120 /* If spec is NULL, both mask and last must be NULL, this
121 * makes it to match ANY value (eq to mask = 0).
122 * Setting either mask or last without spec is an error
124 if (item->spec == NULL) {
125 if (item->last == NULL && item->mask == NULL) {
129 return NPC_ERR_INVALID_SPEC;
132 /* We have valid spec */
133 if (item->type != ROC_NPC_ITEM_TYPE_RAW)
134 info->spec = item->spec;
136 /* If mask is not set, use default mask, err if default mask is
139 if (item->mask == NULL) {
140 if (info->def_mask == NULL)
141 return NPC_ERR_PARAM;
142 info->mask = info->def_mask;
144 if (item->type != ROC_NPC_ITEM_TYPE_RAW)
145 info->mask = item->mask;
148 /* mask specified must be subset of hw supported mask
149 * mask | hw_mask == hw_mask
151 if (!npc_mask_is_supported(info->mask, info->hw_mask, info->len))
152 return NPC_ERR_INVALID_MASK;
/* Program one extractor's contribution into the parse state's MCAM
 * buffers: clip the extractor's (hdr_off, len) window to the item data,
 * bounds-check the copy against the MCAM key width, byte-reverse the
 * item spec and mask (npc_prep_mcam_ldata) so they are MSB-first, and
 * copy them into pst->mcam_data / pst->mcam_mask at x->key_off.
 * Returns NPC_ERR_INVALID_SIZE when the copy would overflow the key.
 * NOTE(review): the assignment of 'x' (presumably x = xinfo), 'len'
 * initialization from x->len, hdr_off adjustment details, and the
 * success return are elided in this view — confirm in full source.
 */
158 npc_update_extraction_data(struct npc_parse_state *pst,
159 struct npc_parse_item_info *info,
160 struct npc_xtract_info *xinfo)
162 uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
163 uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
164 struct npc_xtract_info *x;
170 hdr_off = x->hdr_off;
172 if (hdr_off < info->hw_hdr_len)
178 hdr_off -= info->hw_hdr_len;
180 if (hdr_off >= info->len)
183 if (hdr_off + len > info->len)
184 len = info->len - hdr_off;
186 len = npc_check_copysz((ROC_NPC_MAX_MCAM_WIDTH_DWORDS * 8) - x->key_off,
189 return NPC_ERR_INVALID_SIZE;
191 /* Need to reverse complete structure so that dest addr is at
192 * MSB so as to program the MCAM using mcam_data & mcam_mask
195 npc_prep_mcam_ldata(int_info, (const uint8_t *)info->spec + hdr_off,
197 npc_prep_mcam_ldata(int_info_mask,
198 (const uint8_t *)info->mask + hdr_off, x->len);
200 memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
201 memcpy(pst->mcam_data + x->key_off, int_info, len);
/* Record a successfully parsed layer in the parse state: mark the
 * layer in layer_mask, stash its flags, and (unless the item spec is
 * NULL, i.e. match-any) feed every enabled extractor for (lid, lt) —
 * both per-LD and flag-based (LFL) — through
 * npc_update_extraction_data to fill the MCAM data/mask. Also appends
 * an entry to the flow dump data.
 * NOTE(review): many lines are elided in this view: declarations,
 * error-return paths after the npc_update_extraction_data calls,
 * handling when xinfo->is_terminating, the use of 'lf_cfg', and the
 * dump entry's field assignments — confirm in full source. Note
 * layer_mask is OR-ed with 'lid' directly (not BIT(lid)); verify
 * against how layer_mask is consumed elsewhere.
 */
206 npc_update_parse_state(struct npc_parse_state *pst,
207 struct npc_parse_item_info *info, int lid, int lt,
210 struct npc_lid_lt_xtract_info *xinfo;
211 struct roc_npc_flow_dump_data *dump;
212 struct npc_xtract_info *lfinfo;
216 pst->layer_mask |= lid;
218 pst->flags[lid] = flags;
220 intf = pst->nix_intf;
221 xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
222 if (xinfo->is_terminating)
225 if (info->spec == NULL)
228 for (i = 0; i < NPC_MAX_LD; i++) {
229 rc = npc_update_extraction_data(pst, info, &xinfo->xtract[i]);
234 for (i = 0; i < NPC_MAX_LD; i++) {
235 if (xinfo->xtract[i].flags_enable == 0)
238 lf_cfg = pst->npc->prx_lfcfg[i].i;
240 for (j = 0; j < NPC_MAX_LFL; j++) {
241 lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
242 rc = npc_update_extraction_data(pst, info,
247 if (lfinfo[0].enable)
254 dump = &pst->flow->dump_data[pst->flow->num_patterns++];
/* Write an initialised (zero key/mask) MCAM entry at 'mcam_id' via the
 * mbox write-entry request, carrying the flow's action/vtag_action and
 * enabling the entry. On RX, match the channel in KW0 (low 12 bits);
 * on TX, match the big-endian pf_func (extracted from npc_action bits
 * [19:4]) in KW0 bits [47:32]. Errors from the mbox are logged.
 * NOTE(review): the mbox struct type really is spelled
 * npc_mcam_write_entry_rsq in the shared mbox headers — not a typo to
 * fix here. NULL-check of 'req', declarations, the TX else-branch
 * brace, and the final return are elided in this view.
 */
262 npc_initialise_mcam_entry(struct npc *npc, struct roc_npc_flow *flow,
265 struct npc_mcam_write_entry_req *req;
266 struct npc_mcam_write_entry_rsq *rsp;
269 req = mbox_alloc_msg_npc_mcam_write_entry(npc->mbox);
274 req->entry = mcam_id;
276 req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
277 req->enable_entry = 1;
278 req->entry_data.action = flow->npc_action;
279 req->entry_data.vtag_action = flow->vtag_action;
281 for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
282 req->entry_data.kw[idx] = 0x0;
283 req->entry_data.kw_mask[idx] = 0x0;
286 if (flow->nix_intf == NIX_INTF_RX) {
287 req->entry_data.kw[0] |= (uint64_t)npc->channel;
288 req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
290 uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;
292 pf_func = plt_cpu_to_be_16(pf_func);
293 req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
294 req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
297 rc = mbox_process_msg(npc->mbox, (void *)&rsp);
299 plt_err("npc: mcam initialisation write failed");
/* Move the contents of MCAM entry 'old_ent' to 'new_ent' via a single
 * mbox shift request. The AF disables the old entry, copies it, and
 * enables the new one.
 * NOTE(review): the NULL-check after mbox_alloc and the final return
 * (presumably propagating rc) are elided in this view.
 */
306 npc_shift_mcam_entry(struct mbox *mbox, uint16_t old_ent, uint16_t new_ent)
308 struct npc_mcam_shift_entry_req *req;
309 struct npc_mcam_shift_entry_rsp *rsp;
312 /* Old entry is disabled & it's contents are moved to new_entry,
313 * new entry is enabled finally.
315 req = mbox_alloc_msg_npc_mcam_shift_entry(mbox);
318 req->curr_entry[0] = old_ent;
319 req->new_entry[0] = new_ent;
320 req->shift_count = 1;
322 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Direction selector for npc_slide_mcam_entries(): whether flow
 * entries in a priority list are walked/moved toward lower or higher
 * MCAM indices. (Enum keyword/braces elided in this view.)
 */
330 SLIDE_ENTRIES_TO_LOWER_INDEX,
331 SLIDE_ENTRIES_TO_HIGHER_INDEX,
/* Slide every flow of priority level 'prio' toward *free_mcam_id in
 * direction 'dir', chaining moves so the free slot propagates through
 * the list: walk from the tail (higher index) when sliding to higher
 * indices, or from the head when sliding to lower indices. Each moved
 * entry is first initialised/enabled at its destination
 * (npc_initialise_mcam_entry), then shifted (npc_shift_mcam_entry),
 * and its mcam_id updated. On return, *free_mcam_id holds the slot
 * vacated by the last moved entry.
 * NOTE(review): the enclosing while-loop over 'curr', error-return
 * paths after the two rc checks, and the final return are elided in
 * this view — confirm in full source.
 */
335 npc_slide_mcam_entries(struct mbox *mbox, struct npc *npc, int prio,
336 uint16_t *free_mcam_id, int dir)
338 uint16_t to_mcam_id = 0, from_mcam_id = 0;
339 struct npc_prio_flow_list_head *list;
340 struct npc_prio_flow_entry *curr = 0;
343 list = &npc->prio_flow_list[prio];
345 to_mcam_id = *free_mcam_id;
346 if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
347 curr = TAILQ_LAST(list, npc_prio_flow_list_head);
348 else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
349 curr = TAILQ_FIRST(list);
352 from_mcam_id = curr->flow->mcam_id;
353 if ((dir == SLIDE_ENTRIES_TO_HIGHER_INDEX &&
354 from_mcam_id < to_mcam_id) ||
355 (dir == SLIDE_ENTRIES_TO_LOWER_INDEX &&
356 from_mcam_id > to_mcam_id)) {
357 /* Newly allocated entry and the source entry given to
358 * npc_mcam_shift_entry_req will be in disabled state.
359 * Initialise and enable before moving an entry into
362 rc = npc_initialise_mcam_entry(npc, curr->flow,
366 rc = npc_shift_mcam_entry(mbox, from_mcam_id,
370 curr->flow->mcam_id = to_mcam_id;
371 to_mcam_id = from_mcam_id;
374 if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
375 curr = TAILQ_PREV(curr, npc_prio_flow_list_head, next);
376 else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
377 curr = TAILQ_NEXT(curr, next);
380 *free_mcam_id = from_mcam_id;
386 * The mcam_alloc request is first made with NPC_MCAM_LOWER_PRIO with the last
387 * entry in the requested priority level as the reference entry. If it fails,
388 * the alloc request is retried with NPC_MCAM_HIGHER_PRIO with the first entry
389 * in the next lower priority level as the reference entry. After obtaining
390 * the free MCAM from kernel, we check if it is at the right user requested
391 * priority level. If not, the flow rules are moved across MCAM entries till
392 * the user requested priority levels are met.
393 * The MCAM sorting algorithm works as below.
394 * For any given free MCAM obtained from the kernel, there are 3 possibilities.
396 * There are entries belonging to higher user priority level (numerically
397 * lesser) in higher mcam indices. In this case, the entries with higher user
398 * priority are slided towards lower indices and a free entry is created in the
401 * Assume free entry = 1610, user requested priority = 2 and
402 * max user priority levels = 5 with below entries in respective priority
404 * 0: 1630, 1635, 1641
405 * 1: 1646, 1650, 1651
406 * 2: 1652, 1655, 1660
407 * 3: 1661, 1662, 1663, 1664
408 * 4: 1665, 1667, 1670
410 * Entries (1630, 1635, 1641, 1646, 1650, 1651) have to be slided down towards
412 * Shifting sequence will be as below:
413 * 1610 <- 1630 <- 1635 <- 1641 <- 1646 <- 1650 <- 1651
414 * Entry 1651 will be freed for writing the new flow. This entry will now
415 * become the head of priority level 2.
418 * There are entries belonging to lower user priority level (numerically
419 * bigger) in lower mcam indices. In this case, the entries with lower user
420 * priority are slided towards higher indices and a free entry is created in the
424 * free entry = 1653, user requested priority = 0
425 * 0: 1630, 1635, 1641
426 * 1: 1646, 1650, 1651
427 * 2: 1652, 1655, 1660
428 * 3: 1661, 1662, 1663, 1664
429 * 4: 1665, 1667, 1670
431 * Entries (1646, 1650, 1651, 1652) have to be slided up towards higher
433 * Shifting sequence will be as below:
434 * 1646 -> 1650 -> 1651 -> 1652 -> 1653
435 * Entry 1646 will be freed for writing the new flow. This entry will now
436 * become the last element in priority level 0.
439 * Free mcam is at the right place, ie, all higher user priority level
440 * mcams lie in lower indices and all lower user priority level mcams lie in
441 * higher mcam indices.
443 * The priority level lists are scanned first for case (1) and if the
444 * condition is found true, case(2) is skipped because they are mutually
445 * exclusive. For example, consider below state.
446 * 0: 1630, 1635, 1641
447 * 1: 1646, 1650, 1651
448 * 2: 1652, 1655, 1660
449 * 3: 1661, 1662, 1663, 1664
450 * 4: 1665, 1667, 1670
451 * free entry = 1610, user requested priority = 2
453 * Case 1: Here the condition is;
454 * "if (requested_prio > prio_idx && free_mcam < tail->flow->mcam_id ){}"
455 * If this condition is true, it means at some higher priority level than
456 * requested priority level, there are entries at lower indices than the given
457 * free mcam. That is, we have found in levels 0,1 there is an mcam X which is
459 * If, for any free entry and user req prio, the above condition is true, then
460 * the below case(2) condition will always be false since the lists are kept
461 * sorted. The case(2) condition is;
462 * "if (requested_prio < prio_idx && free_mcam > head->flow->mcam_id){}"
463 * There can't be entries at lower indices at priority level higher
464 * than the requested priority level. That is, here, at levels 3 & 4 there
465 * cannot be any entry greater than 1610. Because all entries in 3 & 4 must be
466 * greater than X which was found to be greater than 1610 earlier.
/* Restore the user-priority ordering invariant after the kernel hands
 * back a free MCAM entry (rsp->entry). Implements the 3-case algorithm
 * in the large comment above:
 *  - Forward scan (case 1): for levels numerically higher-priority than
 *    the request, if any such level's last entry sits at a higher MCAM
 *    index than the free slot, slide that level's entries down toward
 *    lower indices; the forward scan finding work disables the reverse
 *    scan (cases are mutually exclusive).
 *  - Reverse scan (case 2): for levels numerically lower-priority, if
 *    the level's first entry sits below the free slot, slide entries up
 *    toward higher indices.
 * On return rsp->entry holds the (possibly relocated) free MCAM index.
 * NOTE(review): error-return checks after the slide calls, loop
 * increments/decrements of prio_idx, and the final return are elided
 * in this view.
 */
470 npc_sort_mcams_by_user_prio_level(struct mbox *mbox,
471 struct npc_prio_flow_entry *flow_list_entry,
473 struct npc_mcam_alloc_entry_rsp *rsp)
475 int requested_prio = flow_list_entry->flow->priority;
476 struct npc_prio_flow_entry *head, *tail;
477 struct npc_prio_flow_list_head *list;
478 uint16_t free_mcam = rsp->entry;
479 bool do_reverse_scan = true;
480 int prio_idx = 0, rc = 0;
482 while (prio_idx <= npc->flow_max_priority - 1) {
483 list = &npc->prio_flow_list[prio_idx];
484 tail = TAILQ_LAST(list, npc_prio_flow_list_head);
486 /* requested priority is lower than current level
487 * ie, numerically req prio is higher
489 if ((requested_prio > prio_idx) && tail) {
490 /* but there are some mcams in current level
491 * at higher indices, ie, at priority lower
494 if (free_mcam < tail->flow->mcam_id) {
495 rc = npc_slide_mcam_entries(
496 mbox, npc, prio_idx, &free_mcam,
497 SLIDE_ENTRIES_TO_LOWER_INDEX);
500 do_reverse_scan = false;
506 prio_idx = npc->flow_max_priority - 1;
507 while (prio_idx && do_reverse_scan) {
508 list = &npc->prio_flow_list[prio_idx];
509 head = TAILQ_FIRST(list);
511 /* requested priority is higher than current level
512 * ie, numerically req prio is lower
514 if (requested_prio < prio_idx && head) {
515 /* but free mcam is higher than lowest priority
516 * mcam in current level
518 if (free_mcam > head->flow->mcam_id) {
519 rc = npc_slide_mcam_entries(
520 mbox, npc, prio_idx, &free_mcam,
521 SLIDE_ENTRIES_TO_HIGHER_INDEX);
528 rsp->entry = free_mcam;
/* Insert 'entry' into its priority level's flow list, keeping the list
 * sorted by ascending mcam_id: walk past entries with smaller mcam_id,
 * insert before the first larger one, append to tail if all are
 * smaller, or insert at head when the list is empty.
 * NOTE(review): the while-loop around the walk and the if/else
 * structure selecting between the three TAILQ insertions are elided in
 * this view — the three insert calls are mutually exclusive branches.
 */
533 npc_insert_into_flow_list(struct npc *npc, struct npc_prio_flow_entry *entry)
535 struct npc_prio_flow_list_head *list;
536 struct npc_prio_flow_entry *curr;
538 list = &npc->prio_flow_list[entry->flow->priority];
539 curr = TAILQ_FIRST(list);
543 if (entry->flow->mcam_id > curr->flow->mcam_id)
544 curr = TAILQ_NEXT(curr, next);
549 TAILQ_INSERT_BEFORE(curr, entry, next);
551 TAILQ_INSERT_TAIL(list, entry, next);
553 TAILQ_INSERT_HEAD(list, entry, next);
/* Ask the AF (via mbox) for one MCAM entry at priority relation 'prio'
 * (NPC_MCAM_LOWER_PRIO / HIGHER_PRIO / ANY_PRIO) relative to
 * 'ref_entry', and copy the kernel's response into the caller's
 * rsp_local.
 * NOTE(review): NULL-check of 'req', request count/flags fields, the
 * rc check after mbox_process_msg, any use of 'rsp' vs 'rsp_cmd', and
 * the return are elided in this view. The memcpy sizes with
 * sizeof(*rsp) while copying rsp_cmd — same struct type, so the size
 * is correct, but confirm 'rsp' is initialised in the elided lines.
 */
558 npc_allocate_mcam_entry(struct mbox *mbox, int prio,
559 struct npc_mcam_alloc_entry_rsp *rsp_local,
562 struct npc_mcam_alloc_entry_rsp *rsp_cmd;
563 struct npc_mcam_alloc_entry_req *req;
564 struct npc_mcam_alloc_entry_rsp *rsp;
567 req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
572 req->priority = prio;
573 req->ref_entry = ref_entry;
575 rc = mbox_process_msg(mbox, (void *)&rsp_cmd);
582 memcpy(rsp_local, rsp_cmd, sizeof(*rsp));
/* Pick a reference MCAM entry and priority relation for an allocation
 * request. For NPC_MCAM_LOWER_PRIO, scan from the flow's level upward
 * (toward numerically lower, i.e. higher-priority, levels) and use the
 * head (lowest mcam_id) of the first non-empty list. For
 * NPC_MCAM_HIGHER_PRIO, scan downward (toward numerically higher
 * levels) and use the tail (highest mcam_id). When no populated level
 * is found, fall through to NPC_MCAM_ANY_PRIO.
 * NOTE(review): the non-NULL checks on head/tail, the returns after
 * setting *prio/*ref_entry, and the prio_idx++/-- steps are elided in
 * this view — confirm loop direction details in full source.
 */
588 npc_find_mcam_ref_entry(struct roc_npc_flow *flow, struct npc *npc, int *prio,
589 int *ref_entry, int dir)
591 struct npc_prio_flow_entry *head, *tail;
592 struct npc_prio_flow_list_head *list;
593 int prio_idx = flow->priority;
595 if (dir == NPC_MCAM_LOWER_PRIO) {
596 while (prio_idx >= 0) {
597 list = &npc->prio_flow_list[prio_idx];
598 head = TAILQ_FIRST(list);
600 *prio = NPC_MCAM_LOWER_PRIO;
601 *ref_entry = head->flow->mcam_id;
606 } else if (dir == NPC_MCAM_HIGHER_PRIO) {
607 prio_idx = flow->priority;
608 while (prio_idx <= npc->flow_max_priority - 1) {
609 list = &npc->prio_flow_list[prio_idx];
610 tail = TAILQ_LAST(list, npc_prio_flow_list_head);
612 *prio = NPC_MCAM_HIGHER_PRIO;
613 *ref_entry = tail->flow->mcam_id;
619 *prio = NPC_MCAM_ANY_PRIO;
/* Allocate an MCAM entry relative to a reference entry: first try
 * NPC_MCAM_LOWER_PRIO with a reference from npc_find_mcam_ref_entry;
 * on failure, retry exactly once with NPC_MCAM_HIGHER_PRIO (logging
 * the retry); a failure after the retry is final.
 * NOTE(review): the retry loop/goto structure, setting of retry_done,
 * the error message/return in the retry-exhausted branch, and the
 * final return are elided in this view — confirm in full source.
 */
624 npc_alloc_mcam_by_ref_entry(struct mbox *mbox, struct roc_npc_flow *flow,
626 struct npc_mcam_alloc_entry_rsp *rsp_local)
628 int prio, ref_entry = 0, rc = 0, dir = NPC_MCAM_LOWER_PRIO;
629 bool retry_done = false;
632 npc_find_mcam_ref_entry(flow, npc, &prio, &ref_entry, dir);
633 rc = npc_allocate_mcam_entry(mbox, prio, rsp_local, ref_entry);
634 if (rc && !retry_done) {
636 "npc: Failed to allocate lower priority entry. Retrying for higher priority");
638 dir = NPC_MCAM_HIGHER_PRIO;
641 } else if (rc && retry_done) {
/* Obtain a free MCAM entry at the flow's user priority level:
 * allocate via npc_alloc_mcam_by_ref_entry, wrap the flow in a new
 * prio-list entry, re-sort MCAM placement with
 * npc_sort_mcams_by_user_prio_level, record the final entry id in
 * flow->mcam_id, insert the entry into the priority list, and return
 * the entry index.
 * NOTE(review): error-return paths after the alloc/sort calls (which
 * presumably free new_entry on sort failure) and the NULL-check after
 * plt_zmalloc are elided in this view — verify the cleanup paths in
 * the full source.
 */
649 npc_get_free_mcam_entry(struct mbox *mbox, struct roc_npc_flow *flow,
652 struct npc_mcam_alloc_entry_rsp rsp_local;
653 struct npc_prio_flow_entry *new_entry;
656 rc = npc_alloc_mcam_by_ref_entry(mbox, flow, npc, &rsp_local);
661 new_entry = plt_zmalloc(sizeof(*new_entry), 0);
665 new_entry->flow = flow;
667 plt_info("npc: kernel allocated MCAM entry %d", rsp_local.entry);
669 rc = npc_sort_mcams_by_user_prio_level(mbox, new_entry, npc,
674 plt_info("npc: allocated MCAM entry after sorting %d", rsp_local.entry);
675 flow->mcam_id = rsp_local.entry;
676 npc_insert_into_flow_list(npc, new_entry);
678 return rsp_local.entry;
685 npc_delete_prio_list_entry(struct npc *npc, struct roc_npc_flow *flow)
687 struct npc_prio_flow_list_head *list;
688 struct npc_prio_flow_entry *curr;
690 list = &npc->prio_flow_list[flow->priority];
691 curr = TAILQ_FIRST(list);
697 if (flow->mcam_id == curr->flow->mcam_id) {
698 TAILQ_REMOVE(list, curr, next);
702 curr = TAILQ_NEXT(curr, next);