1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/*
 * Copy @len bytes from @data into @ptr in reverse byte order
 * (ptr[0] receives data[len - 1]), so extracted layer data lands
 * MSB-first as the MCAM programming below expects.
 * NOTE(review): the return type, braces and the declaration of
 * 'idx' are elided from this view of the file.
 */
npc_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
	for (idx = 0; idx < len; idx++)
		ptr[idx] = data[len - 1 - idx];
/*
 * Validate that @len bytes fit in a destination of @size bytes.
 * Body elided in this view; judging by its caller below it returns
 * the copy length on success and a negative error
 * (NPC_ERR_INVALID_SIZE) when len exceeds size -- TODO confirm
 * against the full source.
 */
npc_check_copysz(size_t size, size_t len)
/*
 * Report whether the first @len bytes at @mem are all zero.
 * NOTE(review): local declarations, the per-byte comparison inside
 * the loop and the return statements are elided from this view.
 */
npc_mem_is_zero(const void *mem, int len)
	for (i = 0; i < len; i++) {
/*
 * Merge one extractor's coverage into the item's hardware mask.
 *
 * For the extract described by @xinfo, compute which byte range of
 * the current item's header the extractor covers (relative offsets
 * 'offset'..'max_off') and mark those bytes in hw_mask; bytes no
 * extractor covers stay zero, meaning "not matchable".
 * NOTE(review): local declarations, the bodies of the two early
 * 'if' checks (presumably early returns) and the loop body that
 * writes hw_mask bytes are elided from this view -- confirm.
 */
npc_set_hw_mask(struct npc_parse_item_info *info, struct npc_xtract_info *xinfo,
	/* Disabled extractor contributes nothing (consequent elided). */
	if (xinfo->enable == 0)

	/* Extract begins before this item's header (consequent elided). */
	if (xinfo->hdr_off < info->hw_hdr_len)

	/* Last item-relative byte (exclusive) covered by this extractor. */
	max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;

	/* Clamp coverage to the item's own length (consequent elided). */
	if (max_off > info->len)

	/* First item-relative byte covered by this extractor. */
	offset = xinfo->hdr_off - info->hw_hdr_len;
	for (j = offset; j < max_off; j++)
/*
 * Build info->hw_mask for item layer (@lid, @lt): the set of header
 * bytes the hardware profile can actually match on, obtained by
 * OR-ing in every layer-data (LD) extractor and every layer-flag
 * (LF) extractor from the per-interface extraction config.
 * NOTE(review): declarations of 'intf', 'i', 'j', 'lf_cfg', a
 * 'continue' after the flags_enable check, any lf_cfg filtering and
 * the closing braces are elided from this view.
 */
npc_get_hw_supp_mask(struct npc_parse_state *pst,
		     struct npc_parse_item_info *info, int lid, int lt)
	struct npc_xtract_info *xinfo, *lfinfo;
	char *hw_mask = info->hw_mask;

	/* Extraction config for this (interface, layer id, layer type). */
	xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
	memset(hw_mask, 0, info->len);

	/* Accumulate the layer-data extractors. */
	for (i = 0; i < NPC_MAX_LD; i++)
		npc_set_hw_mask(info, &xinfo[i], hw_mask);

	/* Accumulate flag extractors where flags are enabled. */
	for (i = 0; i < NPC_MAX_LD; i++) {
		if (xinfo[i].flags_enable == 0)

		lf_cfg = pst->npc->prx_lfcfg[i].i;
		for (j = 0; j < NPC_MAX_LFL; j++) {
			lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
			npc_set_hw_mask(info, &lfinfo[0], hw_mask);
/*
 * Check that the user-requested @mask is a subset of @hw_mask, i.e.
 * (mask | hw_mask) == hw_mask for every byte, so the hardware can
 * honour the requested match.  Returns non-zero when supported.
 * NOTE(review): the loop header driving the per-byte comparison
 * (and a final 'return 1') is elided from this view, so the
 * 'mask[len]' below is an in-loop index, not an out-of-bounds
 * access -- confirm in the full source.
 */
npc_mask_is_supported(const char *mask, const char *hw_mask, int len)
	/*
	 * If no hw_mask, assume nothing is supported.
	 */
	return npc_mem_is_zero(mask, len);

	if ((mask[len] | hw_mask[len]) != hw_mask[len])
		return 0; /* False */
/*
 * Validate one flow-pattern item and populate @info->spec and
 * @info->mask from it.
 *
 * Rules enforced by the visible code:
 *  - item must be non-NULL (check line elided),
 *  - ranges (item->last) are unsupported,
 *  - spec == NULL requires last == NULL and mask == NULL
 *    (matches ANY value, equivalent to mask = 0),
 *  - a missing mask falls back to info->def_mask,
 *  - the final mask must be a subset of info->hw_mask.
 * Returns 0 on success (final return elided) or a negative
 * NPC_ERR_* code.
 * NOTE(review): the NULL check on 'item', several closing braces,
 * an 'else' pairing and the trailing 'return 0' are elided here.
 */
npc_parse_item_basic(const struct roc_npc_item_info *item,
		     struct npc_parse_item_info *info)
	/* Item must not be NULL */
		return NPC_ERR_PARAM;

	/* Don't support ranges */
	if (item->last != NULL)
		return NPC_ERR_INVALID_RANGE;

	/* If spec is NULL, both mask and last must be NULL, this
	 * makes it to match ANY value (eq to mask = 0).
	 * Setting either mask or last without spec is an error.
	 */
	if (item->spec == NULL) {
		if (item->last == NULL && item->mask == NULL) {
		return NPC_ERR_INVALID_SPEC;

	/* We have valid spec */
	if (item->type != ROC_NPC_ITEM_TYPE_RAW)
		info->spec = item->spec;

	/* If mask is not set, use default mask; err if default mask is
	 * also unset.
	 */
	if (item->mask == NULL) {
		if (info->def_mask == NULL)
			return NPC_ERR_PARAM;
		info->mask = info->def_mask;
	if (item->type != ROC_NPC_ITEM_TYPE_RAW)
		info->mask = item->mask;

	if (info->mask == NULL)
		return NPC_ERR_INVALID_MASK;

	/* mask specified must be subset of hw supported mask
	 * mask | hw_mask == hw_mask
	 */
	if (!npc_mask_is_supported(info->mask, info->hw_mask, info->len))
		return NPC_ERR_INVALID_MASK;
/*
 * Copy one extractor's worth of the item's spec/mask bytes into the
 * MCAM key under construction (pst->mcam_data / pst->mcam_mask).
 *
 * Computes the overlap of extractor @xinfo with the current item,
 * byte-reverses the overlapping spec and mask via
 * npc_prep_mcam_ldata() (MSB-first, as the MCAM is programmed), and
 * copies 'len' bytes at the extractor's key offset.
 * Returns 0 on success or NPC_ERR_INVALID_SIZE when the extract
 * does not fit NPC_MAX_EXTRACT_DATA_LEN or the key window.
 * NOTE(review): declarations of 'len'/'hdr_off', the 'x = xinfo'
 * style assignment, the bodies of the two early 'if' checks
 * (presumably returns/adjustments), continuation arguments of two
 * calls and the final 'return 0' are elided from this view.
 */
npc_update_extraction_data(struct npc_parse_state *pst,
			   struct npc_parse_item_info *info,
			   struct npc_xtract_info *xinfo)
	uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
	uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
	struct npc_xtract_info *x;

	if (x->len > NPC_MAX_EXTRACT_DATA_LEN)
		return NPC_ERR_INVALID_SIZE;

	hdr_off = x->hdr_off;

	/* Extract starts before this item's header (consequent elided). */
	if (hdr_off < info->hw_hdr_len)

	/* Rebase the extract offset to be item-relative. */
	hdr_off -= info->hw_hdr_len;

	/* Extract lies wholly beyond this item (consequent elided). */
	if (hdr_off >= info->len)

	/* Clamp the copy length to the remainder of the item. */
	if (hdr_off + len > info->len)
		len = info->len - hdr_off;

	/* Ensure the copy fits in the MCAM key after key_off. */
	len = npc_check_copysz((ROC_NPC_MAX_MCAM_WIDTH_DWORDS * 8) - x->key_off,
		return NPC_ERR_INVALID_SIZE;

	/* Need to reverse complete structure so that dest addr is at
	 * MSB so as to program the MCAM using mcam_data & mcam_mask
	 */
	npc_prep_mcam_ldata(int_info, (const uint8_t *)info->spec + hdr_off,
	npc_prep_mcam_ldata(int_info_mask,
			    (const uint8_t *)info->mask + hdr_off, x->len);

	memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
	memcpy(pst->mcam_data + x->key_off, int_info, len);
/*
 * Record one parsed item in the parse state: mark the layer in
 * layer_mask and its flags, then -- if a spec was supplied -- push
 * the item's bytes through every LD extractor and every enabled
 * flag extractor into the MCAM key, and append a dump-data record
 * for this (lid, lt).
 * NOTE(review): local declarations ('intf', 'i', 'j', 'rc',
 * 'lf_cfg', 'flags' parameter line), rc error handling, the bodies
 * of the is_terminating / spec==NULL checks, dump-record field
 * assignments and closing braces are elided from this view.
 */
npc_update_parse_state(struct npc_parse_state *pst,
		       struct npc_parse_item_info *info, int lid, int lt,
	struct npc_lid_lt_xtract_info *xinfo;
	struct roc_npc_flow_dump_data *dump;
	struct npc_xtract_info *lfinfo;

	pst->layer_mask |= lid;
	pst->flags[lid] = flags;

	intf = pst->nix_intf;
	xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
	/* Layer type marks end of parsing -- handling elided here. */
	if (xinfo->is_terminating)

	/* Match-any item: nothing to program into the key. */
	if (info->spec == NULL)

	/* Layer-data extractors. */
	for (i = 0; i < NPC_MAX_LD; i++) {
		rc = npc_update_extraction_data(pst, info, &xinfo->xtract[i]);

	/* Flag extractors, only where flags are enabled. */
	for (i = 0; i < NPC_MAX_LD; i++) {
		if (xinfo->xtract[i].flags_enable == 0)

		lf_cfg = pst->npc->prx_lfcfg[i].i;
		for (j = 0; j < NPC_MAX_LFL; j++) {
			lfinfo = pst->npc->prx_fxcfg[intf][i][j].xtract;
			rc = npc_update_extraction_data(pst, info,
			if (lfinfo[0].enable)

	/* Append a dump record for this pattern. */
	dump = &pst->flow->dump_data[pst->flow->num_patterns++];
/*
 * Initialise and enable MCAM entry @mcam_id with an all-zero
 * key/mask via the NPC_MCAM_WRITE_ENTRY mailbox, carrying over the
 * flow's action and vtag action.  RX entries match the NPC channel
 * in KW0 bits [11:0]; TX entries match the PF_FUNC (taken from
 * npc_action bits [19:4], converted to big-endian) in KW0 bits
 * [47:32].  Returns the mailbox status; logs on failure.
 * NOTE(review): declarations of 'idx'/'rc', the NULL check on
 * 'req', the 'else' line of the RX/TX branch, closing braces and
 * the final return are elided from this view.
 */
npc_initialise_mcam_entry(struct npc *npc, struct roc_npc_flow *flow,
	struct npc_mcam_write_entry_req *req;
	/* NOTE(review): 'rsq' in this type name looks like a typo for
	 * 'rsp' -- confirm the struct tag actually declared in the
	 * mailbox header before changing it.
	 */
	struct npc_mcam_write_entry_rsq *rsp;

	req = mbox_alloc_msg_npc_mcam_write_entry(npc->mbox);

	req->entry = mcam_id;
	req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
	req->enable_entry = 1;
	req->entry_data.action = flow->npc_action;
	req->entry_data.vtag_action = flow->vtag_action;

	/* Start from a clean all-zero key and mask. */
	for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
		req->entry_data.kw[idx] = 0x0;
		req->entry_data.kw_mask[idx] = 0x0;

	if (flow->nix_intf == NIX_INTF_RX) {
		/* Match the RX channel in the low 12 bits of KW0. */
		req->entry_data.kw[0] |= (uint64_t)npc->channel;
		req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
		uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;

		/* PF_FUNC is matched in big-endian byte order. */
		pf_func = plt_cpu_to_be_16(pf_func);
		req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
		req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);

	rc = mbox_process_msg(npc->mbox, (void *)&rsp);
		plt_err("npc: mcam initialisation write failed");
/*
 * Move the contents of MCAM entry @old_ent into @new_ent with a
 * single-entry NPC_MCAM_SHIFT_ENTRY mailbox request and return the
 * mailbox status.
 * NOTE(review): the 'rc' declaration, the NULL check on 'req' and
 * the final return are elided from this view.
 */
npc_shift_mcam_entry(struct mbox *mbox, uint16_t old_ent, uint16_t new_ent)
	struct npc_mcam_shift_entry_req *req;
	struct npc_mcam_shift_entry_rsp *rsp;

	/* Old entry is disabled & its contents are moved to new_entry,
	 * new entry is enabled finally.
	 */
	req = mbox_alloc_msg_npc_mcam_shift_entry(mbox);

	req->curr_entry[0] = old_ent;
	req->new_entry[0] = new_ent;
	req->shift_count = 1;

	rc = mbox_process_msg(mbox, (void *)&rsp);
/* Direction argument for npc_slide_mcam_entries(): slide existing
 * flow entries toward numerically lower or higher MCAM indices to
 * open a free slot at the needed position.
 * NOTE(review): the enum's opening and closing lines are elided
 * from this view.
 */
	SLIDE_ENTRIES_TO_LOWER_INDEX,
	SLIDE_ENTRIES_TO_HIGHER_INDEX,
/*
 * Cascade-shift the flows of priority level @prio through the free
 * slot *free_mcam_id.
 *
 * Starting from the tail (SLIDE_ENTRIES_TO_HIGHER_INDEX) or the
 * head (SLIDE_ENTRIES_TO_LOWER_INDEX) of the level's flow list,
 * each flow whose MCAM entry sits on the wrong side of the free
 * slot is initialised at, then shifted into, the current free slot;
 * its vacated entry becomes the free slot for the next iteration.
 * On return *free_mcam_id holds the finally freed entry.
 * NOTE(review): the loop construct enclosing the walk, 'rc'
 * declarations/error checks, call-argument continuations and
 * closing braces are elided from this view.
 */
npc_slide_mcam_entries(struct mbox *mbox, struct npc *npc, int prio,
		       uint16_t *free_mcam_id, int dir)
	uint16_t to_mcam_id = 0, from_mcam_id = 0;
	struct npc_prio_flow_list_head *list;
	struct npc_prio_flow_entry *curr = 0;

	list = &npc->prio_flow_list[prio];

	to_mcam_id = *free_mcam_id;
	if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
		curr = TAILQ_LAST(list, npc_prio_flow_list_head);
	else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
		curr = TAILQ_FIRST(list);

	from_mcam_id = curr->flow->mcam_id;
	/* Only move entries sitting on the wrong side of the slot. */
	if ((dir == SLIDE_ENTRIES_TO_HIGHER_INDEX &&
	     from_mcam_id < to_mcam_id) ||
	    (dir == SLIDE_ENTRIES_TO_LOWER_INDEX &&
	     from_mcam_id > to_mcam_id)) {
		/* Newly allocated entry and the source entry given to
		 * npc_mcam_shift_entry_req will be in disabled state.
		 * Initialise and enable before moving an entry into it.
		 */
		rc = npc_initialise_mcam_entry(npc, curr->flow,
		rc = npc_shift_mcam_entry(mbox, from_mcam_id,
		curr->flow->mcam_id = to_mcam_id;
		/* The vacated entry is the next free slot. */
		to_mcam_id = from_mcam_id;

	/* Advance toward the head or tail depending on direction. */
	if (dir == SLIDE_ENTRIES_TO_HIGHER_INDEX)
		curr = TAILQ_PREV(curr, npc_prio_flow_list_head, next);
	else if (dir == SLIDE_ENTRIES_TO_LOWER_INDEX)
		curr = TAILQ_NEXT(curr, next);

	*free_mcam_id = from_mcam_id;
392 * The mcam_alloc request is first made with NPC_MCAM_LOWER_PRIO with the last
393 * entry in the requested priority level as the reference entry. If it fails,
394 * the alloc request is retried with NPC_MCAM_HIGHER_PRIO with the first entry
395 * in the next lower priority level as the reference entry. After obtaining
396 * the free MCAM from kernel, we check if it is at the right user requested
397 * priority level. If not, the flow rules are moved across MCAM entries till
398 * the user requested priority levels are met.
399 * The MCAM sorting algorithm works as below.
400 * For any given free MCAM obtained from the kernel, there are 3 possibilities.
402 * There are entries belonging to higher user priority level (numerically
403 * lesser) in higher mcam indices. In this case, the entries with higher user
 * priority are slid towards lower indices and a free entry is created in the
407 * Assume free entry = 1610, user requested priority = 2 and
408 * max user priority levels = 5 with below entries in respective priority
410 * 0: 1630, 1635, 1641
411 * 1: 1646, 1650, 1651
412 * 2: 1652, 1655, 1660
413 * 3: 1661, 1662, 1663, 1664
414 * 4: 1665, 1667, 1670
 * Entries (1630, 1635, 1641, 1646, 1650, 1651) have to be slid down towards
418 * Shifting sequence will be as below:
419 * 1610 <- 1630 <- 1635 <- 1641 <- 1646 <- 1650 <- 1651
 * Entry 1651 will be freed for writing the new flow. This entry will now
421 * become the head of priority level 2.
424 * There are entries belonging to lower user priority level (numerically
425 * bigger) in lower mcam indices. In this case, the entries with lower user
 * priority are slid towards higher indices and a free entry is created in the
430 * free entry = 1653, user requested priority = 0
431 * 0: 1630, 1635, 1641
432 * 1: 1646, 1650, 1651
433 * 2: 1652, 1655, 1660
434 * 3: 1661, 1662, 1663, 1664
435 * 4: 1665, 1667, 1670
 * Entries (1646, 1650, 1651, 1652) have to be slid up towards higher
439 * Shifting sequence will be as below:
440 * 1646 -> 1650 -> 1651 -> 1652 -> 1653
 * Entry 1646 will be freed for writing the new flow. This entry will now
442 * become the last element in priority level 0.
445 * Free mcam is at the right place, ie, all higher user priority level
446 * mcams lie in lower indices and all lower user priority level mcams lie in
447 * higher mcam indices.
449 * The priority level lists are scanned first for case (1) and if the
450 * condition is found true, case(2) is skipped because they are mutually
451 * exclusive. For example, consider below state.
452 * 0: 1630, 1635, 1641
453 * 1: 1646, 1650, 1651
454 * 2: 1652, 1655, 1660
455 * 3: 1661, 1662, 1663, 1664
456 * 4: 1665, 1667, 1670
457 * free entry = 1610, user requested priority = 2
459 * Case 1: Here the condition is;
460 * "if (requested_prio > prio_idx && free_mcam < tail->flow->mcam_id ){}"
461 * If this condition is true, it means at some higher priority level than
462 * requested priority level, there are entries at lower indices than the given
463 * free mcam. That is, we have found in levels 0,1 there is an mcam X which is
465 * If, for any free entry and user req prio, the above condition is true, then
466 * the below case(2) condition will always be false since the lists are kept
467 * sorted. The case(2) condition is;
468 * "if (requested_prio < prio_idx && free_mcam > head->flow->mcam_id){}"
469 * There can't be entries at lower indices at priority level higher
470 * than the requested priority level. That is, here, at levels 3 & 4 there
471 * cannot be any entry greater than 1610. Because all entries in 3 & 4 must be
472 * greater than X which was found to be greater than 1610 earlier.
/*
 * Re-home the kernel-allocated free MCAM entry (rsp->entry) so it
 * lands at the user-requested priority position (see the algorithm
 * description in the comment block above).
 *
 * Forward scan over priority levels handles case (1): flows of a
 * numerically-higher-priority level sitting at indices above the
 * free entry are slid to lower indices.  If case (1) ever fires,
 * the reverse scan for case (2) is skipped -- the two cases are
 * mutually exclusive.  Otherwise the reverse scan slides
 * lower-priority flows to higher indices.  On return rsp->entry is
 * the finally freed entry.
 * NOTE(review): the 'struct npc *npc' parameter line, rc error
 * checks/returns, loop index increment/decrement lines and closing
 * braces are elided from this view.
 */
npc_sort_mcams_by_user_prio_level(struct mbox *mbox,
				  struct npc_prio_flow_entry *flow_list_entry,
				  struct npc_mcam_alloc_entry_rsp *rsp)
	int requested_prio = flow_list_entry->flow->priority;
	struct npc_prio_flow_entry *head, *tail;
	struct npc_prio_flow_list_head *list;
	uint16_t free_mcam = rsp->entry;
	bool do_reverse_scan = true;
	int prio_idx = 0, rc = 0;

	while (prio_idx <= npc->flow_max_priority - 1) {
		list = &npc->prio_flow_list[prio_idx];
		tail = TAILQ_LAST(list, npc_prio_flow_list_head);

		/* requested priority is lower than current level
		 * ie, numerically req prio is higher
		 */
		if ((requested_prio > prio_idx) && tail) {
			/* but there are some mcams in current level
			 * at higher indices, ie, at priority lower
			 */
			if (free_mcam < tail->flow->mcam_id) {
				rc = npc_slide_mcam_entries(
					mbox, npc, prio_idx, &free_mcam,
					SLIDE_ENTRIES_TO_LOWER_INDEX);
				/* Case (1) fired: case (2) cannot apply. */
				do_reverse_scan = false;

	prio_idx = npc->flow_max_priority - 1;
	while (prio_idx && do_reverse_scan) {
		list = &npc->prio_flow_list[prio_idx];
		head = TAILQ_FIRST(list);

		/* requested priority is higher than current level
		 * ie, numerically req prio is lower
		 */
		if (requested_prio < prio_idx && head) {
			/* but free mcam is higher than lowest priority
			 * mcam in current level
			 */
			if (free_mcam > head->flow->mcam_id) {
				rc = npc_slide_mcam_entries(
					mbox, npc, prio_idx, &free_mcam,
					SLIDE_ENTRIES_TO_HIGHER_INDEX);

	rsp->entry = free_mcam;
/*
 * Insert @entry into its priority level's flow list, keeping the
 * list sorted by ascending mcam_id: advance past entries with
 * smaller ids, then insert before the first larger one, or at the
 * tail (all smaller) / head (empty list) as appropriate.
 * NOTE(review): the walk loop construct and the branch lines
 * selecting between the three TAILQ_INSERT_* calls are elided from
 * this view -- only one of the three executes per call.
 */
npc_insert_into_flow_list(struct npc *npc, struct npc_prio_flow_entry *entry)
	struct npc_prio_flow_list_head *list;
	struct npc_prio_flow_entry *curr;

	list = &npc->prio_flow_list[entry->flow->priority];
	curr = TAILQ_FIRST(list);

	if (entry->flow->mcam_id > curr->flow->mcam_id)
		curr = TAILQ_NEXT(curr, next);
	TAILQ_INSERT_BEFORE(curr, entry, next);
	TAILQ_INSERT_TAIL(list, entry, next);
	TAILQ_INSERT_HEAD(list, entry, next);
/*
 * Request one MCAM entry from the AF via the NPC_MCAM_ALLOC_ENTRY
 * mailbox, allocated at @prio relative to @ref_entry, and copy the
 * kernel's response into @rsp_local for the caller.  Returns the
 * mailbox status.
 * NOTE(review): the 'ref_entry' parameter line, 'rc' declaration,
 * NULL/entry-count checks on req/rsp_cmd and the final return are
 * elided from this view.  'rsp' appears to be used only for the
 * sizeof in the copy below (unevaluated, so safe) -- confirm.
 */
npc_allocate_mcam_entry(struct mbox *mbox, int prio,
			struct npc_mcam_alloc_entry_rsp *rsp_local,
	struct npc_mcam_alloc_entry_rsp *rsp_cmd;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;

	req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);

	req->priority = prio;
	req->ref_entry = ref_entry;

	rc = mbox_process_msg(mbox, (void *)&rsp_cmd);

	mbox_memcpy(rsp_local, rsp_cmd, sizeof(*rsp));
/*
 * Pick a reference MCAM entry (and allocation priority relative to
 * it) for placing @flow's new entry.
 *
 * For NPC_MCAM_LOWER_PRIO: scan from the flow's level downward
 * through numerically lower (higher-priority) levels and use the
 * head (lowest mcam_id) of the first populated list.  For
 * NPC_MCAM_HIGHER_PRIO: scan upward through numerically higher
 * levels and use the tail (highest mcam_id).  When no populated
 * level is found, fall through to NPC_MCAM_ANY_PRIO.
 * NOTE(review): the head/tail NULL checks guarding the
 * assignments, the returns after a hit, the prio_idx
 * decrement/increment lines, '*ref_entry' default assignment and
 * closing braces are elided from this view.
 */
npc_find_mcam_ref_entry(struct roc_npc_flow *flow, struct npc *npc, int *prio,
			int *ref_entry, int dir)
	struct npc_prio_flow_entry *head, *tail;
	struct npc_prio_flow_list_head *list;
	int prio_idx = flow->priority;

	if (dir == NPC_MCAM_LOWER_PRIO) {
		while (prio_idx >= 0) {
			list = &npc->prio_flow_list[prio_idx];
			head = TAILQ_FIRST(list);
			*prio = NPC_MCAM_LOWER_PRIO;
			*ref_entry = head->flow->mcam_id;
	} else if (dir == NPC_MCAM_HIGHER_PRIO) {
		prio_idx = flow->priority;
		while (prio_idx <= npc->flow_max_priority - 1) {
			list = &npc->prio_flow_list[prio_idx];
			tail = TAILQ_LAST(list, npc_prio_flow_list_head);
			*prio = NPC_MCAM_HIGHER_PRIO;
			*ref_entry = tail->flow->mcam_id;

	*prio = NPC_MCAM_ANY_PRIO;
/*
 * Allocate an MCAM entry for @flow relative to a reference entry
 * chosen by npc_find_mcam_ref_entry().  First attempts allocation
 * with NPC_MCAM_LOWER_PRIO; on failure, retries once with
 * NPC_MCAM_HIGHER_PRIO before giving up.
 * NOTE(review): the 'npc'/'rsp_local' parameter line, the retry
 * loop/goto structure, the log-call line whose string continuation
 * is visible below, the 'retry_done = true' assignment, the failed
 * branch body and the returns are elided from this view.
 */
npc_alloc_mcam_by_ref_entry(struct mbox *mbox, struct roc_npc_flow *flow,
	int prio, ref_entry = 0, rc = 0, dir = NPC_MCAM_LOWER_PRIO;
	bool retry_done = false;

	npc_find_mcam_ref_entry(flow, npc, &prio, &ref_entry, dir);
	rc = npc_allocate_mcam_entry(mbox, prio, rsp_local, ref_entry);
	if (rc && !retry_done) {
		"npc: Failed to allocate lower priority entry. Retrying for higher priority");

		dir = NPC_MCAM_HIGHER_PRIO;
	} else if (rc && retry_done) {
/*
 * Obtain a free MCAM entry for @flow: allocate one relative to a
 * reference entry, sort it into the user-requested priority
 * position, record the flow in the priority flow list and return
 * the final entry index (error-path returns are elided).
 * NOTE(review): the 'npc' parameter line, 'rc' declaration, the
 * error checks after each call (including freeing new_entry when
 * sorting fails), argument continuations and closing braces are
 * elided from this view.
 */
npc_get_free_mcam_entry(struct mbox *mbox, struct roc_npc_flow *flow,
	struct npc_mcam_alloc_entry_rsp rsp_local;
	struct npc_prio_flow_entry *new_entry;

	rc = npc_alloc_mcam_by_ref_entry(mbox, flow, npc, &rsp_local);

	new_entry = plt_zmalloc(sizeof(*new_entry), 0);

	new_entry->flow = flow;

	plt_npc_dbg("kernel allocated MCAM entry %d", rsp_local.entry);

	/* Move the allocated entry to the user-requested position. */
	rc = npc_sort_mcams_by_user_prio_level(mbox, new_entry, npc,

	plt_npc_dbg("allocated MCAM entry after sorting %d", rsp_local.entry);
	flow->mcam_id = rsp_local.entry;
	npc_insert_into_flow_list(npc, new_entry);

	return rsp_local.entry;
691 npc_delete_prio_list_entry(struct npc *npc, struct roc_npc_flow *flow)
693 struct npc_prio_flow_list_head *list;
694 struct npc_prio_flow_entry *curr;
696 list = &npc->prio_flow_list[flow->priority];
697 curr = TAILQ_FIRST(list);
703 if (flow->mcam_id == curr->flow->mcam_id) {
704 TAILQ_REMOVE(list, curr, next);
708 curr = TAILQ_NEXT(curr, next);