1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #include "otx2_ethdev.h"
9 otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id)
11 struct npc_mcam_oper_counter_req *req;
14 req = otx2_mbox_alloc_msg_npc_mcam_free_counter(mbox);
16 otx2_mbox_msg_send(mbox, 0);
17 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
23 otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id,
26 struct npc_mcam_oper_counter_req *req;
27 struct npc_mcam_oper_counter_rsp *rsp;
30 req = otx2_mbox_alloc_msg_npc_mcam_counter_stats(mbox);
32 otx2_mbox_msg_send(mbox, 0);
33 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
40 otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id)
42 struct npc_mcam_oper_counter_req *req;
45 req = otx2_mbox_alloc_msg_npc_mcam_clear_counter(mbox);
47 otx2_mbox_msg_send(mbox, 0);
48 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
54 otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry)
56 struct npc_mcam_free_entry_req *req;
59 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
61 otx2_mbox_msg_send(mbox, 0);
62 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
68 otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox)
70 struct npc_mcam_free_entry_req *req;
73 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
75 otx2_mbox_msg_send(mbox, 0);
76 rc = otx2_mbox_get_rsp(mbox, 0, NULL);
/* Copy 'len' bytes from 'data' into 'ptr' in reversed byte order.
 *
 * The MCAM is programmed with the destination address at the MSB, so the
 * network-order extract data must be byte-reversed before being written
 * into the mcam_data/mcam_mask arrays.
 *
 * @param ptr   Destination buffer (at least len bytes).
 * @param data  Source bytes in original (network) order.
 * @param len   Number of bytes to reverse-copy; 0 is a no-op.
 */
static void
flow_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
{
	int idx;

	for (idx = 0; idx < len; idx++)
		ptr[idx] = data[len - 1 - idx];
}
/* Validate that 'len' bytes fit into a buffer of 'size' bytes.
 *
 * @param size  Capacity of the destination region.
 * @param len   Requested copy length.
 * @return len when len <= size, -1 when the copy would overflow.
 */
static int
flow_check_copysz(size_t size, size_t len)
{
	if (len <= size)
		return len;

	return -1;
}
/* Test whether the first 'len' bytes of 'mem' are all zero.
 *
 * @param mem  Region to inspect (must be valid for len bytes).
 * @param len  Number of bytes to check; len == 0 trivially returns 1.
 * @return 1 (true) when every byte is zero, 0 (false) otherwise.
 */
static int
flow_mem_is_zero(const void *mem, int len)
{
	const char *m = mem;
	int i;

	for (i = 0; i < len; i++) {
		if (m[i] != 0)
			return 0;
	}

	return 1;
}
112 otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst,
113 struct otx2_flow_item_info *info, int lid, int lt)
115 struct npc_xtract_info *xinfo;
116 char *hw_mask = info->hw_mask;
121 intf = pst->flow->nix_intf;
122 xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
123 memset(hw_mask, 0, info->len);
125 for (i = 0; i < NPC_MAX_LD; i++) {
126 if (xinfo[i].hdr_off < info->hw_hdr_len)
129 max_off = xinfo[i].hdr_off + xinfo[i].len - info->hw_hdr_len;
131 if (xinfo[i].enable == 0)
134 if (max_off > info->len)
137 offset = xinfo[i].hdr_off - info->hw_hdr_len;
138 for (j = offset; j < max_off; j++)
/* Copy the matched item's spec/mask into the MCAM match key at the key
 * offsets given by the extraction profile, byte-reversed for HW layout.
 * NOTE(review): this block is truncated in the source (several original
 * lines are missing); comments below describe only what the visible
 * statements establish.
 */
144 otx2_flow_update_parse_state(struct otx2_parse_state *pst,
145 struct otx2_flow_item_info *info, int lid, int lt,
/* Scratch buffers for the byte-reversed spec and mask */
148 uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
149 uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
150 struct npc_lid_lt_xtract_info *xinfo;
155 otx2_npc_dbg("Parse state function info mask total %s",
156 (const uint8_t *)info->mask);
/* Record that this layer id has been parsed and store its flags */
158 pst->layer_mask |= lid;
160 pst->flags[lid] = flags;
/* Fetch the extraction profile for (interface, lid, lt) */
162 intf = pst->flow->nix_intf;
163 xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
164 otx2_npc_dbg("Is_terminating = %d", xinfo->is_terminating);
165 if (xinfo->is_terminating)
168 /* Need to check if flags are supported but in latest
169 * KPU profile, flags are used as enumeration! No way,
170 * it can be validated unless MBOX is changed to return
171 * set of valid values out of 2**8 possible values.
173 if (info->spec == NULL) { /* Nothing to match */
174 otx2_npc_dbg("Info spec NULL");
178 /* Copy spec and mask into mcam match string, mask.
179 * Since both RTE FLOW and OTX2 MCAM use network-endianness
180 * for data, we are saved from nasty conversions.
/* One pass per NPC extractor (LD) configured for this layer */
182 for (i = 0; i < NPC_MAX_LD; i++) {
183 struct npc_xtract_info *x;
186 x = &xinfo->xtract[i];
188 hdr_off = x->hdr_off;
/* Skip extractors that begin before this header's data */
190 if (hdr_off < info->hw_hdr_len)
196 otx2_npc_dbg("x->hdr_off = %d, len = %d, info->len = %d,"
197 "x->key_off = %d", x->hdr_off, len, info->len,
/* Rebase the offset relative to the start of the item data */
200 hdr_off -= info->hw_hdr_len;
/* Clamp the copy length to the item's declared length */
202 if (hdr_off + len > info->len)
203 len = info->len - hdr_off;
205 /* Check for over-write of previous layer */
206 if (!flow_mem_is_zero(pst->mcam_mask + x->key_off,
208 /* Cannot support this data match */
209 rte_flow_error_set(pst->error, ENOTSUP,
210 RTE_FLOW_ERROR_TYPE_ITEM,
212 "Extraction unsupported");
/* Bound the copy by the remaining MCAM key width */
216 len = flow_check_copysz((OTX2_MAX_MCAM_WIDTH_DWORDS * 8)
220 rte_flow_error_set(pst->error, ENOTSUP,
221 RTE_FLOW_ERROR_TYPE_ITEM,
227 /* Need to reverse complete structure so that dest addr is at
228 * MSB so as to program the MCAM using mcam_data & mcam_mask
231 flow_prep_mcam_ldata(int_info,
232 (const uint8_t *)info->spec + hdr_off,
234 flow_prep_mcam_ldata(int_info_mask,
235 (const uint8_t *)info->mask + hdr_off,
/* Debug dump of the raw spec bytes */
238 otx2_npc_dbg("Spec: ");
239 for (k = 0; k < info->len; k++)
240 otx2_npc_dbg("0x%.2x ",
241 ((const uint8_t *)info->spec)[k]);
243 otx2_npc_dbg("Int_info: ");
244 for (k = 0; k < info->len; k++)
245 otx2_npc_dbg("0x%.2x ", int_info[k]);
/* Commit the reversed mask and data into the MCAM key at key_off */
247 memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
248 memcpy(pst->mcam_data + x->key_off, int_info, len);
250 otx2_npc_dbg("Parse state mcam data & mask");
251 for (idx = 0; idx < len ; idx++)
252 otx2_npc_dbg("data[%d]: 0x%x, mask[%d]: 0x%x", idx,
253 *(pst->mcam_data + idx + x->key_off), idx,
254 *(pst->mcam_mask + idx + x->key_off));
258 /* Next pattern to parse by subsequent layers */
/* Check that (spec, last) describes a range the HW can match.
 *
 * Non-contiguous ranges are unsupported, so for every byte with a
 * non-zero 'last' value the masked spec and masked last must agree.
 *
 * @param spec  Match value bytes.
 * @param last  Upper-range bytes; a zero byte is treated as "don't care".
 * @param mask  Match mask bytes.
 * @param len   Number of bytes to compare.
 * @return 1 (true) when the range is representable, 0 (false) otherwise.
 */
static int
flow_range_is_valid(const char *spec, const char *last, const char *mask,
		    int len)
{
	/* Mask must be zero or equal to spec as we do not support
	 * non-contiguous ranges.
	 */
	while (len--) {
		if (last[len] &&
		    (spec[len] & mask[len]) != (last[len] & mask[len]))
			return 0; /* False */
	}

	return 1; /* True */
}
280 flow_mask_is_supported(const char *mask, const char *hw_mask, int len)
283 * If no hw_mask, assume nothing is supported.
287 return flow_mem_is_zero(mask, len);
290 if ((mask[len] | hw_mask[len]) != hw_mask[len])
291 return 0; /* False */
297 otx2_flow_parse_item_basic(const struct rte_flow_item *item,
298 struct otx2_flow_item_info *info,
299 struct rte_flow_error *error)
301 /* Item must not be NULL */
303 rte_flow_error_set(error, EINVAL,
304 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
308 /* If spec is NULL, both mask and last must be NULL, this
309 * makes it to match ANY value (eq to mask = 0).
310 * Setting either mask or last without spec is an error
312 if (item->spec == NULL) {
313 if (item->last == NULL && item->mask == NULL) {
317 rte_flow_error_set(error, EINVAL,
318 RTE_FLOW_ERROR_TYPE_ITEM, item,
319 "mask or last set without spec");
323 /* We have valid spec */
324 info->spec = item->spec;
326 /* If mask is not set, use default mask, err if default mask is
329 if (item->mask == NULL) {
330 otx2_npc_dbg("Item mask null, using default mask");
331 if (info->def_mask == NULL) {
332 rte_flow_error_set(error, EINVAL,
333 RTE_FLOW_ERROR_TYPE_ITEM, item,
334 "No mask or default mask given");
337 info->mask = info->def_mask;
339 info->mask = item->mask;
342 /* mask specified must be subset of hw supported mask
343 * mask | hw_mask == hw_mask
345 if (!flow_mask_is_supported(info->mask, info->hw_mask, info->len)) {
346 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
347 item, "Unsupported field in the mask");
351 /* Now we have spec and mask. OTX2 does not support non-contiguous
352 * range. We should have either:
353 * - spec & mask == last & mask or,
357 if (item->last != NULL && !flow_mem_is_zero(item->last, info->len)) {
358 if (!flow_range_is_valid(item->spec, item->last, info->mask,
360 rte_flow_error_set(error, EINVAL,
361 RTE_FLOW_ERROR_TYPE_ITEM, item,
362 "Unsupported range for match");
371 otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask)
373 uint64_t cdata[2] = {0ULL, 0ULL}, nibble;
376 for (i = 0; i < NPC_MAX_KEY_NIBBLES; i++) {
377 if (nibble_mask & (1 << i)) {
378 nibble = (data[i / 16] >> ((i & 0xf) * 4)) & 0xf;
379 cdata[j / 16] |= (nibble << ((j & 0xf) * 4));
/* Return the index (0-63) of the least-significant set bit of 'slab'
 * via binary search on halves/quarters/... of the word.
 *
 * @param slab  Bit slab to scan; must be non-zero (an all-zero slab
 *              returns 63, which is indistinguishable from bit 63 set).
 * @return zero-based position of the lowest set bit.
 */
static int
flow_first_set_bit(uint64_t slab)
{
	int num = 0;

	if ((slab & 0xffffffff) == 0) {
		num += 32;
		slab >>= 32;
	}
	if ((slab & 0xffff) == 0) {
		num += 16;
		slab >>= 16;
	}
	if ((slab & 0xff) == 0) {
		num += 8;
		slab >>= 8;
	}
	if ((slab & 0xf) == 0) {
		num += 4;
		slab >>= 4;
	}
	if ((slab & 0x3) == 0) {
		num += 2;
		slab >>= 2;
	}
	if ((slab & 0x1) == 0)
		num += 1;

	return num;
}
420 flow_shift_lv_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
421 struct otx2_npc_flow_info *flow_info,
422 uint32_t old_ent, uint32_t new_ent)
424 struct npc_mcam_shift_entry_req *req;
425 struct npc_mcam_shift_entry_rsp *rsp;
426 struct otx2_flow_list *list;
427 struct rte_flow *flow_iter;
430 otx2_npc_dbg("Old ent:%u new ent:%u priority:%u", old_ent, new_ent,
433 list = &flow_info->flow_list[flow->priority];
435 /* Old entry is disabled & it's contents are moved to new_entry,
436 * new entry is enabled finally.
438 req = otx2_mbox_alloc_msg_npc_mcam_shift_entry(mbox);
439 req->curr_entry[0] = old_ent;
440 req->new_entry[0] = new_ent;
441 req->shift_count = 1;
443 otx2_mbox_msg_send(mbox, 0);
444 rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
448 /* Remove old node from list */
449 TAILQ_FOREACH(flow_iter, list, next) {
450 if (flow_iter->mcam_id == old_ent)
451 TAILQ_REMOVE(list, flow_iter, next);
454 /* Insert node with new mcam id at right place */
455 TAILQ_FOREACH(flow_iter, list, next) {
456 if (flow_iter->mcam_id > new_ent)
457 TAILQ_INSERT_BEFORE(flow_iter, flow, next);
462 /* Exchange all required entries with a given priority level */
/* NOTE(review): this block is truncated in the source (many original
 * lines are missing); comments annotate only what the visible lines
 * show.  It walks the free/live entry bitmaps of prio_lvl in the
 * direction 'dir' and swaps existing entries with the freshly
 * allocated ones described by 'rsp'.
 */
464 flow_shift_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
465 struct otx2_npc_flow_info *flow_info,
466 struct npc_mcam_alloc_entry_rsp *rsp, int dir, int prio_lvl)
/* Forward and reverse bitmaps for both free and live entries */
468 struct rte_bitmap *fr_bmp, *fr_bmp_rev, *lv_bmp, *lv_bmp_rev, *bmp;
469 uint32_t e_fr = 0, e_lv = 0, e, e_id = 0, mcam_entries;
470 uint64_t fr_bit_pos = 0, lv_bit_pos = 0, bit_pos = 0;
471 /* Bit position within the slab */
472 uint32_t sl_fr_bit_off = 0, sl_lv_bit_off = 0;
473 /* Overall bit position of the start of slab */
474 /* free & live entry index */
475 int rc_fr = 0, rc_lv = 0, rc = 0, idx = 0;
476 struct otx2_mcam_ents_info *ent_info;
477 /* free & live bitmap slab */
478 uint64_t sl_fr = 0, sl_lv = 0, *sl;
/* Select the bitmaps and entry-id bookkeeping for this priority level */
480 fr_bmp = flow_info->free_entries[prio_lvl];
481 fr_bmp_rev = flow_info->free_entries_rev[prio_lvl];
482 lv_bmp = flow_info->live_entries[prio_lvl];
483 lv_bmp_rev = flow_info->live_entries_rev[prio_lvl];
484 ent_info = &flow_info->flow_entry_info[prio_lvl];
485 mcam_entries = flow_info->mcam_entries;
488 /* New entries allocated are always contiguous, but older entries
489 * already in free/live bitmap can be non-contiguous: so return
490 * shifted entries should be in non-contiguous format.
492 while (idx <= rsp->count) {
/* Refill both slabs only when the previous ones are exhausted */
493 if (!sl_fr && !sl_lv) {
494 /* Lower index elements to be exchanged */
496 rc_fr = rte_bitmap_scan(fr_bmp, &e_fr, &sl_fr);
497 rc_lv = rte_bitmap_scan(lv_bmp, &e_lv, &sl_lv);
498 otx2_npc_dbg("Fwd slab rc fr %u rc lv %u "
499 "e_fr %u e_lv %u", rc_fr, rc_lv,
/* Opposite direction: scan the reversed bitmaps instead */
502 rc_fr = rte_bitmap_scan(fr_bmp_rev,
505 rc_lv = rte_bitmap_scan(lv_bmp_rev,
509 otx2_npc_dbg("Rev slab rc fr %u rc lv %u "
510 "e_fr %u e_lv %u", rc_fr, rc_lv,
/* Absolute index of the next candidate free entry */
516 fr_bit_pos = flow_first_set_bit(sl_fr);
517 e_fr = sl_fr_bit_off + fr_bit_pos;
518 otx2_npc_dbg("Fr_bit_pos 0x%" PRIx64, fr_bit_pos);
/* Absolute index of the next candidate live entry */
524 lv_bit_pos = flow_first_set_bit(sl_lv);
525 e_lv = sl_lv_bit_off + lv_bit_pos;
526 otx2_npc_dbg("Lv_bit_pos 0x%" PRIx64, lv_bit_pos);
531 /* First entry is from free_bmap */
536 bit_pos = fr_bit_pos;
/* Reversed bitmaps store mirrored indexes; map back to real id */
538 e_id = mcam_entries - e - 1;
541 otx2_npc_dbg("Fr e %u e_id %u", e, e_id);
546 bit_pos = lv_bit_pos;
548 e_id = mcam_entries - e - 1;
552 otx2_npc_dbg("Lv e %u e_id %u", e, e_id);
553 if (idx < rsp->count)
/* Live entries must be shifted in HW, not just re-flagged */
555 flow_shift_lv_ent(mbox, flow,
/* Move the chosen entry's bit onto the newly allocated index */
560 rte_bitmap_clear(bmp, e);
561 rte_bitmap_set(bmp, rsp->entry + idx);
562 /* Update entry list, use non-contiguous
565 rsp->entry_list[idx] = e_id;
/* Consume this bit from the local slab copy */
566 *sl &= ~(1 << bit_pos);
568 /* Update min & max entry identifiers in current
572 ent_info->max_id = rsp->entry + idx;
573 ent_info->min_id = e_id;
575 ent_info->max_id = e_id;
576 ent_info->min_id = rsp->entry;
584 /* Validate if newly allocated entries lie in the correct priority zone
585 * since NPC_MCAM_LOWER_PRIO & NPC_MCAM_HIGHER_PRIO don't ensure zone accuracy.
586 * If not properly aligned, shift entries to do so
/* NOTE(review): this block is truncated in the source and its closing
 * lines continue beyond the visible range; comments annotate only the
 * visible statements.
 */
589 flow_validate_and_shift_prio_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
590 struct otx2_npc_flow_info *flow_info,
591 struct npc_mcam_alloc_entry_rsp *rsp,
594 int prio_idx = 0, rc = 0, needs_shift = 0, idx, prio = flow->priority;
595 struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
/* Walk direction through priority zones depends on requested priority */
596 int dir = (req_prio == NPC_MCAM_HIGHER_PRIO) ? 1 : -1;
597 uint32_t tot_ent = 0;
599 otx2_npc_dbg("Dir %d, priority = %d", dir, prio);
/* For downward walks start from the lowest-priority zone */
602 prio_idx = flow_info->flow_max_priority - 1;
604 /* Only live entries needs to be shifted, free entries can just be
605 * moved by bits manipulation.
608 /* For dir = -1(NPC_MCAM_LOWER_PRIO), when shifting,
609 * NPC_MAX_PREALLOC_ENT are exchanged with adjoining higher priority
610 * level entries(lower indexes).
612 * For dir = +1(NPC_MCAM_HIGHER_PRIO), during shift,
613 * NPC_MAX_PREALLOC_ENT are exchanged with adjoining lower priority
614 * level entries(higher indexes) with highest indexes.
/* Empty zones (no free and no live entries) never require a shift */
617 tot_ent = info[prio_idx].free_ent + info[prio_idx].live_ent;
/* Downward: new entries must not sit above this zone's max id */
619 if (dir < 0 && prio_idx != prio &&
620 rsp->entry > info[prio_idx].max_id && tot_ent) {
621 otx2_npc_dbg("Rsp entry %u prio idx %u "
622 "max id %u", rsp->entry, prio_idx,
623 info[prio_idx].max_id);
/* Upward: new entries must not sit below this zone's min id */
626 } else if ((dir > 0) && (prio_idx != prio) &&
627 (rsp->entry < info[prio_idx].min_id) && tot_ent) {
628 otx2_npc_dbg("Rsp entry %u prio idx %u "
629 "min id %u", rsp->entry, prio_idx,
630 info[prio_idx].min_id);
634 otx2_npc_dbg("Needs_shift = %d", needs_shift);
/* Misaligned: exchange entries with this zone; otherwise the new
 * allocation is already well-placed and the list is contiguous.
 */
637 rc = flow_shift_ent(mbox, flow, flow_info, rsp, dir,
640 for (idx = 0; idx < rsp->count; idx++)
641 rsp->entry_list[idx] = rsp->entry + idx;
/* Advance zone-by-zone toward the flow's own priority level */
643 } while ((prio_idx != prio) && (prio_idx += dir));