1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Allocate a single NPC MCAM counter via the AF mailbox; the id of the
 * first counter in the response list is returned through *ctr.
 * NOTE(review): lines are elided in this view (req NULL check, rc error
 * handling, return value) — confirm against the full source.
 */
8 npc_mcam_alloc_counter(struct npc *npc, uint16_t *ctr)
10 struct npc_mcam_alloc_counter_req *req;
11 struct npc_mcam_alloc_counter_rsp *rsp;
12 struct mbox *mbox = npc->mbox;
15 req = mbox_alloc_msg_npc_mcam_alloc_counter(mbox);
19 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Only one counter requested; take the first entry of the list */
22 *ctr = rsp->cntr_list[0];
/* Release a previously allocated MCAM counter (ctr_id) back to the AF.
 * NOTE(review): the line setting req->cntr (and the req NULL check) is
 * elided in this view.
 */
27 npc_mcam_free_counter(struct npc *npc, uint16_t ctr_id)
29 struct npc_mcam_oper_counter_req *req;
30 struct mbox *mbox = npc->mbox;
33 req = mbox_alloc_msg_npc_mcam_free_counter(mbox);
/* Propagate the mailbox completion status to the caller */
37 return mbox_process(mbox);
/* Read the hit statistics of MCAM counter ctr_id into *count.
 * NOTE(review): the lines filling req->cntr, checking rc and copying the
 * stat from rsp into *count are elided in this view — verify in full source.
 */
41 npc_mcam_read_counter(struct npc *npc, uint32_t ctr_id, uint64_t *count)
43 struct npc_mcam_oper_counter_req *req;
44 struct npc_mcam_oper_counter_rsp *rsp;
45 struct mbox *mbox = npc->mbox;
48 req = mbox_alloc_msg_npc_mcam_counter_stats(mbox);
52 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Reset MCAM counter ctr_id to zero via the AF mailbox.
 * NOTE(review): req->cntr assignment and NULL check elided in this view.
 */
60 npc_mcam_clear_counter(struct npc *npc, uint32_t ctr_id)
62 struct npc_mcam_oper_counter_req *req;
63 struct mbox *mbox = npc->mbox;
66 req = mbox_alloc_msg_npc_mcam_clear_counter(mbox);
/* Propagate the mailbox completion status to the caller */
70 return mbox_process(mbox);
/* Free one MCAM entry (index 'entry') back to the AF.
 * NOTE(review): req->entry assignment elided in this view.
 */
74 npc_mcam_free_entry(struct npc *npc, uint32_t entry)
76 struct npc_mcam_free_entry_req *req;
77 struct mbox *mbox = npc->mbox;
80 req = mbox_alloc_msg_npc_mcam_free_entry(mbox);
/* Propagate the mailbox completion status to the caller */
84 return mbox_process(mbox);
/* Free every MCAM entry owned by this PF/VF. Uses the same free-entry
 * request as npc_mcam_free_entry(); presumably the elided lines set the
 * request's "free all" flag — TODO confirm against full source.
 */
88 npc_mcam_free_all_entries(struct npc *npc)
90 struct npc_mcam_free_entry_req *req;
91 struct mbox *mbox = npc->mbox;
94 req = mbox_alloc_msg_npc_mcam_free_entry(mbox);
/* Propagate the mailbox completion status to the caller */
98 return mbox_process(mbox);
/* Compute the supported key length, in bits, from a nibble-enable mask.
 * Each set bit in supp_mask enables one 4-bit nibble of the MCAM search
 * key; the visible line is the Kernighan clear-lowest-set-bit step of a
 * popcount loop (loop header and nib_count increment elided in this view).
 */
102 npc_supp_key_len(uint32_t supp_mask)
108 supp_mask &= (supp_mask - 1);
/* nibbles * 4 = key length in bits */
110 return nib_count * 4;
114 * Returns true if any LDATA bits are extracted for specific LID+LTYPE.
116 * No LFLAG extraction is taken into account.
/* Scan every LD slot of the RX extraction profile for this LID+LTYPE and
 * report whether at least one slot is enabled with a non-zero length.
 * NOTE(review): the returns are elided in this view — presumably
 * "return true" inside the loop and "return false" after it.
 */
119 npc_lid_lt_in_kex(struct npc *npc, uint8_t lid, uint8_t lt)
121 struct npc_xtract_info *x_info;
124 for (i = 0; i < NPC_MAX_LD; i++) {
125 x_info = &npc->prx_dxcfg[NIX_INTF_RX][lid][lt].xtract[i];
126 /* Check for LDATA */
127 if (x_info->enable && x_info->len > 0)
/* Mark in 'bmap' every header BIT that the KEX profile extracts for the
 * given LID/LTYPE/LD slot: first the LDATA byte range (hdr_off..hdr_off+len,
 * converted from bytes to bits via *8), then — if flags extraction is
 * enabled and the LD-flags config targets this LID — each enabled LFLAG
 * extractor's byte range as well.
 * NOTE(review): declarations of p/q and several early returns are elided
 * in this view.
 */
135 npc_construct_ldata_mask(struct npc *npc, struct plt_bitmap *bmap, uint8_t lid,
136 uint8_t lt, uint8_t ld)
138 struct npc_xtract_info *x_info, *infoflag;
145 x_info = &(*p)[0][lid][lt].xtract[ld];
/* Nothing extracted for this slot */
147 if (x_info->enable == 0)
/* Convert byte offset/length to bit positions */
150 hdr_off = x_info->hdr_off * 8;
151 keylen = x_info->len * 8;
152 for (i = hdr_off; i < (hdr_off + keylen); i++)
153 plt_bitmap_set(bmap, i);
155 if (x_info->flags_enable == 0)
/* LD flags config applies to a single LID; skip if it is not ours */
158 if ((npc->prx_lfcfg[0].i & 0x7) != lid)
162 for (j = 0; j < NPC_MAX_LFL; j++) {
163 infoflag = &(*q)[0][ld][j].xtract[0];
164 if (infoflag->enable) {
165 hdr_off = infoflag->hdr_off * 8;
166 keylen = infoflag->len * 8;
167 for (i = hdr_off; i < (hdr_off + keylen); i++)
168 plt_bitmap_set(bmap, i);
174 * Check if given LID+LTYPE combination is present in KEX
176 * len is non-zero, this function will return true if KEX extracts len bytes
177 * at given offset. Otherwise it'll return true if any bytes are extracted
178 * specifically for given LID+LTYPE combination (meaning not LFLAG based).
179 * The second case increases flexibility for custom frames whose extracted
180 * bits may change depending on KEX profile loaded.
182 * @param npc NPC context structure
183 * @param lid Layer ID to check for
184 * @param lt Layer Type to check for
185 * @param offset offset into the layer header to match
186 * @param len length of the match
/* NOTE(review): error-path cleanup (freeing mem / bitmap on failure) and
 * the final return are elided in this view — confirm no leak in full source.
 */
189 npc_is_kex_enabled(struct npc *npc, uint8_t lid, uint8_t lt, int offset,
192 struct plt_bitmap *bmap;
/* len == 0: only ask whether anything at all is extracted for LID+LTYPE */
198 return npc_lid_lt_in_kex(npc, lid, lt);
/* 300-byte header window, tracked bit-per-header-bit */
200 bmap_sz = plt_bitmap_get_memory_footprint(300 * 8);
201 mem = plt_zmalloc(bmap_sz, 0);
203 plt_err("mem alloc failed");
206 bmap = plt_bitmap_init(300 * 8, mem, bmap_sz);
208 plt_err("mem alloc failed");
/* Accumulate extracted-bit coverage from both LD slots */
213 npc_construct_ldata_mask(npc, bmap, lid, lt, 0);
214 npc_construct_ldata_mask(npc, bmap, lid, lt, 1);
/* Every bit of [offset, offset+len) must be covered for a match */
216 for (i = offset; i < (offset + len); i++) {
217 if (plt_bitmap_get(bmap, i) != 0x1) {
/* Build the KEX capability bit-set: for each flow item the driver cares
 * about, probe whether the loaded KEX profile extracts the corresponding
 * header bytes (offsets/lengths passed in BITS, hence the "* 8").
 * Returns the capabilities as a packed uint64_t.
 */
228 npc_get_kex_capability(struct npc *npc)
230 npc_kex_cap_terms_t kex_cap;
232 memset(&kex_cap, 0, sizeof(kex_cap));
234 /* Ethtype: Offset 12B, len 2B */
235 kex_cap.bit.ethtype_0 = npc_is_kex_enabled(
236 npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
237 /* QINQ VLAN Ethtype: offset 8B, len 2B */
238 kex_cap.bit.ethtype_x = npc_is_kex_enabled(
239 npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
240 /* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */
241 kex_cap.bit.vlan_id_0 = npc_is_kex_enabled(
242 npc, NPC_LID_LB, NPC_LT_LB_CTAG, 2 * 8, 2 * 8);
243 /* VLAN ID0 : Inner VLAN: offset 6B, len 2B */
244 kex_cap.bit.vlan_id_x = npc_is_kex_enabled(
245 npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6 * 8, 2 * 8);
246 /* DMAC: offset 0B, len 6B */
247 kex_cap.bit.dmac = npc_is_kex_enabled(npc, NPC_LID_LA, NPC_LT_LA_ETHER,
249 /* IP proto: offset 9B, len 1B */
250 kex_cap.bit.ip_proto =
251 npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP, 9 * 8, 1 * 8);
252 /* UDP dport: offset 2B, len 2B */
253 kex_cap.bit.udp_dport = npc_is_kex_enabled(npc, NPC_LID_LD,
254 NPC_LT_LD_UDP, 2 * 8, 2 * 8);
255 /* UDP sport: offset 0B, len 2B */
256 kex_cap.bit.udp_sport = npc_is_kex_enabled(npc, NPC_LID_LD,
257 NPC_LT_LD_UDP, 0 * 8, 2 * 8);
258 /* TCP dport: offset 2B, len 2B */
259 kex_cap.bit.tcp_dport = npc_is_kex_enabled(npc, NPC_LID_LD,
260 NPC_LT_LD_TCP, 2 * 8, 2 * 8);
261 /* TCP sport: offset 0B, len 2B */
262 kex_cap.bit.tcp_sport = npc_is_kex_enabled(npc, NPC_LID_LD,
263 NPC_LT_LD_TCP, 0 * 8, 2 * 8);
264 /* IP SIP: offset 12B, len 4B */
265 kex_cap.bit.sip_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP,
267 /* IP DIP: offset 14B, len 4B */
268 kex_cap.bit.dip_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP,
270 /* IP6 SIP: offset 8B, len 16B */
271 kex_cap.bit.sip6_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP6,
273 /* IP6 DIP: offset 24B, len 16B */
274 kex_cap.bit.dip6_addr = npc_is_kex_enabled(
275 npc, NPC_LID_LC, NPC_LT_LC_IP6, 24 * 8, 16 * 8);
276 /* ESP SPI: offset 0B, len 4B */
277 kex_cap.bit.ipsec_spi = npc_is_kex_enabled(npc, NPC_LID_LE,
278 NPC_LT_LE_ESP, 0 * 8, 4 * 8);
279 /* VXLAN VNI: offset 4B, len 3B */
/* NOTE(review): comment above says offset 4B but the call passes 0 * 8 —
 * mismatch between comment and code; confirm intended offset.
 */
280 kex_cap.bit.ld_vni = npc_is_kex_enabled(npc, NPC_LID_LE,
281 NPC_LT_LE_VXLAN, 0 * 8, 3 * 8);
283 /* Custom L3 frame: varied offset and lengths */
/* len == 0 means "any extraction for this LID+LTYPE counts" */
284 kex_cap.bit.custom_l3 =
285 npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_CUSTOM0, 0, 0);
286 kex_cap.bit.custom_l3 |= (uint64_t)npc_is_kex_enabled(npc, NPC_LID_LC,
287 NPC_LT_LC_CUSTOM1, 0, 0);
288 /* SCTP sport : offset 0B, len 2B */
289 kex_cap.bit.sctp_sport = npc_is_kex_enabled(
290 npc, NPC_LID_LD, NPC_LT_LD_SCTP, 0 * 8, 2 * 8);
291 /* SCTP dport : offset 2B, len 2B */
292 kex_cap.bit.sctp_dport = npc_is_kex_enabled(
293 npc, NPC_LID_LD, NPC_LT_LD_SCTP, 2 * 8, 2 * 8);
294 /* ICMP type : offset 0B, len 1B */
295 kex_cap.bit.icmp_type = npc_is_kex_enabled(
296 npc, NPC_LID_LD, NPC_LT_LD_ICMP, 0 * 8, 1 * 8);
297 /* ICMP code : offset 1B, len 1B */
298 kex_cap.bit.icmp_code = npc_is_kex_enabled(
299 npc, NPC_LID_LD, NPC_LT_LD_ICMP, 1 * 8, 1 * 8);
300 /* ICMP id : offset 4B, len 2B */
301 kex_cap.bit.icmp_id = npc_is_kex_enabled(npc, NPC_LID_LD,
302 NPC_LT_LD_ICMP, 4 * 8, 2 * 8);
303 /* IGMP grp_addr : offset 4B, len 4B */
304 kex_cap.bit.igmp_grp_addr = npc_is_kex_enabled(
305 npc, NPC_LID_LD, NPC_LT_LD_IGMP, 4 * 8, 4 * 8);
306 /* GTPU teid : offset 4B, len 4B */
307 kex_cap.bit.gtpu_teid = npc_is_kex_enabled(
308 npc, NPC_LID_LE, NPC_LT_LE_GTPU, 4 * 8, 4 * 8);
309 return kex_cap.all_bits;
/* Bit-field layout of a KEX LD extraction config word */
312 #define BYTESM1_SHIFT 16
313 #define HDR_OFF_SHIFT 8
/* Decode one hardware KEX LD config word 'val' into xtract_info.
 * The BYTESM1 field encodes (length - 1), hence the "+ 1".
 */
315 npc_update_kex_info(struct npc_xtract_info *xtract_info, uint64_t val)
317 xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
318 xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
319 xtract_info->key_off = val & 0x3f;
320 xtract_info->enable = ((val >> 7) & 0x1);
321 xtract_info->flags_enable = ((val >> 6) & 0x1);
/* Allocate up to req_count MCAM entries with the given priority relative
 * to ref_mcam. Allocated indices are copied into alloc_entry[] and the
 * actual count into *resp_count (may be fewer than requested).
 * NOTE(review): req NULL check, rc handling and return elided in this view.
 */
325 npc_mcam_alloc_entries(struct npc *npc, int ref_mcam, int *alloc_entry,
326 int req_count, int prio, int *resp_count)
328 struct npc_mcam_alloc_entry_req *req;
329 struct npc_mcam_alloc_entry_rsp *rsp;
330 struct mbox *mbox = npc->mbox;
334 req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
338 req->count = req_count;
339 req->priority = prio;
340 req->ref_entry = ref_mcam;
342 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Copy out however many entries the AF actually granted */
345 for (i = 0; i < rsp->count; i++)
346 alloc_entry[i] = rsp->entry_list[i];
347 *resp_count = rsp->count;
/* Allocate a single MCAM entry positioned by priority relative to
 * ref_mcam, and initialize 'mcam' with the new id and the reference
 * entry's NIX interface. 'mcam' is fully zeroed before being filled.
 * NOTE(review): req->count assignment and error handling elided in view.
 */
352 npc_mcam_alloc_entry(struct npc *npc, struct roc_npc_flow *mcam,
353 struct roc_npc_flow *ref_mcam, int prio, int *resp_count)
355 struct npc_mcam_alloc_entry_req *req;
356 struct npc_mcam_alloc_entry_rsp *rsp;
357 struct mbox *mbox = npc->mbox;
360 req = mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
365 req->priority = prio;
366 req->ref_entry = ref_mcam->mcam_id;
368 rc = mbox_process_msg(mbox, (void *)&rsp);
/* Start from a clean flow object; only id and interface are inherited */
371 memset(mcam, 0, sizeof(struct roc_npc_flow));
372 mcam->mcam_id = rsp->entry;
373 mcam->nix_intf = ref_mcam->nix_intf;
374 *resp_count = rsp->count;
/* Enable or disable an MCAM entry. The two visible req assignments are
 * presumably the if/else arms selecting the ena vs dis mailbox message
 * (condition elided in this view). Caches the new state in mcam->enable.
 */
379 npc_mcam_ena_dis_entry(struct npc *npc, struct roc_npc_flow *mcam, bool enable)
381 struct npc_mcam_ena_dis_entry_req *req;
382 struct mbox *mbox = npc->mbox;
386 req = mbox_alloc_msg_npc_mcam_ena_entry(mbox);
388 req = mbox_alloc_msg_npc_mcam_dis_entry(mbox);
392 req->entry = mcam->mcam_id;
/* Cache requested state locally before the mbox round-trip completes */
393 mcam->enable = enable;
394 return mbox_process(mbox);
/* Program an MCAM entry from the cached flow state: index, interface,
 * enable flag, action, vtag action, and the full key data/mask words.
 * Returns the mailbox status.
 */
398 npc_mcam_write_entry(struct npc *npc, struct roc_npc_flow *mcam)
400 struct npc_mcam_write_entry_req *req;
401 struct mbox *mbox = npc->mbox;
402 struct mbox_msghdr *rsp;
406 req = mbox_alloc_msg_npc_mcam_write_entry(mbox);
409 req->entry = mcam->mcam_id;
410 req->intf = mcam->nix_intf;
411 req->enable_entry = mcam->enable;
412 req->entry_data.action = mcam->npc_action;
413 req->entry_data.vtag_action = mcam->vtag_action;
/* Copy every key word and its mask */
414 for (i = 0; i < NPC_MCAM_KEY_X4_WORDS; i++) {
415 req->entry_data.kw[i] = mcam->mcam_data[i];
416 req->entry_data.kw_mask[i] = mcam->mcam_mask[i];
418 return mbox_process_msg(mbox, (void *)&rsp);
/* Parse the AF's KEX config response into the npc context: per-interface
 * nibble masks and key lengths, key widths, LD-flag extraction config,
 * per-LID/LT/LD extraction config, and the two LDATA-flags words.
 * NOTE(review): declaration of 'p' and of keyw/val are elided in this view.
 */
422 npc_mcam_process_mkex_cfg(struct npc *npc, struct npc_get_kex_cfg_rsp *kex_rsp)
425 *q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
426 struct npc_xtract_info *x_info = NULL;
427 int lid, lt, ld, fl, ix;
/* Low 31 bits of keyx_cfg = supported-nibble mask per direction */
432 npc->keyx_supp_nmask[NPC_MCAM_RX] =
433 kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
434 npc->keyx_supp_nmask[NPC_MCAM_TX] =
435 kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
436 npc->keyx_len[NPC_MCAM_RX] =
437 npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
438 npc->keyx_len[NPC_MCAM_TX] =
439 npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);
/* Bits [34:32] of keyx_cfg = key width selector */
441 keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
442 npc->keyw[NPC_MCAM_RX] = keyw;
443 keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
444 npc->keyw[NPC_MCAM_TX] = keyw;
446 /* Update KEX_LD_FLAG */
447 for (ix = 0; ix < NPC_MAX_INTF; ix++) {
448 for (ld = 0; ld < NPC_MAX_LD; ld++) {
449 for (fl = 0; fl < NPC_MAX_LFL; fl++) {
450 x_info = &npc->prx_fxcfg[ix][ld][fl].xtract[0];
451 val = kex_rsp->intf_ld_flags[ix][ld][fl];
452 npc_update_kex_info(x_info, val);
457 /* Update LID, LT and LDATA cfg */
/* View the flat response buffer as a 4-D array of config words */
459 q = (volatile uint64_t(*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])(
460 &kex_rsp->intf_lid_lt_ld);
461 for (ix = 0; ix < NPC_MAX_INTF; ix++) {
462 for (lid = 0; lid < NPC_MAX_LID; lid++) {
463 for (lt = 0; lt < NPC_MAX_LT; lt++) {
464 for (ld = 0; ld < NPC_MAX_LD; ld++) {
465 x_info = &(*p)[ix][lid][lt].xtract[ld];
466 val = (*q)[ix][lid][lt][ld];
467 npc_update_kex_info(x_info, val);
472 /* Update LDATA Flags cfg */
473 npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
474 npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
/* Request the KEX profile configuration from the AF, cache the profile
 * name into npc->profile_name, and hand the response to
 * npc_mcam_process_mkex_cfg() for parsing.
 * NOTE(review): error goto/return paths elided in this view.
 */
478 npc_mcam_fetch_kex_cfg(struct npc *npc)
480 struct npc_get_kex_cfg_rsp *kex_rsp;
481 struct mbox *mbox = npc->mbox;
/* Request carries no payload; response arrives via kex_rsp */
484 mbox_alloc_msg_npc_get_kex_cfg(mbox);
485 rc = mbox_process_msg(mbox, (void *)&kex_rsp);
487 plt_err("Failed to fetch NPC KEX config");
491 mbox_memcpy((char *)npc->profile_name, kex_rsp->mkex_pfl_name,
494 npc_mcam_process_mkex_cfg(npc, kex_rsp);
/* Patch the channel match (low 12 bits of key word 0) in both the write
 * request and the cached flow data/mask. For second-pass (post-CPT) rules
 * the CPT channel base is OR'ed in; otherwise bits 10-11 are cleared so
 * the rule matches both first- and second-pass packets.
 */
501 npc_mcam_set_channel(struct roc_npc_flow *flow,
502 struct npc_mcam_write_entry_req *req, uint16_t channel,
503 uint16_t chan_mask, bool is_second_pass)
505 uint16_t chan = 0, mask = 0;
/* Wipe any previous channel bits before OR-ing the new value */
507 req->entry_data.kw[0] &= ~(GENMASK(11, 0));
508 req->entry_data.kw_mask[0] &= ~(GENMASK(11, 0));
509 flow->mcam_data[0] &= ~(GENMASK(11, 0));
510 flow->mcam_mask[0] &= ~(GENMASK(11, 0));
512 if (is_second_pass) {
513 chan = (channel | NIX_CHAN_CPT_CH_START);
514 mask = (chan_mask | NIX_CHAN_CPT_CH_START);
517 * Clear bits 10 & 11 corresponding to CPT
518 * channel. By default, rules should match
519 * both first pass packets and second pass
522 chan = (channel & NIX_CHAN_CPT_X2P_MASK);
523 mask = (chan_mask & NIX_CHAN_CPT_X2P_MASK);
/* Keep request and cached flow state in sync */
526 req->entry_data.kw[0] |= (uint64_t)chan;
527 req->entry_data.kw_mask[0] |= (uint64_t)mask;
528 flow->mcam_data[0] |= (uint64_t)chan;
529 flow->mcam_mask[0] |= (uint64_t)mask;
/* Allocate an MCAM slot for 'flow' (plus a counter if one is attached),
 * fill the write-entry request with action/vtag/key data, patch in the
 * RX channel or TX pf_func match, and program it via the mailbox.
 * On counter-attached flows, the counter is freed again if slot
 * allocation fails. NOTE(review): many lines (declarations, rc checks,
 * counter/key setup, final return) are elided in this view.
 */
533 npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow,
534 struct npc_parse_state *pst)
536 int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1);
537 struct npc_mcam_write_entry_req *req;
538 struct nix_inl_dev *inl_dev = NULL;
539 struct mbox *mbox = npc->mbox;
540 struct mbox_msghdr *rsp;
541 struct idev_cfg *idev;
542 uint16_t pf_func = 0;
550 rc = npc_mcam_alloc_counter(npc, &ctr);
555 entry = npc_get_free_mcam_entry(mbox, flow, npc);
/* Slot allocation failed: release the counter we just took */
558 npc_mcam_free_counter(npc, ctr);
559 return NPC_ERR_MCAM_ALLOC;
562 req = mbox_alloc_msg_npc_mcam_write_entry(mbox);
565 req->set_cntr = use_ctr;
569 req->intf = (flow->nix_intf == NIX_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
570 req->enable_entry = 1;
571 req->entry_data.action = flow->npc_action;
574 * Driver sets vtag action on per interface basis, not
575 * per flow basis. It is a matter of how we decide to support
576 * this pmd specific behavior. There are two ways:
577 * 1. Inherit the vtag action from the one configured
578 * for this interface. This can be read from the
579 * vtag_action configured for default mcam entry of
581 * 2. Do not support vtag action with npc_flow.
583 * Second approach is used now.
585 req->entry_data.vtag_action = flow->vtag_action;
587 for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
588 req->entry_data.kw[idx] = flow->mcam_data[idx];
589 req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
592 idev = idev_get_cfg();
594 inl_dev = idev->nix_inl_dev;
596 if (flow->nix_intf == NIX_INTF_RX) {
/* Multi-channel inline dev + UCAST_IPSEC: redirect action to inline
 * device's pf_func and match the inline device channel.
 */
597 if (inl_dev && inl_dev->is_multi_channel &&
598 (flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) {
599 pf_func = nix_inl_dev_pffunc_get();
600 req->entry_data.action &= ~(GENMASK(19, 4));
601 req->entry_data.action |= (uint64_t)pf_func << 4;
602 flow->npc_action &= ~(GENMASK(19, 4));
603 flow->npc_action |= (uint64_t)pf_func << 4;
605 npc_mcam_set_channel(flow, req, inl_dev->channel,
606 inl_dev->chan_mask, false);
607 } else if (npc->is_sdp_link) {
608 npc_mcam_set_channel(flow, req, npc->sdp_channel,
609 npc->sdp_channel_mask,
610 pst->is_second_pass_rule);
612 npc_mcam_set_channel(flow, req, npc->channel,
614 pst->is_second_pass_rule);
/* TX: match the sender's pf_func (big-endian) in key word 0 bits 32-47 */
617 uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;
619 pf_func = plt_cpu_to_be_16(pf_func);
620 req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
621 req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
623 flow->mcam_data[0] |= ((uint64_t)pf_func << 32);
624 flow->mcam_mask[0] |= ((uint64_t)0xffff << 32);
627 rc = mbox_process_msg(mbox, (void *)&rsp);
631 flow->mcam_id = entry;
/* Relax the LB layer-type match so one rule hits both CTAG and STAG_QINQ:
 * locate the LB ltype nibble in key word 0 (popcount of enabled nibbles
 * below the LB offset gives its position), clear it, then program
 * value 0b0010 with mask 0b1110 — matching 0b0010 and 0b0011.
 */
639 npc_set_vlan_ltype(struct npc_parse_state *pst)
645 __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
646 ((1ULL << NPC_LTYPE_LB_OFFSET) - 1));
/* Clear the 4-bit LB ltype field in data and mask */
649 mask = ~((0xfULL << lb_offset));
650 pst->flow->mcam_data[0] &= mask;
651 pst->flow->mcam_mask[0] &= mask;
652 /* NPC_LT_LB_CTAG: 0b0010, NPC_LT_LB_STAG_QINQ: 0b0011
653 * Set LB layertype/mask as 0b0010/0b1110 to match both.
655 val = ((uint64_t)(NPC_LT_LB_CTAG & NPC_LT_LB_STAG_QINQ)) << lb_offset;
656 pst->flow->mcam_data[0] |= val;
657 pst->flow->mcam_mask[0] |= (0xeULL << lb_offset);
/* Relax the LC layer-type match to cover both IP6 and IP6_EXT (program
 * 0b0100 with mask 0b1110), then — if the LC LFLAG value already present
 * in the key is non-zero — force its mask to 0xF for an exact match on
 * the IPv6 extension flags. Nibble positions are found by popcounting
 * the enabled-nibble mask below the respective offsets.
 */
661 npc_set_ipv6ext_ltype_mask(struct npc_parse_state *pst)
663 uint8_t lc_offset, lcflag_offset;
667 __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
668 ((1ULL << NPC_LTYPE_LC_OFFSET) - 1));
/* Clear the 4-bit LC ltype field in data and mask */
671 mask = ~((0xfULL << lc_offset));
672 pst->flow->mcam_data[0] &= mask;
673 pst->flow->mcam_mask[0] &= mask;
674 /* NPC_LT_LC_IP6: 0b0100, NPC_LT_LC_IP6_EXT: 0b0101
675 * Set LC layertype/mask as 0b0100/0b1110 to match both.
677 val = ((uint64_t)(NPC_LT_LC_IP6 & NPC_LT_LC_IP6_EXT)) << lc_offset;
678 pst->flow->mcam_data[0] |= val;
679 pst->flow->mcam_mask[0] |= (0xeULL << lc_offset);
681 /* If LC LFLAG is non-zero, set the LC LFLAG mask to 0xF. In general
682 * case flag mask is set same as the value in data. For example, to
683 * match 3 VLANs, flags have to match a range of values. But, for IPv6
684 * extended attributes matching, we need an exact match. Hence, set the
685 * mask as 0xF. This is done only if LC LFLAG value is non-zero,
686 * because for AH and ESP, LC LFLAG is zero and we don't want to match
690 __builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
691 ((1ULL << NPC_LFLAG_LC_OFFSET) - 1));
694 mask = (0xfULL << lcflag_offset);
695 val = pst->flow->mcam_data[0] & mask;
/* Non-zero LFLAG present: require exact match */
697 pst->flow->mcam_mask[0] |= mask;
/* Assemble the non-LDATA portion of the MCAM search key (layer types and
 * flags packed nibble-by-nibble per the KEX nibble-enable mask), copy it
 * into the flow's data/mask, apply the VLAN/IPv6-ext ltype relaxations,
 * OR in the VF base steering rule when applicable, and finally allocate
 * and program the entry when mcam_alloc is set.
 * NOTE(review): substantial portions (declarations, nibble-append logic,
 * 64-bit word rollover handling) are elided in this view.
 */
701 npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc)
703 struct npc_mcam_read_base_rule_rsp *base_rule_rsp;
704 /* This is non-LDATA part in search key */
705 uint64_t key_data[2] = {0ULL, 0ULL};
706 uint64_t key_mask[2] = {0ULL, 0ULL};
707 int key_len, bit = 0, index, rc = 0;
708 int intf = pst->flow->nix_intf;
709 struct mcam_entry *base_entry;
710 int off, idx, data_off = 0;
711 uint8_t lid, mask, data;
715 /* Skip till Layer A data start */
716 while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
717 if (npc->keyx_supp_nmask[intf] & (1 << bit))
722 /* Each bit represents 1 nibble */
726 for (lid = 0; lid < NPC_MAX_LID; lid++) {
728 off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
729 lt = pst->lt[lid] & 0xf;
730 flags = pst->flags[lid] & 0xff;
732 /* NPC_LAYER_KEX_S */
733 layer_info = ((npc->keyx_supp_nmask[intf] >> off) & 0x7);
/* idx 0/1 = flag nibbles (low/high), idx 2 = ltype nibble —
 * TODO(review) confirm ordering; the idx==2 arm is elided here.
 */
736 for (idx = 0; idx <= 2; idx++) {
737 if (layer_info & (1 << idx)) {
741 } else if (idx == 1) {
742 data = ((flags >> 4) & 0xf);
743 mask = ((flags >> 4) & 0xf);
745 data = (flags & 0xf);
746 mask = (flags & 0xf);
/* Nibble crosses into the second 64-bit key word */
749 if (data_off >= 64) {
754 ((uint64_t)data << data_off);
759 ((uint64_t)mask << data_off);
766 /* Copy this into mcam string */
/* Round the key length (bits) up to whole bytes */
767 key_len = (pst->npc->keyx_len[intf] + 7) / 8;
768 memcpy(pst->flow->mcam_data, key_data, key_len);
769 memcpy(pst->flow->mcam_mask, key_mask, key_len);
771 if (pst->set_vlan_ltype_mask)
772 npc_set_vlan_ltype(pst);
774 if (pst->set_ipv6ext_ltype_mask)
775 npc_set_ipv6ext_ltype_mask(pst);
/* VF RX rules must also carry the PF's base steering rule bits */
777 if (pst->is_vf && pst->flow->nix_intf == NIX_INTF_RX) {
778 (void)mbox_alloc_msg_npc_read_base_steer_rule(npc->mbox);
779 rc = mbox_process_msg(npc->mbox, (void *)&base_rule_rsp);
781 plt_err("Failed to fetch VF's base MCAM entry");
784 base_entry = &base_rule_rsp->entry_data;
785 for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
786 pst->flow->mcam_data[idx] |= base_entry->kw[idx];
787 pst->flow->mcam_mask[idx] |= base_entry->kw_mask[idx];
792 * Now we have mcam data and mask formatted as
793 * [Key_len/4 nibbles][0 or 1 nibble hole][data]
794 * hole is present if key_len is odd number of nibbles.
795 * mcam data must be split into 64 bits + 48 bits segments
796 * for each back W0, W1.
800 return npc_mcam_alloc_and_write(npc, pst->flow, pst);
/* Walk every priority flow list and re-program each flow's MCAM entry
 * with the requested enable state. NOTE(review): the comment below says
 * "Free any MCAM counters..." but the visible loop only toggles and
 * rewrites entries — comment likely copied from the free path; also the
 * aggregate rc handling/return is elided in this view.
 */
806 npc_flow_enable_all_entries(struct npc *npc, bool enable)
808 struct npc_flow_list *list;
809 struct roc_npc_flow *flow;
812 /* Free any MCAM counters and delete flow list */
813 for (idx = 0; idx < npc->flow_max_priority; idx++) {
814 list = &npc->flow_list[idx];
815 TAILQ_FOREACH(flow, list, next) {
816 flow->enable = enable;
817 rc = npc_mcam_write_entry(npc, flow);
826 npc_flow_free_all_resources(struct npc *npc)
828 struct roc_npc_flow *flow;
831 /* Free all MCAM entries allocated */
832 rc = npc_mcam_free_all_entries(npc);
834 /* Free any MCAM counters and delete flow list */
835 for (idx = 0; idx < npc->flow_max_priority; idx++) {
836 while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
837 npc_rss_group_free(npc, flow);
838 if (flow->ctr_id != NPC_COUNTER_NONE)
839 rc |= npc_mcam_free_counter(npc, flow->ctr_id);
841 npc_delete_prio_list_entry(npc, flow);
843 TAILQ_REMOVE(&npc->flow_list[idx], flow, next);