/* Ethtype: Offset 12B, len 2B */
kex_cap.bit.ethtype_0 = npc_is_kex_enabled(
npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
- /* QINQ VLAN Ethtype: ofset 8B, len 2B */
+ /* QINQ VLAN Ethtype: offset 8B, len 2B */
kex_cap.bit.ethtype_x = npc_is_kex_enabled(
npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
/* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */
kex_cap.bit.dip_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP,
14 * 8, 4 * 8);
/* IP6 SIP: offset 8B, len 16B */
- kex_cap.bit.sip6_addr = npc_is_kex_enabled(
- npc, NPC_LID_LC, NPC_LT_LC_IP6, 8 * 8, 16 * 8);
+ kex_cap.bit.sip6_addr = npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_IP6,
+ 8 * 8, 16 * 8);
/* IP6 DIP: offset 24B, len 16B */
kex_cap.bit.dip6_addr = npc_is_kex_enabled(
npc, NPC_LID_LC, NPC_LT_LC_IP6, 24 * 8, 16 * 8);
/* Custom L3 frame: varied offset and lengths */
kex_cap.bit.custom_l3 =
npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_CUSTOM0, 0, 0);
- kex_cap.bit.custom_l3 |=
- npc_is_kex_enabled(npc, NPC_LID_LC, NPC_LT_LC_CUSTOM1, 0, 0);
+ kex_cap.bit.custom_l3 |= (uint64_t)npc_is_kex_enabled(npc, NPC_LID_LC,
+ NPC_LT_LC_CUSTOM1, 0, 0);
/* SCTP sport : offset 0B, len 2B */
kex_cap.bit.sctp_sport = npc_is_kex_enabled(
npc, NPC_LID_LD, NPC_LT_LD_SCTP, 0 * 8, 2 * 8);
return rc;
}
+/*
+ * Program the ingress channel match field (KW0 bits [11:0], per the
+ * GENMASK(11, 0) clears below) into both the MCAM write request and the
+ * flow's cached key/mask copy so the two stay consistent.
+ *
+ * is_second_pass == true : match only CPT second-pass packets by OR-ing
+ *                          NIX_CHAN_CPT_CH_START into channel and mask.
+ * is_second_pass == false: mask off the CPT channel bits with
+ *                          NIX_CHAN_CPT_X2P_MASK so the rule matches both
+ *                          first-pass and CPT second-pass packets.
+ */
+static void
+npc_mcam_set_channel(struct roc_npc_flow *flow,
+		     struct npc_mcam_write_entry_req *req, uint16_t channel,
+		     uint16_t chan_mask, bool is_second_pass)
+{
+	uint16_t chan = 0, mask = 0;
+
+	/* Drop any previously programmed channel bits before re-writing */
+	req->entry_data.kw[0] &= ~(GENMASK(11, 0));
+	req->entry_data.kw_mask[0] &= ~(GENMASK(11, 0));
+	flow->mcam_data[0] &= ~(GENMASK(11, 0));
+	flow->mcam_mask[0] &= ~(GENMASK(11, 0));
+
+	if (is_second_pass) {
+		chan = (channel | NIX_CHAN_CPT_CH_START);
+		mask = (chan_mask | NIX_CHAN_CPT_CH_START);
+	} else {
+		/*
+		 * Clear bits 10 & 11 corresponding to CPT
+		 * channel. By default, rules should match
+		 * both first pass packets and second pass
+		 * packets from CPT.
+		 */
+		chan = (channel & NIX_CHAN_CPT_X2P_MASK);
+		mask = (chan_mask & NIX_CHAN_CPT_X2P_MASK);
+	}
+
+	req->entry_data.kw[0] |= (uint64_t)chan;
+	req->entry_data.kw_mask[0] |= (uint64_t)mask;
+	flow->mcam_data[0] |= (uint64_t)chan;
+	flow->mcam_mask[0] |= (uint64_t)mask;
+}
+
int
npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow,
			 struct npc_parse_state *pst)
{
	int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1);
	struct npc_mcam_write_entry_req *req;
+	struct nix_inl_dev *inl_dev = NULL;
	struct mbox *mbox = npc->mbox;
	struct mbox_msghdr *rsp;
+	struct idev_cfg *idev;
+	uint16_t pf_func = 0;
	uint16_t ctr = ~(0);
	int rc, idx;
	int entry;
		return rc;
	}
-	entry = npc_check_preallocated_entry_cache(mbox, flow, npc);
+	/* Presumably picks a free MCAM slot for this flow's priority —
+	 * semantics of npc_get_free_mcam_entry() not visible here; confirm.
+	 */
+	entry = npc_get_free_mcam_entry(mbox, flow, npc);
	if (entry < 0) {
-		npc_mcam_free_counter(npc, ctr);
+		/* Only release the counter if one was actually allocated */
+		if (use_ctr)
+			npc_mcam_free_counter(npc, ctr);
		return NPC_ERR_MCAM_ALLOC;
	}
	 *
	 * Second approach is used now.
	 */
-	req->entry_data.vtag_action = 0ULL;
+	req->entry_data.vtag_action = flow->vtag_action;
	for (idx = 0; idx < ROC_NPC_MAX_MCAM_WIDTH_DWORDS; idx++) {
		req->entry_data.kw[idx] = flow->mcam_data[idx];
		req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
	}
+	/* Look up the shared inline-crypto device, if one was configured */
+	idev = idev_get_cfg();
+	if (idev)
+		inl_dev = idev->nix_inl_dev;
+
	if (flow->nix_intf == NIX_INTF_RX) {
-		req->entry_data.kw[0] |= (uint64_t)npc->channel;
-		req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
+		if (inl_dev && inl_dev->is_multi_channel &&
+		    (flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) {
+			/* Inline-IPsec unicast rule: rewrite the action
+			 * PF_FUNC field (bits 19:4, per the GENMASK below)
+			 * to the inline device and match its channel.
+			 */
+			pf_func = nix_inl_dev_pffunc_get();
+			req->entry_data.action &= ~(GENMASK(19, 4));
+			req->entry_data.action |= (uint64_t)pf_func << 4;
+			flow->npc_action &= ~(GENMASK(19, 4));
+			flow->npc_action |= (uint64_t)pf_func << 4;
+
+			npc_mcam_set_channel(flow, req, inl_dev->channel,
+					     inl_dev->chan_mask, false);
+		} else if (npc->is_sdp_link) {
+			npc_mcam_set_channel(flow, req, npc->sdp_channel,
+					     npc->sdp_channel_mask,
+					     pst->is_second_pass_rule);
+		} else {
+			/* Plain RX: match the PF's channel with a full
+			 * 12-bit mask.
+			 */
+			npc_mcam_set_channel(flow, req, npc->channel,
+					     (BIT_ULL(12) - 1),
+					     pst->is_second_pass_rule);
+		}
	} else {
		uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;
		pf_func = plt_cpu_to_be_16(pf_func);
		req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
		req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
+
+		/* Mirror the TX PF_FUNC match (KW0 bits 47:32) into the
+		 * cached copy so it stays consistent with the request.
+		 */
+		flow->mcam_data[0] |= ((uint64_t)pf_func << 32);
+		flow->mcam_mask[0] |= ((uint64_t)0xffff << 32);
	}
	rc = mbox_process_msg(mbox, (void *)&rsp);
		return rc;
	flow->mcam_id = entry;
+
	if (use_ctr)
		flow->ctr_id = ctr;
	return 0;
}
+/*
+ * Relax the LB layer-type nibble in KW0 so a single MCAM rule matches both
+ * CTAG and STAG_QINQ VLAN frames (data 0b0010, mask 0b1110 — low bit is a
+ * don't-care; see the in-body comment).
+ */
+static void
+npc_set_vlan_ltype(struct npc_parse_state *pst)
+{
+	uint64_t val, mask;
+	uint8_t lb_offset;
+
+	/* Bit offset of the LB ltype nibble within KW0: 4 bits for each
+	 * enabled extract field below NPC_LTYPE_LB_OFFSET. Assumes
+	 * keyx_supp_nmask is a per-nibble enable bitmap — TODO confirm.
+	 */
+	lb_offset =
+		__builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
+				   ((1ULL << NPC_LTYPE_LB_OFFSET) - 1));
+	lb_offset *= 4;
+
+	/* Clear the existing LB ltype nibble in data and mask */
+	mask = ~((0xfULL << lb_offset));
+	pst->flow->mcam_data[0] &= mask;
+	pst->flow->mcam_mask[0] &= mask;
+	/* NPC_LT_LB_CTAG: 0b0010, NPC_LT_LB_STAG_QINQ: 0b0011
+	 * Set LB layertype/mask as 0b0010/0b1110 to match both.
+	 */
+	val = ((uint64_t)(NPC_LT_LB_CTAG & NPC_LT_LB_STAG_QINQ)) << lb_offset;
+	pst->flow->mcam_data[0] |= val;
+	pst->flow->mcam_mask[0] |= (0xeULL << lb_offset);
+}
+
+/*
+ * Relax the LC layer-type nibble in KW0 so one MCAM rule matches both
+ * NPC_LT_LC_IP6 and NPC_LT_LC_IP6_EXT, then force an exact LC LFLAG match
+ * when extension flags are present (rationale in the comment below).
+ */
+static void
+npc_set_ipv6ext_ltype_mask(struct npc_parse_state *pst)
+{
+	uint8_t lc_offset, lcflag_offset;
+	uint64_t val, mask;
+
+	/* Bit offset of the LC ltype nibble within KW0: 4 bits per enabled
+	 * extract field below NPC_LTYPE_LC_OFFSET (same scheme as the LB
+	 * offset computation elsewhere in this file).
+	 */
+	lc_offset =
+		__builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
+				   ((1ULL << NPC_LTYPE_LC_OFFSET) - 1));
+	lc_offset *= 4;
+
+	/* Clear the existing LC ltype nibble in data and mask */
+	mask = ~((0xfULL << lc_offset));
+	pst->flow->mcam_data[0] &= mask;
+	pst->flow->mcam_mask[0] &= mask;
+	/* NPC_LT_LC_IP6: 0b0100, NPC_LT_LC_IP6_EXT: 0b0101
+	 * Set LC layertype/mask as 0b0100/0b1110 to match both.
+	 */
+	val = ((uint64_t)(NPC_LT_LC_IP6 & NPC_LT_LC_IP6_EXT)) << lc_offset;
+	pst->flow->mcam_data[0] |= val;
+	pst->flow->mcam_mask[0] |= (0xeULL << lc_offset);
+
+	/* If LC LFLAG is non-zero, set the LC LFLAG mask to 0xF. In general
+	 * case flag mask is set same as the value in data. For example, to
+	 * match 3 VLANs, flags have to match a range of values. But, for IPv6
+	 * extended attributes matching, we need an exact match. Hence, set the
+	 * mask as 0xF. This is done only if LC LFLAG value is non-zero,
+	 * because for AH and ESP, LC LFLAG is zero and we don't want to match
+	 * zero in LFLAG.
+	 */
+	lcflag_offset =
+		__builtin_popcount(pst->npc->keyx_supp_nmask[pst->nix_intf] &
+				   ((1ULL << NPC_LFLAG_LC_OFFSET) - 1));
+	lcflag_offset *= 4;
+
+	mask = (0xfULL << lcflag_offset);
+	val = pst->flow->mcam_data[0] & mask;
+	if (val)
+		pst->flow->mcam_mask[0] |= mask;
+}
+
int
npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc)
{
	if (layer_info) {
		for (idx = 0; idx <= 2; idx++) {
			if (layer_info & (1 << idx)) {
-				if (idx == 2)
+				/* Mask now mirrors the flag data for the two
+				 * flag nibbles (idx 0 and 1) so only the set
+				 * flag bits are matched; the ltype nibble
+				 * (idx 2) keeps an exact 0xf mask.
+				 */
+				if (idx == 2) {
					data = lt;
-				else if (idx == 1)
+					mask = 0xf;
+				} else if (idx == 1) {
					data = ((flags >> 4) & 0xf);
-				else
+					mask = ((flags >> 4) & 0xf);
+				} else {
					data = (flags & 0xf);
+					mask = (flags & 0xf);
+				}
				if (data_off >= 64) {
					data_off = 0;
				}
				key_data[index] |=
					((uint64_t)data << data_off);
-				mask = 0xf;
+
				if (lt == 0)
					mask = 0;
				key_mask[index] |=
	memcpy(pst->flow->mcam_data, key_data, key_len);
	memcpy(pst->flow->mcam_mask, key_mask, key_len);
-	if (pst->is_vf) {
+	/* Optionally widen the LB/LC ltype matches (VLAN CTAG/STAG_QINQ and
+	 * IP6/IP6_EXT) when the parse state requested it.
+	 */
+	if (pst->set_vlan_ltype_mask)
+		npc_set_vlan_ltype(pst);
+
+	if (pst->set_ipv6ext_ltype_mask)
+		npc_set_ipv6ext_ltype_mask(pst);
+
+	/* Base steering rule is now read only for VF RX flows */
+	if (pst->is_vf && pst->flow->nix_intf == NIX_INTF_RX) {
		(void)mbox_alloc_msg_npc_read_base_steer_rule(npc->mbox);
		rc = mbox_process_msg(npc->mbox, (void *)&base_rule_rsp);
		if (rc) {
	}
+/*
+ * npc_flow_enable_all_entries() - rewrite every installed flow's MCAM entry
+ * with its enable bit set to @enable. Walks all priority flow lists and
+ * returns the first write failure, or 0 on success.
+ */
int
-npc_flow_free_all_resources(struct npc *npc)
+npc_flow_enable_all_entries(struct npc *npc, bool enable)
{
-	struct npc_mcam_ents_info *info;
+	struct npc_flow_list *list;
	struct roc_npc_flow *flow;
-	struct plt_bitmap *bmap;
-	int entry_count = 0;
-	int rc, idx;
+	int rc = 0, idx;
+	/* Re-program each flow entry with the requested enable state */
	for (idx = 0; idx < npc->flow_max_priority; idx++) {
-		info = &npc->flow_entry_info[idx];
-		entry_count += info->live_ent;
+		list = &npc->flow_list[idx];
+		TAILQ_FOREACH(flow, list, next) {
+			flow->enable = enable;
+			rc = npc_mcam_write_entry(npc, flow);
+			if (rc)
+				return rc;
+		}
	}
+	return rc;
+}
-	if (entry_count == 0)
-		return 0;
+/*
+ * npc_flow_free_all_resources() - release all MCAM entries, per-flow
+ * counters, priority-list entries and flow-list memory held by this NPC
+ * instance.
+ */
+int
+npc_flow_free_all_resources(struct npc *npc)
+{
+	struct roc_npc_flow *flow;
+	int rc, idx;
	/* Free all MCAM entries allocated */
	rc = npc_mcam_free_all_entries(npc);
		if (flow->ctr_id != NPC_COUNTER_NONE)
			rc |= npc_mcam_free_counter(npc, flow->ctr_id);
+		/* Drop the flow from the priority bookkeeping before
+		 * unlinking and freeing it.
+		 */
+		npc_delete_prio_list_entry(npc, flow);
+
		TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
		plt_free(flow);
-		bmap = npc->live_entries[flow->priority];
-		plt_bitmap_clear(bmap, flow->mcam_id);
	}
-	info = &npc->flow_entry_info[idx];
-	info->free_ent = 0;
-	info->live_ent = 0;
	}
	return rc;
}