/* Adds vlan_id & LB CTAG flag to MCAM KW */
if (flags & VLAN_ID_MATCH) {
- entry.kw[kwi] |= NPC_LT_LB_CTAG << mkex->lb_lt_offset;
- entry.kw_mask[kwi] |= 0xFULL << mkex->lb_lt_offset;
+ entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG_QINQ)
+ << mkex->lb_lt_offset;
+ entry.kw_mask[kwi] |=
+ (0xF & ~(NPC_LT_LB_CTAG ^ NPC_LT_LB_STAG_QINQ))
+ << mkex->lb_lt_offset;
- mcam_data = (vlan_id << 16);
- mcam_mask = (BIT_ULL(16) - 1) << 16;
+ mcam_data = (uint16_t)vlan_id;
+ mcam_mask = (BIT_ULL(16) - 1);
otx2_mbox_memcpy(key_data + mkex->lb_xtract.key_off,
- &mcam_data, mkex->lb_xtract.len + 1);
+ &mcam_data, mkex->lb_xtract.len);
otx2_mbox_memcpy(key_mask + mkex->lb_xtract.key_off,
- &mcam_mask, mkex->lb_xtract.len + 1);
+ &mcam_mask, mkex->lb_xtract.len);
}
/* Adds LB STAG flag to MCAM KW */
if (flags & QINQ_F_MATCH) {
- entry.kw[kwi] |= NPC_LT_LB_STAG << mkex->lb_lt_offset;
+ entry.kw[kwi] |= NPC_LT_LB_STAG_QINQ << mkex->lb_lt_offset;
entry.kw_mask[kwi] |= 0xFULL << mkex->lb_lt_offset;
}
/* Adds LB CTAG & LB STAG flags to MCAM KW */
if (flags & VTAG_F_MATCH) {
- entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG)
+ entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG_QINQ)
<< mkex->lb_lt_offset;
- entry.kw_mask[kwi] |= (NPC_LT_LB_CTAG & NPC_LT_LB_STAG)
+ entry.kw_mask[kwi] |=
+ (0xF & ~(NPC_LT_LB_CTAG ^ NPC_LT_LB_STAG_QINQ))
<< mkex->lb_lt_offset;
}
pf_func = (dev->pf_func & 0xff) << 8;
pf_func |= (dev->pf_func >> 8) & 0xff;
- /* PF Func extracted to KW1[63:48] */
- entry.kw[1] = (uint64_t)pf_func << 48;
- entry.kw_mask[1] = (BIT_ULL(16) - 1) << 48;
+ /* PF Func extracted to KW1[47:32] */
+ entry.kw[0] = (uint64_t)pf_func << 32;
+ entry.kw_mask[0] = (BIT_ULL(16) - 1) << 32;
nix_set_tx_vlan_action(&entry, type, vtag_index);
vlan->def_tx_mcam_ent = entry;
} else {
TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
if (entry->vlan_id == vlan_id) {
- nix_vlan_mcam_free(dev, entry->mcam_idx);
+ rc = nix_vlan_mcam_free(dev, entry->mcam_idx);
+ if (rc)
+ return rc;
TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
rte_free(entry);
break;
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
uint64_t offloads = dev->rx_offloads;
struct rte_eth_rxmode *rxmode;
- int rc;
+ int rc = 0;
rxmode = &eth_dev->data->dev_conf.rxmode;
- if (mask & ETH_VLAN_EXTEND_MASK) {
- otx2_err("Extend offload not supported");
- return -ENOTSUP;
- }
-
if (mask & ETH_VLAN_STRIP_MASK) {
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
DEV_RX_OFFLOAD_QINQ_STRIP)) {
dev->rx_offloads |= offloads;
dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+ otx2_eth_set_rx_function(eth_dev);
}
done:
vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
if (vlan->outer_vlan_tpid)
- vtag_cfg->tx.vtag0 =
- (vlan->outer_vlan_tpid << 16) | vlan_id;
+ vtag_cfg->tx.vtag0 = ((uint32_t)vlan->outer_vlan_tpid
+ << 16) | vlan_id;
else
vtag_cfg->tx.vtag0 =
((RTE_ETHER_TYPE_VLAN << 16) | vlan_id);