};
static int
-__rte_unused nix_vlan_mcam_enb_dis(struct otx2_eth_dev *dev,
- uint32_t entry, const int enable)
+nix_vlan_mcam_enb_dis(struct otx2_eth_dev *dev,
+ uint32_t entry, const int enable)
{
struct npc_mcam_ena_dis_entry_req *req;
struct otx2_mbox *mbox = dev->mbox;
entry->vtag_action = vtag_action;
}
+static void
+nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
+ int vtag_index)
+{
+ union {
+ uint64_t reg;
+ struct nix_tx_vtag_action_s act;
+ } vtag_action;
+
+ uint64_t action;
+
+ action = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+
+ /*
+ * Take offset from LA since in case of untagged packet,
+ * lbptr is zero.
+ */
+ if (type == ETH_VLAN_TYPE_OUTER) {
+ vtag_action.act.vtag0_def = vtag_index;
+ vtag_action.act.vtag0_lid = NPC_LID_LA;
+ vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
+ vtag_action.act.vtag0_relptr = NIX_TX_VTAGACTION_VTAG0_RELPTR;
+ } else {
+ vtag_action.act.vtag1_def = vtag_index;
+ vtag_action.act.vtag1_lid = NPC_LID_LA;
+ vtag_action.act.vtag1_op = NIX_TX_VTAGOP_INSERT;
+ vtag_action.act.vtag1_relptr = NIX_TX_VTAGACTION_VTAG1_RELPTR;
+ }
+
+ entry->action = action;
+ entry->vtag_action = vtag_action.reg;
+}
+
static int
nix_vlan_mcam_free(struct otx2_eth_dev *dev, uint32_t entry)
{
return 0;
}
+/* Installs/Removes default tx entry */
+static int
+nix_vlan_handle_default_tx_entry(struct rte_eth_dev *eth_dev,
+ enum rte_vlan_type type, int vtag_index,
+ int enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct mcam_entry entry;
+ uint16_t pf_func;
+ int rc;
+
+ if (!vlan->def_tx_mcam_idx && enable) {
+ memset(&entry, 0, sizeof(struct mcam_entry));
+
+ /* Only pf_func is matched, swap it's bytes */
+ pf_func = (dev->pf_func & 0xff) << 8;
+ pf_func |= (dev->pf_func >> 8) & 0xff;
+
+ /* PF Func extracted to KW1[63:48] */
+ entry.kw[1] = (uint64_t)pf_func << 48;
+ entry.kw_mask[1] = (BIT_ULL(16) - 1) << 48;
+
+ nix_set_tx_vlan_action(&entry, type, vtag_index);
+ vlan->def_tx_mcam_ent = entry;
+
+ return nix_vlan_mcam_alloc_and_write(eth_dev, &entry,
+ NIX_INTF_TX, 0);
+ }
+
+ if (vlan->def_tx_mcam_idx && !enable) {
+ rc = nix_vlan_mcam_free(dev, vlan->def_tx_mcam_idx);
+ if (rc)
+ return rc;
+ vlan->def_rx_mcam_idx = 0;
+ }
+
+ return 0;
+}
+
/* Configure vlan stripping on or off */
static int
nix_vlan_hw_strip(struct rte_eth_dev *eth_dev, const uint8_t enable)
uint16_t vlan_id)
{
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct vlan_entry *entry;
int rc = -EINVAL;
if (!vlan_id && enable) {
return 0;
}
+ /* Enable/disable existing vlan filter entries */
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (vlan_id) {
+ if (entry->vlan_id == vlan_id) {
+ rc = nix_vlan_mcam_enb_dis(dev,
+ entry->mcam_idx,
+ enable);
+ if (rc)
+ return rc;
+ }
+ } else {
+ rc = nix_vlan_mcam_enb_dis(dev, entry->mcam_idx,
+ enable);
+ if (rc)
+ return rc;
+ }
+ }
+
if (!vlan_id && !enable) {
rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true,
enable);
return 0;
}
+/* Enable/disable vlan filtering for the given vlan_id */
+int
+otx2_nix_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
+ int on)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct vlan_entry *entry;
+ int entry_exists = 0;
+ int rc = -EINVAL;
+ int mcam_idx;
+
+ if (!vlan_id) {
+ otx2_err("Vlan Id can't be zero");
+ return rc;
+ }
+
+ if (!vlan->def_rx_mcam_idx) {
+ otx2_err("Vlan Filtering is disabled, enable it first");
+ return rc;
+ }
+
+ if (on) {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (entry->vlan_id == vlan_id) {
+ /* Vlan entry already exists */
+ entry_exists = 1;
+ /* Mcam entry already allocated */
+ if (entry->mcam_idx) {
+ rc = nix_vlan_hw_filter(eth_dev, on,
+ vlan_id);
+ return rc;
+ }
+ break;
+ }
+ }
+
+ if (!entry_exists) {
+ entry = rte_zmalloc("otx2_nix_vlan_entry",
+ sizeof(struct vlan_entry), 0);
+ if (!entry) {
+ otx2_err("Failed to allocate memory");
+ return -ENOMEM;
+ }
+ }
+
+ /* Enables vlan_id & mac address based filtering */
+ if (eth_dev->data->promiscuous)
+ mcam_idx = nix_vlan_mcam_config(eth_dev, vlan_id,
+ VLAN_ID_MATCH);
+ else
+ mcam_idx = nix_vlan_mcam_config(eth_dev, vlan_id,
+ VLAN_ID_MATCH |
+ MAC_ADDR_MATCH);
+ if (mcam_idx < 0) {
+ otx2_err("Failed to config vlan mcam");
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ return mcam_idx;
+ }
+
+ entry->mcam_idx = mcam_idx;
+ if (!entry_exists) {
+ entry->vlan_id = vlan_id;
+ TAILQ_INSERT_HEAD(&vlan->fltr_tbl, entry, next);
+ }
+ } else {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (entry->vlan_id == vlan_id) {
+ nix_vlan_mcam_free(dev, entry->mcam_idx);
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
/* Configure double vlan(qinq) on or off */
static int
otx2_nix_config_double_vlan(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_QINQ_STRIP)) {
dev->rx_offloads |= offloads;
dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+ otx2_eth_set_rx_function(eth_dev);
}
done:
return rc;
}
+int
+otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+ enum rte_vlan_type type, uint16_t tpid)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct nix_set_vlan_tpid *tpid_cfg;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc;
+
+ tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
+
+ tpid_cfg->tpid = tpid;
+ if (type == ETH_VLAN_TYPE_OUTER)
+ tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
+ else
+ tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ if (type == ETH_VLAN_TYPE_OUTER)
+ dev->vlan_info.outer_vlan_tpid = tpid;
+ else
+ dev->vlan_info.inner_vlan_tpid = tpid;
+ return 0;
+}
+
+int
+otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct otx2_eth_dev *otx2_dev = otx2_eth_pmd_priv(dev);
+ struct otx2_mbox *mbox = otx2_dev->mbox;
+ struct nix_vtag_config *vtag_cfg;
+ struct nix_vtag_config_rsp *rsp;
+ struct otx2_vlan_info *vlan;
+ int rc, rc1, vtag_index = 0;
+
+ if (vlan_id == 0) {
+ otx2_err("vlan id can't be zero");
+ return -EINVAL;
+ }
+
+ vlan = &otx2_dev->vlan_info;
+
+ if (on && vlan->pvid_insert_on && vlan->pvid == vlan_id) {
+ otx2_err("pvid %d is already enabled", vlan_id);
+ return -EINVAL;
+ }
+
+ if (on && vlan->pvid_insert_on && vlan->pvid != vlan_id) {
+ otx2_err("another pvid is enabled, disable that first");
+ return -EINVAL;
+ }
+
+ /* No pvid active */
+ if (!on && !vlan->pvid_insert_on)
+ return 0;
+
+ /* Given pvid already disabled */
+ if (!on && vlan->pvid != vlan_id)
+ return 0;
+
+ vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
+
+ if (on) {
+ vtag_cfg->cfg_type = VTAG_TX;
+ vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+
+ if (vlan->outer_vlan_tpid)
+ vtag_cfg->tx.vtag0 =
+ (vlan->outer_vlan_tpid << 16) | vlan_id;
+ else
+ vtag_cfg->tx.vtag0 =
+ ((RTE_ETHER_TYPE_VLAN << 16) | vlan_id);
+ vtag_cfg->tx.cfg_vtag0 = 1;
+ } else {
+ vtag_cfg->cfg_type = VTAG_TX;
+ vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+
+ vtag_cfg->tx.vtag0_idx = vlan->outer_vlan_idx;
+ vtag_cfg->tx.free_vtag0 = 1;
+ }
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (on) {
+ vtag_index = rsp->vtag0_idx;
+ } else {
+ vlan->pvid = 0;
+ vlan->pvid_insert_on = 0;
+ vlan->outer_vlan_idx = 0;
+ }
+
+ rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+ vtag_index, on);
+ if (rc < 0) {
+ printf("Default tx entry failed with rc %d\n", rc);
+ vtag_cfg->tx.vtag0_idx = vtag_index;
+ vtag_cfg->tx.free_vtag0 = 1;
+ vtag_cfg->tx.cfg_vtag0 = 0;
+
+ rc1 = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc1)
+ otx2_err("Vtag free failed");
+
+ return rc;
+ }
+
+ if (on) {
+ vlan->pvid = vlan_id;
+ vlan->pvid_insert_on = 1;
+ vlan->outer_vlan_idx = vtag_index;
+ }
+
+ return 0;
+}
+
+void otx2_nix_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue,
+ __rte_unused int on)
+{
+ otx2_err("Not Supported");
+}
+
static int
nix_vlan_rx_mkex_offset(uint64_t mask)
{
return 0;
}
/* Re-program the default rx entry and every vlan filter remembered in
 * vlan_info.fltr_tbl back into the mcam. NOTE(review): presumably called
 * after the HW entries were freed (e.g. port reconfigure) — confirm with
 * callers of this function.
 */
static void nix_vlan_reinstall_vlan_filters(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct vlan_entry *entry;
	int rc;

	/* VLAN filters can't be set without turning filtering on first */
	rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, true);
	if (rc) {
		otx2_err("Failed to reinstall vlan filters");
		return;
	}

	/* Best effort: log and continue on per-vlan failures */
	TAILQ_FOREACH(entry, &dev->vlan_info.fltr_tbl, next) {
		rc = otx2_nix_vlan_filter_set(eth_dev, entry->vlan_id, true);
		if (rc)
			otx2_err("Failed to reinstall filter for vlan:%d",
				 entry->vlan_id);
	}
}
+
int
otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
{
}
TAILQ_INIT(&dev->vlan_info.fltr_tbl);
+ } else {
+ /* Reinstall all mcam entries now if filter offload is set */
+ if (eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ nix_vlan_reinstall_vlan_filters(eth_dev);
}
mask =
{
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct vlan_entry *entry;
int rc;
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (!dev->configured) {
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ } else {
+ /* MCAM entries freed by flow_fini & lf_free on
+ * port stop.
+ */
+ entry->mcam_idx = 0;
+ }
+ }
+
if (!dev->configured) {
if (vlan->def_rx_mcam_idx) {
rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx);