Add force_link_up devargs to always force the link up for VFs.
This enables VFs on the same NIC to send traffic to each other
even when the physical link is down.
Also add RTE_PMD_REGISTER_PARAM_STRING to export all supported
devargs.
Fixes: 011ebc236ddc ("net/cxgbe: add skeleton VF driver")
Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
enabled, the outer VLAN tag is preserved in Q-in-Q packets. Otherwise,
the outer VLAN tag is stripped in Q-in-Q packets.
+- ``force_link_up`` (default **0**)
+
+ When set to 1, the CXGBEVF PMD always forces the link up for all VFs on
+ the underlying Chelsio NICs. This enables multiple VFs on the same NIC
+ to send traffic to each other even when the physical link is down.
+
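+ For example, assuming a VF bound at the illustrative PCI address
+ ``02:00.4``, the option can be passed as a device argument to testpmd
+ (a usage sketch; the address is only an example)::
+
+    testpmd -w 02:00.4,force_link_up=1 -- -i
+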
.. _driver-compilation:
Driver compilation and testing
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
+#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
+
+bool force_linkup(struct adapter *adap);
int cxgbe_probe(struct adapter *adapter);
int cxgbevf_probe(struct adapter *adapter);
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
void cxgbe_enable_rx_queues(struct port_info *pi);
void print_port_info(struct adapter *adap);
void print_adapter_info(struct adapter *adap);
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
#endif /* _CXGBE_H_ */
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
- struct rte_eth_link *old_link = &eth_dev->data->dev_link;
+ struct rte_eth_link new_link = { 0 };
unsigned int work_done, budget = 4;
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
- if (old_link->link_status == pi->link_cfg.link_ok)
- return -1; /* link not changed */
- eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;
+ new_link.link_status = force_linkup(adapter) ?
+ ETH_LINK_UP : pi->link_cfg.link_ok;
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_speed = pi->link_cfg.speed;
- /* link has changed */
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &new_link);
}
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
+ CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
#include "t4_msg.h"
#include "cxgbe.h"
-#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
-
/*
* Response queue handler for the FW event queue.
*/
return 0;
}
-static int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
{
struct rte_kvargs *kvlist;
pi->port_id, pi->mod_type);
}
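+/* Report the link as always up when the force_link_up devarg is set on a
+ * VF. PF ports always report the actual physical link state.
+ */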
+inline bool force_linkup(struct adapter *adap)
+{
+ struct rte_pci_device *pdev = adap->pdev;
+
+ if (is_pf4(adap))
+ return false; /* force_linkup not required for PF driver */
+ if (!cxgbe_get_devargs(pdev->device.devargs,
+ CXGBE_DEVARG_FORCE_LINK_UP))
+ return false;
+ return true;
+}
+
/**
* link_start - enable a port
* @dev: the port to enable
ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
true, true, false);
}
+
+ if (ret == 0 && force_linkup(adapter))
+ pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
return ret;
}