* Copyright(c) 2017 Cavium, Inc
*/
-#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include "lio_ethdev.h"
#include "lio_rxtx.h"
+int lio_logtype_init;
+int lio_logtype_driver;
+
/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
}
}
-static void
+static int
lio_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- devinfo->pci_dev = pci_dev;
-
switch (pci_dev->id.subsystem_device_id) {
/* CN23xx 10G cards */
case PCI_SUBSYS_DEV_ID_CN2350_210:
devinfo->speed_capa = ETH_LINK_SPEED_10G;
lio_dev_err(lio_dev,
"Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
+ return -EINVAL;
}
devinfo->max_rx_queues = lio_dev->max_rx_queues;
ETH_RSS_NONFRAG_IPV6_TCP |
ETH_RSS_IPV6_EX |
ETH_RSS_IPV6_TCP_EX);
+ return 0;
}
static int
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
- uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
struct lio_dev_ctrl_cmd ctrl_cmd;
struct lio_ctrl_pkt ctrl_pkt;
/* check if VF MTU is within allowed range.
* New value should not exceed PF MTU.
*/
- if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
+ if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {
lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
- ETHER_MIN_MTU, pf_mtu);
+ RTE_ETHER_MIN_MTU, pf_mtu);
return -EINVAL;
}
return -1;
}
- if (frame_len > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ if (frame_len > RTE_ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
eth_dev->data->mtu = mtu;
return 0;
}
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param eth_dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = ð_dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static uint64_t
lio_hweight64(uint64_t w)
{
int wait_to_complete __rte_unused)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
/* Initialize */
+ memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- memset(&old, 0, sizeof(old));
/* Return what we found */
if (lio_dev->linfo.link.s.link_up == 0) {
/* Interface is down */
- if (lio_dev_atomic_write_link_status(eth_dev, &link))
- return -1;
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
link.link_status = ETH_LINK_UP; /* Interface is up */
link.link_duplex = ETH_LINK_HALF_DUPLEX;
}
- if (lio_dev_atomic_write_link_status(eth_dev, &link))
- return -1;
-
- if (link.link_status == old.link_status)
- return -1;
-
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
/**
* \brief Net device enable, disable allmulticast
* @param eth_dev Pointer to the structure rte_eth_dev
+ *
+ * @return
+ * On success return 0
+ * On failure return negative errno
*/
-static void
+static int
lio_change_dev_flag(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
lio_dev_err(lio_dev, "Failed to send change flag message\n");
- return;
+ return -EAGAIN;
}
- if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
lio_dev_err(lio_dev, "Change dev flag command timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static void
+static int
lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
lio_dev_err(lio_dev, "Require firmware version >= %s\n",
LIO_VF_TRUST_MIN_VERSION);
- return;
+ return -EAGAIN;
}
if (!lio_dev->intf_open) {
lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
lio_dev->port_id);
- return;
+ return -EAGAIN;
}
lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
- lio_change_dev_flag(eth_dev);
+ return lio_change_dev_flag(eth_dev);
}
-static void
+static int
lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
lio_dev_err(lio_dev, "Require firmware version >= %s\n",
LIO_VF_TRUST_MIN_VERSION);
- return;
+ return -EAGAIN;
}
if (!lio_dev->intf_open) {
lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
lio_dev->port_id);
- return;
+ return -EAGAIN;
}
lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
- lio_change_dev_flag(eth_dev);
+ return lio_change_dev_flag(eth_dev);
}
static void
fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
- if ((lio_dev->droq[fw_mapped_oq]) &&
- (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
- lio_dev_err(lio_dev,
- "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
- lio_dev->droq[fw_mapped_oq]->max_count);
- return -ENOTSUP;
+ /* Free previous allocation if any */
+ if (eth_dev->data->rx_queues[q_no] != NULL) {
+ lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
+ eth_dev->data->rx_queues[q_no] = NULL;
}
mbp_priv = rte_mempool_get_priv(mp);
int oq_no;
if (droq) {
- /* Run time queue deletion not supported */
- if (droq->lio_dev->port_configured)
- return;
-
oq_no = droq->q_no;
lio_delete_droq_queue(droq->lio_dev, oq_no);
}
lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
- if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
- (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
- lio_dev_err(lio_dev,
- "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
- lio_dev->instr_queue[fw_mapped_iq]->max_count);
- return -ENOTSUP;
+ /* Free previous allocation if any */
+ if (eth_dev->data->tx_queues[q_no] != NULL) {
+ lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
+ eth_dev->data->tx_queues[q_no] = NULL;
}
retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
}
retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
- lio_dev->instr_queue[fw_mapped_iq]->max_count,
+ lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
socket_id);
if (retval) {
if (tq) {
- /* Run time queue deletion not supported */
- if (tq->lio_dev->port_configured)
- return;
-
/* Free sg_list */
lio_delete_sglist(tq);
/* Configure RSS if device configured with multiple RX queues. */
lio_dev_mq_rx_configure(eth_dev);
+	/* Before updating the link info,
+	 * linfo.link.link_status64 must be set to 0.
+	 */
+ lio_dev->linfo.link.link_status64 = 0;
+
/* start polling for lsc */
ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
lio_sync_link_state_check,
goto dev_mtu_set_error;
}
- mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
- if (mtu < ETHER_MIN_MTU)
- mtu = ETHER_MIN_MTU;
+ mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
+ if (mtu < RTE_ETHER_MIN_MTU)
+ mtu = RTE_ETHER_MIN_MTU;
if (eth_dev->data->mtu != mtu) {
ret = lio_dev_mtu_set(eth_dev, mtu);
lio_send_rx_ctrl_cmd(eth_dev, 0);
+ lio_wait_for_instr_fetch(lio_dev);
+
/* Clear recorded link status */
lio_dev->linfo.link.link_status64 = 0;
}
lio_dev_close(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
- uint32_t i;
lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
if (lio_dev->intf_open)
lio_dev_stop(eth_dev);
- lio_wait_for_instr_fetch(lio_dev);
-
- lio_dev->fn_list.disable_io_queues(lio_dev);
-
- cn23xx_vf_set_io_queues_off(lio_dev);
-
- /* Reset iq regs (IQ_DBELL).
- * Clear sli_pktx_cnts (OQ_PKTS_SENT).
- */
- for (i = 0; i < lio_dev->nb_rx_queues; i++) {
- struct lio_droq *droq = lio_dev->droq[i];
-
- if (droq == NULL)
- break;
-
- uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);
-
- lio_dev_dbg(lio_dev,
- "pending oq count %u\n", pkt_count);
- rte_write32(pkt_count, droq->pkts_sent_reg);
- }
+ /* Reset ioq regs */
+ lio_dev->fn_list.setup_device_regs(lio_dev);
if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
cn23xx_vf_ask_pf_to_do_flr(lio_dev);
lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}
-static int lio_dev_configure(struct rte_eth_dev *eth_dev)
+/**
+ * \brief Inform firmware about a change in the number of queues to use
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ * @param num_txq requested number of Tx queues
+ * @param num_rxq requested number of Rx queues
+ *
+ * @return
+ *   0 on success
+ *   -ENOTSUP when firmware is older than LIO_Q_RECONF_MIN_VERSION
+ *   -1 when the control command cannot be sent or times out
+ */
+static int
+lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
+			    int num_rxq)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+	struct lio_dev_ctrl_cmd ctrl_cmd;
+	struct lio_ctrl_pkt ctrl_pkt;
+
+	/* Queue re-configuration is only supported by newer firmware */
+	if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
+		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+			    LIO_Q_RECONF_MIN_VERSION);
+		return -ENOTSUP;
+	}
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+	ctrl_cmd.eth_dev = eth_dev;
+	ctrl_cmd.cond = 0;
+
+	ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
+	ctrl_pkt.ncmd.s.param1 = num_txq;
+	ctrl_pkt.ncmd.s.param2 = num_rxq;
+	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+		lio_dev_err(lio_dev, "Failed to send queue count control command\n");
+		return -1;
+	}
+
+	/* Wait for the firmware to acknowledge the queue count change */
+	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+		lio_dev_err(lio_dev, "Queue count control command timed out\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * \brief Re-configure the number of Tx/Rx queues in use
+ *
+ * Sends the new queue counts to firmware (only when they differ from
+ * the current counts), stops the interface if it is open, and resets
+ * the ioq registers so the queues can be set up again.
+ *
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ * @param num_txq requested number of Tx queues
+ * @param num_rxq requested number of Rx queues
+ *
+ * @return 0 on success, -1 on failure
+ */
+static int
+lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+	if (lio_dev->nb_rx_queues != num_rxq ||
+	    lio_dev->nb_tx_queues != num_txq) {
+		if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
+			return -1;
+		lio_dev->nb_rx_queues = num_rxq;
+		lio_dev->nb_tx_queues = num_txq;
+	}
+
+	if (lio_dev->intf_open)
+		lio_dev_stop(eth_dev);
+
+	/* Reset ioq registers */
+	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
+		lio_dev_err(lio_dev, "Failed to configure device registers\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+lio_dev_configure(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
int retval, num_iqueues, num_oqueues;
- uint8_t mac[ETHER_ADDR_LEN], i;
+ uint8_t mac[RTE_ETHER_ADDR_LEN], i;
struct lio_if_cfg_resp *resp;
struct lio_soft_command *sc;
union lio_if_cfg if_cfg;
PMD_INIT_FUNC_TRACE();
- /* Re-configuring firmware not supported.
- * Can't change tx/rx queues per port from initial value.
+ /* Inform firmware about change in number of queues to use.
+ * Disable IO queues and reset registers for re-configuration.
*/
- if (lio_dev->port_configured) {
- if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
- (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
- lio_dev_err(lio_dev,
- "rxq/txq re-conf not supported. Restart application with new value.\n");
- return -ENOTSUP;
- }
- return 0;
- }
+ if (lio_dev->port_configured)
+ return lio_reconf_queues(eth_dev,
+ eth_dev->data->nb_tx_queues,
+ eth_dev->data->nb_rx_queues);
lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
+ /* Set max number of queues which can be re-configured. */
+ lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
+ lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
+
resp_size = sizeof(struct lio_if_cfg_resp);
sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
if (sc == NULL)
goto nic_config_fail;
}
- snprintf(lio_dev->firmware_version, LIO_FW_VERSION_LENGTH, "%s",
- resp->cfg_info.lio_firmware_version);
+ strlcpy(lio_dev->firmware_version,
+ resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH);
lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
sizeof(struct octeon_if_cfg_info) >> 3);
/* 64-bit swap required on LE machines */
lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
2 + i));
/* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *)mac, ð_dev->data->mac_addrs[0]);
+ rte_ether_addr_copy((struct rte_ether_addr *)mac,
+ ð_dev->data->mac_addrs[0]);
/* enable firmware checksum support for tunnel packets */
lio_enable_hw_tunnel_rx_checksum(eth_dev);
lio_free_soft_command(sc);
- /* Disable iq_0 for reconf */
- lio_dev->fn_list.disable_io_queues(lio_dev);
-
/* Reset ioq regs */
lio_dev->fn_list.setup_device_regs(lio_dev);
rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
}
- if (cn23xx_vf_set_io_queues_off(lio_dev)) {
- lio_dev_err(lio_dev, "Setting io queues off failed\n");
- goto error;
- }
-
if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
lio_dev_err(lio_dev, "Failed to configure device registers\n");
goto error;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
+ return 0;
/* lio_free_sc_buffer_pool */
lio_free_sc_buffer_pool(lio_dev);
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
}
eth_dev->dev_ops = &liovf_eth_dev_ops;
- eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
lio_dev_err(lio_dev,
"MAC addresses memory allocation failed\n");
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- int ret;
-
- eth_dev = rte_eth_dev_pci_allocate(pci_dev,
- sizeof(struct lio_device));
- if (eth_dev == NULL)
- return -ENOMEM;
-
- ret = lio_eth_dev_init(eth_dev);
- if (ret)
- rte_eth_dev_pci_release(eth_dev);
-
- return ret;
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
+ lio_eth_dev_init);
}
static int
RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
+
+/* Register the PMD's init and driver dynamic log types and set their
+ * default level to NOTICE; RTE_INIT runs this at constructor time,
+ * before main().
+ */
+RTE_INIT(lio_init_log)
+{
+	lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
+	if (lio_logtype_init >= 0)
+		rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
+	lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
+	if (lio_logtype_driver >= 0)
+		rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);
+}