*/
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
+#include <rte_ether.h>
#include "lio_logs.h"
#include "lio_23xx_vf.h"
return 0;
}
+/* Store statistics names and their offsets in the stats structure */
+struct rte_lio_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
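+/* The firmware reports one struct octeon_link_stats blob, assumed here to
+ * lay out the rx stats block immediately followed by the tx stats block;
+ * tx entries therefore add sizeof(struct octeon_rx_stats) to their offsets
+ * so a single flat byte offset can index either half.
+ */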
+static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
+ {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
+ {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
+ {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
+ {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
+ {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
+ {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
+ {"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
+ {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
+ {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
+ {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
+ {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
+ {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
+ {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
+ {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_broadcast_pkts",
+ (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_multicast_pkts",
+ (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_total_collisions", (offsetof(struct octeon_tx_stats,
+ total_collisions)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
+ sizeof(struct octeon_rx_stats)},
+};
+
+#define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings)
+
+/* Get hw stats of the port */
+static int
+lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ struct octeon_link_stats *hw_stats;
+ struct lio_link_stats_resp *resp;
+ struct lio_soft_command *sc;
+ uint32_t resp_size;
+ unsigned int i;
+ int retval;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
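+	/* Per the xstats API contract, report the required array size when
+	 * the caller's buffer is too small.
+	 */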
+ if (n < LIO_NB_XSTATS)
+ return LIO_NB_XSTATS;
+
+ resp_size = sizeof(struct lio_link_stats_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ resp = (struct lio_link_stats_resp *)sc->virtrptr;
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_PORT_STATS, 0, 0, 0);
+
+	/* Set the wait time in seconds (LIO_MAX_CMD_TIMEOUT is in milliseconds) */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
+ retval);
+ goto get_stats_fail;
+ }
+
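+	/* Poll for completion: flush the instruction queue so the request is
+	 * fetched and drain the ordered response list, giving up after
+	 * roughly LIO_MAX_CMD_TIMEOUT milliseconds.
+	 */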
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ lio_process_ordered_list(lio_dev);
+ rte_delay_ms(1);
+ }
+
+ retval = resp->status;
+ if (retval) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
+ goto get_stats_fail;
+ }
+
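+	/* Byte-swap the response in place; ">> 3" converts the struct size
+	 * in bytes to a count of 64-bit words for lio_swap_8B_data().
+	 */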
+ lio_swap_8B_data((uint64_t *)(&resp->link_stats),
+ sizeof(struct octeon_link_stats) >> 3);
+
+ hw_stats = &resp->link_stats;
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_lio_stats_strings[i].offset);
+ }
+
+ lio_free_soft_command(sc);
+
+ return LIO_NB_XSTATS;
+
+get_stats_fail:
+ lio_free_soft_command(sc);
+
+ return -1;
+}
+
+static int
+lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+			 unsigned int limit __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ unsigned int i;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (xstats_names == NULL)
+ return LIO_NB_XSTATS;
+
+	/* Note: limit is checked in rte_eth_xstats_get_names() */
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+ "%s", rte_lio_stats_strings[i].name);
+ }
+
+ return LIO_NB_XSTATS;
+}
+
+/* Reset hw stats for the port */
+static void
+lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return;
+ }
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send clear stats command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Clear stats command timed out\n");
+ return;
+ }
+
+	/* The firmware cleared the HW counters; also clear the PMD's stored
+	 * per-queue SW stats via the stats_reset op.
+	 */
+ RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
+ (*eth_dev->dev_ops->stats_reset)(eth_dev);
+}
+
+/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
+static void
+lio_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+ uint64_t bytes = 0;
+ uint64_t pkts = 0;
+ uint64_t drop = 0;
+
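+	/* Sum the PMD's per-instruction-queue (tx) SW counters */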
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+ }
+
+ stats->opackets = pkts;
+ stats->obytes = bytes;
+ stats->oerrors = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
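+	/* Sum the per-DROQ (rx) counters; all three drop causes fold into
+	 * ierrors.
+	 */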
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+ }
+ stats->ibytes = bytes;
+ stats->ipackets = pkts;
+ stats->ierrors = drop;
+}
+
+static void
+lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ memset(iq_stats, 0, sizeof(struct lio_iq_stats));
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ memset(oq_stats, 0, sizeof(struct lio_droq_stats));
+ }
+ }
+}
+
static void
lio_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ devinfo->pci_dev = pci_dev;
+
+ switch (pci_dev->id.subsystem_device_id) {
+ /* CN23xx 10G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_210:
+ case PCI_SUBSYS_DEV_ID_CN2360_210:
+ devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ break;
+ /* CN23xx 25G cards */
+ case PCI_SUBSYS_DEV_ID_CN2350_225:
+ case PCI_SUBSYS_DEV_ID_CN2360_225:
+ devinfo->speed_capa = ETH_LINK_SPEED_25G;
+ break;
+ default:
+ lio_dev_err(lio_dev,
+ "Unknown CN23XX subsystem device id. Not setting speed capability.\n");
+ }
devinfo->max_rx_queues = lio_dev->max_rx_queues;
devinfo->max_tx_queues = lio_dev->max_tx_queues;
devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
devinfo->rx_desc_lim = lio_rx_desc_lim;
devinfo->tx_desc_lim = lio_tx_desc_lim;
}
static int
-lio_dev_validate_vf_mtu(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
+ uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
PMD_INIT_FUNC_TRACE();
if (!lio_dev->intf_open) {
- lio_dev_err(lio_dev, "Port %d down, can't check MTU\n",
+ lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
lio_dev->port_id);
return -EINVAL;
}
- /* Limit the MTU to make sure the ethernet packets are between
- * ETHER_MIN_MTU bytes and PF's MTU
+	/* Check that the VF MTU is within the allowed range.
+	 * The new value must not exceed the PF MTU.
+	 */
- if ((new_mtu < ETHER_MIN_MTU) ||
- (new_mtu > lio_dev->linfo.link.s.mtu)) {
- lio_dev_err(lio_dev, "Invalid MTU: %d\n", new_mtu);
- lio_dev_err(lio_dev, "Valid range %d and %d\n",
- ETHER_MIN_MTU, lio_dev->linfo.link.s.mtu);
+ if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
+ lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
+ ETHER_MIN_MTU, pf_mtu);
return -EINVAL;
}
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
+ ctrl_pkt.ncmd.s.param1 = mtu;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to change MTU timed out\n");
+ return -1;
+ }
+
+ if (frame_len > ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
+ eth_dev->data->mtu = mtu;
+
return 0;
}
return 0;
}
+/**
+ * Add VXLAN destination UDP port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ *   UDP tunnel configuration
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Remove VXLAN destination UDP port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ *   UDP tunnel configuration
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
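+	/* A PF-assigned (admin) VLAN cannot be overridden from the VF */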
+ if (lio_dev->linfo.vlan_is_admin_assigned)
+ return -EPERM;
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = on ?
+ LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
+ ctrl_pkt.ncmd.s.param1 = vlan_id;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ return 0;
+}
+
/**
* Atomically writes the link status information into global
* structure rte_eth_dev.
link.link_status = ETH_LINK_DOWN;
link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
memset(&old, 0, sizeof(old));
/* Return what we found */
case LIO_LINK_SPEED_10000:
link.link_speed = ETH_SPEED_NUM_10G;
break;
+ case LIO_LINK_SPEED_25000:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
default:
link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
return 0;
}
+/**
+ * \brief Enable/disable allmulticast: send the updated interface flags to the firmware
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ */
+static void
+lio_change_dev_flag(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
+ ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send change flag message\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "Change dev flag command timed out\n");
+}
+
+static void
+lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
static void
lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
{
* @return
* - nothing
*/
-static void
+void
lio_dev_rx_queue_release(void *rxq)
{
struct lio_droq *droq = rxq;
- struct lio_device *lio_dev = droq->lio_dev;
int oq_no;
- /* Run time queue deletion not supported */
- if (lio_dev->port_configured)
- return;
+ if (droq) {
+ /* Run time queue deletion not supported */
+ if (droq->lio_dev->port_configured)
+ return;
- if (droq != NULL) {
oq_no = droq->q_no;
lio_delete_droq_queue(droq->lio_dev, oq_no);
}
* @return
* - nothing
*/
-static void
+void
lio_dev_tx_queue_release(void *txq)
{
struct lio_instr_queue *tq = txq;
- struct lio_device *lio_dev = tq->lio_dev;
uint32_t fw_mapped_iq_no;
- /* Run time queue deletion not supported */
- if (lio_dev->port_configured)
- return;
- if (tq != NULL) {
+ if (tq) {
+ /* Run time queue deletion not supported */
+ if (tq->lio_dev->port_configured)
+ return;
+
/* Free sg_list */
lio_delete_sglist(tq);
lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
+ if (ls->s.mtu < eth_dev->data->mtu) {
+ lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
+ ls->s.mtu);
+ eth_dev->data->mtu = ls->s.mtu;
+ }
lio_dev->linfo.link.link_status64 = ls->link_status64;
lio_dev_link_update(eth_dev, 0);
}
static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
- uint16_t mtu = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t mtu;
+ uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
int ret = 0;
if (lio_dev->linfo.link.link_status64 == 0) {
ret = -1;
- goto dev_mtu_check_error;
+ goto dev_mtu_set_error;
}
- if (lio_dev->linfo.link.s.mtu != mtu) {
- ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
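+	/* Derive the MTU from the configured max frame length by stripping
+	 * the Ethernet header and CRC, clamping to the minimum legal MTU.
+	 */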
+ mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
+ if (mtu < ETHER_MIN_MTU)
+ mtu = ETHER_MIN_MTU;
+
+ if (eth_dev->data->mtu != mtu) {
+ ret = lio_dev_mtu_set(eth_dev, mtu);
if (ret)
- goto dev_mtu_check_error;
+ goto dev_mtu_set_error;
}
return 0;
-dev_mtu_check_error:
+dev_mtu_set_error:
rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
dev_lsc_handle_error:
return ret;
}
+/* Stop device and disable input/output functions */
+static void
+lio_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
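+	/* Mark the interface closed and fence the store so datapath threads
+	 * observe it before RX is disabled in the firmware.
+	 */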
+ lio_dev->intf_open = 0;
+ rte_mb();
+
+ /* Cancel callback if still running. */
+ rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
+
+ lio_send_rx_ctrl_cmd(eth_dev, 0);
+
+ /* Clear recorded link status */
+ lio_dev->linfo.link.link_status64 = 0;
+}
+
+static int
+lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+		lio_dev_info(lio_dev, "Port is stopped; start the port first\n");
+ return 0;
+ }
+
+ if (lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already UP\n");
+ return 0;
+ }
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
+ lio_dev_err(lio_dev, "Unable to set Link UP\n");
+ return -1;
+ }
+
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return 0;
+}
+
+static int
+lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+		lio_dev_info(lio_dev, "Port is stopped; start the port first\n");
+ return 0;
+ }
+
+ if (!lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already DOWN\n");
+ return 0;
+ }
+
+ lio_dev->linfo.link.s.link_up = 0;
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
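+	/* SW link state is flipped before sending the command so the datapath
+	 * reacts at once; it is rolled back below if the firmware call fails.
+	 */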
+ if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ lio_dev_err(lio_dev, "Unable to set Link Down\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Stop the device and release its queues, mailbox and gather-list
+ * resources. NB: This will require the NIC to be rebooted before it
+ * can be used again.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ *
+ * @return
+ * - nothing
+ */
+static void
+lio_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint32_t i;
+
+ lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
+
+ if (lio_dev->intf_open)
+ lio_dev_stop(eth_dev);
+
+ lio_wait_for_instr_fetch(lio_dev);
+
+ lio_dev->fn_list.disable_io_queues(lio_dev);
+
+ cn23xx_vf_set_io_queues_off(lio_dev);
+
+ /* Reset iq regs (IQ_DBELL).
+ * Clear sli_pktx_cnts (OQ_PKTS_SENT).
+ */
+ for (i = 0; i < lio_dev->nb_rx_queues; i++) {
+ struct lio_droq *droq = lio_dev->droq[i];
+
+ if (droq == NULL)
+ break;
+
+ uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);
+
+ lio_dev_dbg(lio_dev,
+ "pending oq count %u\n", pkt_count);
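+		/* Writing the pending count back clears it (the register uses
+		 * write-to-clear semantics, per the reset note above).
+		 */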
+ rte_write32(pkt_count, droq->pkts_sent_reg);
+ }
+
+	/* Free the PF/VF mailbox */
+ lio_dev->fn_list.free_mbox(lio_dev);
+
+ /* Free glist resources */
+ rte_free(lio_dev->glist_head);
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_head = NULL;
+ lio_dev->glist_lock = NULL;
+
+ lio_dev->port_configured = 0;
+
+ /* Delete all queues */
+ lio_dev_clear_queues(eth_dev);
+}
+
+/**
+ * Enable tunnel rx checksum verification from firmware.
+ */
+static void
+lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
+}
+
+/**
+ * Enable checksum calculation for inner packet in a tunnel.
+ */
+static void
+lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+	/* flush added to prevent cmd failure
+	 * in case the queue is full
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
+}
+
static int lio_dev_configure(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *)mac, ð_dev->data->mac_addrs[0]);
+ /* enable firmware checksum support for tunnel packets */
+ lio_enable_hw_tunnel_rx_checksum(eth_dev);
+ lio_enable_hw_tunnel_tx_checksum(eth_dev);
+
lio_dev->glist_lock =
rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
if (lio_dev->glist_lock == NULL)
static const struct eth_dev_ops liovf_eth_dev_ops = {
.dev_configure = lio_dev_configure,
.dev_start = lio_dev_start,
+ .dev_stop = lio_dev_stop,
+ .dev_set_link_up = lio_dev_set_link_up,
+ .dev_set_link_down = lio_dev_set_link_down,
+ .dev_close = lio_dev_close,
+ .allmulticast_enable = lio_dev_allmulticast_enable,
+ .allmulticast_disable = lio_dev_allmulticast_disable,
.link_update = lio_dev_link_update,
+ .stats_get = lio_dev_stats_get,
+ .xstats_get = lio_dev_xstats_get,
+ .xstats_get_names = lio_dev_xstats_get_names,
+ .stats_reset = lio_dev_stats_reset,
+ .xstats_reset = lio_dev_xstats_reset,
.dev_infos_get = lio_dev_info_get,
+ .vlan_filter_set = lio_dev_vlan_filter_set,
.rx_queue_setup = lio_dev_rx_queue_setup,
.rx_queue_release = lio_dev_rx_queue_release,
.tx_queue_setup = lio_dev_tx_queue_setup,
.reta_query = lio_dev_rss_reta_query,
.rss_hash_conf_get = lio_dev_rss_hash_conf_get,
.rss_hash_update = lio_dev_rss_hash_update,
+ .udp_tunnel_port_add = lio_dev_udp_tunnel_add,
+ .udp_tunnel_port_del = lio_dev_udp_tunnel_del,
+ .mtu_set = lio_dev_mtu_set,
};
static void
if (cn23xx_pfvf_handshake(lio_dev))
goto error;
- /* Initial reset */
- cn23xx_vf_ask_pf_to_do_flr(lio_dev);
- /* Wait for FLR for 100ms per SRIOV specification */
- rte_delay_ms(100);
-
if (cn23xx_vf_set_io_queues_off(lio_dev)) {
lio_dev_err(lio_dev, "Setting io queues off failed\n");
goto error;
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
+ eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct lio_device *lio_dev = LIO_DEV(eth_dev);
PMD_INIT_FUNC_TRACE();
return 0;
}
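+/* Probe glue for the rte_pci_driver model: allocate an ethdev bound to the
+ * PCI device, run the LIO init hook, and release the ethdev on failure.
+ */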
+static int
+lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev,
+ sizeof(struct lio_device));
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = lio_eth_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ lio_eth_dev_uninit);
+}
+
/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
{ .vendor_id = 0, /* sentinel */ }
};
-static struct eth_driver rte_liovf_pmd = {
- .pci_drv = {
- .id_table = pci_id_liovf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = lio_eth_dev_init,
- .eth_dev_uninit = lio_eth_dev_uninit,
- .dev_private_size = sizeof(struct lio_device),
+static struct rte_pci_driver rte_liovf_pmd = {
+ .id_table = pci_id_liovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = lio_eth_dev_pci_probe,
+ .remove = lio_eth_dev_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");