VMXNET3_CMD_GET_CONF_INTR,
VMXNET3_CMD_GET_ADAPTIVE_RING_INFO,
VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
- VMXNET3_CMD_RESERVED5,
+ VMXNET3_CMD_RESERVED5,
+ VMXNET3_CMD_RESERVED6,
+ VMXNET3_CMD_RESERVED7,
+ VMXNET3_CMD_RESERVED8,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF,
} Vmxnet3_Cmd;
/* Adaptive Ring Info Flags */
/* addition 1 for events */
#define VMXNET3_MAX_INTRS 25
+/* Version 6 and later will use below macros */
+#define VMXNET3_EXT_MAX_TX_QUEUES 32
+#define VMXNET3_EXT_MAX_RX_QUEUES 32
+
+/* Version-dependent MAX RX/TX queues macro */
+#define MAX_RX_QUEUES(hw) \
+ (VMXNET3_VERSION_GE_6((hw)) ? \
+ VMXNET3_EXT_MAX_RX_QUEUES : \
+ VMXNET3_MAX_RX_QUEUES)
+#define MAX_TX_QUEUES(hw) \
+ (VMXNET3_VERSION_GE_6((hw)) ? \
+ VMXNET3_EXT_MAX_TX_QUEUES : \
+ VMXNET3_MAX_TX_QUEUES)
+
+/* addition 1 for events */
+#define VMXNET3_EXT_MAX_INTRS 65
+/* NOTE(review): presumably the count of interrupts in the first set
+ * (VMXNET3_EXT_MAX_INTRS minus the event one) — confirm against device spec
+ */
+#define VMXNET3_FIRST_SET_INTRS 64
+
/* value of intrCtrl */
#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
#include "vmware_pack_end.h"
Vmxnet3_IntrConf;
+/* Extended interrupt configuration, used by version-6+ devices when the
+ * extended queue layout is enabled (see Vmxnet3_DSDevReadExt).
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_IntrConfExt {
+ uint8 autoMask;
+ uint8 numIntrs; /* # of interrupts */
+ uint8 eventIntrIdx; /* index of the interrupt used for events */
+ uint8 reserved;
+ __le32 intrCtrl; /* e.g. VMXNET3_IC_DISABLE_ALL */
+ __le32 reserved1;
+ uint8 modLevels[VMXNET3_EXT_MAX_INTRS]; /* moderation level for each intr */
+ uint8 reserved2[3]; /* pad to a 4-byte multiple */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_IntrConfExt;
+
/* one bit per VLAN ID, the size is in the units of uint32 */
#define VMXNET3_VFT_SIZE (4096 / (sizeof(uint32) * 8))
#include "vmware_pack_end.h"
Vmxnet3_DSDevRead;
+/* Extension of the device-read area; appended to Vmxnet3_DriverShared so
+ * version-6+ additions do not disturb the legacy layout.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DSDevReadExt {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ struct Vmxnet3_IntrConfExt intrConfExt;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DSDevReadExt;
+
typedef
#include "vmware_pack_begin.h"
struct Vmxnet3_TxQueueDesc {
typedef
#include "vmware_pack_begin.h"
struct Vmxnet3_DriverShared {
- __le32 magic;
- __le32 pad; /* make devRead start at 64-bit boundaries */
- Vmxnet3_DSDevRead devRead;
- __le32 ecr;
- __le32 reserved;
-
- union {
- __le32 reserved1[4];
- Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of executing the
- * relevant command
- */
- } cu;
+ __le32 magic;
+ __le32 size; /* size of DriverShared */
+ /* (replaces the former pad field; same position, so devRead stays
+ * 64-bit aligned)
+ */
+ Vmxnet3_DSDevRead devRead;
+ __le32 ecr;
+ __le32 reserved;
+
+ union {
+ __le32 reserved1[4];
+ /* only valid in the context of executing the relevant command */
+ Vmxnet3_CmdInfo cmdInfo;
+ } cu;
+ /* v6+ extension, appended so all pre-existing field offsets are unchanged */
+ struct Vmxnet3_DSDevReadExt devReadExt;
}
#include "vmware_pack_end.h"
Vmxnet3_DriverShared;
((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_V6_MAX_MTU 9190
#define VMXNET3_MIN_MTU 60
#define VMXNET3_LINK_UP (10000 << 16 | 1) // 10 Gbps, up
}
/*
- * Enable all intrs used by the device
+ * Simple helper to get intrCtrl and eventIntrIdx based on config and hw version
 */
static void
-vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
+vmxnet3_get_intr_ctrl_ev(struct vmxnet3_hw *hw,
+ uint8 **out_eventIntrIdx,
+ uint32 **out_intrCtrl)
{
- Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
-
- PMD_INIT_FUNC_TRACE();
- devRead->intrConf.intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
-
- if (hw->intr.lsc_only) {
- vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
+ /* Hands back pointers into the shared area; the extended interrupt
+ * config is used only on v6+ devices with queuesExtEnabled set.
+ */
+ if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
+ *out_eventIntrIdx = &hw->shared->devReadExt.intrConfExt.eventIntrIdx;
+ *out_intrCtrl = &hw->shared->devReadExt.intrConfExt.intrCtrl;
} else {
- int i;
-
- for (i = 0; i < hw->intr.num_intrs; i++)
- vmxnet3_enable_intr(hw, i);
+ *out_eventIntrIdx = &hw->shared->devRead.intrConf.eventIntrIdx;
+ *out_intrCtrl = &hw->shared->devRead.intrConf.intrCtrl;
}
}
vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
{
int i;
+ uint8 *eventIntrIdx;
+ uint32 *intrCtrl;
PMD_INIT_FUNC_TRACE();
+ vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
+
+ /* set the global disable bit, then mask every vector individually */
+ *intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
- hw->shared->devRead.intrConf.intrCtrl |=
- rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
- for (i = 0; i < hw->num_intrs; i++)
+ /* bound uses hw->intr.num_intrs, consistent with the enable path */
+ for (i = 0; i < hw->intr.num_intrs; i++)
vmxnet3_disable_intr(hw, i);
}
+/*
+ * Enable all intrs used by the device
+ */
+static void
+vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
+{
+	uint8 *ev_idx;
+	uint32 *ctrl;
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+	vmxnet3_get_intr_ctrl_ev(hw, &ev_idx, &ctrl);
+
+	/* Clear the global disable bit so vectors can fire again. */
+	*ctrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
+
+	if (!hw->intr.lsc_only) {
+		for (i = 0; i < hw->intr.num_intrs; i++)
+			vmxnet3_enable_intr(hw, i);
+	} else {
+		/* Link-state-change only: unmask just the event interrupt. */
+		vmxnet3_enable_intr(hw, *ev_idx);
+	}
+}
+
/*
* Gets tx data ring descriptor size.
*/
/* Check h/w version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_5)) {
+ if (ver & (1 << VMXNET3_REV_6)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_6);
+ hw->version = VMXNET3_REV_6 + 1;
+ } else if (ver & (1 << VMXNET3_REV_5)) {
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
1 << VMXNET3_REV_5);
hw->version = VMXNET3_REV_5 + 1;
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
- dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
- PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
- return -EINVAL;
+ if (!VMXNET3_VERSION_GE_6(hw)) {
+ if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR,
+ "ERROR: Number of rx queues not power of 2");
+ return -EINVAL;
+ }
}
- if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
- PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
- return -EINVAL;
+ /* At this point, the number of queues requested has already
+ * been validated against dev_infos max queues by EAL
+ */
+ if (dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES ||
+ dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES) {
+ hw->queuesExtEnabled = 1;
+ } else {
+ hw->queuesExtEnabled = 0;
}
size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
return -1;
intr_vector = dev->data->nb_rx_queues;
- if (intr_vector > VMXNET3_MAX_RX_QUEUES) {
+ if (intr_vector > MAX_RX_QUEUES(hw)) {
PMD_INIT_LOG(ERR, "At most %d intr queues supported",
- VMXNET3_MAX_RX_QUEUES);
+ MAX_RX_QUEUES(hw));
return -ENOTSUP;
}
uint32_t mtu = dev->data->mtu;
Vmxnet3_DriverShared *shared = hw->shared;
Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
uint32_t i;
int ret;
}
/* intr settings */
- devRead->intrConf.autoMask = hw->intr.mask_mode == VMXNET3_IMM_AUTO;
- devRead->intrConf.numIntrs = hw->intr.num_intrs;
- for (i = 0; i < hw->intr.num_intrs; i++)
- devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
+ if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
+ devReadExt->intrConfExt.autoMask = hw->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devReadExt->intrConfExt.numIntrs = hw->intr.num_intrs;
+ for (i = 0; i < hw->intr.num_intrs; i++)
+ devReadExt->intrConfExt.modLevels[i] =
+ hw->intr.mod_levels[i];
+
+ devReadExt->intrConfExt.eventIntrIdx = hw->intr.event_intr_idx;
+ devReadExt->intrConfExt.intrCtrl |=
+ rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+ } else {
+ devRead->intrConf.autoMask = hw->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devRead->intrConf.numIntrs = hw->intr.num_intrs;
+ for (i = 0; i < hw->intr.num_intrs; i++)
+ devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
- devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
- devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+ devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
+ devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+ }
/* RxMode set to 0 of VMXNET3_RXM_xxx */
devRead->rxFilterConf.rxMode = 0;
return -EINVAL;
}
- /* Setup memory region for rx buffers */
- ret = vmxnet3_dev_setup_memreg(dev);
- if (ret == 0) {
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
- VMXNET3_CMD_REGISTER_MEMREGS);
- ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
- if (ret != 0)
- PMD_INIT_LOG(DEBUG,
- "Failed in setup memory region cmd\n");
- ret = 0;
+ /* Check memregs restrictions first */
+ if (dev->data->nb_rx_queues <= VMXNET3_MAX_RX_QUEUES &&
+ dev->data->nb_tx_queues <= VMXNET3_MAX_TX_QUEUES) {
+ ret = vmxnet3_dev_setup_memreg(dev);
+ if (ret == 0) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_REGISTER_MEMREGS);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ if (ret != 0)
+ PMD_INIT_LOG(DEBUG,
+ "Failed in setup memory region cmd\n");
+ ret = 0;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ }
} else {
- PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ PMD_INIT_LOG(WARNING, "Memregs can't init (rx: %d, tx: %d)",
+ dev->data->nb_rx_queues, dev->data->nb_tx_queues);
}
if (VMXNET3_VERSION_GE_4(hw) &&
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
-
for (i = 0; i < hw->num_tx_queues; i++)
vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
for (i = 0; i < hw->num_rx_queues; i++)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
for (i = 0; i < hw->num_tx_queues; i++) {
vmxnet3_tx_stats_get(hw, i, &txStats);
stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
}
- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
for (i = 0; i < hw->num_rx_queues; i++) {
vmxnet3_rx_stats_get(hw, i, &rxStats);
struct rte_eth_dev_info *dev_info)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
+ int queues = 0;
+
+ if (VMXNET3_VERSION_GE_6(hw)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ queues = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ if (queues > 0) {
+ dev_info->max_rx_queues =
+ RTE_MIN(VMXNET3_EXT_MAX_RX_QUEUES, ((queues >> 8) & 0xff));
+ dev_info->max_tx_queues =
+ RTE_MIN(VMXNET3_EXT_MAX_TX_QUEUES, (queues & 0xff));
+ } else {
+ dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+ }
+ } else {
+ dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+ }
- dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
- dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
dev_info->min_mtu = VMXNET3_MIN_MTU;
}
static int
-vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, __rte_unused uint16_t mtu)
+vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
- if (dev->data->dev_started) {
- PMD_DRV_LOG(ERR, "Port %d must be stopped to configure MTU",
- dev->data->port_id);
- return -EBUSY;
- }
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ /* cache the new address in hw->perm_addr and program it into the device */
+ rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
+ vmxnet3_write_mac(hw, mac_addr->addr_bytes);
return 0;
}
static int
-vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
+vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
+ /* NOTE(review): the extra 4 bytes presumably account for a VLAN tag —
+ * confirm against the device MTU accounting
+ */
+
+ if (mtu < VMXNET3_MIN_MTU)
+ return -EINVAL;
+
+ /* v6 devices accept a larger MTU; older versions keep the legacy cap */
+ if (VMXNET3_VERSION_GE_6(hw)) {
+ if (mtu > VMXNET3_V6_MAX_MTU)
+ return -EINVAL;
+ } else {
+ if (mtu > VMXNET3_MAX_MTU) {
+ PMD_DRV_LOG(ERR, "MTU %d too large in device version v%d",
+ mtu, hw->version);
+ return -EINVAL;
+ }
+ }
+
+ dev->data->mtu = mtu;
+ /* update max frame size */
+ /* NOTE(review): rxmode.mtu is assigned the full frame size, not the MTU
+ * value — verify this is intended
+ */
+ dev->data->dev_conf.rxmode.mtu = frame_size;
+
+ if (dev->data->dev_started == 0)
+ return 0;
+
+ /* changing mtu for vmxnet3 pmd does not require a restart
+ * as it does not need to repopulate the rx rings to support
+ * different mtu size. We stop and restart the device here
+ * just to pass the mtu info to the backend.
+ */
+ vmxnet3_dev_stop(dev);
+ vmxnet3_dev_start(dev);
- rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
- vmxnet3_write_mac(hw, mac_addr->addr_bytes);
return 0;
}
{
struct rte_eth_dev *dev = param;
struct vmxnet3_hw *hw = dev->data->dev_private;
- Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
uint32_t events;
+ uint8 *eventIntrIdx;
+ uint32 *intrCtrl;
PMD_INIT_FUNC_TRACE();
- vmxnet3_disable_intr(hw, devRead->intrConf.eventIntrIdx);
+
+ vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
+ vmxnet3_disable_intr(hw, *eventIntrIdx);
events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
if (events == 0)
RTE_LOG(DEBUG, PMD, "Reading events: 0x%X", events);
vmxnet3_process_events(dev);
done:
- vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
+ vmxnet3_enable_intr(hw, *eventIntrIdx);
}
static int