#define VMXNET3_TX_MAX_SEG UINT8_MAX
#define VMXNET3_TX_OFFLOAD_CAP \
- (DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define VMXNET3_RX_OFFLOAD_CAP \
- (DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_RSS_HASH)
+ (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_SCATTER | \
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_TCP_LRO | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
int vmxnet3_segs_dynfield_offset = -1;
struct rte_eth_xstat *xstats, unsigned int n);
static int vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
+static int vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
struct rte_ether_addr *mac_addr);
static void vmxnet3_process_events(struct rte_eth_dev *dev);
static void vmxnet3_interrupt_handler(void *param);
+static int
+vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int
+vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
.dev_stop = vmxnet3_dev_stop,
.dev_close = vmxnet3_dev_close,
.dev_reset = vmxnet3_dev_reset,
+ .link_update = vmxnet3_dev_link_update,
.promiscuous_enable = vmxnet3_dev_promiscuous_enable,
.promiscuous_disable = vmxnet3_dev_promiscuous_disable,
.allmulticast_enable = vmxnet3_dev_allmulticast_enable,
.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
- .link_update = vmxnet3_dev_link_update,
+ .mac_addr_set = vmxnet3_mac_addr_set,
+ .mtu_set = vmxnet3_dev_mtu_set,
.stats_get = vmxnet3_dev_stats_get,
- .xstats_get_names = vmxnet3_dev_xstats_get_names,
- .xstats_get = vmxnet3_dev_xstats_get,
.stats_reset = vmxnet3_dev_stats_reset,
- .mac_addr_set = vmxnet3_mac_addr_set,
+ .xstats_get = vmxnet3_dev_xstats_get,
+ .xstats_get_names = vmxnet3_dev_xstats_get_names,
.dev_infos_get = vmxnet3_dev_info_get,
+ .fw_version_get = vmxnet3_hw_ver_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
- .mtu_set = vmxnet3_dev_mtu_set,
.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
.rx_queue_release = vmxnet3_dev_rx_queue_release,
- .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
- .tx_queue_release = vmxnet3_dev_tx_queue_release,
.rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
+ .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
+ .tx_queue_release = vmxnet3_dev_tx_queue_release,
+ .reta_update = vmxnet3_rss_reta_update,
+ .reta_query = vmxnet3_rss_reta_query,
};
struct vmxnet3_xstats_name_off {
}
/*
- * Enable all intrs used by the device
+ * Simple helper to get intrCtrl and eventIntrIdx based on config and hw version
*/
static void
-vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
+vmxnet3_get_intr_ctrl_ev(struct vmxnet3_hw *hw,
+ uint8 **out_eventIntrIdx,
+ uint32 **out_intrCtrl)
{
- Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
-
- PMD_INIT_FUNC_TRACE();
- devRead->intrConf.intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
-
- if (hw->intr.lsc_only) {
- vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
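+ /* v6 devices with extended queues keep the interrupt config in devReadExt */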
+ if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
+ *out_eventIntrIdx = &hw->shared->devReadExt.intrConfExt.eventIntrIdx;
+ *out_intrCtrl = &hw->shared->devReadExt.intrConfExt.intrCtrl;
} else {
- int i;
-
- for (i = 0; i < hw->intr.num_intrs; i++)
- vmxnet3_enable_intr(hw, i);
+ *out_eventIntrIdx = &hw->shared->devRead.intrConf.eventIntrIdx;
+ *out_intrCtrl = &hw->shared->devRead.intrConf.intrCtrl;
}
}
vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
{
int i;
+ uint8 *eventIntrIdx;
+ uint32 *intrCtrl;
PMD_INIT_FUNC_TRACE();
+ vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
- hw->shared->devRead.intrConf.intrCtrl |=
- rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
- for (i = 0; i < hw->num_intrs; i++)
+ *intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+
+ for (i = 0; i < hw->intr.num_intrs; i++)
vmxnet3_disable_intr(hw, i);
}
+/*
+ * Enable all intrs used by the device
+ */
+static void
+vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
+{
+ uint8 *eventIntrIdx;
+ uint32 *intrCtrl;
+
+ PMD_INIT_FUNC_TRACE();
+ vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
+
+ *intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
+
+ if (hw->intr.lsc_only) {
+ vmxnet3_enable_intr(hw, *eventIntrIdx);
+ } else {
+ int i;
+
+ for (i = 0; i < hw->intr.num_intrs; i++)
+ vmxnet3_enable_intr(hw, i);
+ }
+}
+
/*
* Gets tx data ring descriptor size.
*/
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+ eth_dev->rx_queue_count = vmxnet3_dev_rx_queue_count;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* extra mbuf field is required to guess MSS */
/* Check h/w version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
- PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
- if (ver & (1 << VMXNET3_REV_4)) {
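+ /* Negotiate the highest device version supported by both driver and device */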
+ if (ver & (1 << VMXNET3_REV_6)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_6);
+ hw->version = VMXNET3_REV_6 + 1;
+ } else if (ver & (1 << VMXNET3_REV_5)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_5);
+ hw->version = VMXNET3_REV_5 + 1;
+ } else if (ver & (1 << VMXNET3_REV_4)) {
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
1 << VMXNET3_REV_4);
hw->version = VMXNET3_REV_4 + 1;
rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
ð_dev->data->mac_addrs[0]);
- PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ PMD_INIT_LOG(DEBUG, "MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
/* set the initial link status */
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
rte_eth_linkstatus_set(eth_dev, &link);
return 0;
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
- dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
- PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
- return -EINVAL;
+ if (!VMXNET3_VERSION_GE_6(hw)) {
+ if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR,
+ "ERROR: Number of rx queues not a power of 2");
+ return -EINVAL;
+ }
}
- if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
- PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
- return -EINVAL;
+ /* At this point, the number of queues requested has already
+ * been validated against the dev_info max queues by the ethdev layer
+ */
+ if (dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES ||
+ dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES) {
+ hw->queuesExtEnabled = 1;
+ } else {
+ hw->queuesExtEnabled = 0;
}
size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
hw->queueDescPA = mz->iova;
hw->queue_desc_len = (uint16_t)size;
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
/* Allocate memory structure for UPT1_RSSConf and configure */
mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
"rss_conf", rte_socket_id(),
uint32_t val;
PMD_INIT_LOG(DEBUG,
- "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ "Writing MAC Address : " RTE_ETHER_ADDR_PRT_FMT,
addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
return -1;
intr_vector = dev->data->nb_rx_queues;
- if (intr_vector > VMXNET3_MAX_RX_QUEUES) {
+ if (intr_vector > MAX_RX_QUEUES(hw)) {
PMD_INIT_LOG(ERR, "At most %d intr queues supported",
- VMXNET3_MAX_RX_QUEUES);
+ MAX_RX_QUEUES(hw));
return -ENOTSUP;
}
return -1;
}
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec =
- rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int), 0);
- if (intr_handle->intr_vec == NULL) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
dev->data->nb_rx_queues);
rte_intr_efd_disable(intr_handle);
if (!rte_intr_allow_others(intr_handle) &&
dev->data->dev_conf.intr_conf.lsc != 0) {
PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
+ rte_intr_vec_list_free(intr_handle);
rte_intr_efd_disable(intr_handle);
return -1;
}
/* if we cannot allocate one MSI-X vector per queue, don't enable
* interrupt mode.
*/
- if (hw->intr.num_intrs != (intr_handle->nb_efd + 1)) {
+ if (hw->intr.num_intrs !=
+ (rte_intr_nb_efd_get(intr_handle) + 1)) {
PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
- hw->intr.num_intrs, intr_handle->nb_efd + 1);
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
+ hw->intr.num_intrs,
+ rte_intr_nb_efd_get(intr_handle) + 1);
+ rte_intr_vec_list_free(intr_handle);
rte_intr_efd_disable(intr_handle);
return -1;
}
for (i = 0; i < dev->data->nb_rx_queues; i++)
- intr_handle->intr_vec[i] = i + 1;
+ if (rte_intr_vec_list_index_set(intr_handle, i, i + 1))
+ return -rte_errno;
for (i = 0; i < hw->intr.num_intrs; i++)
hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
uint32_t mtu = dev->data->mtu;
Vmxnet3_DriverShared *shared = hw->shared;
Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
uint32_t i;
int ret;
if (hw->intr.lsc_only)
tqd->conf.intrIdx = 1;
else
- tqd->conf.intrIdx = intr_handle->intr_vec[i];
+ tqd->conf.intrIdx =
+ rte_intr_vec_list_index_get(intr_handle,
+ i);
tqd->status.stopped = TRUE;
tqd->status.error = 0;
memset(&tqd->stats, 0, sizeof(tqd->stats));
rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
rqd->conf.compRingSize = rxq->comp_ring.size;
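+ /* v3+ devices support an Rx data ring, used to deliver small packets inline */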
+ if (VMXNET3_VERSION_GE_3(hw)) {
+ rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
+ rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
+ }
+
if (hw->intr.lsc_only)
rqd->conf.intrIdx = 1;
else
- rqd->conf.intrIdx = intr_handle->intr_vec[i];
+ rqd->conf.intrIdx =
+ rte_intr_vec_list_index_get(intr_handle,
+ i);
rqd->status.stopped = TRUE;
rqd->status.error = 0;
memset(&rqd->stats, 0, sizeof(rqd->stats));
}
/* intr settings */
- devRead->intrConf.autoMask = hw->intr.mask_mode == VMXNET3_IMM_AUTO;
- devRead->intrConf.numIntrs = hw->intr.num_intrs;
- for (i = 0; i < hw->intr.num_intrs; i++)
- devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
+ if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
+ devReadExt->intrConfExt.autoMask = hw->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devReadExt->intrConfExt.numIntrs = hw->intr.num_intrs;
+ for (i = 0; i < hw->intr.num_intrs; i++)
+ devReadExt->intrConfExt.modLevels[i] =
+ hw->intr.mod_levels[i];
+
+ devReadExt->intrConfExt.eventIntrIdx = hw->intr.event_intr_idx;
+ devReadExt->intrConfExt.intrCtrl |=
+ rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+ } else {
+ devRead->intrConf.autoMask = hw->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devRead->intrConf.numIntrs = hw->intr.num_intrs;
+ for (i = 0; i < hw->intr.num_intrs; i++)
+ devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
- devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
- devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+ devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
+ devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+ }
/* RxMode set to 0 of VMXNET3_RXM_xxx */
devRead->rxFilterConf.rxMode = 0;
/* Setting up feature flags */
- if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
devRead->misc.uptFeatures |= VMXNET3_F_LRO;
devRead->misc.maxNumRxSG = 0;
}
- if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
ret = vmxnet3_rss_configure(dev);
if (ret != VMXNET3_SUCCESS)
return ret;
}
ret = vmxnet3_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
if (ret)
return ret;
return -EINVAL;
}
- /* Setup memory region for rx buffers */
- ret = vmxnet3_dev_setup_memreg(dev);
- if (ret == 0) {
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
- VMXNET3_CMD_REGISTER_MEMREGS);
- ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
- if (ret != 0)
- PMD_INIT_LOG(DEBUG,
- "Failed in setup memory region cmd\n");
- ret = 0;
+ /* Check memregs restrictions first */
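+ /* (memregs are only set up when the queue counts fit the default limits; with extended queues the command is skipped) */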
+ if (dev->data->nb_rx_queues <= VMXNET3_MAX_RX_QUEUES &&
+ dev->data->nb_tx_queues <= VMXNET3_MAX_TX_QUEUES) {
+ ret = vmxnet3_dev_setup_memreg(dev);
+ if (ret == 0) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_REGISTER_MEMREGS);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ if (ret != 0)
+ PMD_INIT_LOG(DEBUG,
+ "Failed in setup memory region cmd\n");
+ ret = 0;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ }
} else {
- PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ PMD_INIT_LOG(WARNING, "Memregs can't init (rx: %d, tx: %d)",
+ dev->data->nb_rx_queues, dev->data->nb_tx_queues);
}
if (VMXNET3_VERSION_GE_4(hw) &&
- dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
/* Check for additional RSS */
ret = vmxnet3_v4_rss_configure(dev);
if (ret != VMXNET3_SUCCESS) {
/* Clean datapath event and queue/vector mapping */
rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec != NULL) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+ rte_intr_vec_list_free(intr_handle);
/* quiesce the device first */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
rte_eth_linkstatus_set(dev, &link);
hw->adapter_stopped = 1;
PMD_INIT_FUNC_TRACE();
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- void *rxq = dev->data->rx_queues[i];
-
- vmxnet3_dev_rx_queue_release(rxq);
- }
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ vmxnet3_dev_rx_queue_release(dev, i);
dev->data->nb_rx_queues = 0;
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- void *txq = dev->data->tx_queues[i];
-
- vmxnet3_dev_tx_queue_release(txq);
- }
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ vmxnet3_dev_tx_queue_release(dev, i);
dev->data->nb_tx_queues = 0;
}
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
-
for (i = 0; i < hw->num_tx_queues; i++)
vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
for (i = 0; i < hw->num_rx_queues; i++)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
for (i = 0; i < hw->num_tx_queues; i++) {
vmxnet3_tx_stats_get(hw, i, &txStats);
stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
}
- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
for (i = 0; i < hw->num_rx_queues; i++) {
vmxnet3_rx_stats_get(hw, i, &rxStats);
struct rte_eth_dev_info *dev_info)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
+ int queues = 0;
+
+ if (VMXNET3_VERSION_GE_6(hw)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+ queues = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
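+ /* The result encodes max Rx queues in bits 15:8 and max Tx queues in bits 7:0 */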
+ if (queues > 0) {
+ dev_info->max_rx_queues =
+ RTE_MIN(VMXNET3_EXT_MAX_RX_QUEUES, ((queues >> 8) & 0xff));
+ dev_info->max_tx_queues =
+ RTE_MIN(VMXNET3_EXT_MAX_TX_QUEUES, (queues & 0xff));
+ } else {
+ dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+ }
+ } else {
+ dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+ }
- dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
- dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
dev_info->min_mtu = VMXNET3_MIN_MTU;
dev_info->max_mtu = VMXNET3_MAX_MTU;
- dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
dev_info->tx_queue_offload_capa = 0;
-
+ if (hw->rss_conf == NULL) {
+ /* RSS not configured */
+ dev_info->reta_size = 0;
+ } else {
+ dev_info->reta_size = hw->rss_conf->indTableSize;
+ }
return 0;
}
+static int
+vmxnet3_hw_ver_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size)
+{
+ int ret;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
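+ /* Per the fw_version_get contract, return 0 on success, or the required
+ * buffer size (including the terminating NUL) if fw_size is too small
+ */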
+ ret = snprintf(fw_version, fw_size, "v%d", hw->version);
+
+ ret += 1; /* account for the terminating '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
}
static int
-vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, __rte_unused uint16_t mtu)
+vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
- if (dev->data->dev_started) {
- PMD_DRV_LOG(ERR, "Port %d must be stopped to configure MTU",
- dev->data->port_id);
- return -EBUSY;
- }
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
+ vmxnet3_write_mac(hw, mac_addr->addr_bytes);
return 0;
}
static int
-vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
+vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
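+ /* Max frame = MTU + Ethernet header + CRC + 4 bytes for the VLAN tag */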
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
+
+ if (mtu < VMXNET3_MIN_MTU)
+ return -EINVAL;
+
+ if (VMXNET3_VERSION_GE_6(hw)) {
+ if (mtu > VMXNET3_V6_MAX_MTU)
+ return -EINVAL;
+ } else {
+ if (mtu > VMXNET3_MAX_MTU) {
+ PMD_DRV_LOG(ERR, "MTU %d too large in device version v%d",
+ mtu, hw->version);
+ return -EINVAL;
+ }
+ }
+
+ dev->data->mtu = mtu;
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.mtu = frame_size;
+
+ if (dev->data->dev_started == 0)
+ return 0;
+
+ /* Changing the MTU on vmxnet3 does not require a full restart:
+ * the Rx rings do not need to be repopulated for a different
+ * MTU size. We stop and restart the device here only to pass
+ * the new MTU to the backend.
+ */
+ vmxnet3_dev_stop(dev);
+ vmxnet3_dev_start(dev);
- rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
- vmxnet3_write_mac(hw, mac_addr->addr_bytes);
return 0;
}
ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
if (ret & 0x1)
- link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_status = RTE_ETH_LINK_UP;
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link.link_speed = RTE_ETH_SPEED_NUM_10G;
+ link.link_autoneg = RTE_ETH_LINK_FIXED;
return rte_eth_linkstatus_set(dev, &link);
}
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
uint32_t *vf_table = devRead->rxFilterConf.vfTable;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
else
devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_CMD_UPDATE_FEATURE);
}
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
{
struct rte_eth_dev *dev = param;
struct vmxnet3_hw *hw = dev->data->dev_private;
- Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
uint32_t events;
+ uint8 *eventIntrIdx;
+ uint32 *intrCtrl;
PMD_INIT_FUNC_TRACE();
- vmxnet3_disable_intr(hw, devRead->intrConf.eventIntrIdx);
+
+ vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
+ vmxnet3_disable_intr(hw, *eventIntrIdx);
events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
if (events == 0)
RTE_LOG(DEBUG, PMD, "Reading events: 0x%X", events);
vmxnet3_process_events(dev);
done:
- vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
+ vmxnet3_enable_intr(hw, *eventIntrIdx);
}
static int
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- vmxnet3_enable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
+ vmxnet3_enable_intr(hw,
+ rte_intr_vec_list_index_get(dev->intr_handle,
+ queue_id));
return 0;
}
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- vmxnet3_disable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
+ vmxnet3_disable_intr(hw,
+ rte_intr_vec_list_index_get(dev->intr_handle, queue_id));
return 0;
}
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);
+
+static int
+vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i, idx, shift;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
+
+ if (reta_size != dev_rss_conf->indTableSize) {
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match "
+ "the supported number (%d)",
+ reta_size, dev_rss_conf->indTableSize);
+ return -EINVAL;
+ }
+
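+ /* Each reta_conf entry covers RTE_ETH_RETA_GROUP_SIZE (64) table entries;
+ * only the entries selected by the mask are updated
+ */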
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & RTE_BIT64(shift))
+ dev_rss_conf->indTable[i] = (uint8_t)reta_conf[idx].reta[shift];
+ }
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RSSIDT);
+
+ return 0;
+}
+
+static int
+vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i, idx, shift;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
+
+ if (reta_size != dev_rss_conf->indTableSize) {
+ PMD_DRV_LOG(ERR,
+ "Size of requested hash lookup table (%d) doesn't "
+ "match the configured size (%d)",
+ reta_size, dev_rss_conf->indTableSize);
+ return -EINVAL;
+ }
+
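+ /* Mirror of the update path: report only the entries selected by the mask */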
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & RTE_BIT64(shift))
+ reta_conf[idx].reta[shift] = dev_rss_conf->indTable[i];
+ }
+
+ return 0;
+}