struct rte_ether_addr *mac_addr);
static void vmxnet3_process_events(struct rte_eth_dev *dev);
static void vmxnet3_interrupt_handler(void *param);
+static int
+vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int
+vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int vmxnet3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
.dev_stop = vmxnet3_dev_stop,
.dev_close = vmxnet3_dev_close,
.dev_reset = vmxnet3_dev_reset,
+ .link_update = vmxnet3_dev_link_update,
.promiscuous_enable = vmxnet3_dev_promiscuous_enable,
.promiscuous_disable = vmxnet3_dev_promiscuous_disable,
.allmulticast_enable = vmxnet3_dev_allmulticast_enable,
.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
- .link_update = vmxnet3_dev_link_update,
+ .mac_addr_set = vmxnet3_mac_addr_set,
+ .mtu_set = vmxnet3_dev_mtu_set,
.stats_get = vmxnet3_dev_stats_get,
- .xstats_get_names = vmxnet3_dev_xstats_get_names,
- .xstats_get = vmxnet3_dev_xstats_get,
.stats_reset = vmxnet3_dev_stats_reset,
- .mac_addr_set = vmxnet3_mac_addr_set,
+ .xstats_get = vmxnet3_dev_xstats_get,
+ .xstats_get_names = vmxnet3_dev_xstats_get_names,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
- .mtu_set = vmxnet3_dev_mtu_set,
.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
.rx_queue_release = vmxnet3_dev_rx_queue_release,
- .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
- .tx_queue_release = vmxnet3_dev_tx_queue_release,
.rx_queue_intr_enable = vmxnet3_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = vmxnet3_dev_rx_queue_intr_disable,
+ .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
+ .tx_queue_release = vmxnet3_dev_tx_queue_release,
+ .reta_update = vmxnet3_rss_reta_update,
+ .reta_query = vmxnet3_rss_reta_query,
};
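With reta_update/reta_query wired into the ops table, the indirection table becomes reachable through the generic ethdev calls. A minimal application-side sketch, assuming a port already configured with RSS and nb_queues Rx queues; set_even_reta and the 512-entry cap are illustrative, not part of this patch:

#include <errno.h>
#include <string.h>
#include <rte_bitops.h>
#include <rte_ethdev.h>

/* Sketch: spread the indirection table evenly across nb_queues. */
static int
set_even_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i, idx, shift;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (dev_info.reta_size > 512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= RTE_BIT64(shift);
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	/* Dispatches to vmxnet3_rss_reta_update() via the ops table above. */
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   dev_info.reta_size);
}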
struct vmxnet3_xstats_name_off {
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+ eth_dev->rx_queue_count = vmxnet3_dev_rx_queue_count;
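Hooking rx_queue_count makes the generic rte_eth_rx_queue_count() helper work on vmxnet3 ports. A usage sketch with placeholder port/queue ids:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: report the Rx descriptor backlog of one queue. */
static void
log_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
	/* Used-descriptor count, or a negative errno (-ENOTSUP when the
	 * driver does not provide the callback hooked up above).
	 */
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used >= 0)
		printf("port %u rxq %u: %d descriptors in use\n",
		       port_id, queue_id, used);
}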
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* extra mbuf field is required to guess MSS */
/* Check h/w version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
- if (ver & (1 << VMXNET3_REV_4)) {
+ if (ver & (1 << VMXNET3_REV_5)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_5);
+ hw->version = VMXNET3_REV_5 + 1;
+ } else if (ver & (1 << VMXNET3_REV_4)) {
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
1 << VMXNET3_REV_4);
hw->version = VMXNET3_REV_4 + 1;
return -1;
}
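The VRRS handshake is the same step per revision: the device advertises the revisions it supports as a bitmask, the driver writes back the single highest revision it also understands, and caches it as hw->version (revision constants are zero-based, hence the + 1). The cascade above could equally be a loop; a sketch, assuming the VMXNET3_REV_1..VMXNET3_REV_5 constants are consecutive integers starting at 0 as in the base headers (vmxnet3_negotiate_version is a hypothetical helper, not part of this patch):

/* Sketch: negotiate the highest mutually supported device revision. */
static int
vmxnet3_negotiate_version(struct vmxnet3_hw *hw)
{
	uint32_t ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	int rev;

	for (rev = VMXNET3_REV_5; rev >= VMXNET3_REV_1; rev--) {
		if (ver & (1 << rev)) {
			VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
					       1 << rev);
			hw->version = rev + 1; /* e.g. REV_5 (4) -> 5 */
			return 0;
		}
	}
	return -1; /* no common revision */
}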
- if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
- intr_handle->intr_vec =
- rte_zmalloc("intr_vec",
- dev->data->nb_rx_queues * sizeof(int), 0);
- if (intr_handle->intr_vec == NULL) {
+ if (rte_intr_dp_is_en(intr_handle)) {
+ if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+ dev->data->nb_rx_queues)) {
PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queues intr_vec",
dev->data->nb_rx_queues);
rte_intr_efd_disable(intr_handle);
if (!rte_intr_allow_others(intr_handle) &&
dev->data->dev_conf.intr_conf.lsc != 0) {
PMD_INIT_LOG(ERR, "not enough intr vector to support both Rx interrupt and LSC");
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
+ rte_intr_vec_list_free(intr_handle);
rte_intr_efd_disable(intr_handle);
return -1;
}
/* if we cannot allocate one MSI-X vector per queue, don't enable
* interrupt mode.
*/
- if (hw->intr.num_intrs != (intr_handle->nb_efd + 1)) {
+ if (hw->intr.num_intrs !=
+ (rte_intr_nb_efd_get(intr_handle) + 1)) {
PMD_INIT_LOG(ERR, "Device configured with %d Rx intr vectors, expecting %d",
- hw->intr.num_intrs, intr_handle->nb_efd + 1);
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
+ hw->intr.num_intrs,
+ rte_intr_nb_efd_get(intr_handle) + 1);
+ rte_intr_vec_list_free(intr_handle);
rte_intr_efd_disable(intr_handle);
return -1;
}
for (i = 0; i < dev->data->nb_rx_queues; i++)
- intr_handle->intr_vec[i] = i + 1;
+ if (rte_intr_vec_list_index_set(intr_handle, i, i + 1))
+ return -rte_errno;
for (i = 0; i < hw->intr.num_intrs; i++)
hw->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
if (hw->intr.lsc_only)
tqd->conf.intrIdx = 1;
else
- tqd->conf.intrIdx = intr_handle->intr_vec[i];
+ tqd->conf.intrIdx =
+ rte_intr_vec_list_index_get(intr_handle,
+ i);
tqd->status.stopped = TRUE;
tqd->status.error = 0;
memset(&tqd->stats, 0, sizeof(tqd->stats));
if (hw->intr.lsc_only)
rqd->conf.intrIdx = 1;
else
- rqd->conf.intrIdx = intr_handle->intr_vec[i];
+ rqd->conf.intrIdx =
+ rte_intr_vec_list_index_get(intr_handle,
+ i);
rqd->status.stopped = TRUE;
rqd->status.error = 0;
memset(&rqd->stats, 0, sizeof(rqd->stats));
/* Clean datapath event and queue/vector mapping */
rte_intr_efd_disable(intr_handle);
- if (intr_handle->intr_vec != NULL) {
- rte_free(intr_handle->intr_vec);
- intr_handle->intr_vec = NULL;
- }
+ rte_intr_vec_list_free(intr_handle);
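These hunks track the DPDK 21.11 change that made struct rte_intr_handle opaque: direct field access (intr_handle->intr_vec, ->nb_efd) becomes accessor calls. The full lifecycle of the per-queue vector list, as used throughout this patch, gathered in one sketch (these are driver-internal APIs; vector_list_demo is illustrative only):

#include <rte_errno.h>
#include <rte_interrupts.h>

/* Sketch of the accessor-based vector-list lifecycle (DPDK >= 21.11). */
static int
vector_list_demo(struct rte_intr_handle *ih, uint16_t nb_rxq)
{
	uint16_t i;

	if (rte_intr_vec_list_alloc(ih, "intr_vec", nb_rxq) != 0)
		return -rte_errno;

	/* Vector 0 stays reserved for link-state events; queue i gets
	 * MSI-X vector i + 1, mirroring the loop above.
	 */
	for (i = 0; i < nb_rxq; i++)
		if (rte_intr_vec_list_index_set(ih, i, i + 1) != 0)
			return -rte_errno;

	/* ... rte_intr_vec_list_index_get(ih, i) while running ... */

	rte_intr_vec_list_free(ih); /* teardown, safe to call when empty */
	return 0;
}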
/* quiesce the device first */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- vmxnet3_enable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
+ vmxnet3_enable_intr(hw,
+ rte_intr_vec_list_index_get(dev->intr_handle,
+ queue_id));
return 0;
}
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- vmxnet3_disable_intr(hw, dev->intr_handle->intr_vec[queue_id]);
+ vmxnet3_disable_intr(hw,
+ rte_intr_vec_list_index_get(dev->intr_handle, queue_id));
return 0;
}
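The two callbacks above back rte_eth_dev_rx_intr_enable()/rte_eth_dev_rx_intr_disable(). A typical interrupt-driven receive loop, sketched with assumed port/queue ids in the style of the l3fwd-power example:

#include <rte_epoll.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: sleep until queue 'qid' has work, then poll it. */
static void
rx_intr_loop(uint16_t port_id, uint16_t qid, struct rte_mbuf **pkts)
{
	struct rte_epoll_event ev;

	/* Register the queue's MSI-X vector with the per-thread epoll fd. */
	rte_eth_dev_rx_intr_ctl_q(port_id, qid, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	for (;;) {
		if (rte_eth_rx_burst(port_id, qid, pkts, 32) > 0)
			continue; /* stay in poll mode while busy */

		/* A production loop re-polls once after enabling, to close
		 * the race where a packet lands between the last burst and
		 * the enable call.
		 */
		rte_eth_dev_rx_intr_enable(port_id, qid);
		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
		rte_eth_dev_rx_intr_disable(port_id, qid);
	}
}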
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(vmxnet3_logtype_driver, driver, NOTICE);
+
+static int
+vmxnet3_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i, idx, shift;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
+
+ if (reta_size != dev_rss_conf->indTableSize) {
+ PMD_DRV_LOG(ERR,
+ "Size of configured hash lookup table (%d) doesn't match "
+ "the supported size (%d)",
+ reta_size, dev_rss_conf->indTableSize);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & RTE_BIT64(shift))
+ dev_rss_conf->indTable[i] = (uint8_t)reta_conf[idx].reta[shift];
+ }
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RSSIDT);
+
+ return 0;
+}
+
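For context on what the table does (not part of the patch): the device hashes each incoming flow, reduces the hash over indTableSize slots, and delivers to the queue stored in the selected slot. Conceptually, and only as an illustration of the data structure being updated above:

/* Illustrative model of how the device consumes indTable; the exact
 * hash-reduction step is an assumption, and the uint8_t entries cap
 * the reachable queue index at 255.
 */
static uint8_t
rss_pick_queue(const struct VMXNET3_RSSConf *conf, uint32_t rss_hash)
{
	return conf->indTable[rss_hash % conf->indTableSize];
}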
+static int
+vmxnet3_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ int i, idx, shift;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct VMXNET3_RSSConf *dev_rss_conf = hw->rss_conf;
+
+ if (reta_size != dev_rss_conf->indTableSize) {
+ PMD_DRV_LOG(ERR,
+ "Size of requested hash lookup table (%d) doesn't "
+ "match the configured size (%d)",
+ reta_size, dev_rss_conf->indTableSize);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & RTE_BIT64(shift))
+ reta_conf[idx].reta[shift] = dev_rss_conf->indTable[i];
+ }
+
+ return 0;
+}
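And the read side, mirroring the update path. A sketch that dumps the live table through the new callback (dump_reta and the 512-entry cap are illustrative):

#include <stdio.h>
#include <string.h>
#include <rte_bitops.h>
#include <rte_ethdev.h>

/* Sketch: print the current RETA of port_id. */
static int
dump_reta(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0 || dev_info.reta_size > 512)
		return ret != 0 ? ret : -EINVAL;

	/* Select every entry, then ask the driver to fill them in. */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++)
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf,
					 dev_info.reta_size);
	if (ret != 0)
		return ret;

	for (i = 0; i < dev_info.reta_size; i++)
		printf("reta[%u] -> queue %u\n", i,
		       reta_conf[i / RTE_ETH_RETA_GROUP_SIZE]
				.reta[i % RTE_ETH_RETA_GROUP_SIZE]);
	return 0;
}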