(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
return -EINVAL;
- if (frame_size > ETHER_MAX_LEN)
+ if (frame_size > RTE_ETHER_MAX_LEN)
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
	if (nicvf_mbox_update_hw_max_frs(nic, mtu))
		return -EINVAL;
/* Update max_rx_pkt_len */
- rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
+ rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
return ptypes;
}
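Applications reach the MTU hunk above through rte_eth_dev_set_mtu(). A minimal caller-side sketch; port handling and the 9000-byte jumbo MTU are purely illustrative:

	#include <stdio.h>
	#include <rte_ethdev.h>

	static int set_jumbo_mtu(uint16_t port_id)
	{
		/* The PMD flips DEV_RX_OFFLOAD_JUMBO_FRAME on its own once
		 * mtu + RTE_ETHER_HDR_LEN crosses RTE_ETHER_MAX_LEN (1518). */
		int ret = rte_eth_dev_set_mtu(port_id, 9000);
		if (ret != 0)
			printf("set_mtu failed: %d\n", ret);
		return ret;
	}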
-static void
+static int
nicvf_dev_stats_reset(struct rte_eth_dev *dev)
{
int i;
struct nicvf *nic = nicvf_pmd_priv(dev);
uint16_t rx_start, rx_end;
uint16_t tx_start, tx_end;
+ int ret;
/* Reset all primary nic counters */
nicvf_rx_range(dev, nic, &rx_start, &rx_end);
	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
	for (i = tx_start; i <= tx_end; i++)
txqs |= (0x3 << (i * 2));
- nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+ ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+ if (ret != 0)
+ return ret;
/* Reset secondary nic queue counters */
for (i = 0; i < nic->sqs_count; i++) {
		struct nicvf *snic = nic->snicvf[i];

		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
		for (i = tx_start; i <= tx_end; i++)
txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2));
- nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
+ ret = nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
+ if (ret != 0)
+ return ret;
}
+
+ return 0;
}
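With the op returning int, a failed mailbox request now surfaces to the application through rte_eth_stats_reset() instead of being dropped silently. A caller-side sketch, assuming the post-conversion ethdev API:

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void reset_port_stats(uint16_t port_id)
	{
		/* Non-zero means the reset failed, e.g. a nicvf mbox error. */
		int ret = rte_eth_stats_reset(port_id);
		if (ret != 0)
			printf("port %u: stats reset failed (%d)\n",
			       port_id, ret);
	}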
/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
-static void
+static int
nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
{
+ return 0;
}
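The empty body is deliberate: promiscuous mode is already on in the 1:1 LMAC-to-VF mapping, so the op only has to report success. With the int return, rte_eth_promiscuous_enable() now reflects that. Sketch of the caller contract:

	#include <rte_ethdev.h>

	static int enable_promisc(uint16_t port_id)
	{
		/* Always succeeds on nicvf; the driver op is a no-op. */
		return rte_eth_promiscuous_enable(port_id);
	}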
static inline uint64_t
return 0;
}
-static void
+static int
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
- dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
+ dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + RTE_ETHER_HDR_LEN;
dev_info->max_rx_queues =
(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_tx_queues =
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM,
};
+
+ return 0;
}
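dev_infos_get gets the same void-to-int treatment, so callers should start checking rte_eth_dev_info_get() as well. A short sketch:

	#include <rte_ethdev.h>

	static int query_port(uint16_t port_id, struct rte_eth_dev_info *info)
	{
		/* Can now fail, e.g. for an invalid port id. */
		return rte_eth_dev_info_get(port_id, info);
	}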
static nicvf_iova_addr_t
/* Setup MTU based on max_rx_pkt_len or default */
mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- - ETHER_HDR_LEN : ETHER_MTU;
+ - RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
PMD_INIT_FUNC_TRACE();
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
if (!rte_eal_has_hugepages()) {
PMD_INIT_LOG(INFO, "Huge page is not configured");
return -EINVAL;
return ret;
}
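With DEV_RX_OFFLOAD_RSS_HASH enabled in configure, the PMD delivers the computed RSS hash in each received mbuf. An Rx-side sketch (handle_rx() is a hypothetical helper, not part of this patch):

	#include <stdio.h>
	#include <rte_mbuf.h>

	static void handle_rx(const struct rte_mbuf *m)
	{
		/* hash.rss is valid only when the PMD set PKT_RX_RSS_HASH. */
		if (m->ol_flags & PKT_RX_RSS_HASH)
			printf("rss hash: 0x%08x\n", m->hash.rss);
	}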
static int
+nicvf_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ nicvf_dev_close(dev);
+
+ return 0;
+}
+static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
int ret;
return ENOTSUP;
}
- eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
ret = -ENOMEM;
goto alarm_fail;
}
- if (is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr))
- eth_random_addr(&nic->mac_addr[0]);
+ if (rte_is_zero_ether_addr((struct rte_ether_addr *)nic->mac_addr))
+ rte_eth_random_addr(&nic->mac_addr[0]);
- ether_addr_copy((struct rte_ether_addr *)nic->mac_addr,
+ rte_ether_addr_copy((struct rte_ether_addr *)nic->mac_addr,
		&eth_dev->data->mac_addrs[0]);
ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
malloc_fail:
rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
alarm_fail:
nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit);
}
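Passing nicvf_eth_dev_uninit instead of NULL makes the generic remove path tear the port down through nicvf_dev_close() before freeing it. A simplified sketch of what the helper does with the callback, assuming the ethdev PCI glue of this era:

	#include <rte_ethdev_pci.h>

	static int remove_sketch(struct rte_pci_device *pci_dev)
	{
		struct rte_eth_dev *eth_dev;

		/* Look up the port created for this PCI device... */
		eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
		if (eth_dev == NULL)
			return 0;	/* nothing left to release */
		/* ...run the uninit callback, then free the port. */
		return rte_eth_dev_destroy(eth_dev, nicvf_eth_dev_uninit);
	}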
static struct rte_pci_driver rte_nicvf_pmd = {