static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
-static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
-static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
-static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static int axgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
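+/* Ops converted from void to int report their status to the ethdev layer:
+ * 0 on success, a negative errno value on failure.
+ */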
/* The set of PCI devices this driver supports */
DMA_CH_SR, dma_ch_isr);
}
}
- /* Enable interrupts since disabled after generation*/
- rte_intr_enable(&pdata->pci_dev->intr_handle);
+ /* Unmask interrupts since disabled after generation */
+ rte_intr_ack(&pdata->pci_dev->intr_handle);
}
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
- struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ struct axgbe_port *pdata = dev->data->dev_private;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
pdata->rss_enable = 1;
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
- struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ struct axgbe_port *pdata = dev->data->dev_private;
int ret;
+ PMD_INIT_FUNC_TRACE();
+
/* Multiqueue RSS */
ret = axgbe_dev_rx_mq_config(dev);
if (ret) {
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE();
+
rte_intr_disable(&pdata->pci_dev->intr_handle);
if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
axgbe_dev_clear_queues(dev);
}
-static void
+static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE();
+
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+
+ return 0;
}
-static void
+static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE();
+
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+
+ return 0;
}
-static void
+static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE();
+
if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
- return;
+ return 0;
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
-static void
+static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE();
+
if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
- return;
+ return 0;
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
+
+ return 0;
}
-static void
+static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
struct axgbe_rx_queue *rxq;
txq->bytes = 0;
txq->errors = 0;
}
+
+ return 0;
}
-static void
+static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct axgbe_port *pdata = dev->data->dev_private;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC;
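+ /* DEV_RX_OFFLOAD_KEEP_CRC: hand the Ethernet FCS to the application
+ * instead of stripping it in hardware.
+ */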
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_IPV4_CKSUM |
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = AXGBE_TX_FREE_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
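+ /* Per-queue Tx offloads are requested through tx_conf->offloads now
+ * that the txq_flags field is gone from rte_eth_txconf.
+ */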
};
+
+ return 0;
}
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+ pdata = eth_dev->data->dev_private;
/* initial state */
axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
- ETHER_ADDR_LEN, 0);
+ RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
PMD_INIT_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
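+ /* Fall back to a random, locally administered unicast address when
+ * the address read from hardware is not a valid assigned one.
+ */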
- if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
- eth_random_addr(pdata->mac_addr.addr_bytes);
+ if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
+ rte_eth_random_addr(pdata->mac_addr.addr_bytes);
/* Copy the permanent MAC address */
- ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+ rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
/* Clock settings */
pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
ret = pdata->phy_if.phy_init(pdata);
if (ret) {
rte_free(eth_dev->data->mac_addrs);
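+ /* Clear the pointer so that rte_eth_dev_release_port() does not
+ * attempt to free it a second time.
+ */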
+ eth_dev->data->mac_addrs = NULL;
return ret;
}
return 0;
pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
- /*Free macaddres*/
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
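+ /* mac_addrs is freed by rte_eth_dev_release_port() */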
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(axgbe_init_log);
-static void
-axgbe_init_log(void)
+RTE_INIT(axgbe_init_log)
{
axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
if (axgbe_logtype_init >= 0)