static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
-static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static int axgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
/* The set of PCI devices this driver supports */
.tx_tstamp_workaround = 1,
.ecc_support = 1,
.i2c_support = 1,
+ .an_cdr_workaround = 1,
};
static struct axgbe_version_data axgbe_v2b = {
.tx_tstamp_workaround = 1,
.ecc_support = 1,
.i2c_support = 1,
+ .an_cdr_workaround = 1,
};
static const struct rte_eth_desc_lim rx_desc_lim = {
.dev_start = axgbe_dev_start,
.dev_stop = axgbe_dev_stop,
.dev_close = axgbe_dev_close,
+ .promiscuous_enable = axgbe_dev_promiscuous_enable,
+ .promiscuous_disable = axgbe_dev_promiscuous_disable,
+ .allmulticast_enable = axgbe_dev_allmulticast_enable,
+ .allmulticast_disable = axgbe_dev_allmulticast_disable,
+ .link_update = axgbe_dev_link_update,
+ .stats_get = axgbe_dev_stats_get,
+ .stats_reset = axgbe_dev_stats_reset,
.dev_infos_get = axgbe_dev_info_get,
.rx_queue_setup = axgbe_dev_rx_queue_setup,
.rx_queue_release = axgbe_dev_rx_queue_release,
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int dma_isr, dma_ch_isr;
pdata->phy_if.an_isr(pdata);
-
- /* Enable interrupts since disabled after generation*/
- rte_intr_enable(&pdata->pci_dev->intr_handle);
+ /* DMA related interrupts */
+ dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+ if (dma_isr) {
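+ /* Bit 0 of DMA_ISR flags an event on DMA channel 0, i.e. the first Rx queue */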
+ if (dma_isr & 1) {
+ dma_ch_isr =
+ AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR);
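+ /* Write the asserted status bits back to clear them */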
+ AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR, dma_ch_isr);
+ }
+ }
+ /* Unmask interrupts, since they are disabled after generation */
+ rte_intr_ack(&pdata->pci_dev->intr_handle);
}
/*
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
- struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ struct axgbe_port *pdata = dev->data->dev_private;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
pdata->rss_enable = 1;
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
- struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ struct axgbe_port *pdata = dev->data->dev_private;
int ret;
+ PMD_INIT_FUNC_TRACE();
+
/* Multiqueue RSS */
ret = axgbe_dev_rx_mq_config(dev);
if (ret) {
/* phy start*/
pdata->phy_if.phy_start(pdata);
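+ /* Enable the Tx and Rx datapaths once the PHY is started */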
+ axgbe_dev_enable_tx(dev);
+ axgbe_dev_enable_rx(dev);
axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
- PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
+ PMD_INIT_FUNC_TRACE();
+
rte_intr_disable(&pdata->pci_dev->intr_handle);
if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
return;
axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
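+ /* Quiesce the Tx and Rx datapaths before stopping the PHY and hardware */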
+ axgbe_dev_disable_tx(dev);
+ axgbe_dev_disable_rx(dev);
pdata->phy_if.phy_stop(pdata);
pdata->hw_if.exit(pdata);
axgbe_dev_clear_queues(dev);
}
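+/* Promiscuous mode is toggled via the PR bit of the MAC packet filter register */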
+static int
+axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+
+ return 0;
+}
+
+static int
+axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+
+ return 0;
+}
+
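+/* All-multicast mode is toggled via the PM bit of the MAC packet filter register */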
+static void
+axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
+}
+
static void
-axgbe_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
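+ /* Delay to let the link state settle before polling the PHY */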
+ rte_delay_ms(800);
+
+ pdata->phy_if.phy_status(pdata);
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_duplex = pdata->phy.duplex;
+ link.link_status = pdata->phy_link;
+ link.link_speed = pdata->phy_speed;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ PMD_DRV_LOG(ERR, "No change in link status\n");
+
+ return ret;
+}
+
+static int
+axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
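+ /* Aggregate the software packet/byte counters kept per queue by the PMD */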
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ stats->q_ipackets[i] = rxq->pkts;
+ stats->ipackets += rxq->pkts;
+ stats->q_ibytes[i] = rxq->bytes;
+ stats->ibytes += rxq->bytes;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ stats->q_opackets[i] = txq->pkts;
+ stats->opackets += txq->pkts;
+ stats->q_obytes[i] = txq->bytes;
+ stats->obytes += txq->bytes;
+ }
+
+ return 0;
+}
+
+static int
+axgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
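+ /* Clear only the per-queue software counters */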
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->pkts = 0;
+ rxq->bytes = 0;
+ rxq->errors = 0;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ txq->pkts = 0;
+ txq->bytes = 0;
+ txq->errors = 0;
+ }
+
+ return 0;
+}
+
+static int
+axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct axgbe_port *pdata = dev->data->dev_private;
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = pdata->rx_ring_count;
dev_info->max_tx_queues = pdata->tx_ring_count;
dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_IPV4_CKSUM |
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = AXGBE_TX_FREE_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
+
+ return 0;
}
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
int ret;
eth_dev->dev_ops = &axgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
/*
* For secondary processes, we don't initialise any further as primary
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+ pdata = eth_dev->data->dev_private;
/* initial state */
axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
pdata->pci_dev = pci_dev;
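+ /* Map the register windows: the property and I2C blocks sit at fixed offsets within the MAC BAR */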
pdata->xgmac_regs =
- (uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
- pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
- pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
- pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+ (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+ pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_MAC_PROP_OFFSET);
+ pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_I2C_CTRL_OFFSET);
+ pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
/* version specific driver data*/
if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
- ETHER_ADDR_LEN, 0);
+ RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
PMD_INIT_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
- if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
- eth_random_addr(pdata->mac_addr.addr_bytes);
+ if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
+ rte_eth_random_addr(pdata->mac_addr.addr_bytes);
/* Copy the permanent MAC address */
- ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+ rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
/* Clock settings */
pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
ret = pdata->phy_if.phy_init(pdata);
if (ret) {
rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
return ret;
}
return 0;
pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
- /*Free macaddres*/
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
axgbe_dev_clear_queues(eth_dev);
/* disable uio intr before callback unregister */
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(axgbe_init_log);
-static void
-axgbe_init_log(void)
+RTE_INIT(axgbe_init_log)
{
axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
if (axgbe_logtype_init >= 0)