/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
*/
#include <stdbool.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
+#include <rte_random.h>
+#include <dpaax_iova_table.h>
#include "enetc_logs.h"
#include "enetc.h"
-int enetc_logtype_pmd;
-
-static int
-enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
return 0;
}
-static void
+static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
struct enetc_eth_hw *hw =
	ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct enetc_hw *enetc_hw = &hw->hw;
uint32_t val;
PMD_INIT_FUNC_TRACE();
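+ /* clear dev_started first so that code checking the flag (presumably
+ * the datapath) quiesces before the port is disabled below
+ */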
+ dev->data->dev_started = 0;
/* Disable port */
val = enetc_port_rd(enetc_hw, ENETC_PMR);
enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));
val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
+
+ return 0;
}
static const uint32_t *
return rte_eth_linkstatus_set(dev, &link);
}
+static void
+print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
+{
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
+ ENETC_PMD_NOTICE("%s%s\n", name, buf);
+}
+
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
struct enetc_hw *enetc_hw = &hw->hw;
uint32_t *mac = (uint32_t *)hw->mac.addr;
+ uint32_t high_mac = 0;
+ uint16_t low_mac = 0;
PMD_INIT_FUNC_TRACE();
/* Calculating and storing the base HW addresses */
hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);
+ /* Workaround for the Rx lock-up hardware erratum */
+ enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);
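+ /* the threshold value 1 is assumed to be what the erratum calls for;
+ * the Linux enetc driver applies the same workaround
+ */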
+
+ /* set ENETC transaction flags to coherent, don't allocate.
+ * BD writes merge with surrounding cache line data, frame data writes
+ * overwrite cache line.
+ */
+ enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);
+
/* Enabling Station Interface */
enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);
*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
+ high_mac = (uint32_t)*mac;
mac++;
*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
+ low_mac = (uint16_t)*mac;
+
+ if ((high_mac | low_mac) == 0) {
+ char *first_byte;
+
+ ENETC_PMD_NOTICE("MAC is not available for this SI, "
+ "set random MAC\n");
+ mac = (uint32_t *)hw->mac.addr;
+ *mac = (uint32_t)rte_rand();
+ first_byte = (char *)mac;
+ *first_byte &= 0xfe; /* clear multicast bit */
+ *first_byte |= 0x02; /* set local assignment bit (IEEE802) */
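+ /* e.g. a random first octet of 0x55 (0101 0101b) becomes 0x56:
+ * bit 0 (I/G) cleared for unicast, bit 1 (U/L) set for a locally
+ * administered address
+ */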
+
+ enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
+ mac++;
+ *mac = (uint16_t)rte_rand();
+ enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
+ print_ethaddr("New address: ",
+ (const struct rte_ether_addr *)hw->mac.addr);
+ }
return 0;
}
-static void
+static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
};
dev_info->max_rx_queues = MAX_RX_RINGS;
dev_info->max_tx_queues = MAX_TX_RINGS;
- dev_info->max_rx_pktlen = 1500;
+ dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
+ dev_info->rx_offload_capa =
+ (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ return 0;
}
static int
int size;
size = nb_desc * sizeof(struct enetc_swbd);
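+ /* both the software and hardware descriptor rings are aligned to
+ * ENETC_BD_RING_ALIGN; the HW BD base registers are assumed to require
+ * this stricter-than-cache-line alignment
+ */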
- txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
if (txr->q_swbd == NULL)
return -ENOMEM;
size = nb_desc * sizeof(struct enetc_tx_bd);
- txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
if (txr->bd_base == NULL) {
rte_free(txr->q_swbd);
txr->q_swbd = NULL;
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
int idx = tx_ring->index;
- uint32_t tbmr;
phys_addr_t bd_address;
bd_address = (phys_addr_t)
	     rte_mem_virt2iova((const void *)tx_ring->bd_base);
enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
ENETC_RTBLENR_LEN(tx_ring->bd_count));
- tbmr = ENETC_TBMR_EN;
- /* enable ring */
- enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
tx_ring->tcir = (void *)((size_t)hw->reg +
}
static int
-enetc_alloc_tx_resources(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc)
+enetc_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
{
- int err;
+ int err = 0;
struct enetc_bdr *tx_ring;
struct rte_eth_dev_data *data = dev->data;
struct enetc_eth_adapter *priv =
ENETC_DEV_PRIVATE(data->dev_private);
+ PMD_INIT_FUNC_TRACE();
+ if (nb_desc > MAX_BD_COUNT)
+ return -1;
+
tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
if (tx_ring == NULL) {
ENETC_PMD_ERR("Failed to allocate TX ring memory");
enetc_setup_txbdr(&priv->hw.hw, tx_ring);
data->tx_queues[queue_idx] = tx_ring;
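+ /* honor tx_deferred_start: leave the ring disabled here and let the
+ * application start it later via rte_eth_dev_tx_queue_start()
+ */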
+ if (!tx_conf->tx_deferred_start) {
+ /* enable ring */
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR, ENETC_TBMR_EN);
+ dev->data->tx_queue_state[tx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ dev->data->tx_queue_state[tx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
return 0;
fail:
rte_free(tx_ring);
return err;
}
-static int
-enetc_tx_queue_setup(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
- int err = 0;
-
- PMD_INIT_FUNC_TRACE();
- if (nb_desc > MAX_BD_COUNT)
- return -1;
-
- err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);
-
- return err;
-}
-
static void
enetc_tx_queue_release(void *txq)
{
int size;
size = nb_rx_desc * sizeof(struct enetc_swbd);
- rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
if (rxr->q_swbd == NULL)
return -ENOMEM;
size = nb_rx_desc * sizeof(union enetc_rx_bd);
- rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
if (rxr->bd_base == NULL) {
rte_free(rxr->q_swbd);
rxr->q_swbd = NULL;
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
RTE_PKTMBUF_HEADROOM);
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
- /* enable ring */
- enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}
static int
-enetc_alloc_rx_resources(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc,
- struct rte_mempool *mb_pool)
+enetc_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
{
- int err;
+ int err = 0;
struct enetc_bdr *rx_ring;
struct rte_eth_dev_data *data = dev->data;
struct enetc_eth_adapter *adapter =
ENETC_DEV_PRIVATE(data->dev_private);
+ uint64_t rx_offloads = data->dev_conf.rxmode.offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ if (nb_rx_desc > MAX_BD_COUNT)
+ return -1;
rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
if (rx_ring == NULL) {
enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
data->rx_queues[rx_queue_id] = rx_ring;
+ if (!rx_conf->rx_deferred_start) {
+ /* enable ring */
+ enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
+ ENETC_RBMR_EN);
+ dev->data->rx_queue_state[rx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ dev->data->rx_queue_state[rx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+ RTE_ETHER_CRC_LEN : 0);
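+ /* with KEEP_CRC the 4-byte FCS stays in the frame; the Rx burst path
+ * is assumed to use crc_len to adjust the reported packet length
+ */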
+
return 0;
fail:
rte_free(rx_ring);
return err;
}
-static int
-enetc_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mb_pool)
-{
- int err = 0;
-
- PMD_INIT_FUNC_TRACE();
- if (nb_rx_desc > MAX_BD_COUNT)
- return -1;
-
- err = enetc_alloc_rx_resources(dev, rx_queue_id,
- nb_rx_desc,
- mb_pool);
-
- return err;
-}
-
static void
enetc_rx_queue_release(void *rxq)
{
return 0;
}
-static void
+static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
struct enetc_eth_hw *hw =
	ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct enetc_hw *enetc_hw = &hw->hw;
enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
+
+ return 0;
}
-static void
+static int
enetc_dev_close(struct rte_eth_dev *dev)
{
uint16_t i;
+ int ret;
PMD_INIT_FUNC_TRACE();
- enetc_dev_stop(dev);
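+ /* close frees resources shared across processes, so only the primary
+ * process may perform it
+ */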
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ ret = enetc_dev_stop(dev);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
	enetc_rx_queue_release(dev->data->rx_queues[i]);
	dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;

for (i = 0; i < dev->data->nb_tx_queues; i++) {
	enetc_tx_queue_release(dev->data->tx_queues[i]);
	dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
+
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ dpaax_iova_table_depopulate();
+
+ return ret;
}
-static void
+static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
struct enetc_eth_hw *hw =
	ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
}
-static void
+static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
struct enetc_eth_hw *hw =
	ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
psipmr &= (~ENETC_PSIPMR_SET_MP(0));
enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
}
-static void
+static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
struct enetc_eth_hw *hw =
	ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
psipmr |= ENETC_PSIPMR_SET_MP(0);
enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
}
-static void
+static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
struct enetc_eth_hw *hw =
	ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct enetc_hw *enetc_hw = &hw->hw;
uint32_t psipmr = 0;
if (dev->data->promiscuous == 1)
- return; /* must remain in all_multicast mode */
+ return 0; /* must remain in all_multicast mode */
/* Disable all-multicast mode for SI0 */
psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
~(ENETC_PSIPMR_SET_MP(0));
enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
+
+ return 0;
+}
+
+static int
+enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
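+ /* e.g. the standard 1500-byte MTU gives 1500 + 14 + 4 = 1518 bytes,
+ * i.e. RTE_ETHER_MAX_LEN
+ */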
+
+ /* check that mtu is within the allowed range */
+ if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
+ return -EINVAL;
+
+ /*
+ * Refuse an MTU that would require scattered Rx support when
+ * that feature has not been enabled.
+ */
+ if (dev->data->min_rx_buf_size &&
+ !dev->data->scattered_rx && frame_size >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
+ return -EINVAL;
+ }
+
+ if (frame_size > ENETC_ETH_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
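+ /* the per-TC max SDU and the Tx buffer allocation are left at the
+ * chip maximum (register semantics assumed from the NXP reference
+ * manual); only PM0_MAXFRM below is derived from the new frame size
+ */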
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ /* Set the MTU */
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
+ ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
+
+ return 0;
+}
+
+static int
+enetc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint32_t checksum = L3_CKSUM | L4_CKSUM;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len;
+
+ max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(max_len));
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
+ ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
+ 2 * ENETC_MAC_MAXFRM_SIZE);
+ dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ int config;
+
+ config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
+ config |= ENETC_PM0_CRC;
+ enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
+ }
+
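+ /* both parser checksum bits start set and one is cleared for each
+ * layer whose Rx checksum offload was requested; what a set bit means
+ * in hardware is an assumption based on the register's L3/L4 naming
+ */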
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ checksum &= ~L3_CKSUM;
+
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+ checksum &= ~L4_CKSUM;
+
+ enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
+
+ return 0;
+}
+
+static int
+enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
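+ /* starting a (possibly deferred-start) queue is a read-modify-write
+ * that sets the ring-enable bit; the stop functions below mirror it
+ */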
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data | ENETC_RBMR_EN;
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
+static int
+enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data & (~ENETC_RBMR_EN);
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
+static int
+enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data | ENETC_TBMR_EN;
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
+static int
+enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data & (~ENETC_TBMR_EN);
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
}
.allmulticast_enable = enetc_allmulticast_enable,
.allmulticast_disable = enetc_allmulticast_disable,
.dev_infos_get = enetc_dev_infos_get,
+ .mtu_set = enetc_mtu_set,
.rx_queue_setup = enetc_rx_queue_setup,
+ .rx_queue_start = enetc_rx_queue_start,
+ .rx_queue_stop = enetc_rx_queue_stop,
.rx_queue_release = enetc_rx_queue_release,
.tx_queue_setup = enetc_tx_queue_setup,
+ .tx_queue_start = enetc_tx_queue_start,
+ .tx_queue_stop = enetc_tx_queue_stop,
.tx_queue_release = enetc_tx_queue_release,
.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};
eth_dev->rx_pkt_burst = &enetc_recv_pkts;
eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
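+ /* let the ethdev layer synthesize per-queue xstats from the basic
+ * statistics counters
+ */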
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Retrieving and storing the HW base address of device */
hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
+ RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
"store MAC addresses",
- ETHER_ADDR_LEN * 1);
+ RTE_ETHER_ADDR_LEN * 1);
error = -ENOMEM;
return -1;
}
/* Copy the permanent MAC address */
- ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
ð_dev->data->mac_addrs[0]);
+ /* Set MTU */
+ enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
+ eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
+
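+ /* under RTE_IOVA_PA, pre-populate the dpaax VA-to-IOVA translation
+ * table used for fast address conversion on the datapath; it is
+ * depopulated again in enetc_dev_close()
+ */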
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ dpaax_iova_table_populate();
+
ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
}
static int
-enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
+enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
PMD_INIT_FUNC_TRACE();
- return 0;
+
+ return enetc_dev_close(eth_dev);
}
static int
RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
-
-RTE_INIT(enetc_pmd_init_log)
-{
- enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
- if (enetc_logtype_pmd >= 0)
- rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(enetc_logtype_pmd, pmd.net.enetc, NOTICE);