int enetc_logtype_pmd;
-static int
-enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
};
dev_info->max_rx_queues = MAX_RX_RINGS;
dev_info->max_tx_queues = MAX_TX_RINGS;
- dev_info->max_rx_pktlen = 1500;
+ dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
+ dev_info->rx_offload_capa =
+ (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
}
static int
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
int idx = tx_ring->index;
- uint32_t tbmr;
phys_addr_t bd_address;
bd_address = (phys_addr_t)
enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
ENETC_RTBLENR_LEN(tx_ring->bd_count));
- tbmr = ENETC_TBMR_EN;
- /* enable ring */
- enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
tx_ring->tcir = (void *)((size_t)hw->reg +
}
static int
-enetc_alloc_tx_resources(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc)
+enetc_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
{
- int err;
+ int err = 0;
struct enetc_bdr *tx_ring;
struct rte_eth_dev_data *data = dev->data;
struct enetc_eth_adapter *priv =
ENETC_DEV_PRIVATE(data->dev_private);
+ PMD_INIT_FUNC_TRACE();
+ if (nb_desc > MAX_BD_COUNT)
+ return -1;
+
tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
if (tx_ring == NULL) {
ENETC_PMD_ERR("Failed to allocate TX ring memory");
enetc_setup_txbdr(&priv->hw.hw, tx_ring);
data->tx_queues[queue_idx] = tx_ring;
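+ /* Start the ring now unless a deferred start was requested */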
+ if (!tx_conf->tx_deferred_start) {
+ /* enable ring */
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR, ENETC_TBMR_EN);
+ dev->data->tx_queue_state[tx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ dev->data->tx_queue_state[tx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
return 0;
fail:
rte_free(tx_ring);
return err;
}
-static int
-enetc_tx_queue_setup(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
- int err = 0;
-
- PMD_INIT_FUNC_TRACE();
- if (nb_desc > MAX_BD_COUNT)
- return -1;
-
- err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);
-
- return err;
-}
-
static void
enetc_tx_queue_release(void *txq)
{
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
RTE_PKTMBUF_HEADROOM);
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
- /* enable ring */
- enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}
static int
-enetc_alloc_rx_resources(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc,
- struct rte_mempool *mb_pool)
+enetc_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
{
- int err;
+ int err = 0;
struct enetc_bdr *rx_ring;
struct rte_eth_dev_data *data = dev->data;
struct enetc_eth_adapter *adapter =
ENETC_DEV_PRIVATE(data->dev_private);
+ uint64_t rx_offloads = data->dev_conf.rxmode.offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ if (nb_rx_desc > MAX_BD_COUNT)
+ return -1;
rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
if (rx_ring == NULL) {
enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
data->rx_queues[rx_queue_id] = rx_ring;
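+ /* Start the ring now unless a deferred start was requested */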
+ if (!rx_conf->rx_deferred_start) {
+ /* enable ring */
+ enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
+ ENETC_RBMR_EN);
+ dev->data->rx_queue_state[rx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ dev->data->rx_queue_state[rx_ring->index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
+ ETHER_CRC_LEN : 0);
+
return 0;
fail:
rte_free(rx_ring);
return err;
}
-static int
-enetc_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mb_pool)
-{
- int err = 0;
-
- PMD_INIT_FUNC_TRACE();
- if (nb_rx_desc > MAX_BD_COUNT)
- return -1;
-
- err = enetc_alloc_rx_resources(dev, rx_queue_id,
- nb_rx_desc,
- mb_pool);
-
- return err;
-}
-
static void
enetc_rx_queue_release(void *rxq)
{
enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}
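+/* Update the port MTU: validate the frame size against the MAC limits and the
+ * Rx buffer size, update the jumbo frame offload flag and program the new
+ * maximum frame size.
+ */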
+static int
+enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ /* check that mtu is within the allowed range */
+ if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
+ return -EINVAL;
+
+ /*
+ * Refuse mtu that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (dev->data->min_rx_buf_size &&
+ !dev->data->scattered_rx && frame_size >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
+ return -EINVAL;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ /* Set the MTU */
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
+ ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
+
+ return 0;
+}
+
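+/* Apply the configured Rx offloads: maximum frame size for jumbo frames,
+ * CRC retention and L3/L4 checksum handling.
+ */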
+static int
+enetc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct enetc_eth_hw *hw =
+ ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct enetc_hw *enetc_hw = &hw->hw;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint32_t checksum = L3_CKSUM | L4_CKSUM;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len;
+
+ max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(max_len));
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
+ ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
+ 2 * ENETC_MAC_MAXFRM_SIZE);
+ dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ int config;
+
+ config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
+ config |= ENETC_PM0_CRC;
+ enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ checksum &= ~L3_CKSUM;
+
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
+ checksum &= ~L4_CKSUM;
+
+ enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);
+
+ return 0;
+}
+
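+/* Enable a stopped Rx BD ring and mark the queue as started */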
+static int
+enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data | ENETC_RBMR_EN;
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
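+/* Disable a running Rx BD ring and mark the queue as stopped */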
+static int
+enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *rx_ring;
+ uint32_t rx_data;
+
+ rx_ring = dev->data->rx_queues[qidx];
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
+ ENETC_RBMR);
+ rx_data = rx_data & (~ENETC_RBMR_EN);
+ enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
+ rx_data);
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
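+/* Enable a stopped Tx BD ring and mark the queue as started */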
+static int
+enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data | ENETC_TBMR_EN;
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return 0;
+}
+
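+/* Disable a running Tx BD ring and mark the queue as stopped */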
+static int
+enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct enetc_eth_adapter *priv =
+ ENETC_DEV_PRIVATE(dev->data->dev_private);
+ struct enetc_bdr *tx_ring;
+ uint32_t tx_data;
+
+ tx_ring = dev->data->tx_queues[qidx];
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
+ tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
+ ENETC_TBMR);
+ tx_data = tx_data & (~ENETC_TBMR_EN);
+ enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
+ tx_data);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+
+ return 0;
+}
+
/*
* The set of PCI devices this driver supports
*/
.allmulticast_enable = enetc_allmulticast_enable,
.allmulticast_disable = enetc_allmulticast_disable,
.dev_infos_get = enetc_dev_infos_get,
+ .mtu_set = enetc_mtu_set,
.rx_queue_setup = enetc_rx_queue_setup,
+ .rx_queue_start = enetc_rx_queue_start,
+ .rx_queue_stop = enetc_rx_queue_stop,
.rx_queue_release = enetc_rx_queue_release,
.tx_queue_setup = enetc_tx_queue_setup,
+ .tx_queue_start = enetc_tx_queue_start,
+ .tx_queue_stop = enetc_tx_queue_stop,
.tx_queue_release = enetc_tx_queue_release,
.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};
ether_addr_copy((struct ether_addr *)hw->mac.addr,
&eth_dev->data->mac_addrs[0]);
+ /* Set MTU */
+ enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ETHER_MAX_LEN));
+ eth_dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
+
ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);