-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Semihalf nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
/* Memory size (in bytes) for MUSDK DMA buffers (40 MiB) */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+/** Port Rx offload capabilities */
+#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_CHECKSUM)
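+/*
+ * Note: DEV_RX_OFFLOAD_CHECKSUM is the rte_ethdev shorthand for the
+ * IPv4, UDP and TCP Rx checksum offload flags.
+ */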
+
+/** Port Tx offload capabilities */
+#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
static const char * const valid_args[] = {
MRVL_IFACE_NAME_ARG,
MRVL_CFG_ARG,
int queue_id;
int port_id;
uint64_t bytes_sent;
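+ /* dedicated per-lcore shadow queues; replaces the global per-port table */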
+ struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
};
-/*
- * Every tx queue should have dedicated shadow tx queue.
- *
- * Ports assigned by DPDK might not start at zero or be continuous so
- * as a workaround define shadow queues for each possible port so that
- * we eventually fit somewhere.
- */
-struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];
-
static int mrvl_lcore_first;
static int mrvl_lcore_last;
static int mrvl_dev_num;
static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
+static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
+ struct pp2_hif *hif, unsigned int core_id,
+ struct mrvl_shadow_txq *sq, int qid, int force);
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
return -EINVAL;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
RTE_LOG(INFO, PMD,
"L2 CRC stripping is always enabled in hw\n");
- dev->data->dev_conf.rxmode.hw_strip_crc = 1;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
return -EINVAL;
}
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
RTE_LOG(INFO, PMD, "LRO not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.jumbo_frame)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
ETHER_HDR_LEN - ETHER_CRC_LEN;
if (ret < 0)
return ret;
+ ret = mrvl_configure_txqs(priv, dev->data->port_id,
+ dev->data->nb_tx_queues);
+ if (ret < 0)
+ return ret;
+
priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
priv->ppio_params.maintain_stats = 1;
priv->nb_rx_queues = dev->data->nb_rx_queues;
return -EINVAL;
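+ /*
+ * ppio is created when the device is started; accept the new MTU
+ * now (the ethdev layer keeps it in dev->data) instead of failing.
+ */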
if (!priv->ppio)
- return -EPERM;
+ return 0;
ret = pp2_ppio_set_mru(priv->ppio, mru);
if (ret)
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
- int i;
+ int i, j;
+ struct mrvl_txq *txq;
RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- struct mrvl_shadow_txq *sq =
- &shadow_txqs[dev->data->port_id][i];
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = (struct mrvl_txq *)dev->data->tx_queues[i];
- while (sq->tail != sq->head) {
- uint64_t addr = cookie_addr_high |
+ for (j = 0; j < RTE_MAX_LCORE; j++) {
+ struct mrvl_shadow_txq *sq;
+
+ if (!hifs[j])
+ continue;
+
+ sq = &txq->shadow_txqs[j];
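+ /* flush what hw has already sent, then free anything still queued */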
+ mrvl_free_sent_buffers(txq->priv->ppio,
+ hifs[j], j, sq, txq->queue_id, 1);
+ while (sq->tail != sq->head) {
+ uint64_t addr = cookie_addr_high |
sq->ent[sq->tail].buff.cookie;
- rte_pktmbuf_free((struct rte_mbuf *)addr);
- sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ rte_pktmbuf_free(
+ (struct rte_mbuf *)addr);
+ sq->tail = (sq->tail + 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+ }
+ memset(sq, 0, sizeof(*sq));
}
-
- memset(sq, 0, sizeof(*sq));
}
}
pp2_cls_qos_tbl_deinit(priv->qos_tbl);
priv->qos_tbl = NULL;
}
- pp2_ppio_deinit(priv->ppio);
+ if (priv->ppio)
+ pp2_ppio_deinit(priv->ppio);
priv->ppio = NULL;
+
+ /* policer must be released after ppio deinitialization */
+ if (priv->policer) {
+ pp2_cls_plcr_deinit(priv->policer);
+ priv->policer = NULL;
+ }
}
/**
return -1;
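+ /* ppio is not created until the device is started; nothing to program yet */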
if (!priv->ppio)
- return -EPERM;
+ return 0;
/*
* Maximum number of uc addresses can be tuned via kernel module mvpp2x
info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
- info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ info->rx_offload_capa = MRVL_RX_OFFLOADS;
+ info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
- info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ info->tx_offload_capa = MRVL_TX_OFFLOADS;
+ info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
info->flow_type_rss_offloads = ETH_RSS_IPV4 |
ETH_RSS_NONFRAG_IPV4_TCP |
/* By default packets are dropped if no descriptors are available */
info->default_rxconf.rx_drop_en = 1;
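+ /* CRC stripping is always performed in hw, advertise it as a default */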
+ info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP;
info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}
return -1;
}
+/**
+ * Check whether requested Rx queue offloads match port offloads.
+ *
+ * @param dev
+ *   Pointer to the device.
+ * @param requested
+ *   Bitmap of the requested offloads.
+ *
+ * @return
+ * 1 if requested offloads are okay, 0 otherwise.
+ */
+static int
+mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported = MRVL_RX_OFFLOADS;
+ uint64_t unsupported = requested & ~supported;
+ uint64_t missing = mandatory & ~requested;
+
+ if (unsupported) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+ requested, supported);
+ return 0;
+ }
+
+ if (missing) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
+ "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
+ requested, missing);
+ return 0;
+ }
+
+ return 1;
+}
+
/**
* DPDK callback to configure the receive queue.
*
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters (unused_).
+ * Thresholds parameters.
* @param mp
* Memory pool for buffer allocations.
*
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket,
- const struct rte_eth_rxconf *conf __rte_unused,
+ const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
struct mrvl_priv *priv = dev->data->dev_private;
max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
+ if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
+ return -ENOTSUP;
+
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
/*
* Unknown TC mapping, mapping will not have a correct queue.
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled = dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->cksum_enabled =
+ dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
rte_free(q);
}
+/**
+ * Check whether requested Tx queue offloads match port offloads.
+ *
+ * @param dev
+ *   Pointer to the device.
+ * @param requested
+ *   Bitmap of the requested offloads.
+ *
+ * @return
+ * 1 if requested offloads are okay, 0 otherwise.
+ */
+static int
+mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = MRVL_TX_OFFLOADS;
+ uint64_t unsupported = requested & ~supported;
+ uint64_t missing = mandatory & ~requested;
+
+ if (unsupported) {
+ RTE_LOG(ERR, PMD, "Some Tx offloads are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+ requested, supported);
+ return 0;
+ }
+
+ if (missing) {
+ RTE_LOG(ERR, PMD, "Some Tx offloads are missing. "
+ "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
+ requested, missing);
+ return 0;
+ }
+
+ return 1;
+}
+
/**
* DPDK callback to configure the transmit queue.
*
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters (unused).
+ * Thresholds parameters.
*
* @return
* 0 on success, negative error value otherwise.
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket,
- const struct rte_eth_txconf *conf __rte_unused)
+ const struct rte_eth_txconf *conf)
{
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_txq *txq;
+ if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
+ return -ENOTSUP;
+
if (dev->data->tx_queues[idx]) {
rte_free(dev->data->tx_queues[idx]);
dev->data->tx_queues[idx] = NULL;
dev->data->tx_queues[idx] = txq;
priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
- priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
return 0;
}
sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
sq->size -= num;
num = 0;
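+ /* reset so the next batch does not inherit a stale skip count */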
+ skip_bufs = 0;
}
if (likely(num)) {
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct mrvl_txq *q = txq;
- struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
+ struct mrvl_shadow_txq *sq;
struct pp2_hif *hif;
struct pp2_ppio_desc descs[nb_pkts];
unsigned int core_id = rte_lcore_id();
uint64_t addr;
hif = mrvl_get_hif(q->priv, core_id);
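+ /* each lcore uses its own shadow queue within this txq */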
+ sq = &q->shadow_txqs[core_id];
if (unlikely(!q->priv->ppio || !hif))
return 0;
sq->ent[sq->head].buff.addr =
rte_mbuf_data_iova_default(mbuf);
sq->ent[sq->head].bpool =
- (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
- NULL : mrvl_port_to_bpool_lookup[mbuf->port];
+ (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
+ mbuf->refcnt > 1)) ? NULL :
+ mrvl_port_to_bpool_lookup[mbuf->port];
sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
sq->size++;
}
memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
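+ /* also clear the port-to-bpool lookup table */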
+ memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
mrvl_lcore_first = RTE_MAX_LCORE;
mrvl_lcore_last = 0;