#define OTX_EP_MAX_RINGS_PER_VF (8)
#define OTX_EP_CFG_IO_QUEUES OTX_EP_MAX_RINGS_PER_VF
#define OTX_EP_64BYTE_INSTR (64)
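+/* Lower bounds on Rx/Tx ring sizes advertised to applications */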
+#define OTX_EP_MIN_IQ_DESCRIPTORS (128)
+#define OTX_EP_MIN_OQ_DESCRIPTORS (128)
#define OTX_EP_MAX_IQ_DESCRIPTORS (8192)
#define OTX_EP_MAX_OQ_DESCRIPTORS (8192)
#define OTX_EP_OQ_BUF_SIZE (2048)
+#define OTX_EP_MIN_RX_BUF_SIZE (64)
#define OTX_EP_OQ_INFOPTR_MODE (0)
#define OTX_EP_OQ_REFIL_THRESHOLD (16)
struct otx_ep_fn_list fn_list;
+ uint32_t max_tx_queues;
+
+ uint32_t max_rx_queues;
+
/* SR-IOV info */
struct otx_ep_sriov_info sriov_info;
/* Device configuration */
const struct otx_ep_config *conf;
+
+ uint64_t rx_offloads;
+
+ uint64_t tx_offloads;
};
+#define OTX_EP_MAX_PKT_SZ 64000U
+#define OTX_EP_MAX_MAC_ADDRS 1
+
extern int otx_net_ep_logtype;
#endif /* _OTX_EP_COMMON_H_ */
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
+#include "otx_ep_rxtx.h"
+
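+/* Map an ethdev to the driver's private otx_ep_device */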
+#define OTX_EP_DEV(_eth_dev) \
+ ((struct otx_ep_device *)(_eth_dev)->data->dev_private)
+
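+/* Descriptor ring limits advertised via rte_eth_dev_info */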
+static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
+ .nb_max = OTX_EP_MAX_OQ_DESCRIPTORS,
+ .nb_min = OTX_EP_MIN_OQ_DESCRIPTORS,
+ .nb_align = OTX_EP_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
+ .nb_max = OTX_EP_MAX_IQ_DESCRIPTORS,
+ .nb_min = OTX_EP_MIN_IQ_DESCRIPTORS,
+ .nb_align = OTX_EP_TXD_ALIGN,
+};
+
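+/* Report device capabilities: queue counts, offloads and descriptor limits */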
+static int
+otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *devinfo)
+{
+ struct otx_ep_device *otx_epvf;
+
+ otx_epvf = OTX_EP_DEV(eth_dev);
+
+ devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ devinfo->max_rx_queues = otx_epvf->max_rx_queues;
+ devinfo->max_tx_queues = otx_epvf->max_tx_queues;
+
+ devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
+ devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
+ devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+ devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+ devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
+
+ devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
+ devinfo->tx_desc_lim = otx_ep_tx_desc_lim;
+
+ return 0;
+}
-#define OTX_EP_DEV(_eth_dev) ((_eth_dev)->data->dev_private)
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
static int
otx_epdev_init(struct otx_ep_device *otx_epvf)
{
+ uint32_t ethdev_queues;
int ret = 0;
ret = otx_ep_chip_specific_setup(otx_epvf);
otx_epvf->fn_list.setup_device_regs(otx_epvf);
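+ /* Expose one ethdev queue pair per ring granted to this VF */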
+ ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
+ otx_epvf->max_rx_queues = ethdev_queues;
+ otx_epvf->max_tx_queues = ethdev_queues;
+
otx_ep_info("OTX_EP Device is Ready\n");
setup_fail:
return ret;
}
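+/* Validate the requested queue counts and cache the offload flags
+ * chosen by the application.
+ */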
+static int
+otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_eth_rxmode *rxmode;
+ struct rte_eth_txmode *txmode;
+ struct rte_eth_conf *conf;
+
+ conf = &data->dev_conf;
+ rxmode = &conf->rxmode;
+ txmode = &conf->txmode;
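+ /* Reject configurations that ask for more queues than this VF owns */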
+ if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
+ eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
+ otx_ep_err("invalid number of Rx/Tx queues requested\n");
+ return -EINVAL;
+ }
+ otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
+ 	    eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues);
+
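+ /* Remember the negotiated offload flags for use in the datapath */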
+ otx_epvf->rx_offloads = rxmode->offloads;
+ otx_epvf->tx_offloads = txmode->offloads;
+
+ return 0;
+}
+
+/* Ethernet device operations supported by this PMD */
+static const struct eth_dev_ops otx_ep_eth_dev_ops = {
+ .dev_configure = otx_ep_dev_configure,
+ .dev_infos_get = otx_ep_dev_info_get,
+};
+
static int
otx_ep_eth_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
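+ /* Register the ethdev callbacks implemented above */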
+ eth_dev->dev_ops = &otx_ep_eth_dev_ops;
eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
otx_ep_err("MAC addresses memory allocation failed\n");