+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
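+/* Open the BGX port through the mbox interface and cache its configuration
+ * (channels, MTU, MAC address, Rx FIFO size) in the per-port nic structure.
+ */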
+static int
+octeontx_port_open(struct octeontx_nic *nic)
+{
+ octeontx_mbox_bgx_port_conf_t bgx_port_conf;
+ octeontx_mbox_bgx_port_fifo_cfg_t fifo_cfg;
+ int res;
+
+ res = 0;
+ memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
+ if (res < 0) {
+ octeontx_log_err("failed to open port %d", res);
+ return res;
+ }
+
+ nic->node = bgx_port_conf.node;
+ nic->port_ena = bgx_port_conf.enable;
+ nic->base_ichan = bgx_port_conf.base_chan;
+ nic->base_ochan = bgx_port_conf.base_chan;
+ nic->num_ichans = bgx_port_conf.num_chans;
+ nic->num_ochans = bgx_port_conf.num_chans;
+ nic->bgx_mtu = bgx_port_conf.mtu;
+ nic->bpen = bgx_port_conf.bpen;
+ nic->fcs_strip = bgx_port_conf.fcs_strip;
+ nic->bcast_mode = bgx_port_conf.bcast_mode;
+ nic->mcast_mode = bgx_port_conf.mcast_mode;
+ nic->speed = bgx_port_conf.mode;
+
+ nic->duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ memset(&fifo_cfg, 0x0, sizeof(fifo_cfg));
+
+ res = octeontx_bgx_port_get_fifo_cfg(nic->port_id, &fifo_cfg);
+ if (res < 0) {
+ octeontx_log_err("failed to get port %d fifo cfg", res);
+ return res;
+ }
+
+ nic->fc.rx_fifosz = fifo_cfg.rx_fifosz;
+
+ memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0],
+ RTE_ETHER_ADDR_LEN);
+
+ octeontx_log_dbg("port opened %d", nic->port_id);
+ return res;
+}
+
+static void
+octeontx_link_status_print(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ if (link && link->link_status)
+ octeontx_log_info("Port %u: Link Up - speed %u Mbps - %s",
+ (eth_dev->data->port_id),
+ link->link_speed,
+ link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ else
+ octeontx_log_info("Port %d: Link Down",
+ (int)(eth_dev->data->port_id));
+}
+
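+/* Convert RTE_ETH_LINK_SPEED_* capability bits into the BGX link speed
+ * modes used by the mbox. A 10G request maps to all three 10G-capable
+ * modes (XAUI, RXAUI and 10G_R); other speeds map to a single mode.
+ */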
+static inline uint32_t
+octeontx_parse_link_speeds(uint32_t link_speeds)
+{
+ uint32_t link_speed = OCTEONTX_LINK_SPEED_UNKNOWN;
+
+ if (link_speeds & RTE_ETH_LINK_SPEED_40G)
+ link_speed = OCTEONTX_LINK_SPEED_40G_R;
+
+ if (link_speeds & RTE_ETH_LINK_SPEED_10G) {
+ link_speed = OCTEONTX_LINK_SPEED_XAUI;
+ link_speed |= OCTEONTX_LINK_SPEED_RXAUI;
+ link_speed |= OCTEONTX_LINK_SPEED_10G_R;
+ }
+
+ if (link_speeds & RTE_ETH_LINK_SPEED_5G)
+ link_speed = OCTEONTX_LINK_SPEED_QSGMII;
+
+ if (link_speeds & RTE_ETH_LINK_SPEED_1G)
+ link_speed = OCTEONTX_LINK_SPEED_SGMII;
+
+ return link_speed;
+}
+
+static inline uint8_t
+octeontx_parse_eth_link_duplex(uint32_t link_speeds)
+{
+ if ((link_speeds & RTE_ETH_LINK_SPEED_10M_HD) ||
+ (link_speeds & RTE_ETH_LINK_SPEED_100M_HD))
+ return RTE_ETH_LINK_HALF_DUPLEX;
+ else
+ return RTE_ETH_LINK_FULL_DUPLEX;
+}
+
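+/* Program a fixed link speed/duplex requested via dev_conf->link_speeds.
+ * A plain autoneg request is a no-op; the BGX mode is changed only when the
+ * requested speed or duplex differs from the currently cached one.
+ */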
+static int
+octeontx_apply_link_speed(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ octeontx_mbox_bgx_port_change_mode_t cfg;
+
+ if (conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
+ return 0;
+
+ cfg.speed = octeontx_parse_link_speeds(conf->link_speeds);
+ cfg.autoneg = (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) ? 1 : 0;
+ cfg.duplex = octeontx_parse_eth_link_duplex(conf->link_speeds);
+ cfg.qlm_mode = ((conf->link_speeds & RTE_ETH_LINK_SPEED_1G) ?
+ OCTEONTX_QLM_MODE_SGMII :
+ (conf->link_speeds & RTE_ETH_LINK_SPEED_10G) ?
+ OCTEONTX_QLM_MODE_XFI : 0);
+
+ if (cfg.speed != OCTEONTX_LINK_SPEED_UNKNOWN &&
+ (cfg.speed != nic->speed || cfg.duplex != nic->duplex)) {
+ nic->speed = cfg.speed;
+ nic->duplex = cfg.duplex;
+ return octeontx_bgx_port_change_mode(nic->port_id, &cfg);
+ } else {
+ return 0;
+ }
+}
+
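+/* Translate the cached BGX speed mode and link state into an rte_eth_link. */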
+static void
+octeontx_link_status_update(struct octeontx_nic *nic,
+ struct rte_eth_link *link)
+{
+ memset(link, 0, sizeof(*link));
+
+ link->link_status = nic->link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+ switch (nic->speed) {
+ case OCTEONTX_LINK_SPEED_SGMII:
+ link->link_speed = RTE_ETH_SPEED_NUM_1G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_XAUI:
+ case OCTEONTX_LINK_SPEED_RXAUI:
+ case OCTEONTX_LINK_SPEED_10G_R:
+ link->link_speed = RTE_ETH_SPEED_NUM_10G;
+ break;
+ case OCTEONTX_LINK_SPEED_QSGMII:
+ link->link_speed = RTE_ETH_SPEED_NUM_5G;
+ break;
+ case OCTEONTX_LINK_SPEED_40G_R:
+ link->link_speed = RTE_ETH_SPEED_NUM_40G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_RESERVE1:
+ case OCTEONTX_LINK_SPEED_RESERVE2:
+ default:
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+ octeontx_log_err("incorrect link speed %d", nic->speed);
+ break;
+ }
+
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = RTE_ETH_LINK_AUTONEG;
+}
+
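+/* Alarm callback that polls the BGX link state. On a state change it updates
+ * the cached state and notifies LSC callbacks; the alarm is re-armed every
+ * OCCTX_INTR_POLL_INTERVAL_MS.
+ */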
+static void
+octeontx_link_status_poll(void *arg)
+{
+ struct octeontx_nic *nic = arg;
+ struct rte_eth_link link;
+ struct rte_eth_dev *dev;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev = nic->dev;
+
+ res = octeontx_bgx_port_link_status(nic->port_id);
+ if (res < 0) {
+ octeontx_log_err("Failed to get port %d link status",
+ nic->port_id);
+ } else {
+ if (nic->link_up != (uint8_t)res) {
+ nic->link_up = (uint8_t)res;
+ octeontx_link_status_update(nic, &link);
+ octeontx_link_status_print(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
+ rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+ }
+
+ res = rte_eal_alarm_set(OCCTX_INTR_POLL_INTERVAL_MS * 1000,
+ octeontx_link_status_poll, nic);
+ if (res < 0)
+ octeontx_log_err("Failed to restart alarm for port %d, err: %d",
+ nic->port_id, res);
+}
+
+static void
+octeontx_port_close(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eal_alarm_cancel(octeontx_link_status_poll, nic);
+ octeontx_bgx_port_close(nic->port_id);
+ octeontx_log_dbg("port closed %d", nic->port_id);
+}
+
+static int
+octeontx_port_start(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_start(nic->port_id);
+}
+
+static int
+octeontx_port_stop(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_stop(nic->port_id);
+}
+
+static int
+octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
+{
+ struct rte_eth_dev *dev;
+ int res;
+
+ res = 0;
+ PMD_INIT_FUNC_TRACE();
+ dev = nic->dev;
+
+ res = octeontx_bgx_port_promisc_set(nic->port_id, en);
+ if (res < 0) {
+ octeontx_log_err("failed to set promiscuous mode %d",
+ nic->port_id);
+ return res;
+ }
+
+ /* Set proper flag for the mode */
+ dev->data->promiscuous = (en != 0) ? 1 : 0;
+
+ octeontx_log_dbg("port %d : promiscuous mode %s",
+ nic->port_id, en ? "set" : "unset");
+
+ return 0;
+}
+
+static int
+octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
+{
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
+ if (res < 0) {
+ octeontx_log_err("failed to get port stats %d", nic->port_id);
+ return res;
+ }
+
+ stats->ipackets = bgx_stats.rx_packets;
+ stats->ibytes = bgx_stats.rx_bytes;
+ stats->imissed = bgx_stats.rx_dropped;
+ stats->ierrors = bgx_stats.rx_errors;
+ stats->opackets = bgx_stats.tx_packets;
+ stats->obytes = bgx_stats.tx_bytes;
+ stats->oerrors = bgx_stats.tx_errors;
+
+ octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "",
+ nic->port_id, stats->ipackets, stats->opackets);
+
+ return 0;
+}
+
+static int
+octeontx_port_stats_clr(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_stats_clr(nic->port_id);
+}
+
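+/* Derive a default event device configuration from the advertised device
+ * limits: maximum queues, ports and depths, and the minimum dequeue timeout.
+ */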
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
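+/* Translate the configured Tx offloads into the OCCTX_TX_* fast-path flag
+ * bits.
+ */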
+static uint16_t
+octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ uint16_t flags = 0;
+
+ if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+ if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
+ nic->tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
+ flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
+
+ if (!(nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
+ flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
+
+ if (nic->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+ flags |= OCCTX_TX_MULTI_SEG_F;
+
+ return flags;
+}
+
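+/* Translate the configured Rx offloads into the OCCTX_RX_* fast-path flag
+ * bits. Enabling Rx scatter also forces multi-segment Tx (see the note
+ * below).
+ */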
+static uint16_t
+octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ uint16_t flags = 0;
+
+ if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
+ flags |= OCCTX_RX_OFFLOAD_CSUM_F;
+
+ if (nic->rx_offloads & (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ flags |= OCCTX_RX_OFFLOAD_CSUM_F;
+
+ if (nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+ flags |= OCCTX_RX_MULTI_SEG_F;
+ eth_dev->data->scattered_rx = 1;
+ /* If scatter mode is enabled, TX should also be in multi
+ * seg mode, else memory leak will occur
+ */
+ nic->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+ }
+
+ return flags;
+}
+
+static int
+octeontx_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ struct rte_eth_txmode *txmode = &conf->txmode;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ RTE_SET_USED(conf);
+
+ if (!rte_eal_has_hugepages()) {
+ octeontx_log_err("huge page is not configured");
+ return -EINVAL;
+ }
+
+ if (txmode->mq_mode) {
+ octeontx_log_err("tx mq_mode DCB or VMDq not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
+ octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ if (!(txmode->offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)) {
+ PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
+ txmode->offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
+ }
+
+ if (conf->dcb_capability_en) {
+ octeontx_log_err("DCB enable not supported");
+ return -EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ octeontx_log_err("flow director not supported");
+ return -EINVAL;
+ }
+
+ nic->num_tx_queues = dev->data->nb_tx_queues;
+
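+ /* PKO channel, VLAN offload and PKI state are initialized only on the
+ * first configure; later reconfigurations reuse them.
+ */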
+ if (!nic->reconfigure) {
+ ret = octeontx_pko_channel_open(nic->pko_vfid * PKO_VF_NUM_DQ,
+ nic->num_tx_queues,
+ nic->base_ochan);
+ if (ret) {
+ octeontx_log_err("failed to open channel %d no-of-txq %d",
+ nic->base_ochan, nic->num_tx_queues);
+ return -EFAULT;
+ }
+
+ ret = octeontx_dev_vlan_offload_init(dev);
+ if (ret) {
+ octeontx_log_err("failed to initialize vlan offload");
+ return -EFAULT;
+ }
+
+ nic->pki.classifier_enable = false;
+ nic->pki.hash_enable = true;
+ nic->pki.initialized = false;
+ }
+
+ nic->rx_offloads |= rxmode->offloads;
+ nic->tx_offloads |= txmode->offloads;
+ nic->rx_offload_flags |= octeontx_rx_offload_flags(dev);
+ nic->tx_offload_flags |= octeontx_tx_offload_flags(dev);
+
+ nic->reconfigure = true;
+
+ return 0;
+}
+
+static int
+octeontx_dev_close(struct rte_eth_dev *dev)
+{
+ struct octeontx_txq *txq = NULL;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ unsigned int i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Stopping/closing event device once all eth ports are closed. */
+ if (__atomic_sub_fetch(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) == 0) {
+ rte_event_dev_stop(nic->evdev);
+ rte_event_dev_close(nic->evdev);
+ }
+
+ octeontx_dev_flow_ctrl_fini(dev);
+
+ octeontx_dev_vlan_offload_fini(dev);
+
+ ret = octeontx_pko_channel_close(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to close channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ }
+ /* Free txq resources for this port */
+ for (i = 0; i < nic->num_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+
+ rte_free(txq);
+ }
+
+ octeontx_port_close(nic);
+ nic->reconfigure = false;
+
+ return 0;
+}
+
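+/* Set a new MTU. Once the port is started, frames larger than a single Rx
+ * buffer are refused unless Rx scatter is enabled, and with scatter the
+ * frame must fit in OCCTX_RX_NB_SEG_MAX segments. The resulting frame size
+ * (MTU + L2 overhead) is programmed into both PKO and BGX.
+ */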
+static int
+octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ uint32_t buffsz, frame_size = mtu + OCCTX_L2_OVERHEAD;
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ int rc = 0;
+
+ buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+
+ /* Refuse MTU that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (data->dev_started && frame_size > buffsz &&
+ !(nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
+ octeontx_log_err("Scatter mode is disabled");
+ return -EINVAL;
+ }
+
+ /* Check <seg size> * <max_seg> >= max_frame */
+ if ((nic->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
+ (frame_size > buffsz * OCCTX_RX_NB_SEG_MAX))
+ return -EINVAL;
+
+ rc = octeontx_pko_send_mtu(nic->port_id, frame_size);
+ if (rc)
+ return rc;
+
+ rc = octeontx_bgx_port_mtu_set(nic->port_id, frame_size);
+ if (rc)
+ return rc;
+
+ octeontx_log_info("Received pkt beyond maxlen %d will be dropped",
+ frame_size);
+
+ return rc;
+}
+
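+/* Re-evaluate Rx offloads at start time: enable scatter when the MTU exceeds
+ * a single mbuf data room and publish the resulting offload flags through
+ * the event device private data so the event fast path uses the same flags.
+ */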
+static int
+octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
+{
+ struct rte_eth_dev *eth_dev = rxq->eth_dev;
+ struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct evdev_priv_data *evdev_priv;
+ struct rte_eventdev *dev;
+ uint32_t buffsz;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ /* Setup scatter mode if needed by jumbo */
+ if (data->mtu > buffsz) {
+ nic->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
+ nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
+ }
+
+ /* Sharing offload flags via eventdev priv region */
+ dev = &rte_eventdevs[rxq->evdev];
+ evdev_priv = dev->data->dev_private;
+ evdev_priv->rx_offload_flags = nic->rx_offload_flags;
+ evdev_priv->tx_offload_flags = nic->tx_offload_flags;
+
+ /* Setup MTU */
+ nic->mtu = data->mtu;
+
+ return 0;
+}
+
+static int
+octeontx_dev_start(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_rxq *rxq;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+ /* Recheck whether any new offloads were set, so the Rx/Tx burst
+ * function pointers can be updated accordingly.
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ octeontx_recheck_rx_offloads(rxq);
+ }
+
+ /* Setting up the mtu */
+ ret = octeontx_dev_mtu_set(dev, nic->mtu);
+ if (ret) {
+ octeontx_log_err("Failed to set default MTU size %d", ret);
+ goto error;
+ }
+
+ /* Apply new link configurations if changed */
+ ret = octeontx_apply_link_speed(dev);
+ if (ret) {
+ octeontx_log_err("Failed to set link configuration: %d", ret);
+ goto error;
+ }
+
+ /*
+ * Tx start
+ */
+ octeontx_set_tx_function(dev);
+ ret = octeontx_pko_channel_start(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
+ nic->port_id, nic->num_tx_queues, nic->base_ochan,
+ ret);
+ goto error;
+ }
+
+ /*
+ * Rx start
+ */
+ dev->rx_pkt_burst = octeontx_recv_pkts;
+ ret = octeontx_pki_port_start(nic->port_id);
+ if (ret < 0) {
+ octeontx_log_err("fail to start Rx on port %d", nic->port_id);
+ goto channel_stop_error;
+ }
+
+ /*
+ * Start port
+ */
+ ret = octeontx_port_start(nic);
+ if (ret < 0) {
+ octeontx_log_err("failed start port %d", ret);
+ goto pki_port_stop_error;
+ }
+
+ PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d",
+ nic->base_ochan, nic->num_tx_queues, nic->port_id);
+
+ ret = rte_event_dev_start(nic->evdev);
+ if (ret < 0) {
+ octeontx_log_err("failed to start evdev: ret (%d)", ret);
+ goto pki_port_stop_error;
+ }
+
+ /* Success */
+ return ret;
+
+pki_port_stop_error:
+ octeontx_pki_port_stop(nic->port_id);
+channel_stop_error:
+ octeontx_pko_channel_stop(nic->base_ochan);
+error:
+ return ret;
+}
+
+static int
+octeontx_dev_stop(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = octeontx_port_stop(nic);
+ if (ret < 0) {
+ octeontx_log_err("failed to req stop port %d res=%d",
+ nic->port_id, ret);
+ return ret;
+ }
+
+ ret = octeontx_pki_port_stop(nic->port_id);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop pki port %d res=%d",
+ nic->port_id, ret);
+ return ret;
+ }
+
+ ret = octeontx_pko_channel_stop(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_promisc_set(nic, 1);
+}
+
+static int
+octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_promisc_set(nic, 0);
+}
+
+static int
+octeontx_port_link_status(struct octeontx_nic *nic)
+{
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+ res = octeontx_bgx_port_link_status(nic->port_id);
+ if (res < 0) {
+ octeontx_log_err("failed to get port %d link status",
+ nic->port_id);
+ return res;
+ }
+
+ if (nic->link_up != (uint8_t)res || nic->print_flag == -1) {
+ nic->link_up = (uint8_t)res;
+ nic->print_flag = 1;
+ }
+ octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up);
+
+ return res;
+}
+
+/*
+ * Return 0 means link status changed, -1 means not changed
+ */
+static int
+octeontx_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_link link;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_port_link_status(nic);
+ if (res < 0) {
+ octeontx_log_err("failed to request link status %d", res);
+ return res;
+ }
+
+ octeontx_link_status_update(nic, &link);
+ if (nic->print_flag) {
+ octeontx_link_status_print(nic->dev, &link);
+ nic->print_flag = 0;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+octeontx_port_mcast_set(struct octeontx_nic *nic, int en)
+{
+ struct rte_eth_dev *dev;
+ int res;
+
+ res = 0;
+ PMD_INIT_FUNC_TRACE();
+ dev = nic->dev;
+
+ res = octeontx_bgx_port_multicast_set(nic->port_id, en);
+ if (res < 0) {
+ octeontx_log_err("failed to set multicast mode %d",
+ nic->port_id);
+ return res;
+ }
+
+ /* Set proper flag for the mode */
+ dev->data->all_multicast = (en != 0) ? 1 : 0;
+
+ octeontx_log_dbg("port %d : multicast mode %s",
+ nic->port_id, en ? "set" : "unset");
+
+ return 0;
+}
+
+static int
+octeontx_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_mcast_set(nic, 1);
+}
+
+static int
+octeontx_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_mcast_set(nic, 0);
+}
+
+static inline int
+octeontx_dev_total_xstat(void)
+{
+ return NUM_BGX_XSTAT;
+}
+
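+/* Fill the xstats array from the BGX mbox statistics, using the offsets in
+ * the octeontx_bgx_xstats table; returns the number of entries written.
+ */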
+static int
+octeontx_port_xstats(struct octeontx_nic *nic, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int stat_cnt, res, si, i;
+
+ res = octeontx_bgx_port_xstats(nic->port_id, &bgx_stats);
+ if (res < 0) {
+ octeontx_log_err("failed to get port stats %d", nic->port_id);
+ return res;
+ }
+
+ si = 0;
+ /* Fill BGX stats */
+ stat_cnt = (n > NUM_BGX_XSTAT) ? NUM_BGX_XSTAT : n;
+ n = n - stat_cnt;
+ for (i = 0; i < stat_cnt; i++) {
+ xstats[si].id = si;
+ xstats[si].value = *(uint64_t *)(((char *)&bgx_stats) +
+ octeontx_bgx_xstats[i].soffset);
+ si++;
+ }
+ /*TODO: Similarly fill rest of HW stats */
+
+ return si;
+}
+
+static int
+octeontx_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *stat_val, unsigned int n)
+{
+ unsigned int i, xstat_cnt = octeontx_dev_total_xstat();
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_xstat xstats[xstat_cnt];
+
+ octeontx_port_xstats(nic, xstats, xstat_cnt);
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= xstat_cnt) {
+ PMD_INIT_LOG(ERR, "out of range id value");
+ return -1;
+ }
+ stat_val[i] = xstats[ids[i]].value;
+ }
+ return n;
+}
+
+static int
+octeontx_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size)
+{
+ int stat_cnt, si, i;
+
+ if (xstats_names) {
+ si = 0;
+ /* Fill BGX stats */
+ stat_cnt = (size > NUM_BGX_XSTAT) ? NUM_BGX_XSTAT : size;
+ size = size - stat_cnt;
+ for (i = 0; i < stat_cnt; i++) {
+ strlcpy(xstats_names[si].name,
+ octeontx_bgx_xstats[i].sname,
+ sizeof(xstats_names[si].name));
+ si++;
+ }
+ /*TODO: Similarly fill rest of HW stats */
+ return si;
+ } else {
+ return octeontx_dev_total_xstat();
+ }
+}
+
+static void
+build_xstat_names(struct rte_eth_xstat_name *xstat_names)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_BGX_XSTAT; i++) {
+ strlcpy(xstat_names[i].name, octeontx_bgx_xstats[i].sname,
+ RTE_ETH_XSTATS_NAME_SIZE);
+ }
+}
+
+static int
+octeontx_dev_xstats_get_names_by_id(struct rte_eth_dev *dev __rte_unused,
+ const uint64_t *ids,
+ struct rte_eth_xstat_name *stat_names,
+ unsigned int n)
+{
+ unsigned int i, xstat_cnt = octeontx_dev_total_xstat();
+ struct rte_eth_xstat_name xstat_names[xstat_cnt];
+
+ build_xstat_names(xstat_names);
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= xstat_cnt) {
+ PMD_INIT_LOG(ERR, "out of range id value");
+ return -1;
+ }
+ strlcpy(stat_names[i].name, xstat_names[ids[i]].name,
+ sizeof(stat_names[i].name));
+ }
+ /*TODO: Similarly fill rest of HW stats */
+
+ return n;
+}
+
+static int
+octeontx_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_xstats(nic, xstats, n);
+}
+
+static int
+octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_stats(nic, stats);
+}
+
+static int
+octeontx_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_stats_clr(nic);
+}
+
+static void
+octeontx_dev_mac_addr_del(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_del(nic->port_id, index);
+ if (ret != 0)
+ octeontx_log_err("failed to del MAC address filter on port %d",
+ nic->port_id);
+}
+
+static int
+octeontx_dev_mac_addr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ uint32_t index,
+ __rte_unused uint32_t vmdq)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_add(nic->port_id, mac_addr->addr_bytes,
+ index);
+ if (ret < 0) {
+ octeontx_log_err("failed to add MAC address filter on port %d",
+ nic->port_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
+ if (ret == 0) {
+ /* Update same mac address to BGX CAM table */
+ ret = octeontx_bgx_port_mac_add(nic->port_id, addr->addr_bytes,
+ 0);
+ }
+ if (ret < 0) {
+ octeontx_log_err("failed to set MAC address on port %d",
+ nic->port_id);
+ }
+
+ return ret;
+}
+
+static int
+octeontx_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = RTE_ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= RTE_ETH_LINK_SPEED_10M | RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
+ RTE_ETH_LINK_SPEED_40G;
+
+ /* Min/Max MTU supported */
+ dev_info->min_rx_bufsize = OCCTX_MIN_FRS;
+ dev_info->max_rx_pktlen = OCCTX_MAX_FRS;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - OCCTX_L2_OVERHEAD;
+ dev_info->min_mtu = dev_info->min_rx_bufsize - OCCTX_L2_OVERHEAD;
+
+ dev_info->max_mac_addrs =
+ octeontx_bgx_port_mac_entries_get(nic->port_id);
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
+ dev_info->min_rx_bufsize = 0;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = 0,
+ .rx_drop_en = 0,
+ .offloads = OCTEONTX_RX_OFFLOADS,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = 0,
+ .offloads = OCTEONTX_TX_OFFLOADS,
+ };
+
+ dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
+ dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
+ dev_info->rx_queue_offload_capa = OCTEONTX_RX_OFFLOADS;
+ dev_info->tx_queue_offload_capa = OCTEONTX_TX_OFFLOADS;
+
+ return 0;
+}
+
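+/* Callback used to fetch per-DQ info: copies the LMT line, I/O register and
+ * flow-control status addresses of the descriptor queue into the
+ * caller-provided structure.
+ */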
+static void
+octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
+{
+ ((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va;
+ ((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va;
+ ((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va;