net/octeontx: add packet type parsing support
[dpdk.git] / drivers / net / octeontx / octeontx_ethdev.c
index e71a0cb..706cf88 100644 (file)
@@ -47,6 +47,7 @@
 #include <rte_vdev.h>
 
 #include "octeontx_ethdev.h"
+#include "octeontx_rxtx.h"
 #include "octeontx_logs.h"
 
 struct octeontx_vdev_init_params {
@@ -160,6 +161,14 @@ octeontx_port_close(struct octeontx_nic *nic)
        octeontx_log_dbg("port closed %d", nic->port_id);
 }
 
+/* Stop the BGX port backing this nic through the mailbox interface.
+ * Returns the octeontx_bgx_port_stop() result (0 on success, negative
+ * on mailbox failure).
+ */
+static int
+octeontx_port_stop(struct octeontx_nic *nic)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       return octeontx_bgx_port_stop(nic->port_id);
+}
+
 static void
 octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
 {
@@ -182,6 +191,38 @@ octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
                        nic->port_id, en ? "set" : "unset");
 }
 
+/* Fetch the BGX hardware counters for this port and translate them
+ * into the generic rte_eth_stats layout.
+ *
+ * Fix: on a mailbox failure the original code logged the error but
+ * then copied the *uninitialized* local bgx_stats into @stats,
+ * reporting garbage counters. Return early instead, leaving @stats
+ * untouched.
+ */
+static void
+octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
+{
+       octeontx_mbox_bgx_port_stats_t bgx_stats;
+       int res;
+
+       PMD_INIT_FUNC_TRACE();
+
+       res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
+       if (res < 0) {
+               octeontx_log_err("failed to get port stats %d", nic->port_id);
+               return;
+       }
+
+       stats->ipackets = bgx_stats.rx_packets;
+       stats->ibytes = bgx_stats.rx_bytes;
+       stats->imissed = bgx_stats.rx_dropped;
+       stats->ierrors = bgx_stats.rx_errors;
+       stats->opackets = bgx_stats.tx_packets;
+       stats->obytes = bgx_stats.tx_bytes;
+       stats->oerrors = bgx_stats.tx_errors;
+
+       octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "",
+                       nic->port_id, stats->ipackets, stats->opackets);
+}
+
+/* Clear the BGX hardware counters for this port.
+ * NOTE(review): the octeontx_bgx_port_stats_clr() return value is
+ * ignored, so a mailbox failure goes unreported here - confirm this
+ * best-effort behavior is intended.
+ */
+static void
+octeontx_port_stats_clr(struct octeontx_nic *nic)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       octeontx_bgx_port_stats_clr(nic->port_id);
+}
+
 static inline void
 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
                                struct rte_event_dev_info *info)
@@ -401,6 +442,37 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
        return octeontx_atomic_write_link_status(dev, &link);
 }
 
+/* eth_dev_ops stats_get handler: fill @stats from the per-port BGX
+ * hardware counters via octeontx_port_stats().
+ */
+static void
+octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+       octeontx_port_stats(nic, stats);
+}
+
+/* eth_dev_ops stats_reset handler: zero the BGX hardware counters for
+ * this port via octeontx_port_stats_clr().
+ */
+static void
+octeontx_dev_stats_reset(struct rte_eth_dev *dev)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+       octeontx_port_stats_clr(nic);
+}
+
+/* eth_dev_ops mac_addr_set handler: program @addr as the port's MAC
+ * address through the BGX mailbox. A failure is only logged because
+ * this callback returns void, so the caller cannot see the error.
+ */
+static void
+octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
+                                       struct ether_addr *addr)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+       int ret;
+
+       ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
+       if (ret != 0)
+               octeontx_log_err("failed to set MAC address on port %d",
+                               nic->port_id);
+}
+
 static void
 octeontx_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
@@ -437,6 +509,358 @@ octeontx_dev_info(struct rte_eth_dev *dev,
        dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MT_LOCKFREE;
 }
 
+/* Callback for octeontx_pko_channel_query_dqs(): copy the doorbell
+ * related fields of a descriptor queue into the caller-supplied
+ * octeontx_dq_t pointed to by @out.
+ */
+static void
+octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
+{
+       octeontx_dq_t *dq_info = out;
+
+       dq_info->lmtline_va = dq->lmtline_va;
+       dq_info->ioreg_va = dq->ioreg_va;
+       dq_info->fc_status_va = dq->fc_status_va;
+}
+
+/* Transition TX queue @qidx to the STARTED state. The PKO descriptor
+ * queue doorbell addresses are (re)fetched into txq->dq on every
+ * start; a queue that is already started returns 0 immediately.
+ *
+ * On a failed DQ query the whole port is torn down: the BGX port is
+ * stopped and the PKO channel is stopped and closed, the queue is
+ * marked STOPPED, and -EFAULT is returned.
+ */
+static int
+octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+                               uint16_t qidx)
+{
+       struct octeontx_txq *txq;
+       int res;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Idempotent: starting a started queue is a no-op. */
+       if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       txq = dev->data->tx_queues[qidx];
+
+       res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+                                               &txq->dq,
+                                               sizeof(octeontx_dq_t),
+                                               txq->queue_id,
+                                               octeontx_dq_info_getter);
+       if (res < 0) {
+               res = -EFAULT;
+               goto close_port;
+       }
+
+       dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+       return res;
+
+close_port:
+       /* Port-level teardown; port stop result is intentionally ignored. */
+       (void)octeontx_port_stop(nic);
+       octeontx_pko_channel_stop(nic->base_ochan);
+       octeontx_pko_channel_close(nic->base_ochan);
+       dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return res;
+}
+
+/* eth_dev_ops tx_queue_start handler.
+ * NOTE(review): qidx is folded modulo PKO_VF_NUM_DQ, assuming at most
+ * PKO_VF_NUM_DQ TX queues per device - confirm against the configure
+ * path.
+ */
+static int
+octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+       qidx = qidx % PKO_VF_NUM_DQ;
+       return octeontx_vf_start_tx_queue(dev, nic, qidx);
+}
+
+/* Mark TX queue @qidx as STOPPED. Stopping an already-stopped queue
+ * is a no-op; the return value is always 0.
+ */
+static inline int
+octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+                         uint16_t qidx)
+{
+       RTE_SET_USED(nic);
+       PMD_INIT_FUNC_TRACE();
+
+       if (dev->data->tx_queue_state[qidx] != RTE_ETH_QUEUE_STATE_STOPPED)
+               dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+/* eth_dev_ops tx_queue_stop handler: fold @qidx into the per-VF DQ
+ * range and mark the queue stopped.
+ */
+static int
+octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+       PMD_INIT_FUNC_TRACE();
+       qidx = qidx % PKO_VF_NUM_DQ;
+
+       return octeontx_vf_stop_tx_queue(dev, nic, qidx);
+}
+
+/* eth_dev_ops tx_queue_release handler: stop the queue, then free the
+ * octeontx_txq allocated in octeontx_dev_tx_queue_setup(). A NULL
+ * queue pointer is tolerated.
+ *
+ * Fix: the error message carried a stray trailing "\n" and broken
+ * grammar, unlike every other octeontx_log_err() call in this file.
+ */
+static void
+octeontx_dev_tx_queue_release(void *tx_queue)
+{
+       struct octeontx_txq *txq = tx_queue;
+       int res;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (txq) {
+               res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
+               if (res < 0)
+                       octeontx_log_err("failed to stop tx_queue(%d)",
+                                  txq->queue_id);
+
+               rte_free(txq);
+       }
+}
+
+/* eth_dev_ops tx_queue_setup handler: allocate the queue structure,
+ * map it to the PKO descriptor queue for this port (dq_num) and fetch
+ * the DQ doorbell addresses. The queue starts in the STOPPED state.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EFAULT if the
+ * PKO channel query fails.
+ *
+ * Fix: the original stored txq into dev->data->tx_queues[qidx] before
+ * the DQ query and freed it on failure without clearing the slot,
+ * leaving a dangling pointer. The queue is now published only after it
+ * is fully initialized.
+ */
+static int
+octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+                           uint16_t nb_desc, unsigned int socket_id,
+                           const struct rte_eth_txconf *tx_conf)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+       struct octeontx_txq *txq = NULL;
+       uint16_t dq_num;
+       int res = 0;
+
+       RTE_SET_USED(nb_desc);
+       RTE_SET_USED(socket_id);
+       RTE_SET_USED(tx_conf);
+
+       /* Each port owns a contiguous range of PKO_VF_NUM_DQ DQs. */
+       dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
+
+       /* Socket id check */
+       if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+                       socket_id != (unsigned int)nic->node)
+               PMD_TX_LOG(INFO, "socket_id expected %d, configured %d",
+                                               socket_id, nic->node);
+
+       /* Free memory prior to re-allocation if needed. */
+       if (dev->data->tx_queues[qidx] != NULL) {
+               PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
+                               qidx);
+               octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+               dev->data->tx_queues[qidx] = NULL;
+       }
+
+       /* Allocating tx queue data structure */
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
+                                RTE_CACHE_LINE_SIZE, nic->node);
+       if (txq == NULL) {
+               octeontx_log_err("failed to allocate txq=%d", qidx);
+               return -ENOMEM;
+       }
+
+       txq->eth_dev = dev;
+       txq->queue_id = dq_num;
+
+       res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+                                               &txq->dq,
+                                               sizeof(octeontx_dq_t),
+                                               txq->queue_id,
+                                               octeontx_dq_info_getter);
+       if (res < 0) {
+               rte_free(txq);
+               return -EFAULT;
+       }
+
+       /* Publish the fully initialized queue. */
+       dev->data->tx_queues[qidx] = txq;
+       dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
+                       qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va,
+                       txq->dq.ioreg_va,
+                       txq->dq.fc_status_va);
+
+       return 0;
+}
+
+/* eth_dev_ops rx_queue_setup handler. Validates that the mempool uses
+ * the octeontx_fpavf ops, rejects unsupported configurations
+ * (classifier, deferred start, out-of-range queue index), and on the
+ * first queue of the port performs the one-time PKI configuration:
+ * packet buffer layout, optional RSS hash fields and the QOS/QPG entry
+ * that steers packets into the event device group for this queue.
+ *
+ * Returns 0 on success or a negative errno on failure; the rxq
+ * allocation is released on every error path.
+ *
+ * Fix: the queue-index error message contained the typo "supporteded"
+ * and a stray trailing "\n" unlike the other octeontx_log_err() calls.
+ */
+static int
+octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+                               uint16_t nb_desc, unsigned int socket_id,
+                               const struct rte_eth_rxconf *rx_conf,
+                               struct rte_mempool *mb_pool)
+{
+       struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+       struct rte_mempool_ops *mp_ops = NULL;
+       struct octeontx_rxq *rxq = NULL;
+       pki_pktbuf_cfg_t pktbuf_conf;
+       pki_hash_cfg_t pki_hash;
+       pki_qos_cfg_t pki_qos;
+       uintptr_t pool;
+       int ret, port;
+       uint8_t gaura;
+       unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
+       unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
+
+       RTE_SET_USED(nb_desc);
+
+       memset(&pktbuf_conf, 0, sizeof(pktbuf_conf));
+       memset(&pki_hash, 0, sizeof(pki_hash));
+       memset(&pki_qos, 0, sizeof(pki_qos));
+
+       /* Rx buffers must come from the octeontx hardware mempool. */
+       mp_ops = rte_mempool_get_ops(mb_pool->ops_index);
+       if (strcmp(mp_ops->name, "octeontx_fpavf")) {
+               octeontx_log_err("failed to find octeontx_fpavf mempool");
+               return -ENOTSUP;
+       }
+
+       /* Handle forbidden configurations */
+       if (nic->pki.classifier_enable) {
+               octeontx_log_err("cannot setup queue %d. "
+                                       "Classifier option unsupported", qidx);
+               return -EINVAL;
+       }
+
+       port = nic->port_id;
+
+       /* Rx deferred start is not supported */
+       if (rx_conf->rx_deferred_start) {
+               octeontx_log_err("rx deferred start not supported");
+               return -EINVAL;
+       }
+
+       /* Verify queue index */
+       if (qidx >= dev->data->nb_rx_queues) {
+               octeontx_log_err("QID %d not supported (0 - %d available)",
+                               qidx, (dev->data->nb_rx_queues - 1));
+               return -ENOTSUP;
+       }
+
+       /* Socket id check */
+       if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+                       socket_id != (unsigned int)nic->node)
+               PMD_RX_LOG(INFO, "socket_id expected %d, configured %d",
+                                               socket_id, nic->node);
+
+       /* Allocating rx queue data structure */
+       rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq),
+                                RTE_CACHE_LINE_SIZE, nic->node);
+       if (rxq == NULL) {
+               octeontx_log_err("failed to allocate rxq=%d", qidx);
+               return -ENOMEM;
+       }
+
+       /* One-time, per-port PKI configuration. */
+       if (!nic->pki.initialized) {
+               pktbuf_conf.port_type = 0;
+               pki_hash.port_type = 0;
+               pki_qos.port_type = 0;
+
+               pktbuf_conf.mmask.f_wqe_skip = 1;
+               pktbuf_conf.mmask.f_first_skip = 1;
+               pktbuf_conf.mmask.f_later_skip = 1;
+               pktbuf_conf.mmask.f_mbuff_size = 1;
+               pktbuf_conf.mmask.f_cache_mode = 1;
+
+               pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
+               pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
+               pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
+               /* Usable payload per buffer: element minus headroom and
+                * the mbuf header itself.
+                */
+               pktbuf_conf.mbuff_size = (mb_pool->elt_size -
+                                       RTE_PKTMBUF_HEADROOM -
+                                       sizeof(struct rte_mbuf));
+
+               pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
+
+               ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf);
+               if (ret != 0) {
+                       octeontx_log_err("fail to configure pktbuf for port %d",
+                                       port);
+                       rte_free(rxq);
+                       return ret;
+               }
+               PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n"
+                               "\tmbuf_size:\t0x%0x\n"
+                               "\twqe_skip:\t0x%0x\n"
+                               "\tfirst_skip:\t0x%0x\n"
+                               "\tlater_skip:\t0x%0x\n"
+                               "\tcache_mode:\t%s\n",
+                               port,
+                               pktbuf_conf.mbuff_size,
+                               pktbuf_conf.wqe_skip,
+                               pktbuf_conf.first_skip,
+                               pktbuf_conf.later_skip,
+                               (pktbuf_conf.cache_mode ==
+                                               PKI_OPC_MODE_STT) ?
+                               "STT" :
+                               (pktbuf_conf.cache_mode ==
+                                               PKI_OPC_MODE_STF) ?
+                               "STF" :
+                               (pktbuf_conf.cache_mode ==
+                                               PKI_OPC_MODE_STF1_STT) ?
+                               "STF1_STT" : "STF2_STT");
+
+               if (nic->pki.hash_enable) {
+                       pki_hash.tag_dlc = 1;
+                       pki_hash.tag_slc = 1;
+                       pki_hash.tag_dlf = 1;
+                       pki_hash.tag_slf = 1;
+                       /* NOTE(review): return value ignored - confirm
+                        * hash config failure is non-fatal by design.
+                        */
+                       octeontx_pki_port_hash_config(port, &pki_hash);
+               }
+
+               pool = (uintptr_t)mb_pool->pool_id;
+
+               /* Get the gpool Id */
+               gaura = octeontx_fpa_bufpool_gpool(pool);
+
+               pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
+               pki_qos.num_entry = 1;
+               pki_qos.drop_policy = 0;
+               pki_qos.tag_type = 2L;
+               pki_qos.qos_entry[0].port_add = 0;
+               pki_qos.qos_entry[0].gaura = gaura;
+               pki_qos.qos_entry[0].ggrp_ok = ev_queues;
+               pki_qos.qos_entry[0].ggrp_bad = ev_queues;
+               pki_qos.qos_entry[0].grptag_bad = 0;
+               pki_qos.qos_entry[0].grptag_ok = 0;
+
+               ret = octeontx_pki_port_create_qos(port, &pki_qos);
+               if (ret < 0) {
+                       octeontx_log_err("failed to create QOS port=%d, q=%d",
+                                       port, qidx);
+                       rte_free(rxq);
+                       return ret;
+               }
+               nic->pki.initialized = true;
+       }
+
+       rxq->port_id = nic->port_id;
+       rxq->eth_dev = dev;
+       rxq->queue_id = qidx;
+       rxq->evdev = nic->evdev;
+       rxq->ev_queues = ev_queues;
+       rxq->ev_ports = ev_ports;
+
+       dev->data->rx_queues[qidx] = rxq;
+       dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+       return 0;
+}
+
+/* eth_dev_ops rx_queue_release handler: free the octeontx_rxq
+ * allocated in octeontx_dev_rx_queue_setup(); rte_free(NULL) is a
+ * no-op so a NULL queue is safe.
+ */
+static void
+octeontx_dev_rx_queue_release(void *rxq)
+{
+       rte_free(rxq);
+}
+
+/* eth_dev_ops dev_supported_ptypes_get handler: advertise the L3/L4
+ * packet types this driver can report, but only when the device is
+ * actually using the octeontx_recv_pkts burst function; otherwise
+ * return NULL (no ptype parsing advertised).
+ */
+static const uint32_t *
+octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       /* RTE_PTYPE_UNKNOWN terminates the list, per ethdev convention. */
+       static const uint32_t ptypes[] = {
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_L4_FRAG,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == octeontx_recv_pkts)
+               return ptypes;
+
+       return NULL;
+}
+
 /* Initialize and register driver with DPDK Application */
 static const struct eth_dev_ops octeontx_dev_ops = {
        .dev_configure           = octeontx_dev_configure,
@@ -444,6 +868,16 @@ static const struct eth_dev_ops octeontx_dev_ops = {
        .promiscuous_enable      = octeontx_dev_promisc_enable,
        .promiscuous_disable     = octeontx_dev_promisc_disable,
        .link_update             = octeontx_dev_link_update,
+       .stats_get               = octeontx_dev_stats_get,
+       .stats_reset             = octeontx_dev_stats_reset,
+       .mac_addr_set            = octeontx_dev_default_mac_addr_set,
+       .tx_queue_start          = octeontx_dev_tx_queue_start,
+       .tx_queue_stop           = octeontx_dev_tx_queue_stop,
+       .tx_queue_setup          = octeontx_dev_tx_queue_setup,
+       .tx_queue_release        = octeontx_dev_tx_queue_release,
+       .rx_queue_setup          = octeontx_dev_rx_queue_setup,
+       .rx_queue_release        = octeontx_dev_rx_queue_release,
+       .dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
 };
 
 /* Create Ethdev interface per BGX LMAC ports */