net/octeontx2: add packet type translation for ICMP6
[dpdk.git] / drivers / event / octeontx2 / otx2_evdev_adptr.c
index ce5621f..b6e9f59 100644 (file)
@@ -199,6 +199,37 @@ void
 sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
 {
        switch (event_type) {
+       case RTE_EVENT_TYPE_ETHDEV:
+       {
+               struct otx2_eth_rxq *rxq = data;
+               int i, match = false;
+               uint64_t *old_ptr;
+
+               for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
+                       if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
+                               match = true;
+               }
+
+               if (!match) {
+                       dev->rx_adptr_pool_cnt++;
+                       old_ptr = dev->rx_adptr_pools;
+                       dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
+                                                         sizeof(uint64_t) *
+                                                         dev->rx_adptr_pool_cnt
+                                                         , 0);
+                       if (dev->rx_adptr_pools == NULL) {
+                               dev->adptr_xae_cnt += rxq->pool->size;
+                               dev->rx_adptr_pools = old_ptr;
+                               dev->rx_adptr_pool_cnt--;
+                               return;
+                       }
+                       dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
+                               (uint64_t)rxq->pool;
+
+                       dev->adptr_xae_cnt += rxq->pool->size;
+               }
+               break;
+       }
        case RTE_EVENT_TYPE_TIMER:
        {
                dev->adptr_xae_cnt += (*(uint64_t *)data);
@@ -209,6 +240,25 @@ sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
        }
 }
 
+static inline void
+sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+{
+       struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+       int i;
+
+       for (i = 0; i < dev->nb_event_ports; i++) {
+               if (dev->dual_ws) {
+                       struct otx2_ssogws_dual *ws = event_dev->data->ports[i];
+
+                       ws->lookup_mem = lookup_mem;
+               } else {
+                       struct otx2_ssogws *ws = event_dev->data->ports[i];
+
+                       ws->lookup_mem = lookup_mem;
+               }
+       }
+}
+
 int
 otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
@@ -216,24 +266,36 @@ otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+       struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint16_t port = eth_dev->data->port_id;
+       struct otx2_eth_rxq *rxq;
        int i, rc;
 
-       RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                return -EINVAL;
 
        if (rx_queue_id < 0) {
                for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
+                       rxq = eth_dev->data->rx_queues[i];
+                       sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
+                       rc = sso_xae_reconfigure((struct rte_eventdev *)
+                                                (uintptr_t)event_dev);
                        rc |= sso_rxq_enable(otx2_eth_dev, i,
                                             queue_conf->ev.sched_type,
                                             queue_conf->ev.queue_id, port);
                }
+               rxq = eth_dev->data->rx_queues[0];
+               sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        } else {
+               rxq = eth_dev->data->rx_queues[rx_queue_id];
+               sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
+               rc = sso_xae_reconfigure((struct rte_eventdev *)
+                                        (uintptr_t)event_dev);
                rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
                                     queue_conf->ev.sched_type,
                                     queue_conf->ev.queue_id, port);
+               sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        }
 
        if (rc < 0) {
@@ -242,6 +304,10 @@ otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
                return rc;
        }
 
+       dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
+       dev->tstamp = &otx2_eth_dev->tstamp;
+       sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
        return 0;
 }
 
@@ -271,3 +337,109 @@ otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
 
        return rc;
 }
+
/* Rx adapter start callback.
 *
 * The OCTEON TX2 SSO needs no additional programming to begin
 * delivering Rx events once queues are added, so this is a
 * deliberate no-op.  Always returns 0.
 */
int
otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
			  const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(event_dev);

	return 0;
}
+
/* Rx adapter stop callback.
 *
 * Nothing to tear down on the SSO side when the adapter stops; queue
 * removal handles the actual unwiring.  Always returns 0.
 */
int
otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
			 const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(event_dev);

	return 0;
}
+
+int
+otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
+                            const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+       int ret;
+
+       RTE_SET_USED(dev);
+       ret = strncmp(eth_dev->device->driver->name, "net_octeontx2,", 13);
+       if (ret)
+               *caps = 0;
+       else
+               *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+
+       return 0;
+}
+
+static int
+sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
+{
+       struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+       struct npa_aq_enq_req *aura_req;
+
+       aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+       aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+       aura_req->ctype = NPA_AQ_CTYPE_AURA;
+       aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+       aura_req->aura.limit = nb_sqb_bufs;
+       aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
+
+       return otx2_mbox_process(npa_lf->mbox);
+}
+
+int
+otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t tx_queue_id)
+{
+       struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+       struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+       struct otx2_eth_txq *txq;
+       int i;
+
+       RTE_SET_USED(id);
+       if (tx_queue_id < 0) {
+               for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
+                       txq = eth_dev->data->tx_queues[i];
+                       sso_sqb_aura_limit_edit(txq->sqb_pool,
+                                               OTX2_SSO_SQB_LIMIT);
+               }
+       } else {
+               txq = eth_dev->data->tx_queues[tx_queue_id];
+               sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
+       }
+
+       dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
+       sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+       return 0;
+}
+
+int
+otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t tx_queue_id)
+{
+       struct otx2_eth_txq *txq;
+       int i;
+
+       RTE_SET_USED(id);
+       RTE_SET_USED(eth_dev);
+       RTE_SET_USED(event_dev);
+       if (tx_queue_id < 0) {
+               for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
+                       txq = eth_dev->data->tx_queues[i];
+                       sso_sqb_aura_limit_edit(txq->sqb_pool,
+                                               txq->nb_sqb_bufs);
+               }
+       } else {
+               txq = eth_dev->data->tx_queues[tx_queue_id];
+               sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
+       }
+
+       return 0;
+}