drivers: remove octeontx2 drivers
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 502da27..fdcd68c 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -38,6 +38,31 @@ cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
                dev->adptr_xae_cnt += rxq->qconf.mp->size;
                break;
        }
+       case RTE_EVENT_TYPE_ETHDEV_VECTOR: {
+               struct rte_mempool *mp = data;
+               uint64_t *old_ptr;
+
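+               /* Skip pools that are already tracked; their buffers are
+                * already accounted for in adptr_xae_cnt.
+                */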
+               for (i = 0; i < dev->vec_pool_cnt; i++) {
+                       if ((uint64_t)mp == dev->vec_pools[i])
+                               return;
+               }
+
+               dev->vec_pool_cnt++;
+               old_ptr = dev->vec_pools;
+               dev->vec_pools =
+                       rte_realloc(dev->vec_pools,
+                                   sizeof(uint64_t) * dev->vec_pool_cnt, 0);
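+               /* On realloc failure, keep the old array: the pool is not
+                * tracked for de-duplication but its buffers are still
+                * counted so the XAQ sizing stays conservative.
+                */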
+               if (dev->vec_pools == NULL) {
+                       dev->adptr_xae_cnt += mp->size;
+                       dev->vec_pools = old_ptr;
+                       dev->vec_pool_cnt--;
+                       return;
+               }
+               dev->vec_pools[dev->vec_pool_cnt - 1] = (uint64_t)mp;
+
+               dev->adptr_xae_cnt += mp->size;
+               break;
+       }
        case RTE_EVENT_TYPE_TIMER: {
                struct cnxk_tim_ring *timr = data;
                uint16_t *old_ring_ptr;
@@ -98,7 +123,9 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
                    uint16_t port_id, const struct rte_event *ev,
                    uint8_t custom_flowid)
 {
+       struct roc_nix *nix = &cnxk_eth_dev->nix;
        struct roc_nix_rq *rq;
+       int rc;
 
        rq = &cnxk_eth_dev->rqs[rq_id];
        rq->sso_ena = 1;
@@ -115,7 +142,24 @@ cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
                rq->tag_mask |= ev->flow_id;
        }
 
-       return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+       rc = roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+       if (rc)
+               return rc;
+
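+       /* When inline inbound IPsec is enabled, also propagate RQ 0's tag
+        * and schedule type to the inline inbound path.
+        */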
+       if (rq_id == 0 && roc_nix_inl_inb_is_enabled(nix)) {
+               uint32_t sec_tag_const;
+
+               /* The inline IPsec tag constant applies only to bits 32:8
+                * of the tag, so program it with tag_mask right-shifted
+                * by 8.
+                */
+               sec_tag_const = rq->tag_mask >> 8;
+               rc = roc_nix_inl_inb_tag_update(nix, sec_tag_const,
+                                               ev->sched_type);
+               if (rc)
+                       plt_err("Failed to set tag conf for ipsec, rc=%d", rc);
+       }
+
+       return rc;
 }
 
 static int
@@ -131,6 +175,35 @@ cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
        return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
 }
 
+static int
+cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
+                               uint16_t port_id, uint16_t rq_id, uint16_t sz,
+                               uint64_t tmo_ns, struct rte_mempool *vmp)
+{
+       struct roc_nix_rq *rq;
+
+       rq = &cnxk_eth_dev->rqs[rq_id];
+
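+       /* Vector WQE can only be enabled on an RQ that is already in SSO
+        * mode and carries a valid flow tag.
+        */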
+       if (!rq->sso_ena)
+               return -EINVAL;
+       if (rq->flow_tag_width == 0)
+               return -EINVAL;
+
+       rq->vwqe_ena = 1;
+       rq->vwqe_first_skip = 0;
+       rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+       rq->vwqe_max_sz_exp = rte_log2_u32(sz);
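+       /* Convert the wait timeout from ns to hardware units of
+        * (vwqe_interval + 1) * 100 ns.
+        */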
+       rq->vwqe_wait_tmo =
+               tmo_ns /
+               ((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
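+       /* Tag layout: bits 23:20 hold port_id[3:0], bits 27:24 hold
+        * port_id[7:4] and bits 31:28 hold the ETHDEV_VECTOR event type.
+        */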
+       rq->tag_mask = (port_id & 0xF) << 20;
+       rq->tag_mask |=
+               (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
+               << 24;
+
+       return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
 int
 cnxk_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -158,9 +231,26 @@ cnxk_sso_rx_adapter_queue_add(
                        &queue_conf->ev,
                        !!(queue_conf->rx_queue_flags &
                           RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
+               if (queue_conf->rx_queue_flags &
+                   RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
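+                       /* Account the vector mempool in the XAE count and
+                        * resize the XAQ pool before enabling vector WQE
+                        * on the RQ.
+                        */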
+                       cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
+                                             RTE_EVENT_TYPE_ETHDEV_VECTOR);
+                       rc |= cnxk_sso_xae_reconfigure(
+                               (struct rte_eventdev *)(uintptr_t)event_dev);
+                       rc |= cnxk_sso_rx_adapter_vwqe_enable(
+                               cnxk_eth_dev, port, rx_queue_id,
+                               queue_conf->vector_sz,
+                               queue_conf->vector_timeout_ns,
+                               queue_conf->vector_mp);
+
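+                       /* Disable drop_re when requested; it is re-enabled
+                        * once the last RQ is removed from the Rx adapter.
+                        */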
+                       if (cnxk_eth_dev->vec_drop_re_dis)
+                               rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
+                                                            false);
+               }
                rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
                                      rxq_sp->qconf.mp->pool_id, true,
                                      dev->force_ena_bp);
+               cnxk_eth_dev->nb_rxq_sso++;
        }
 
        if (rc < 0) {
@@ -171,6 +261,14 @@ cnxk_sso_rx_adapter_queue_add(
 
        dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
 
+       /* Switch inbound traffic to the PF/VF's NIX LF instead of the
+        * inline device once all RQs have been switched to event dev mode.
+        * Do this only when use of the inline device is not forced via
+        * devargs.
+        */
+       if (!cnxk_eth_dev->inb.force_inl_dev &&
+           cnxk_eth_dev->nb_rxq_sso == cnxk_eth_dev->nb_rxq)
+               cnxk_nix_inb_mode_set(cnxk_eth_dev, false);
+
        return 0;
 }
 
@@ -195,12 +293,22 @@ cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
                                      rxq_sp->qconf.mp->pool_id, false,
                                      dev->force_ena_bp);
+               cnxk_eth_dev->nb_rxq_sso--;
+
+               /* Enable drop_re if it was disabled earlier */
+               if (cnxk_eth_dev->vec_drop_re_dis && !cnxk_eth_dev->nb_rxq_sso)
+                       rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix, true);
        }
 
        if (rc < 0)
                plt_err("Failed to clear Rx adapter config port=%d, q=%d",
                        eth_dev->data->port_id, rx_queue_id);
 
+       /* Removing an RQ from the Rx adapter implies that the inline
+        * device must be used for CQ/poll mode.
+        */
+       cnxk_nix_inb_mode_set(cnxk_eth_dev, true);
+
        return rc;
 }