event/cnxk: support Tx adapter
author Pavan Nikhilesh <pbhagavatula@marvell.com>
Wed, 14 Jul 2021 09:02:03 +0000 (14:32 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Fri, 16 Jul 2021 12:16:37 +0000 (14:16 +0200)
Add support for event eth Tx adapter. The Tx queue SQB pool is now
provisioned to NIX_MAX_SQB and clamped via the NPA aura limit, so the
adapter can adjust Tx flow control at runtime without reallocating the
pool, and each SSO workslot carries a flat (port, queue) -> txq lookup
table for the Tx fast path.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
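
[Editor's note] For context, a minimal sketch of how an application drives
this adapter once the patch is applied. Device ids and port setup are
assumptions, error handling is trimmed, and only public
rte_event_eth_tx_adapter API calls are used:

#include <rte_event_eth_tx_adapter.h>
#include <rte_eventdev.h>

/* Assumed ids: eventdev 0 and a configured net/cnxk ethdev 0. */
#define TX_ADPTR_ID 0
#define EVDEV_ID    0
#define ETHDEV_ID   0

static int
setup_tx_adapter(void)
{
	struct rte_event_port_conf port_conf;
	uint32_t caps = 0;
	int rc;

	rc = rte_event_eth_tx_adapter_caps_get(EVDEV_ID, ETHDEV_ID, &caps);
	if (rc)
		return rc;
	/* event/cnxk + net/cnxk reports INTERNAL_PORT (see the caps_get
	 * callbacks below): no service core is needed, workers enqueue
	 * directly. */

	rc = rte_event_port_default_conf_get(EVDEV_ID, 0, &port_conf);
	if (rc)
		return rc;
	rc = rte_event_eth_tx_adapter_create(TX_ADPTR_ID, EVDEV_ID,
					     &port_conf);
	if (rc)
		return rc;

	/* tx_queue_id of -1 binds every Tx queue of the ethdev. */
	rc = rte_event_eth_tx_adapter_queue_add(TX_ADPTR_ID, ETHDEV_ID, -1);
	if (rc)
		return rc;
	return rte_event_eth_tx_adapter_start(TX_ADPTR_ID);
}

/* Worker side: tag the mbuf with its Tx queue and enqueue the event. */
static inline void
worker_tx_one(uint8_t event_port, struct rte_event *ev)
{
	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
	while (rte_event_eth_tx_adapter_enqueue(EVDEV_ID, event_port,
						ev, 1, 0) != 1)
		;
}
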
doc/guides/rel_notes/release_21_08.rst
drivers/common/cnxk/roc_nix.h
drivers/common/cnxk/roc_nix_queue.c
drivers/event/cnxk/cn10k_eventdev.c
drivers/event/cnxk/cn9k_eventdev.c
drivers/event/cnxk/cnxk_eventdev.h
drivers/event/cnxk/cnxk_eventdev_adptr.c

diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 08018d8..21249d7 100644
@@ -118,9 +118,9 @@ New Features
   usecases. Configuration happens via standard rawdev enq/deq operations. See
   the :doc:`../rawdevs/cnxk_bphy` rawdev guide for more details on this driver.
 
-* **Added support for Marvell CN10K, CN9K, event Rx adapter.**
+* **Added support for Marvell CN10K, CN9K, event Rx/Tx adapter.**
 
-  * Added Rx adapter support for event/cnxk when the ethernet device requested
+  * Added Rx/Tx adapter support for event/cnxk when the ethernet device requested
     is net/cnxk.
 
 * **Added cppc_cpufreq support to Power Management library.**
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 76613fe..822c190 100644
@@ -200,6 +200,7 @@ struct roc_nix_sq {
        uint64_t aura_handle;
        int16_t nb_sqb_bufs_adj;
        uint16_t nb_sqb_bufs;
+       uint16_t aura_sqb_bufs;
        plt_iova_t io_addr;
        void *lmt_addr;
        void *sqe_mem;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 0604e7a..7e2f86e 100644
@@ -587,12 +587,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
        aura.fc_ena = 1;
        aura.fc_addr = (uint64_t)sq->fc;
        aura.fc_hyst_bits = 0; /* Store count on all updates */
-       rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
+       rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
                                 &pool);
        if (rc)
                goto fail;
 
-       sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
+       sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
        if (sq->sqe_mem == NULL) {
                rc = NIX_ERR_NO_MEM;
                goto nomem;
@@ -600,11 +600,13 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 
        /* Fill the initial buffers */
        iova = (uint64_t)sq->sqe_mem;
-       for (count = 0; count < nb_sqb_bufs; count++) {
+       for (count = 0; count < NIX_MAX_SQB; count++) {
                roc_npa_aura_op_free(sq->aura_handle, 0, iova);
                iova += blk_sz;
        }
        roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+       roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
+       sq->aura_sqb_bufs = NIX_MAX_SQB;
 
        return rc;
 nomem:
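
[Editor's note] The roc_nix_queue.c change above always populates the SQB
pool with NIX_MAX_SQB buffers, then immediately clamps consumption back to
sq->nb_sqb_bufs through the aura limit. An NPA aura limit can be changed
cheaply at runtime, whereas resizing the pool would mean reallocating
sqe_mem; this is what lets the Tx adapter lift the limit to
CNXK_SSO_SQB_LIMIT when it takes ownership of a queue and restore it on
release. A sketch of the resulting pattern (the helper name is
illustrative; it mirrors cnxk_sso_sqb_aura_limit_edit() added later in
this patch):

/* Raise or restore the SQB budget of a queue without touching the pool.
 * aura_sqb_bufs (new field above) records how many buffers were actually
 * freed into the aura, so the limit can never exceed it. */
static void
sq_sqb_limit_set(struct roc_nix_sq *sq, bool adapter_owned)
{
	uint16_t limit = adapter_owned ?
		RTE_MIN((uint16_t)CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
		sq->nb_sqb_bufs;

	roc_npa_aura_limit_modify(sq->aura_handle, limit);
}
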
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index ba7d95f..8a9b04a 100644
@@ -44,6 +44,7 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
        /* First cache line is reserved for cookie */
        ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
        ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
+       ws->tx_base = ws->base;
        ws->hws_id = port_id;
        ws->swtag_req = 0;
        ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
@@ -233,6 +234,39 @@ cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
 }
 
+static int
+cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       int i;
+
+       if (dev->tx_adptr_data == NULL)
+               return 0;
+
+       for (i = 0; i < dev->nb_event_ports; i++) {
+               struct cn10k_sso_hws *ws = event_dev->data->ports[i];
+               void *ws_cookie;
+
+               ws_cookie = cnxk_sso_hws_get_cookie(ws);
+               ws_cookie = rte_realloc_socket(
+                       ws_cookie,
+                       sizeof(struct cnxk_sso_hws_cookie) +
+                               sizeof(struct cn10k_sso_hws) +
+                               (sizeof(uint64_t) * (dev->max_port_id + 1) *
+                                RTE_MAX_QUEUES_PER_PORT),
+                       RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+               if (ws_cookie == NULL)
+                       return -ENOMEM;
+               ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
+               memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
+                      sizeof(uint64_t) * (dev->max_port_id + 1) *
+                              RTE_MAX_QUEUES_PER_PORT);
+               event_dev->data->ports[i] = ws;
+       }
+
+       return 0;
+}
+
 static void
 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
@@ -493,6 +527,10 @@ cn10k_sso_start(struct rte_eventdev *event_dev)
 {
        int rc;
 
+       rc = cn10k_sso_updt_tx_adptr_data(event_dev);
+       if (rc < 0)
+               return rc;
+
        rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
                            cn10k_sso_hws_flush_events);
        if (rc < 0)
@@ -595,6 +633,55 @@ cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
 }
 
+static int
+cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
+                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+       int ret;
+
+       RTE_SET_USED(dev);
+       ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+       if (ret)
+               *caps = 0;
+       else
+               *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+
+       return 0;
+}
+
+static int
+cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
+                              const struct rte_eth_dev *eth_dev,
+                              int32_t tx_queue_id)
+{
+       int rc;
+
+       RTE_SET_USED(id);
+       rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
+       if (rc < 0)
+               return rc;
+       rc = cn10k_sso_updt_tx_adptr_data(event_dev);
+       if (rc < 0)
+               return rc;
+       cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+       return 0;
+}
+
+static int
+cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
+                              const struct rte_eth_dev *eth_dev,
+                              int32_t tx_queue_id)
+{
+       int rc;
+
+       RTE_SET_USED(id);
+       rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
+       if (rc < 0)
+               return rc;
+       return cn10k_sso_updt_tx_adptr_data(event_dev);
+}
+
 static struct rte_eventdev_ops cn10k_sso_dev_ops = {
        .dev_infos_get = cn10k_sso_info_get,
        .dev_configure = cn10k_sso_dev_configure,
@@ -614,6 +701,10 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
 
+       .eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
+       .eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
+       .eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,
+
        .timer_adapter_caps_get = cnxk_tim_caps_get,
 
        .dump = cnxk_sso_dump,
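
[Editor's note] On cn10k_sso_updt_tx_adptr_data(): each workslot lives
behind a small cookie header, and cnxk_sso_hws_get_cookie() recovers the
allocation start so the whole block can be grown with rte_realloc_socket().
tx_adptr_data[] is a flexible array member, so the grown block simply
appends a flat table of (max_port_id + 1) * RTE_MAX_QUEUES_PER_PORT
uint64_t txq pointers, indexed as table[port * RTE_MAX_QUEUES_PER_PORT +
queue]. The assumed layout and getter (the getter body is an assumption,
not quoted from the patch):

/*
 * [ struct cnxk_sso_hws_cookie ][ struct cn10k_sso_hws ][ tx_adptr_data[] ]
 * ^ allocation start            ^ event_dev->data->ports[i]
 */
static inline struct cnxk_sso_hws_cookie *
cnxk_sso_hws_get_cookie(void *ws)
{
	/* Assumed definition: step back over the cookie header. */
	return RTE_PTR_SUB(ws, sizeof(struct cnxk_sso_hws_cookie));
}
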
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index e386cb7..21f8032 100644
@@ -248,6 +248,66 @@ cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
 }
 
+static int
+cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       int i;
+
+       if (dev->tx_adptr_data == NULL)
+               return 0;
+
+       for (i = 0; i < dev->nb_event_ports; i++) {
+               if (dev->dual_ws) {
+                       struct cn9k_sso_hws_dual *dws =
+                               event_dev->data->ports[i];
+                       void *ws_cookie;
+
+                       ws_cookie = cnxk_sso_hws_get_cookie(dws);
+                       ws_cookie = rte_realloc_socket(
+                               ws_cookie,
+                               sizeof(struct cnxk_sso_hws_cookie) +
+                                       sizeof(struct cn9k_sso_hws_dual) +
+                                       (sizeof(uint64_t) *
+                                        (dev->max_port_id + 1) *
+                                        RTE_MAX_QUEUES_PER_PORT),
+                               RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+                       if (ws_cookie == NULL)
+                               return -ENOMEM;
+                       dws = RTE_PTR_ADD(ws_cookie,
+                                         sizeof(struct cnxk_sso_hws_cookie));
+                       memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
+                              sizeof(uint64_t) * (dev->max_port_id + 1) *
+                                      RTE_MAX_QUEUES_PER_PORT);
+                       event_dev->data->ports[i] = dws;
+               } else {
+                       struct cn9k_sso_hws *ws = event_dev->data->ports[i];
+                       void *ws_cookie;
+
+                       ws_cookie = cnxk_sso_hws_get_cookie(ws);
+                       ws_cookie = rte_realloc_socket(
+                               ws_cookie,
+                               sizeof(struct cnxk_sso_hws_cookie) +
+                                       sizeof(struct cn9k_sso_hws) +
+                                       (sizeof(uint64_t) *
+                                        (dev->max_port_id + 1) *
+                                        RTE_MAX_QUEUES_PER_PORT),
+                               RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+                       if (ws_cookie == NULL)
+                               return -ENOMEM;
+                       ws = RTE_PTR_ADD(ws_cookie,
+                                        sizeof(struct cnxk_sso_hws_cookie));
+                       memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
+                              sizeof(uint64_t) * (dev->max_port_id + 1) *
+                                      RTE_MAX_QUEUES_PER_PORT);
+                       event_dev->data->ports[i] = ws;
+               }
+       }
+       rte_mb();
+
+       return 0;
+}
+
 static void
 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
@@ -734,6 +794,10 @@ cn9k_sso_start(struct rte_eventdev *event_dev)
 {
        int rc;
 
+       rc = cn9k_sso_updt_tx_adptr_data(event_dev);
+       if (rc < 0)
+               return rc;
+
        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
@@ -844,6 +908,86 @@ cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
 }
 
+static int
+cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
+                            const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+       int ret;
+
+       RTE_SET_USED(dev);
+       ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
+       if (ret)
+               *caps = 0;
+       else
+               *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+
+       return 0;
+}
+
+static void
+cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
+                      bool ena)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cn9k_eth_txq *txq;
+       struct roc_nix_sq *sq;
+       int i;
+
+       if (tx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+                       cn9k_sso_txq_fc_update(eth_dev, i, ena);
+       } else {
+               uint16_t sq_limit;
+
+               sq = &cnxk_eth_dev->sqs[tx_queue_id];
+               txq = eth_dev->data->tx_queues[tx_queue_id];
+               sq_limit =
+                       ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
+                                   sq->nb_sqb_bufs;
+               txq->nb_sqb_bufs_adj =
+                       sq_limit -
+                       RTE_ALIGN_MUL_CEIL(sq_limit,
+                                          (1ULL << txq->sqes_per_sqb_log2)) /
+                               (1ULL << txq->sqes_per_sqb_log2);
+               txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+       }
+}
+
+static int
+cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t tx_queue_id)
+{
+       int rc;
+
+       RTE_SET_USED(id);
+       rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
+       if (rc < 0)
+               return rc;
+       cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
+       rc = cn9k_sso_updt_tx_adptr_data(event_dev);
+       if (rc < 0)
+               return rc;
+       cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+       return 0;
+}
+
+static int
+cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t tx_queue_id)
+{
+       int rc;
+
+       RTE_SET_USED(id);
+       rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
+       if (rc < 0)
+               return rc;
+       cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
+       return cn9k_sso_updt_tx_adptr_data(event_dev);
+}
+
 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
@@ -863,6 +1007,10 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
 
+       .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
+       .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
+       .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,
+
        .timer_adapter_caps_get = cnxk_tim_caps_get,
 
        .dump = cnxk_sso_dump,
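
[Editor's note] On cn9k_sso_txq_fc_update(): the last SQE slot of every
SQB holds the pointer chaining to the next SQB, so out of sq_limit SQBs
only the equivalent of sq_limit - ceil(sq_limit / sqes_per_sqb) is usable
for commands; the result is then derated to 70% as flow-control headroom.
Worked numbers under assumed inputs:

static uint16_t
fc_depth_example(void)
{
	/* Assumed: sq_limit = CNXK_SSO_SQB_LIMIT = 0x180 (384 SQBs),
	 * sqes_per_sqb_log2 = 5, i.e. 32 SQEs per SQB. */
	uint16_t sq_limit = 0x180;
	uint16_t sqes_per_sqb = 1u << 5;
	/* RTE_ALIGN_MUL_CEIL(384, 32) / 32 == ceil(384 / 32) == 12 */
	uint16_t chained = (sq_limit + sqes_per_sqb - 1) / sqes_per_sqb;
	uint16_t adj = sq_limit - chained;	/* 384 - 12 = 372 */

	return (70 * adj) / 100;		/* 260 -> nb_sqb_bufs_adj */
}
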
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 9d5d2d0..24e1be6 100644
@@ -8,6 +8,7 @@
 #include <rte_devargs.h>
 #include <rte_ethdev.h>
 #include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
 #include <rte_kvargs.h>
 #include <rte_mbuf_pool_ops.h>
 #include <rte_pci.h>
@@ -34,6 +35,7 @@
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 #define CNXK_SSO_WQE_SG_PTR    (9)
+#define CNXK_SSO_SQB_LIMIT     (0x180)
 
 #define CNXK_TT_FROM_TAG(x)        (((x) >> 32) & SSO_TT_EMPTY)
 #define CNXK_TT_FROM_EVENT(x)      (((x) >> 38) & SSO_TT_EMPTY)
@@ -86,9 +88,12 @@ struct cnxk_sso_evdev {
        rte_iova_t fc_iova;
        struct rte_mempool *xaq_pool;
        uint64_t rx_offloads;
+       uint64_t tx_offloads;
        uint64_t adptr_xae_cnt;
        uint16_t rx_adptr_pool_cnt;
        uint64_t *rx_adptr_pools;
+       uint64_t *tx_adptr_data;
+       uint16_t max_port_id;
        uint16_t tim_adptr_ring_cnt;
        uint16_t *timer_adptr_rings;
        uint64_t *timer_adptr_sz;
@@ -115,7 +120,10 @@ struct cn10k_sso_hws {
        uint64_t xaq_lmt __rte_cache_aligned;
        uint64_t *fc_mem;
        uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
+       /* Tx Fastpath data */
+       uint64_t tx_base __rte_cache_aligned;
        uintptr_t lmt_base;
+       uint8_t tx_adptr_data[];
 } __rte_cache_aligned;
 
 /* CN9K HWS ops */
@@ -140,7 +148,9 @@ struct cn9k_sso_hws {
        uint64_t xaq_lmt __rte_cache_aligned;
        uint64_t *fc_mem;
        uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
-       uint64_t base;
+       /* Tx Fastpath data */
+       uint64_t base __rte_cache_aligned;
+       uint8_t tx_adptr_data[];
 } __rte_cache_aligned;
 
 struct cn9k_sso_hws_state {
@@ -160,7 +170,9 @@ struct cn9k_sso_hws_dual {
        uint64_t xaq_lmt __rte_cache_aligned;
        uint64_t *fc_mem;
        uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
-       uint64_t base[2];
+       /* Tx Fastpath data */
+       uint64_t base[2] __rte_cache_aligned;
+       uint8_t tx_adptr_data[];
 } __rte_cache_aligned;
 
 struct cnxk_sso_hws_cookie {
@@ -267,5 +279,11 @@ int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev);
 int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev);
+int cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
+                                 const struct rte_eth_dev *eth_dev,
+                                 int32_t tx_queue_id);
+int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
+                                 const struct rte_eth_dev *eth_dev,
+                                 int32_t tx_queue_id);
 
 #endif /* __CNXK_EVENTDEV_H__ */
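
[Editor's note] On the header changes: the Tx fast-path fields deliberately
start a fresh cache line (__rte_cache_aligned on tx_base / base), so Tx
enqueue does not false-share with the scheduling state above them, and the
tx_adptr_data[] flexible array keeps the lookup table inside the same
allocation as the workslot. An illustrative compile-time check one could
add (not part of the patch):

#include <stddef.h>

_Static_assert(offsetof(struct cn10k_sso_hws, tx_base) %
		       RTE_CACHE_LINE_SIZE == 0,
	       "Tx fast-path data must start on its own cache line");
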
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 3b7ecb3..502da27 100644
@@ -223,3 +223,91 @@ cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
 
        return 0;
 }
+
+static int
+cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
+{
+       return roc_npa_aura_limit_modify(
+               sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+}
+
+static int
+cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,
+                           uint16_t eth_port_id, uint16_t tx_queue_id,
+                           void *txq)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint16_t max_port_id = dev->max_port_id;
+       uint64_t *txq_data = dev->tx_adptr_data;
+
+       if (txq_data == NULL || eth_port_id > max_port_id) {
+               max_port_id = RTE_MAX(max_port_id, eth_port_id);
+               txq_data = rte_realloc_socket(
+                       txq_data,
+                       (sizeof(uint64_t) * (max_port_id + 1) *
+                        RTE_MAX_QUEUES_PER_PORT),
+                       RTE_CACHE_LINE_SIZE, event_dev->data->socket_id);
+               if (txq_data == NULL)
+                       return -ENOMEM;
+       }
+
+       ((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])
+                txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;
+       dev->max_port_id = max_port_id;
+       dev->tx_adptr_data = txq_data;
+       return 0;
+}
+
+int
+cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t tx_queue_id)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       struct roc_nix_sq *sq;
+       int i, ret;
+       void *txq;
+
+       if (tx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+                       ret = cnxk_sso_tx_adapter_queue_add(event_dev,
+                                                           eth_dev, i);
+                       if (ret < 0)
+                               return ret;
+               }
+       } else {
+               txq = eth_dev->data->tx_queues[tx_queue_id];
+               sq = &cnxk_eth_dev->sqs[tx_queue_id];
+               cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
+               ret = cnxk_sso_updt_tx_queue_data(
+                       event_dev, eth_dev->data->port_id, tx_queue_id, txq);
+               if (ret < 0)
+                       return ret;
+
+               dev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;
+       }
+
+       return 0;
+}
+
+int
+cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t tx_queue_id)
+{
+       struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+       struct roc_nix_sq *sq;
+       int i, ret;
+
+       if (tx_queue_id < 0) {
+               for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+                       ret = cnxk_sso_tx_adapter_queue_del(event_dev,
+                                                           eth_dev, i);
+                       if (ret < 0)
+                               return ret;
+               }
+       } else {
+               sq = &cnxk_eth_dev->sqs[tx_queue_id];
+               cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+               ret = cnxk_sso_updt_tx_queue_data(
+                       event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
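
[Editor's note] Teardown from the application side mirrors the setup sketch
near the top; queue removal runs cnxk_sso_tx_adapter_queue_del() above,
which drops each SQ's aura limit back to sq->nb_sqb_bufs. Same assumed ids
as before:

static int
teardown_tx_adapter(void)
{
	int rc;

	rc = rte_event_eth_tx_adapter_stop(TX_ADPTR_ID);
	if (rc)
		return rc;
	/* -1 unbinds every Tx queue of the ethdev. */
	rc = rte_event_eth_tx_adapter_queue_del(TX_ADPTR_ID, ETHDEV_ID, -1);
	if (rc)
		return rc;
	return rte_event_eth_tx_adapter_free(TX_ADPTR_ID);
}
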