event/octeontx2: fix XAQ pool reconfigure
[dpdk.git] / drivers / event / octeontx2 / otx2_evdev.c
index 7e23435..cdadbb2 100644 (file)
@@ -885,29 +885,27 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
                struct otx2_ssogws *ws;
                uintptr_t base;
 
-               /* Free memory prior to re-allocation if needed */
                if (event_dev->data->ports[i] != NULL) {
                        ws = event_dev->data->ports[i];
-                       rte_free(ssogws_get_cookie(ws));
-                       ws = NULL;
-               }
+               } else {
+                       /* Allocate event port memory */
+                       ws = rte_zmalloc_socket("otx2_sso_ws",
+                                               sizeof(struct otx2_ssogws) +
+                                               RTE_CACHE_LINE_SIZE,
+                                               RTE_CACHE_LINE_SIZE,
+                                               event_dev->data->socket_id);
+                       if (ws == NULL) {
+                               otx2_err("Failed to alloc memory for port=%d",
+                                        i);
+                               rc = -ENOMEM;
+                               break;
+                       }
 
-               /* Allocate event port memory */
-               ws = rte_zmalloc_socket("otx2_sso_ws",
-                                       sizeof(struct otx2_ssogws) +
-                                       RTE_CACHE_LINE_SIZE,
-                                       RTE_CACHE_LINE_SIZE,
-                                       event_dev->data->socket_id);
-               if (ws == NULL) {
-                       otx2_err("Failed to alloc memory for port=%d", i);
-                       rc = -ENOMEM;
-                       break;
+                       /* First cache line is reserved for cookie */
+                       ws = (struct otx2_ssogws *)
+                               ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
                }
 
-               /* First cache line is reserved for cookie */
-               ws = (struct otx2_ssogws *)
-                       ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
-
                ws->port = i;
                base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
                sso_set_port_ops(ws, base);
@@ -986,7 +984,7 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev)
 
        dev->fc_iova = mz->iova;
        dev->fc_mem = mz->addr;
-
+       *dev->fc_mem = 0;
        aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
        memset(aura, 0, sizeof(struct npa_aura_s));
 
@@ -1062,6 +1060,19 @@ sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
        return otx2_mbox_process(mbox);
 }
 
+static int
+sso_ggrp_free_xaq(struct otx2_sso_evdev *dev)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+       struct sso_release_xaq *req;
+
+       otx2_sso_dbg("Freeing XAQ for GGRPs");
+       req = otx2_mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
+       req->hwgrps = dev->nb_event_queues;
+
+       return otx2_mbox_process(mbox);
+}
+
 static void
 sso_lf_teardown(struct otx2_sso_evdev *dev,
                enum otx2_sso_lf_type lf_type)
@@ -1452,12 +1463,16 @@ sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
                        ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
                        ws->swtag_req = 0;
                        ws->vws = 0;
+                       ws->fc_mem = dev->fc_mem;
+                       ws->xaq_lmt = dev->xaq_lmt;
                } else {
                        struct otx2_ssogws *ws;
 
                        ws = event_dev->data->ports[i];
                        ssogws_reset(ws);
                        ws->swtag_req = 0;
+                       ws->fc_mem = dev->fc_mem;
+                       ws->xaq_lmt = dev->xaq_lmt;
                }
        }
 
@@ -1498,28 +1513,30 @@ int
 sso_xae_reconfigure(struct rte_eventdev *event_dev)
 {
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
-       struct rte_mempool *prev_xaq_pool;
        int rc = 0;
 
        if (event_dev->data->dev_started)
                sso_cleanup(event_dev, 0);
 
-       prev_xaq_pool = dev->xaq_pool;
+       rc = sso_ggrp_free_xaq(dev);
+       if (rc < 0) {
+               otx2_err("Failed to free XAQ\n");
+               return rc;
+       }
+
+       rte_mempool_free(dev->xaq_pool);
        dev->xaq_pool = NULL;
        rc = sso_xaq_allocate(dev);
        if (rc < 0) {
                otx2_err("Failed to alloc xaq pool %d", rc);
-               rte_mempool_free(prev_xaq_pool);
                return rc;
        }
        rc = sso_ggrp_alloc_xaq(dev);
        if (rc < 0) {
                otx2_err("Failed to alloc xaq to ggrp %d", rc);
-               rte_mempool_free(prev_xaq_pool);
                return rc;
        }
 
-       rte_mempool_free(prev_xaq_pool);
        rte_mb();
        if (event_dev->data->dev_started)
                sso_cleanup(event_dev, 1);