raw/cnxk_bphy: support retrieving BPHY device memory
authorTomasz Duszynski <tduszynski@marvell.com>
Mon, 21 Jun 2021 15:04:47 +0000 (17:04 +0200)
committerThomas Monjalon <thomas@monjalon.net>
Mon, 5 Jul 2021 21:08:44 +0000 (23:08 +0200)
Allow users to retrieve baseband PHY memory resources.

Signed-off-by: Jakub Palider <jpalider@marvell.com>
Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
doc/guides/rawdevs/cnxk_bphy.rst
drivers/raw/cnxk_bphy/cnxk_bphy.c
drivers/raw/cnxk_bphy/cnxk_bphy_irq.c
drivers/raw/cnxk_bphy/cnxk_bphy_irq.h
drivers/raw/cnxk_bphy/rte_pmd_bphy.h

index b69c5f3..16195d2 100644 (file)
@@ -17,6 +17,7 @@ Features
 The BPHY CGX/RPM implements following features in the rawdev API:
 
 - Access to BPHY CGX/RPM via a set of predefined messages
+- Access to BPHY memory
 
 Device Setup
 ------------
@@ -115,6 +116,15 @@ The former will setup low level interrupt handling while the latter will tear ev
 are also two convenience functions namely ``rte_pmd_bphy_intr_init()`` and
 ``rte_pmd_bphy_intr_fini()`` that take care of all details.
 
+
+Get device memory
+~~~~~~~~~~~~~~~~~
+
+This message is used to read the device MMIO address.
+
+The message must have its type set to ``CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET``. There is a convenience
+function ``rte_pmd_bphy_intr_mem_get()`` available that takes care of retrieving that address.
+
 Self test
 ---------
 
index 3f86795..278e26a 100644 (file)
@@ -53,6 +53,9 @@ cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
        case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
                cnxk_bphy_intr_fini(dev->dev_id);
                break;
+       case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
+               bphy_dev->queues[queue].rsp = &bphy_dev->mem;
+               break;
        default:
                ret = -EINVAL;
        }
index 991c2d7..13a0d8a 100644 (file)
@@ -57,3 +57,11 @@ cnxk_bphy_intr_fini(uint16_t dev_id)
        roc_bphy_intr_fini(irq_chip);
        bphy_dev->irq_chip = NULL;
 }
+
+struct bphy_mem *
+cnxk_bphy_mem_get(uint16_t dev_id)
+{
+       struct bphy_device *bphy_dev = cnxk_bphy_get_bphy_dev_by_dev_id(dev_id);
+
+       return &bphy_dev->mem;
+}
index 6b59218..5f87143 100644 (file)
@@ -31,6 +31,7 @@ struct bphy_device {
 
 int cnxk_bphy_intr_init(uint16_t dev_id);
 void cnxk_bphy_intr_fini(uint16_t dev_id);
+struct bphy_mem *cnxk_bphy_mem_get(uint16_t dev_id);
 uint64_t cnxk_bphy_irq_max_get(uint16_t dev_id);
 
 #endif /* _CNXK_BPHY_IRQ_ */
index c667d98..d08b14b 100644 (file)
@@ -103,6 +103,7 @@ struct cnxk_bphy_cgx_msg {
        void *data;
 };
 
+#define cnxk_bphy_mem      bphy_mem
 #define CNXK_BPHY_DEF_QUEUE 0
 
 enum cnxk_bphy_irq_msg_type {
@@ -115,6 +116,11 @@ enum cnxk_bphy_irq_msg_type {
 
 struct cnxk_bphy_irq_msg {
        enum cnxk_bphy_irq_msg_type type;
+       /*
+        * The data field, depending on message type, may point to
+        * - (deq) struct cnxk_bphy_mem for memory range request response
+        * - (xxx) NULL
+        */
        void *data;
 };
 
@@ -155,4 +161,28 @@ rte_pmd_bphy_intr_fini(uint16_t dev_id)
        rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
 }
 
+static __rte_always_inline struct cnxk_bphy_mem *
+rte_pmd_bphy_intr_mem_get(uint16_t dev_id)
+{
+       struct cnxk_bphy_irq_msg msg = {
+               .type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
+       };
+       struct rte_rawdev_buf *bufs[1];
+       struct rte_rawdev_buf buf;
+       int ret;
+
+       buf.buf_addr = &msg;
+       bufs[0] = &buf;
+
+       ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
+       if (ret)
+               return NULL;
+
+       ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
+       if (ret)
+               return NULL;
+
+       return buf.buf_addr;
+}
+
 #endif /* _CNXK_BPHY_H_ */