mempool/octeontx: probe fpavf PCIe devices
author Santosh Shukla <santosh.shukla@caviumnetworks.com>
Sun, 8 Oct 2017 12:40:04 +0000 (18:10 +0530)
committer Thomas Monjalon <thomas@monjalon.net>
Sun, 8 Oct 2017 17:30:50 +0000 (19:30 +0200)
A mempool device is a set of PCIe VFs.
On OcteonTX hardware, each mempool device is enumerated as a
separate SR-IOV VF PCIe device.

In order to expose these VFs as a mempool device:
on PCIe probe, the driver stores the information associated with the
PCIe device; later, upon an application pool request
(e.g. rte_mempool_create_empty), the infrastructure creates a pool
device backed by the previously probed PCIe VF devices.
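
For illustration only (not part of this patch), a later pool request
could claim one of the probed VFs along these lines; the helper name
octeontx_fpa_gpool_alloc() is hypothetical:

    /* Claim the first free, probed FPA VF (gpool id == vf_id). */
    static int
    octeontx_fpa_gpool_alloc(void)
    {
            uint16_t i;

            rte_spinlock_lock(&fpadev.lock);
            for (i = 0; i < FPA_VF_MAX; i++) {
                    /* Only VFs seen by fpavf_probe() have a valid bar0. */
                    if (fpadev.pool[i].bar0 != NULL &&
                        !fpadev.pool[i].is_inuse) {
                            fpadev.pool[i].is_inuse = true;
                            rte_spinlock_unlock(&fpadev.lock);
                            return i;
                    }
            }
            rte_spinlock_unlock(&fpadev.lock);
            return -ENOSPC;
    }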

Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
drivers/mempool/octeontx/octeontx_fpavf.c
drivers/mempool/octeontx/octeontx_fpavf.h

index 9bb7759..0b4a935 100644
  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_pci.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+
+#include "octeontx_fpavf.h"
+
+struct fpavf_res {
+       void            *pool_stack_base;
+       void            *bar0;
+       uint64_t        stack_ln_ptr;
+       uint16_t        domain_id;
+       uint16_t        vf_id;  /* gpool_id */
+       uint16_t        sz128;  /* Block size in cache lines */
+       bool            is_inuse;
+};
+
+struct octeontx_fpadev {
+       rte_spinlock_t lock;
+       uint8_t total_gpool_cnt;
+       struct fpavf_res pool[FPA_VF_MAX];
+};
+
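+/* Per-process registry of probed FPA VFs, populated by fpavf_probe() */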
+static struct octeontx_fpadev fpadev;
+
+static void
+octeontx_fpavf_setup(void)
+{
+       uint8_t i;
+       static bool init_once;
+
+       if (!init_once) {
+               rte_spinlock_init(&fpadev.lock);
+               fpadev.total_gpool_cnt = 0;
+
+               for (i = 0; i < FPA_VF_MAX; i++) {
+                       fpadev.pool[i].domain_id = ~0;
+                       fpadev.pool[i].stack_ln_ptr = 0;
+                       fpadev.pool[i].sz128 = 0;
+                       fpadev.pool[i].bar0 = NULL;
+                       fpadev.pool[i].pool_stack_base = NULL;
+                       fpadev.pool[i].is_inuse = false;
+               }
+               init_once = true;
+       }
+}
+
+static int
+octeontx_fpavf_identify(void *bar0)
+{
+       uint64_t val;
+       uint16_t domain_id;
+       uint16_t vf_id;
+       uint64_t stack_ln_ptr;
+
+       val = fpavf_read64((void *)((uintptr_t)bar0 +
+                               FPA_VF_VHAURA_CNT_THRESHOLD(0)));
+
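+       /*
+        * The value read above encodes the VF's identity:
+        * domain id in bits [23:8], vf id in bits [39:24].
+        */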
+       domain_id = (val >> 8) & 0xffff;
+       vf_id = (val >> 24) & 0xffff;
+
+       stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
+                                       FPA_VF_VHPOOL_THRESHOLD(0)));
+       if (vf_id >= FPA_VF_MAX) {
+               fpavf_log_err("vf_id(%d) >= FPA_VF_MAX(%d)\n",
+                             vf_id, FPA_VF_MAX);
+               return -1;
+       }
+
+       if (fpadev.pool[vf_id].is_inuse) {
+               fpavf_log_err("vf_id %d is already in use\n", vf_id);
+               return -1;
+       }
+
+       fpadev.pool[vf_id].domain_id = domain_id;
+       fpadev.pool[vf_id].vf_id = vf_id;
+       fpadev.pool[vf_id].bar0 = bar0;
+       fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
+
+       /* SUCCESS */
+       return vf_id;
+}
+
+/* FPAVF PCIe device (i.e. mempool device) probe */
+static int
+fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+       uint8_t *idreg;
+       int res;
+       struct fpavf_res *fpa;
+
+       RTE_SET_USED(pci_drv);
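+       /*
+        * fpa is consumed only by the fpavf_log_dbg() call below, which
+        * the non-debug build compiles out.
+        */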
+       RTE_SET_USED(fpa);
+
+       /* For secondary processes, the primary has done all the work */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
+       if (pci_dev->mem_resource[0].addr == NULL) {
+               fpavf_log_err("Empty bar0 %p\n", pci_dev->mem_resource[0].addr);
+               return -ENODEV;
+       }
+       idreg = pci_dev->mem_resource[0].addr;
+
+       octeontx_fpavf_setup();
+
+       res = octeontx_fpavf_identify(idreg);
+       if (res < 0)
+               return -1;
+
+       fpa = &fpadev.pool[res];
+       fpadev.total_gpool_cnt++;
+       rte_wmb();
+
+       fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%" PRIx64,
+                      fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
+                      fpa->vf_id, fpa->stack_ln_ptr);
+
+       return 0;
+}
+
+static const struct rte_pci_id pci_fpavf_map[] = {
+       {
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+                               PCI_DEVICE_ID_OCTEONTX_FPA_VF)
+       },
+       {
+               .vendor_id = 0,
+       },
+};
+
+static struct rte_pci_driver pci_fpavf = {
+       .id_table = pci_fpavf_map,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+       .probe = fpavf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
index 1c70372..33f0366 100644
@@ -34,6 +34,7 @@
 #define        __OCTEONTX_FPAVF_H__
 
 #include <rte_debug.h>
+#include <rte_io.h>
 
 #ifdef RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG
 #define fpavf_log_info(fmt, args...) \
 #define        FPA_VF0_APERTURE_SHIFT          22
 #define FPA_AURA_SET_SIZE              16
 
+
+/*
+ * On the Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO
+ * operations are safe to use without any IO memory barriers.
+ */
+#define fpavf_read64 rte_read64_relaxed
+#define fpavf_write64 rte_write64_relaxed
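+
+/*
+ * Illustrative only: polling a counter needs no barrier on this SoC.
+ * FPA_VF_VHAURA_CNT is assumed here; it is not defined in this excerpt.
+ *
+ *     uint64_t cnt = fpavf_read64((uint8_t *)bar0 + FPA_VF_VHAURA_CNT(0));
+ */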
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define fpavf_load_pair(val0, val1, addr) ({           \
+                       asm volatile(                   \
+                       "ldp %x[x0], %x[x1], [%x[p1]]"  \
+                       :[x0]"=r"(val0), [x1]"=r"(val1) \
+                       :[p1]"r"(addr)                  \
+                       ); })
+
+#define fpavf_store_pair(val0, val1, addr) ({          \
+                       asm volatile(                   \
+                       "stp %x[x0], %x[x1], [%x[p1]]"  \
+                       ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+                       ); })
+#else /* Unoptimized fallbacks for building on non-arm64 architectures */
+
+#define fpavf_load_pair(val0, val1, addr)              \
+do {                                                   \
+       val0 = rte_read64(addr);                        \
+       val1 = rte_read64(((uint8_t *)addr) + 8);       \
+} while (0)
+
+#define fpavf_store_pair(val0, val1, addr)             \
+do {                                                   \
+       rte_write64(val0, addr);                        \
+       rte_write64(val1, (((uint8_t *)addr) + 8));     \
+} while (0)
+#endif
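+
+/*
+ * Illustrative only: moving two adjacent 64-bit registers with the paired
+ * accessors. FPA_VF_VHPOOL_START_ADDR is assumed here; it is not defined
+ * in this excerpt.
+ *
+ *     uint64_t lo, hi;
+ *
+ *     fpavf_load_pair(lo, hi, (uint8_t *)bar0 + FPA_VF_VHPOOL_START_ADDR(0));
+ *     fpavf_store_pair(lo, hi, (uint8_t *)bar0 + FPA_VF_VHPOOL_START_ADDR(0));
+ */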
+
 #endif /* __OCTEONTX_FPAVF_H__ */