baseband/fpga_5gnr_fec: add queue configuration
author Nicolas Chautru <nicolas.chautru@intel.com>
Sat, 18 Apr 2020 22:46:41 +0000 (15:46 -0700)
committer Akhil Goyal <akhil.goyal@nxp.com>
Sun, 19 Apr 2020 20:34:26 +0000 (22:34 +0200)
Add functions to create and configure queues for the device.
No capabilities are exposed yet.
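
A minimal usage sketch (not part of this patch) of how an application
reaches the new code through the public bbdev API; the single queue,
the 32-entry ring and the LDPC decode op type below are arbitrary
choices for illustration:

    #include <rte_bbdev.h>
    #include <rte_lcore.h>

    static int
    configure_fpga_queues(uint16_t dev_id)
    {
            struct rte_bbdev_queue_conf conf = {
                    .socket = rte_socket_id(),
                    .queue_size = 32,       /* power of two, below ring max */
                    .priority = 0,
                    .deferred_start = true, /* start the queue explicitly */
                    .op_type = RTE_BBDEV_OP_LDPC_DEC,
            };

            /* Driver setup_queues op: allocates the SW rings and checks
             * the PF/VF queue mapping.
             */
            if (rte_bbdev_setup_queues(dev_id, 1, rte_socket_id()) < 0)
                    return -1;

            /* Driver queue_setup op: binds bbdev queue 0 to a free HW
             * queue and programs its ring control registers.
             */
            if (rte_bbdev_queue_configure(dev_id, 0, &conf) < 0)
                    return -1;

            if (rte_bbdev_start(dev_id) < 0)
                    return -1;

            /* Driver queue_start op: clears and enables the HW ring. */
            return rte_bbdev_queue_start(dev_id, 0);
    }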

Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
Acked-by: Dave Burley <dave.burley@accelercomm.com>
Acked-by: Niall Power <niall.power@intel.com>
Reviewed-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c

index ab51cb7..b39c92f 100644
@@ -223,10 +223,120 @@ struct __rte_packed fpga_ring_ctrl_reg {
 struct fpga_5gnr_fec_device {
        /** Base address of MMIO registers (BAR0) */
        void *mmio_base;
+       /** Base address of memory for sw rings */
+       void *sw_rings;
+       /** Physical address of sw_rings */
+       rte_iova_t sw_rings_phys;
+       /** Number of bytes available for each queue in device. */
+       uint32_t sw_ring_size;
+       /** Max number of entries available for each queue in device */
+       uint32_t sw_ring_max_depth;
+       /** Base address of response tail pointer buffer */
+       uint32_t *tail_ptrs;
+       /** Physical address of tail pointers */
+       rte_iova_t tail_ptr_phys;
+       /** Queues flush completion flag */
+       uint64_t *flush_queue_status;
+       /** Bitmap capturing which queues are bound to the PF/VF */
+       uint64_t q_bound_bit_map;
+       /** Bitmap capturing which queues have already been assigned */
+       uint64_t q_assigned_bit_map;
        /** True if this is a PF FPGA FEC device */
        bool pf_device;
 };
 
+/* Structure associated with each queue. */
+struct __rte_cache_aligned fpga_queue {
+       struct fpga_ring_ctrl_reg ring_ctrl_reg;  /* Ring Control Register */
+       union fpga_dma_desc *ring_addr;  /* Virtual address of software ring */
+       uint64_t *ring_head_addr;  /* Virtual address of completion_head */
+       uint64_t shadow_completion_head; /* Shadow completion head value */
+       uint16_t head_free_desc;  /* Ring head */
+       uint16_t tail;  /* Ring tail */
+       /* Mask used to wrap enqueued descriptors on the sw ring */
+       uint32_t sw_ring_wrap_mask;
+       uint32_t irq_enable;  /* Enable ops dequeue interrupts if set to 1 */
+       uint8_t q_idx;  /* Queue index */
+       struct fpga_5gnr_fec_device *d;
+       /* MMIO register of shadow_tail used to enqueue descriptors */
+       void *shadow_tail_addr;
+};
+
+/* Write to 16 bit MMIO register address */
+static inline void
+mmio_write_16(void *addr, uint16_t value)
+{
+       *((volatile uint16_t *)(addr)) = rte_cpu_to_le_16(value);
+}
+
+/* Write to 32 bit MMIO register address */
+static inline void
+mmio_write_32(void *addr, uint32_t value)
+{
+       *((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
+}
+
+/* Write to 64 bit MMIO register address */
+static inline void
+mmio_write_64(void *addr, uint64_t value)
+{
+       *((volatile uint64_t *)(addr)) = rte_cpu_to_le_64(value);
+}
+
+/* Write an 8 bit register of an FPGA 5GNR FEC device */
+static inline void
+fpga_reg_write_8(void *mmio_base, uint32_t offset, uint8_t payload)
+{
+       void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
+       *((volatile uint8_t *)(reg_addr)) = payload;
+}
+
+/* Write a 16 bit register of an FPGA 5GNR FEC device */
+static inline void
+fpga_reg_write_16(void *mmio_base, uint32_t offset, uint16_t payload)
+{
+       void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
+       mmio_write_16(reg_addr, payload);
+}
+
+/* Write a 32 bit register of an FPGA 5GNR FEC device */
+static inline void
+fpga_reg_write_32(void *mmio_base, uint32_t offset, uint32_t payload)
+{
+       void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
+       mmio_write_32(reg_addr, payload);
+}
+
+/* Write a 64 bit register of an FPGA 5GNR FEC device */
+static inline void
+fpga_reg_write_64(void *mmio_base, uint32_t offset, uint64_t payload)
+{
+       void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
+       mmio_write_64(reg_addr, payload);
+}
+
+/* Write a ring control register of an FPGA 5GNR FEC device */
+static inline void
+fpga_ring_reg_write(void *mmio_base, uint32_t offset,
+               struct fpga_ring_ctrl_reg payload)
+{
+       fpga_reg_write_64(mmio_base, offset, payload.ring_base_addr);
+       fpga_reg_write_64(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_ADDR,
+                       payload.ring_head_addr);
+       fpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SIZE,
+                       payload.ring_size);
+       fpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
+                       payload.head_point);
+       fpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN,
+                       payload.flush_queue_en);
+       fpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
+                       payload.shadow_tail);
+       fpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_MISC,
+                       payload.misc);
+       fpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
+                       payload.enable);
+}
+
 /* Read a register of FPGA 5GNR FEC device */
 static inline uint32_t
 fpga_reg_read_32(void *mmio_base, uint32_t offset)
index b3f2d0e..ec74860 100644
 static int fpga_5gnr_fec_logtype;
 
 static int
-fpga_dev_close(struct rte_bbdev *dev __rte_unused)
+fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
 {
+       /* Number of queues bound to a PF/VF */
+       uint32_t hw_q_num = 0;
+       uint32_t ring_size, payload, address, q_id, offset;
+       rte_iova_t phys_addr;
+       struct fpga_ring_ctrl_reg ring_reg;
+       struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
+
+       address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
+       if (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {
+               rte_bbdev_log(ERR,
+                               "Queue-PF/VF mapping is not set! Was the PF configured for device (%s)?",
+                               dev->data->name);
+               return -EPERM;
+       }
+
+       /* Clear queue registers structure */
+       memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
+
+       /* Scan the queue map.
+        * If a queue is valid and mapped to the calling PF/VF, the read
+        * value is the hardware queue ID; otherwise it is
+        * FPGA_INVALID_HW_QUEUE_ID.
+        */
+       for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
+               uint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,
+                               FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
+
+               rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
+                               dev->device->name, q_id, hw_q_id);
+
+               if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {
+                       fpga_dev->q_bound_bit_map |= (1ULL << q_id);
+                       /* Clear queue register of found queue */
+                       offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
+                               (sizeof(struct fpga_ring_ctrl_reg) * q_id);
+                       fpga_ring_reg_write(fpga_dev->mmio_base,
+                                       offset, ring_reg);
+                       ++hw_q_num;
+               }
+       }
+       if (hw_q_num == 0) {
+               rte_bbdev_log(ERR,
+                       "No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!");
+               return -ENODEV;
+       }
+
+       if (num_queues > hw_q_num) {
+               rte_bbdev_log(ERR,
+                       "Not enough queues for device %s! Requested: %u, available: %u",
+                       dev->device->name, num_queues, hw_q_num);
+               return -EINVAL;
+       }
+
+       ring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
+
+       /* Enforce 32 byte alignment */
+       RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
+
+       /* Allocate memory for SW descriptor rings */
+       fpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
+                       num_queues * ring_size, RTE_CACHE_LINE_SIZE,
+                       socket_id);
+       if (fpga_dev->sw_rings == NULL) {
+               rte_bbdev_log(ERR,
+                               "Failed to allocate memory for %s:%u sw_rings",
+                               dev->device->driver->name, dev->data->dev_id);
+               return -ENOMEM;
+       }
+
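+       /* Record the ring memory IOVA and the per-queue ring size and depth */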
+       fpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);
+       fpga_dev->sw_ring_size = ring_size;
+       fpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;
+
+       /* Allocate memory for ring flush status */
+       fpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,
+                       sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
+       if (fpga_dev->flush_queue_status == NULL) {
+               rte_bbdev_log(ERR,
+                               "Failed to allocate memory for %s:%u flush_queue_status",
+                               dev->device->driver->name, dev->data->dev_id);
+               return -ENOMEM;
+       }
+
+       /* Set the flush status address registers */
+       phys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);
+
+       address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
+       payload = (uint32_t)(phys_addr);
+       fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
+
+       address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
+       payload = (uint32_t)(phys_addr >> 32);
+       fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
+
+       return 0;
+}
+
+static int
+fpga_dev_close(struct rte_bbdev *dev)
+{
+       struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
+
+       rte_free(fpga_dev->sw_rings);
+       rte_free(fpga_dev->flush_queue_status);
+
        return 0;
 }
 
@@ -80,9 +185,267 @@ fpga_dev_info_get(struct rte_bbdev *dev,
        }
 }
 
+/**
+ * Find the index of a queue that is bound to the current PF/VF and has not
+ * yet been assigned. Return -1 when no queue is available.
+ */
+static inline int
+fpga_find_free_queue_idx(struct rte_bbdev *dev,
+               const struct rte_bbdev_queue_conf *conf)
+{
+       struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+       uint64_t q_idx;
+       uint8_t i = 0;
+       uint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;
+
+       if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
+               i = FPGA_NUM_DL_QUEUES;
+               range = FPGA_TOTAL_NUM_QUEUES;
+       }
+
+       for (; i < range; ++i) {
+               q_idx = 1ULL << i;
+               /* Check if index of queue is bound to current PF/VF */
+               if (d->q_bound_bit_map & q_idx)
+                       /* Check if found queue was not already assigned */
+                       if (!(d->q_assigned_bit_map & q_idx)) {
+                               d->q_assigned_bit_map |= q_idx;
+                               return i;
+                       }
+       }
+
+       rte_bbdev_log(INFO, "Failed to find free queue on %s", dev->data->name);
+
+       return -1;
+}
+
+static int
+fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
+               const struct rte_bbdev_queue_conf *conf)
+{
+       uint32_t address, ring_offset;
+       struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+       struct fpga_queue *q;
+       int8_t q_idx;
+
+       /* Check if there is a free queue to assign */
+       q_idx = fpga_find_free_queue_idx(dev, conf);
+       if (q_idx == -1)
+               return -1;
+
+       /* Allocate the queue data structure. */
+       q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
+                       RTE_CACHE_LINE_SIZE, conf->socket);
+       if (q == NULL) {
+               /* Mark queue as un-assigned */
+               d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
+               rte_bbdev_log(ERR, "Failed to allocate queue memory");
+               return -ENOMEM;
+       }
+
+       q->d = d;
+       q->q_idx = q_idx;
+
+       /* Set ring_base_addr */
+       q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
+       q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +
+                       (d->sw_ring_size * queue_id);
+
+       /* Allocate memory for the completion head variable */
+       q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
+                       sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
+       if (q->ring_head_addr == NULL) {
+               /* Mark queue as un-assigned */
+               d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
+               rte_free(q);
+               rte_bbdev_log(ERR,
+                               "Failed to allocate memory for %s:%u completion_head",
+                               dev->device->driver->name, dev->data->dev_id);
+               return -ENOMEM;
+       }
+       /* Set ring_head_addr */
+       q->ring_ctrl_reg.ring_head_addr =
+                       rte_malloc_virt2iova(q->ring_head_addr);
+
+       /* Clear shadow_completion_head */
+       q->shadow_completion_head = 0;
+
+       /* Set ring_size */
+       if (conf->queue_size > FPGA_RING_MAX_SIZE) {
+               /* Mark queue as un-assigned */
+               d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
+               rte_free(q->ring_head_addr);
+               rte_free(q);
+               rte_bbdev_log(ERR,
+                               "Queue size %d is too big (max: %d) for %s:%u",
+                               conf->queue_size, FPGA_RING_MAX_SIZE,
+                               dev->device->driver->name, dev->data->dev_id);
+               return -EINVAL;
+       }
+       q->ring_ctrl_reg.ring_size = conf->queue_size;
+
+       /* Set the miscellaneous FPGA register */
+       /* Max iteration number for TTI mitigation - todo */
+       q->ring_ctrl_reg.max_ul_dec = 0;
+       /* Enable max iteration number for TTI - todo */
+       q->ring_ctrl_reg.max_ul_dec_en = 0;
+
+       /* Enable the ring */
+       q->ring_ctrl_reg.enable = 1;
+
+       /* Set FPGA head_point and tail registers */
+       q->ring_ctrl_reg.head_point = q->tail = 0;
+
+       /* Set FPGA shadow_tail register */
+       q->ring_ctrl_reg.shadow_tail = q->tail;
+
+       /* Calculate the ring offset for the found queue */
+       ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
+                       (sizeof(struct fpga_ring_ctrl_reg) * q_idx);
+
+       /* Set FPGA Ring Control Registers */
+       fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);
+
+       /* Store MMIO register of shadow_tail */
+       address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
+       q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);
+
+       q->head_free_desc = q->tail;
+
+       /* Set wrap mask */
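+       /* Note: mask-based wrapping assumes conf->queue_size is a power of two. */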
+       q->sw_ring_wrap_mask = conf->queue_size - 1;
+
+       rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
+                       dev->data->dev_id, queue_id, q->q_idx);
+
+       dev->data->queues[queue_id].queue_private = q;
+
+       rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA queue[%d]",
+                       queue_id, q_idx);
+
+       return 0;
+}
+
+static int
+fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
+{
+       struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+       struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+       struct fpga_ring_ctrl_reg ring_reg;
+       uint32_t offset;
+
+       rte_bbdev_log_debug("FPGA Queue[%d] released", queue_id);
+
+       if (q != NULL) {
+               memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
+               offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
+                       (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
+               /* Disable queue */
+               fpga_reg_write_8(d->mmio_base,
+                               offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
+               /* Clear queue registers */
+               fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
+
+               /* Mark the Queue as un-assigned */
+               d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q->q_idx));
+               rte_free(q->ring_head_addr);
+               rte_free(q);
+               dev->data->queues[queue_id].queue_private = NULL;
+       }
+
+       return 0;
+}
+
+/* Function starts a device queue. */
+static int
+fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
+{
+       struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+#ifdef RTE_LIBRTE_BBDEV_DEBUG
+       if (d == NULL) {
+               rte_bbdev_log(ERR, "Invalid device pointer");
+               return -1;
+       }
+#endif
+       struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+       uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
+                       (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
+       uint8_t enable = 0x01;
+       uint16_t zero = 0x0000;
+
+       /* Clear queue head and tail variables */
+       q->tail = q->head_free_desc = 0;
+
+       /* Clear FPGA head_point and tail registers */
+       fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
+                       zero);
+       fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
+                       zero);
+
+       /* Enable queue */
+       fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
+                       enable);
+
+       rte_bbdev_log_debug("FPGA Queue[%d] started", queue_id);
+       return 0;
+}
+
+/* Function stops a device queue. */
+static int
+fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
+{
+       struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+#ifdef RTE_LIBRTE_BBDEV_DEBUG
+       if (d == NULL) {
+               rte_bbdev_log(ERR, "Invalid device pointer");
+               return -1;
+       }
+#endif
+       struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+       uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
+                       (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
+       uint8_t payload = 0x01;
+       uint8_t counter = 0;
+       uint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /
+                       FPGA_TIMEOUT_CHECK_INTERVAL;
+
+       /* Set flush_queue_en bit to trigger queue flushing */
+       fpga_reg_write_8(d->mmio_base,
+                       offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);
+
+       /* Wait for the queue flush to complete.
+        * The FPGA updates the completion flag once queue flushing has
+        * finished. If the flag is not updated within 1 ms the flush is
+        * considered to have failed.
+        */
+       while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)
+                       & payload)) {
+               if (counter > timeout) {
+                       rte_bbdev_log(ERR, "FPGA Queue Flush failed for queue %d",
+                                       queue_id);
+                       return -1;
+               }
+               usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
+               counter++;
+       }
+
+       /* Disable queue */
+       payload = 0x00;
+       fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
+                       payload);
+
+       rte_bbdev_log_debug("FPGA Queue[%d] stopped", queue_id);
+       return 0;
+}
+
 static const struct rte_bbdev_ops fpga_ops = {
+       .setup_queues = fpga_setup_queues,
        .close = fpga_dev_close,
        .info_get = fpga_dev_info_get,
+       .queue_setup = fpga_queue_setup,
+       .queue_stop = fpga_queue_stop,
+       .queue_start = fpga_queue_start,
+       .queue_release = fpga_queue_release,
 };
 
 /* Initialization Function */