dma/idxd: add vchan status, statistics and burst-capacity operations
[dpdk.git] / drivers / dma / idxd / idxd_common.c
index 86056db..ea6413c 100644 (file)
@@ -65,6 +65,8 @@ __submit(struct idxd_dmadev *idxd)
        if (++idxd->batch_idx_write > idxd->max_batches)
                idxd->batch_idx_write = 0;
 
+       idxd->stats.submitted += idxd->batch_size;
+
        idxd->batch_start += idxd->batch_size;
        idxd->batch_size = 0;
        idxd->batch_idx_ring[idxd->batch_idx_write] = idxd->batch_start;
@@ -161,6 +163,23 @@ get_comp_status(struct idxd_completion *c)
        }
 }
 
+int
+idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+               enum rte_dma_vchan_status *status)
+{
+       struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+       uint16_t last_batch_write = idxd->batch_idx_write == 0 ? idxd->max_batches :
+                       idxd->batch_idx_write - 1; /* batch ring indices run 0..max_batches (see wrap in __submit), so step back from 0 wraps to max_batches */
+       uint8_t bstatus = (idxd->batch_comp_ring[last_batch_write].status != 0); /* non-zero completion status => most recently submitted batch has completed */
+
+       /* An IDXD device will always be either active or idle.
+        * RTE_DMA_VCHAN_HALTED_ERROR is therefore not supported by IDXD.
+        */
+       *status = bstatus ? RTE_DMA_VCHAN_IDLE : RTE_DMA_VCHAN_ACTIVE; /* completed last batch => idle; otherwise work is still in flight */
+
+       return 0; /* always succeeds; status reported via out-parameter */
+}
+
 static __rte_always_inline int
 batch_ok(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_status_code *status)
 {
@@ -276,6 +295,8 @@ batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_
        const uint16_t b_len = b_end - b_start;
        if (b_len == 1) {/* not a batch */
                *status = get_comp_status(&idxd->batch_comp_ring[idxd->batch_idx_read]);
+               if (*status != RTE_DMA_STATUS_SUCCESSFUL) /* check the completion code, not the pointer */
+                       idxd->stats.errors++;
                idxd->ids_avail++;
                idxd->ids_returned++;
                idxd->batch_idx_read = next_batch;
@@ -297,6 +318,8 @@ batch_completed_status(struct idxd_dmadev *idxd, uint16_t max_ops, enum rte_dma_
                struct idxd_completion *c = (void *)
                                &idxd->desc_ring[(b_start + ret) & idxd->desc_ring_mask];
                status[ret] = (ret < bcount) ? get_comp_status(c) : RTE_DMA_STATUS_NOT_ATTEMPTED;
+               if (status[ret] != RTE_DMA_STATUS_SUCCESSFUL)
+                       idxd->stats.errors++;
        }
        idxd->ids_avail = idxd->ids_returned += ret;
 
@@ -355,6 +378,7 @@ idxd_completed(void *dev_private, uint16_t qid __rte_unused, uint16_t max_ops,
                ret += batch;
        } while (batch > 0 && *has_error == false);
 
+       idxd->stats.completed += ret;
        *last_idx = idxd->ids_returned - 1;
        return ret;
 }
@@ -371,6 +395,7 @@ idxd_completed_status(void *dev_private, uint16_t qid __rte_unused, uint16_t max
                ret += batch;
        } while (batch > 0);
 
+       idxd->stats.completed += ret;
        *last_idx = idxd->ids_returned - 1;
        return ret;
 }
@@ -404,6 +429,25 @@ idxd_dump(const struct rte_dma_dev *dev, FILE *f)
        return 0;
 }
 
+int
+idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+               struct rte_dma_stats *stats, uint32_t stats_sz)
+{
+       struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+       if (stats_sz < sizeof(*stats)) /* caller's buffer too small to hold a full stats copy */
+               return -EINVAL;
+       *stats = idxd->stats; /* struct copy of the driver-maintained counters */
+       return 0;
+}
+
+int
+idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
+{
+       struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+       idxd->stats = (struct rte_dma_stats){0}; /* zero every counter via compound-literal assignment */
+       return 0;
+}
+
 int
 idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
 {
@@ -424,6 +468,26 @@ idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t
        return 0;
 }
 
+uint16_t
+idxd_burst_capacity(const void *dev_private, uint16_t vchan __rte_unused)
+{
+       const struct idxd_dmadev *idxd = dev_private;
+       uint16_t write_idx = idxd->batch_start + idxd->batch_size; /* index of the next descriptor slot to be written */
+       uint16_t used_space;
+
+       /* Check for space in the batch ring */
+       if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+                       idxd->batch_idx_write + 1 == idxd->batch_idx_read) /* batch ring full when write is one slot behind read (valid indices 0..max_batches) */
+               return 0;
+
+       /* Subtract and mask to get in correct range */
+       used_space = (write_idx - idxd->ids_returned) & idxd->desc_ring_mask; /* NOTE(review): assumes in-flight descriptor count never exceeds desc_ring_mask — confirm */
+
+       const int ret = RTE_MIN((idxd->desc_ring_mask - used_space),
+                       (idxd->max_batch_size - idxd->batch_size)); /* capacity limited by both free descriptors and remaining room in current batch */
+       return ret < 0 ? 0 : (uint16_t)ret; /* defensive clamp: never report negative capacity */
+}
+
 int
 idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
                uint32_t conf_sz)
@@ -509,6 +573,7 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,
        dmadev->fp_obj->submit = idxd_submit;
        dmadev->fp_obj->completed = idxd_completed;
        dmadev->fp_obj->completed_status = idxd_completed_status;
+       dmadev->fp_obj->burst_capacity = idxd_burst_capacity;
 
        idxd = dmadev->data->dev_private;
        *idxd = *base_idxd; /* copy over the main fields already passed in */