{
if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
return HISI_DMA_HIP08_QUEUE_BASE;
+ else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09)
+ return HISI_DMA_HIP09_QUEUE_BASE;
else
return 0;
}
}
static void
-hisi_dma_init_hw(struct hisi_dma_dev *hw)
+hisi_dma_init_common(struct hisi_dma_dev *hw)
{
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_BASE_L_REG,
lower_32_bits(hw->sqe_iova));
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM0_REG, 0);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM1_REG, 0);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM2_REG, 0);
+}
+
+static void
+hisi_dma_init_hw(struct hisi_dma_dev *hw)
+{
+ hisi_dma_init_common(hw);
if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM3_REG,
HISI_DMA_HIP08_QUEUE_CTRL0_ERR_ABORT_B, false);
hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG,
HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
- hisi_dma_update_queue_mbit(hw,
- HISI_DMA_HIP08_QUEUE_INT_MASK_REG,
+ hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG,
HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
+ } else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) {
+ hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_CTRL0_REG,
+ HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M, false);
+ hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG,
+ HISI_DMA_HIP09_QUEUE_INT_MASK_M, true);
+ hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG,
+ HISI_DMA_HIP09_QUEUE_INT_MASK_M, true);
+ hisi_dma_update_queue_mbit(hw,
+ HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG,
+ HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true);
+ hisi_dma_update_queue_mbit(hw,
+ HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG,
+ HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true);
+ hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG,
+ HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B, true);
+ hisi_dma_update_bit(hw,
+ HISI_DMA_HIP09_QUEUE_CFG_REG(hw->queue_id),
+ HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B,
+ true);
}
}
{
if (revision == HISI_DMA_REVISION_HIP08B)
return HISI_DMA_REG_LAYOUT_HIP08;
+ else if (revision >= HISI_DMA_REVISION_HIP09A)
+ return HISI_DMA_REG_LAYOUT_HIP09;
else
return HISI_DMA_REG_LAYOUT_INVALID;
}
struct rte_dma_info *dev_info,
uint32_t info_sz)
{
- RTE_SET_USED(dev);
+ struct hisi_dma_dev *hw = dev->data->dev_private;
RTE_SET_USED(info_sz);
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
RTE_DMA_CAPA_OPS_COPY;
+ if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09)
+ dev_info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS;
+
dev_info->max_vchans = 1;
dev_info->max_desc = HISI_DMA_MAX_DESC_NUM;
dev_info->min_desc = HISI_DMA_MIN_DESC_NUM;
hw->submitted = 0;
hw->completed = 0;
hw->errors = 0;
+ hw->qfulls = 0;
hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
HISI_DMA_QUEUE_CTRL0_EN_B, true);
static int
hisi_dma_close(struct rte_dma_dev *dev)
{
- /* The dmadev already stopped */
- hisi_dma_free_iomem(dev->data->dev_private);
+ /*
+ * Only the primary process releases the queue iomem.
+ * NOTE(review): assumes the primary owns this mapping — consistent
+ * with probe, where HW reset is likewise done only in the primary.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* The dmadev already stopped */
+ hisi_dma_free_iomem(dev->data->dev_private);
+ }
return 0;
}
hw->submitted = 0;
hw->completed = 0;
hw->errors = 0;
+ hw->qfulls = 0;
return 0;
}
-static void
-hisi_dma_get_dump_range(struct hisi_dma_dev *hw, uint32_t *start, uint32_t *end)
+/* Report the vchan state by decoding the queue FSM status register. */
+static int
+hisi_dma_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
+ enum rte_dma_vchan_status *status)
{
- if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
- *start = HISI_DMA_HIP08_DUMP_START_REG;
- *end = HISI_DMA_HIP08_DUMP_END_REG;
- } else {
- *start = 0;
- *end = 0;
- }
+ struct hisi_dma_dev *hw = dev->data->dev_private;
+ uint32_t val;
+
+ /* This device exposes a single vchan, so the index is ignored. */
+ RTE_SET_USED(vchan);
+
+ val = hisi_dma_read_queue(hw, HISI_DMA_QUEUE_FSM_REG);
+ val = FIELD_GET(HISI_DMA_QUEUE_FSM_STS_M, val);
+ /* RUN -> active, CPL -> idle; any other FSM state is an error halt. */
+ if (val == HISI_DMA_STATE_RUN)
+ *status = RTE_DMA_VCHAN_ACTIVE;
+ else if (val == HISI_DMA_STATE_CPL)
+ *status = RTE_DMA_VCHAN_IDLE;
+ else
+ *status = RTE_DMA_VCHAN_HALTED_ERROR;
+
+ return 0;
}
static void
-hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f)
+hisi_dma_dump_range(struct hisi_dma_dev *hw, FILE *f, uint32_t start,
+ uint32_t end)
{
#define DUMP_REGNUM_PER_LINE 4
- uint32_t start, end;
uint32_t cnt, i;
- hisi_dma_get_dump_range(hw, &start, &end);
-
- (void)fprintf(f, " common-register:\n");
-
cnt = 0;
for (i = start; i <= end; i += sizeof(uint32_t)) {
if (cnt % DUMP_REGNUM_PER_LINE == 0)
(void)fprintf(f, "\n");
}
+/* Dump the common-register ranges that match the device's register layout. */
+static void
+hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f)
+{
+ /*
+ * Per-layout dump ranges. HIP08 has one contiguous range; HIP09
+ * splits its dump space into four discontiguous regions (A-D), hence
+ * multiple table entries sharing the same layout id.
+ */
+ struct {
+ uint8_t reg_layout;
+ uint32_t start;
+ uint32_t end;
+ } reg_info[] = {
+ { HISI_DMA_REG_LAYOUT_HIP08,
+ HISI_DMA_HIP08_DUMP_START_REG,
+ HISI_DMA_HIP08_DUMP_END_REG },
+ { HISI_DMA_REG_LAYOUT_HIP09,
+ HISI_DMA_HIP09_DUMP_REGION_A_START_REG,
+ HISI_DMA_HIP09_DUMP_REGION_A_END_REG },
+ { HISI_DMA_REG_LAYOUT_HIP09,
+ HISI_DMA_HIP09_DUMP_REGION_B_START_REG,
+ HISI_DMA_HIP09_DUMP_REGION_B_END_REG },
+ { HISI_DMA_REG_LAYOUT_HIP09,
+ HISI_DMA_HIP09_DUMP_REGION_C_START_REG,
+ HISI_DMA_HIP09_DUMP_REGION_C_END_REG },
+ { HISI_DMA_REG_LAYOUT_HIP09,
+ HISI_DMA_HIP09_DUMP_REGION_D_START_REG,
+ HISI_DMA_HIP09_DUMP_REGION_D_END_REG },
+ };
+ uint32_t i;
+
+ (void)fprintf(f, " common-register:\n");
+ /* Emit every range belonging to this device's layout. */
+ for (i = 0; i < RTE_DIM(reg_info); i++) {
+ if (hw->reg_layout != reg_info[i].reg_layout)
+ continue;
+ hisi_dma_dump_range(hw, f, reg_info[i].start, reg_info[i].end);
+ }
+}
+
static void
hisi_dma_dump_read_queue(struct hisi_dma_dev *hw, uint32_t qoff,
char *buffer, int max_sz)
" ridx: %u cridx: %u\n"
" sq_head: %u sq_tail: %u cq_sq_head: %u\n"
" cq_head: %u cqs_completed: %u cqe_vld: %u\n"
- " submitted: %" PRIu64 " completed: %" PRIu64 " errors %"
- PRIu64"\n",
+ " submitted: %" PRIu64 " completed: %" PRIu64 " errors: %"
+ PRIu64 " qfulls: %" PRIu64 "\n",
hw->revision, hw->queue_id,
hw->sq_depth_mask > 0 ? hw->sq_depth_mask + 1 : 0,
hw->ridx, hw->cridx,
hw->sq_head, hw->sq_tail, hw->cq_sq_head,
hw->cq_head, hw->cqs_completed, hw->cqe_vld,
- hw->submitted, hw->completed, hw->errors);
+ hw->submitted, hw->completed, hw->errors, hw->qfulls);
hisi_dma_dump_queue(hw, f);
hisi_dma_dump_common(hw, f);
RTE_SET_USED(vchan);
- if (((hw->sq_tail + 1) & hw->sq_depth_mask) == hw->sq_head)
+ if (((hw->sq_tail + 1) & hw->sq_depth_mask) == hw->sq_head) {
+ hw->qfulls++;
return -ENOSPC;
+ }
sqe->dw0 = rte_cpu_to_le_32(SQE_OPCODE_M2M);
sqe->dw1 = 0;
uint16_t count = 0;
uint64_t misc;
- while (true) {
+ while (count < hw->cq_depth) {
cqe = &hw->cqe[cq_head];
misc = cqe->misc;
misc = rte_le_to_cpu_64(misc);
break;
csq_head = FIELD_GET(CQE_SQ_HEAD_MASK, misc);
+ if (unlikely(csq_head > hw->sq_depth_mask)) {
+ /**
+ * Defensive programming to prevent overflow of the
+ * status array indexed by csq_head. Only error logs
+ * are used for prompting.
+ */
+ HISI_DMA_ERR(hw, "invalid csq_head:%u!\n", csq_head);
+ count = 0;
+ break;
+ }
if (unlikely(misc & CQE_STATUS_MASK))
hw->status[csq_head] = FIELD_GET(CQE_STATUS_MASK,
misc);
}
sq_head = (sq_head + 1) & hw->sq_depth_mask;
}
+ *last_idx = hw->cridx + i - 1;
if (i > 0) {
hw->cridx += i;
- *last_idx = hw->cridx - 1;
hw->sq_head = sq_head;
+ hw->completed += i;
}
- hw->completed += i;
return i;
}
hw->status[sq_head] = HISI_DMA_STATUS_SUCCESS;
sq_head = (sq_head + 1) & hw->sq_depth_mask;
}
+ *last_idx = hw->cridx + cpl_num - 1;
if (likely(cpl_num > 0)) {
hw->cridx += cpl_num;
- *last_idx = hw->cridx - 1;
hw->sq_head = sq_head;
+ hw->completed += cpl_num;
}
- hw->completed += cpl_num;
return cpl_num;
}
sq_head - 1 - sq_tail;
}
-static void
-hisi_dma_gen_pci_device_name(const struct rte_pci_device *pci_dev,
- char *name, size_t size)
-{
- memset(name, 0, size);
- (void)snprintf(name, size, "%x:%x.%x",
- pci_dev->addr.bus, pci_dev->addr.devid,
- pci_dev->addr.function);
-}
-
+/* Build the per-queue dmadev name: "<pci-name>-ch<queue_id>". */
static void
hisi_dma_gen_dev_name(const struct rte_pci_device *pci_dev,
- uint8_t queue_id, char *name, size_t size)
+ uint8_t queue_id, char *dev_name, size_t size)
{
- memset(name, 0, size);
- (void)snprintf(name, size, "%x:%x.%x-ch%u",
- pci_dev->addr.bus, pci_dev->addr.devid,
- pci_dev->addr.function, queue_id);
+ char name[RTE_DEV_NAME_MAX_LEN] = { 0 };
+
+ memset(dev_name, 0, size);
+ /*
+ * Use the canonical EAL PCI name formatter instead of a hand-rolled
+ * "bus:devid.function" string so device names stay consistent with
+ * the rest of EAL.
+ */
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ (void)snprintf(dev_name, size, "%s-ch%u", name, queue_id);
}
/**
* dev_stop| |
* | v
* ------------------
+ * | CPL |
+ * ------------------
+ * ^ |
+ * hardware | |
+ * completed all| |dev_submit
+ * descriptors | |
+ * | |
+ * ------------------
* | RUN |
* ------------------
*
.vchan_setup = hisi_dma_vchan_setup,
.stats_get = hisi_dma_stats_get,
.stats_reset = hisi_dma_stats_reset,
+ .vchan_status = hisi_dma_vchan_status,
.dev_dump = hisi_dma_dump,
};
hw->cq_head_reg = hisi_dma_queue_regaddr(hw,
HISI_DMA_QUEUE_CQ_HEAD_REG);
- ret = hisi_dma_reset_hw(hw);
- if (ret) {
- HISI_DMA_LOG(ERR, "%s init device fail!", name);
- (void)rte_dma_pmd_release(name);
- return -EIO;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ ret = hisi_dma_reset_hw(hw);
+ if (ret) {
+ HISI_DMA_LOG(ERR, "%s init device fail!", name);
+ (void)rte_dma_pmd_release(name);
+ return -EIO;
+ }
}
dev->state = RTE_DMA_DEV_READY;
uint8_t i;
int ret;
- hisi_dma_gen_pci_device_name(pci_dev, name, sizeof(name));
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
if (pci_dev->mem_resource[2].addr == NULL) {
HISI_DMA_LOG(ERR, "%s BAR2 is NULL!\n", name);
return ret;
HISI_DMA_LOG(DEBUG, "%s read PCI revision: 0x%x", name, revision);
- hisi_dma_init_gbl(pci_dev->mem_resource[2].addr, revision);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ hisi_dma_init_gbl(pci_dev->mem_resource[2].addr, revision);
for (i = 0; i < HISI_DMA_MAX_HW_QUEUES; i++) {
ret = hisi_dma_create(pci_dev, i, revision);