/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 HiSilicon Limited
 */

#include <inttypes.h>
#include <string.h>

#include <rte_bus_pci.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_io.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dmadev_pmd.h>

#include "hisi_dmadev.h"
RTE_LOG_REGISTER_DEFAULT(hisi_dma_logtype, INFO);
#define HISI_DMA_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, hisi_dma_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
#define HISI_DMA_LOG_RAW(hw, level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, hisi_dma_logtype, \
		"%s %s(): " fmt "\n", (hw)->data->dev_name, \
		__func__, ##args)
#define HISI_DMA_DEBUG(hw, fmt, args...) \
	HISI_DMA_LOG_RAW(hw, DEBUG, fmt, ## args)
#define HISI_DMA_INFO(hw, fmt, args...) \
	HISI_DMA_LOG_RAW(hw, INFO, fmt, ## args)
#define HISI_DMA_WARN(hw, fmt, args...) \
	HISI_DMA_LOG_RAW(hw, WARNING, fmt, ## args)
#define HISI_DMA_ERR(hw, fmt, args...) \
	HISI_DMA_LOG_RAW(hw, ERR, fmt, ## args)
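/*
 * Each hardware queue owns a fixed-size window of registers inside the
 * device BAR. The window base depends on the register layout (HIP08 vs
 * HIP09), so all per-queue accesses go through the helpers below, which
 * translate a queue-relative offset into an absolute BAR offset.
 */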
static uint32_t
hisi_dma_queue_base(struct hisi_dma_dev *hw)
{
	if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
		return HISI_DMA_HIP08_QUEUE_BASE;
	else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09)
		return HISI_DMA_HIP09_QUEUE_BASE;
	else
		return 0;
}

static volatile void *
hisi_dma_queue_regaddr(struct hisi_dma_dev *hw, uint32_t qoff)
{
	uint32_t off = hisi_dma_queue_base(hw) +
		       hw->queue_id * HISI_DMA_QUEUE_REGION_SIZE + qoff;

	return (volatile void *)((char *)hw->io_base + off);
}
static void
hisi_dma_write_reg(void *base, uint32_t off, uint32_t val)
{
	rte_write32(rte_cpu_to_le_32(val),
		    (volatile void *)((char *)base + off));
}

static void
hisi_dma_write_dev(struct hisi_dma_dev *hw, uint32_t off, uint32_t val)
{
	hisi_dma_write_reg(hw->io_base, off, val);
}

static void
hisi_dma_write_queue(struct hisi_dma_dev *hw, uint32_t qoff, uint32_t val)
{
	uint32_t off = hisi_dma_queue_base(hw) +
		       hw->queue_id * HISI_DMA_QUEUE_REGION_SIZE + qoff;

	hisi_dma_write_dev(hw, off, val);
}

static uint32_t
hisi_dma_read_reg(void *base, uint32_t off)
{
	uint32_t val = rte_read32((volatile void *)((char *)base + off));

	return rte_le_to_cpu_32(val);
}

static uint32_t
hisi_dma_read_dev(struct hisi_dma_dev *hw, uint32_t off)
{
	return hisi_dma_read_reg(hw->io_base, off);
}

static uint32_t
hisi_dma_read_queue(struct hisi_dma_dev *hw, uint32_t qoff)
{
	uint32_t off = hisi_dma_queue_base(hw) +
		       hw->queue_id * HISI_DMA_QUEUE_REGION_SIZE + qoff;

	return hisi_dma_read_dev(hw, off);
}
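/*
 * Bit-level helpers: read-modify-write a single bit (or a whole bit mask) in
 * a device-global or per-queue register. Register contents are converted
 * to/from little-endian by the read/write helpers above.
 */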
static void
hisi_dma_update_bit(struct hisi_dma_dev *hw, uint32_t off, uint32_t pos,
		    bool set)
{
	uint32_t tmp = hisi_dma_read_dev(hw, off);
	uint32_t mask = 1u << pos;

	tmp = set ? tmp | mask : tmp & ~mask;
	hisi_dma_write_dev(hw, off, tmp);
}

static void
hisi_dma_update_queue_bit(struct hisi_dma_dev *hw, uint32_t qoff, uint32_t pos,
			  bool set)
{
	uint32_t tmp = hisi_dma_read_queue(hw, qoff);
	uint32_t mask = 1u << pos;

	tmp = set ? tmp | mask : tmp & ~mask;
	hisi_dma_write_queue(hw, qoff, tmp);
}

static void
hisi_dma_update_queue_mbit(struct hisi_dma_dev *hw, uint32_t qoff,
			   uint32_t mask, bool set)
{
	uint32_t tmp = hisi_dma_read_queue(hw, qoff);

	tmp = set ? tmp | mask : tmp & ~mask;
	hisi_dma_write_queue(hw, qoff, tmp);
}
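/*
 * Poll the per-queue FSM state register until "cond" becomes true or the
 * timeout expires; the statement expression evaluates to 0 on success and
 * -ETIME on timeout.
 */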
#define hisi_dma_poll_hw_state(hw, val, cond, sleep_us, timeout_us) ({ \
	uint32_t timeout = 0; \
	while (timeout++ <= ((timeout_us) / (sleep_us))) { \
		(val) = hisi_dma_read_queue(hw, HISI_DMA_QUEUE_FSM_REG); \
		if (cond) \
			break; \
		rte_delay_us(sleep_us); \
	} \
	(cond) ? 0 : -ETIME; \
})
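/*
 * Bring one hardware queue back to the IDLE state: pause the queue and clear
 * its enable bit, wait for the FSM to leave RUN, assert the per-queue reset,
 * clear the SQ tail and CQ head pointers, un-pause, and finally wait for the
 * FSM to report IDLE.
 */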
static int
hisi_dma_reset_hw(struct hisi_dma_dev *hw)
{
#define POLL_SLEEP_US 100
#define POLL_TIMEOUT_US 10000

	uint32_t tmp;
	int ret;

	hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
				  HISI_DMA_QUEUE_CTRL0_PAUSE_B, true);
	hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
				  HISI_DMA_QUEUE_CTRL0_EN_B, false);

	ret = hisi_dma_poll_hw_state(hw, tmp,
		FIELD_GET(HISI_DMA_QUEUE_FSM_STS_M, tmp) != HISI_DMA_STATE_RUN,
		POLL_SLEEP_US, POLL_TIMEOUT_US);
	if (ret) {
		HISI_DMA_ERR(hw, "disable dma timeout!");
		return ret;
	}

	hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG,
				  HISI_DMA_QUEUE_CTRL1_RESET_B, true);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_TAIL_REG, 0);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_HEAD_REG, 0);
	hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
				  HISI_DMA_QUEUE_CTRL0_PAUSE_B, false);

	ret = hisi_dma_poll_hw_state(hw, tmp,
		FIELD_GET(HISI_DMA_QUEUE_FSM_STS_M, tmp) == HISI_DMA_STATE_IDLE,
		POLL_SLEEP_US, POLL_TIMEOUT_US);
	if (ret) {
		HISI_DMA_ERR(hw, "reset dma timeout!");
		return ret;
	}

	return 0;
}
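/*
 * Program the per-queue context that is common to all register layouts:
 * SQ/CQ base IOVAs, SQ/CQ depths, ring pointers reset to zero and the error
 * interrupt number registers cleared.
 */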
static void
hisi_dma_init_common(struct hisi_dma_dev *hw)
{
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_BASE_L_REG,
			     lower_32_bits(hw->sqe_iova));
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_BASE_H_REG,
			     upper_32_bits(hw->sqe_iova));
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_BASE_L_REG,
			     lower_32_bits(hw->cqe_iova));
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_BASE_H_REG,
			     upper_32_bits(hw->cqe_iova));
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_DEPTH_REG,
			     hw->sq_depth_mask);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_DEPTH_REG, hw->cq_depth - 1);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_TAIL_REG, 0);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_HEAD_REG, 0);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM0_REG, 0);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM1_REG, 0);
	hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM2_REG, 0);
}
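/*
 * Layout-specific initialization on top of the common context. For HIP08 the
 * extra error interrupt number registers are cleared, abort-on-error is
 * disabled and the per-queue interrupt mask bits are set (the data path is
 * polled). For HIP09, error aborting is likewise disabled, the queue and
 * error interrupt mask bits are set, VA addressing is enabled for the queue
 * and link-down reporting is masked for this queue.
 */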
static void
hisi_dma_init_hw(struct hisi_dma_dev *hw)
{
	hisi_dma_init_common(hw);

	if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
		hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM3_REG,
				     0);
		hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM4_REG,
				     0);
		hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM5_REG,
				     0);
		hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM6_REG,
				     0);
		hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
				HISI_DMA_HIP08_QUEUE_CTRL0_ERR_ABORT_B, false);
		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG,
				HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG,
				HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
	} else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) {
		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_CTRL0_REG,
				HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M, false);
		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG,
				HISI_DMA_HIP09_QUEUE_INT_MASK_M, true);
		hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG,
				HISI_DMA_HIP09_QUEUE_INT_MASK_M, true);
		hisi_dma_update_queue_mbit(hw,
				HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG,
				HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true);
		hisi_dma_update_queue_mbit(hw,
				HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG,
				HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true);
		hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG,
				HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B, true);
		hisi_dma_update_bit(hw,
				HISI_DMA_HIP09_QUEUE_CFG_REG(hw->queue_id),
				HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B,
				true);
	}
}
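/*
 * One-time global initialization, performed once per PCI device rather than
 * per queue. A temporary hisi_dma_dev with only io_base filled in is enough
 * to reach the global registers; on HIP08B the DMA mode select bit is set.
 */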
static void
hisi_dma_init_gbl(void *pci_bar, uint8_t revision)
{
	struct hisi_dma_dev hw;

	memset(&hw, 0, sizeof(hw));
	hw.io_base = pci_bar;

	if (revision == HISI_DMA_REVISION_HIP08B)
		hisi_dma_update_bit(&hw, HISI_DMA_HIP08_MODE_REG,
				    HISI_DMA_HIP08_MODE_SEL_B, true);
}
static uint8_t
hisi_dma_reg_layout(uint8_t revision)
{
	if (revision == HISI_DMA_REVISION_HIP08B)
		return HISI_DMA_REG_LAYOUT_HIP08;
	else if (revision >= HISI_DMA_REVISION_HIP09A)
		return HISI_DMA_REG_LAYOUT_HIP09;
	else
		return HISI_DMA_REG_LAYOUT_INVALID;
}

static void
hisi_dma_zero_iomem(struct hisi_dma_dev *hw)
{
	memset(hw->iomz->addr, 0, hw->iomz_sz);
}
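/*
 * Reserve one IOVA-contiguous memzone holding, in order, the SQ ring, the CQ
 * ring (ring_size plus HISI_DMA_CQ_RESERVED entries) and the per-descriptor
 * status array; each part is rounded up to a cache line. The hardware only
 * sees the SQ/CQ IOVAs, the status array stays host-private.
 */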
static int
hisi_dma_alloc_iomem(struct hisi_dma_dev *hw, uint16_t ring_size,
		     const char *dev_name)
{
	uint32_t sq_size = sizeof(struct hisi_dma_sqe) * ring_size;
	uint32_t cq_size = sizeof(struct hisi_dma_cqe) *
			   (ring_size + HISI_DMA_CQ_RESERVED);
	uint32_t status_size = sizeof(uint16_t) * ring_size;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *iomz;
	uint32_t total_size;

	sq_size = RTE_CACHE_LINE_ROUNDUP(sq_size);
	cq_size = RTE_CACHE_LINE_ROUNDUP(cq_size);
	status_size = RTE_CACHE_LINE_ROUNDUP(status_size);
	total_size = sq_size + cq_size + status_size;

	(void)snprintf(mz_name, sizeof(mz_name), "hisi_dma:%s", dev_name);
	iomz = rte_memzone_reserve(mz_name, total_size, hw->data->numa_node,
				   RTE_MEMZONE_IOVA_CONTIG);
	if (iomz == NULL) {
		HISI_DMA_ERR(hw, "malloc %s iomem fail!", mz_name);
		return -ENOMEM;
	}

	hw->iomz = iomz;
	hw->iomz_sz = total_size;
	hw->sqe = iomz->addr;
	hw->cqe = (void *)((char *)iomz->addr + sq_size);
	hw->status = (void *)((char *)iomz->addr + sq_size + cq_size);
	hw->sqe_iova = iomz->iova;
	hw->cqe_iova = iomz->iova + sq_size;
	hw->sq_depth_mask = ring_size - 1;
	hw->cq_depth = ring_size + HISI_DMA_CQ_RESERVED;
	hisi_dma_zero_iomem(hw);

	return 0;
}
static void
hisi_dma_free_iomem(struct hisi_dma_dev *hw)
{
	if (hw->iomz != NULL)
		rte_memzone_free(hw->iomz);

	hw->iomz = NULL;
	hw->sq_depth_mask = 0;
}
static int
hisi_dma_info_get(const struct rte_dma_dev *dev,
		  struct rte_dma_info *dev_info,
		  uint32_t info_sz)
{
	struct hisi_dma_dev *hw = dev->data->dev_private;

	RTE_SET_USED(info_sz);

	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
			     RTE_DMA_CAPA_OPS_COPY;
	if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09)
		dev_info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS;
	dev_info->max_vchans = 1;
	dev_info->max_desc = HISI_DMA_MAX_DESC_NUM;
	dev_info->min_desc = HISI_DMA_MIN_DESC_NUM;

	return 0;
}
static int
hisi_dma_configure(struct rte_dma_dev *dev,
		   const struct rte_dma_conf *conf,
		   uint32_t conf_sz)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(conf);
	RTE_SET_USED(conf_sz);
	return 0;
}
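/*
 * The ring size must be a power of two because the SQ indexes are wrapped
 * with sq_depth_mask rather than a modulo; the rings are freed and
 * reallocated here so a later vchan_setup can change the depth.
 */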
static int
hisi_dma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
		     const struct rte_dma_vchan_conf *conf,
		     uint32_t conf_sz)
{
	struct hisi_dma_dev *hw = dev->data->dev_private;
	int ret;

	RTE_SET_USED(vchan);
	RTE_SET_USED(conf_sz);

	if (!rte_is_power_of_2(conf->nb_desc)) {
		HISI_DMA_ERR(hw, "Number of desc must be power of 2!");
		return -EINVAL;
	}

	hisi_dma_free_iomem(hw);
	ret = hisi_dma_alloc_iomem(hw, conf->nb_desc, dev->data->dev_name);
	if (ret)
		return ret;

	return 0;
}
static int
hisi_dma_start(struct rte_dma_dev *dev)
{
	struct hisi_dma_dev *hw = dev->data->dev_private;

	if (hw->iomz == NULL) {
		HISI_DMA_ERR(hw, "Vchan was not setup, start fail!");
		return -EINVAL;
	}

	/* Reset the dmadev to a known state. This includes:
	 * 1) zero the iomem, which also covers the status fields.
	 * 2) init hardware registers.
	 * 3) init index values to zero.
	 * 4) init running statistics.
	 */
	hisi_dma_zero_iomem(hw);
	hisi_dma_init_hw(hw);
	hw->ridx = 0;
	hw->cridx = 0;
	hw->sq_head = 0;
	hw->sq_tail = 0;
	hw->cq_sq_head = 0;
	hw->cq_head = 0;
	hw->cqs_completed = 0;
	hw->cqe_vld = 1;
	hw->submitted = 0;
	hw->completed = 0;
	hw->errors = 0;
	hw->qfulls = 0;

	hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
				  HISI_DMA_QUEUE_CTRL0_EN_B, true);

	return 0;
}
static int
hisi_dma_stop(struct rte_dma_dev *dev)
{
	return hisi_dma_reset_hw(dev->data->dev_private);
}

static int
hisi_dma_close(struct rte_dma_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* The dmadev has already been stopped. */
		hisi_dma_free_iomem(dev->data->dev_private);
	}

	return 0;
}
static int
hisi_dma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
		   struct rte_dma_stats *stats,
		   uint32_t stats_sz)
{
	struct hisi_dma_dev *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);
	RTE_SET_USED(stats_sz);
	stats->submitted = hw->submitted;
	stats->completed = hw->completed;
	stats->errors = hw->errors;

	return 0;
}

static int
hisi_dma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
	struct hisi_dma_dev *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);
	hw->submitted = 0;
	hw->completed = 0;
	hw->errors = 0;
	hw->qfulls = 0;

	return 0;
}
static int
hisi_dma_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
		      enum rte_dma_vchan_status *status)
{
	struct hisi_dma_dev *hw = dev->data->dev_private;
	uint32_t val;

	RTE_SET_USED(vchan);

	val = hisi_dma_read_queue(hw, HISI_DMA_QUEUE_FSM_REG);
	val = FIELD_GET(HISI_DMA_QUEUE_FSM_STS_M, val);
	if (val == HISI_DMA_STATE_RUN)
		*status = RTE_DMA_VCHAN_ACTIVE;
	else if (val == HISI_DMA_STATE_CPL)
		*status = RTE_DMA_VCHAN_IDLE;
	else
		*status = RTE_DMA_VCHAN_HALTED_ERROR;

	return 0;
}
static void
hisi_dma_dump_range(struct hisi_dma_dev *hw, FILE *f, uint32_t start,
		    uint32_t end)
{
#define DUMP_REGNUM_PER_LINE 4

	uint32_t cnt = 0;
	uint32_t i;

	for (i = start; i <= end; i += sizeof(uint32_t)) {
		if (cnt % DUMP_REGNUM_PER_LINE == 0)
			(void)fprintf(f, " [%4x]:", i);
		(void)fprintf(f, " 0x%08x", hisi_dma_read_dev(hw, i));
		cnt++;
		if (cnt % DUMP_REGNUM_PER_LINE == 0)
			(void)fprintf(f, "\n");
	}
	if (cnt % DUMP_REGNUM_PER_LINE)
		(void)fprintf(f, "\n");
}
static void
hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f)
{
	struct {
		uint8_t reg_layout;
		uint32_t start;
		uint32_t end;
	} reg_info[] = {
		{ HISI_DMA_REG_LAYOUT_HIP08,
		  HISI_DMA_HIP08_DUMP_START_REG,
		  HISI_DMA_HIP08_DUMP_END_REG },
		{ HISI_DMA_REG_LAYOUT_HIP09,
		  HISI_DMA_HIP09_DUMP_REGION_A_START_REG,
		  HISI_DMA_HIP09_DUMP_REGION_A_END_REG },
		{ HISI_DMA_REG_LAYOUT_HIP09,
		  HISI_DMA_HIP09_DUMP_REGION_B_START_REG,
		  HISI_DMA_HIP09_DUMP_REGION_B_END_REG },
		{ HISI_DMA_REG_LAYOUT_HIP09,
		  HISI_DMA_HIP09_DUMP_REGION_C_START_REG,
		  HISI_DMA_HIP09_DUMP_REGION_C_END_REG },
		{ HISI_DMA_REG_LAYOUT_HIP09,
		  HISI_DMA_HIP09_DUMP_REGION_D_START_REG,
		  HISI_DMA_HIP09_DUMP_REGION_D_END_REG },
	};
	uint32_t i;

	(void)fprintf(f, " common-register:\n");
	for (i = 0; i < RTE_DIM(reg_info); i++) {
		if (hw->reg_layout != reg_info[i].reg_layout)
			continue;
		hisi_dma_dump_range(hw, f, reg_info[i].start, reg_info[i].end);
	}
}
static void
hisi_dma_dump_read_queue(struct hisi_dma_dev *hw, uint32_t qoff,
			 char *buffer, int max_sz)
{
	memset(buffer, 0, max_sz);

	/* Address-related registers are not printed for security reasons. */
	if (qoff == HISI_DMA_QUEUE_SQ_BASE_L_REG ||
	    qoff == HISI_DMA_QUEUE_SQ_BASE_H_REG ||
	    qoff == HISI_DMA_QUEUE_CQ_BASE_L_REG ||
	    qoff == HISI_DMA_QUEUE_CQ_BASE_H_REG) {
		(void)snprintf(buffer, max_sz, "**********");
		return;
	}

	(void)snprintf(buffer, max_sz, "0x%08x", hisi_dma_read_queue(hw, qoff));
}
static void
hisi_dma_dump_queue(struct hisi_dma_dev *hw, FILE *f)
{
#define REG_FMT_LEN 32
	char buf[REG_FMT_LEN] = { 0 };
	uint32_t i;

	(void)fprintf(f, " queue-register:\n");
	for (i = 0; i < HISI_DMA_QUEUE_REGION_SIZE; ) {
		hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
		(void)fprintf(f, " [%2x]: %s", i, buf);
		i += sizeof(uint32_t);
		hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
		(void)fprintf(f, " %s", buf);
		i += sizeof(uint32_t);
		hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
		(void)fprintf(f, " %s", buf);
		i += sizeof(uint32_t);
		hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
		(void)fprintf(f, " %s\n", buf);
		i += sizeof(uint32_t);
	}
}
static int
hisi_dma_dump(const struct rte_dma_dev *dev, FILE *f)
{
	struct hisi_dma_dev *hw = dev->data->dev_private;

	(void)fprintf(f,
		" revision: 0x%x queue_id: %u ring_size: %u\n"
		" ridx: %u cridx: %u\n"
		" sq_head: %u sq_tail: %u cq_sq_head: %u\n"
		" cq_head: %u cqs_completed: %u cqe_vld: %u\n"
		" submitted: %" PRIu64 " completed: %" PRIu64 " errors: %"
		PRIu64 " qfulls: %" PRIu64 "\n",
		hw->revision, hw->queue_id,
		hw->sq_depth_mask > 0 ? hw->sq_depth_mask + 1 : 0,
		hw->ridx, hw->cridx,
		hw->sq_head, hw->sq_tail, hw->cq_sq_head,
		hw->cq_head, hw->cqs_completed, hw->cqe_vld,
		hw->submitted, hw->completed, hw->errors, hw->qfulls);
	hisi_dma_dump_queue(hw, f);
	hisi_dma_dump_common(hw, f);

	return 0;
}
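/*
 * Data path. Descriptors are placed in the SQ ring at sq_tail; the hardware
 * only learns about them when the SQ tail doorbell register is written,
 * either directly by rte_dma_copy() with RTE_DMA_OP_FLAG_SUBMIT or later by
 * rte_dma_submit(). One SQ slot is always left unused so that a full ring
 * can be distinguished from an empty one.
 */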
static int
hisi_dma_copy(void *dev_private, uint16_t vchan,
	      rte_iova_t src, rte_iova_t dst,
	      uint32_t length, uint64_t flags)
{
	struct hisi_dma_dev *hw = dev_private;
	struct hisi_dma_sqe *sqe = &hw->sqe[hw->sq_tail];

	RTE_SET_USED(vchan);

	if (((hw->sq_tail + 1) & hw->sq_depth_mask) == hw->sq_head) {
		hw->qfulls++;
		return -ENOSPC;
	}

	sqe->dw0 = rte_cpu_to_le_32(SQE_OPCODE_M2M);
	sqe->dw1 = 0;
	sqe->dw2 = 0;
	sqe->length = rte_cpu_to_le_32(length);
	sqe->src_addr = rte_cpu_to_le_64(src);
	sqe->dst_addr = rte_cpu_to_le_64(dst);
	hw->sq_tail = (hw->sq_tail + 1) & hw->sq_depth_mask;
	hw->submitted++;

	if (flags & RTE_DMA_OP_FLAG_FENCE)
		sqe->dw0 |= rte_cpu_to_le_32(SQE_FENCE_FLAG);
	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
		rte_write32(rte_cpu_to_le_32(hw->sq_tail), hw->sq_tail_reg);

	return hw->ridx++;
}
static int
hisi_dma_submit(void *dev_private, uint16_t vchan)
{
	struct hisi_dma_dev *hw = dev_private;

	RTE_SET_USED(vchan);
	rte_write32(rte_cpu_to_le_32(hw->sq_tail), hw->sq_tail_reg);

	return 0;
}
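/*
 * Walk the CQ ring and consume completion entries. Each valid CQE carries
 * the SQ head it corresponds to and, on failure, a status code that is
 * stashed into the per-descriptor status array. The valid bit toggles every
 * time the CQ wraps, so cqe_vld is flipped on wrap instead of clearing
 * entries. The CQ head is only written back to the hardware once at least
 * HISI_DMA_CQ_RESERVED completions have accumulated.
 */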
static inline void
hisi_dma_scan_cq(struct hisi_dma_dev *hw)
{
	volatile struct hisi_dma_cqe *cqe;
	uint16_t csq_head = hw->cq_sq_head;
	uint16_t cq_head = hw->cq_head;
	uint16_t count = 0;
	uint64_t misc;

	while (count < hw->cq_depth) {
		cqe = &hw->cqe[cq_head];
		misc = cqe->misc;
		misc = rte_le_to_cpu_64(misc);
		if (FIELD_GET(CQE_VALID_B, misc) != hw->cqe_vld)
			break;

		csq_head = FIELD_GET(CQE_SQ_HEAD_MASK, misc);
		if (unlikely(csq_head > hw->sq_depth_mask)) {
			/**
			 * Defensive programming to prevent overflow of the
			 * status array indexed by csq_head. Only an error
			 * log is emitted.
			 */
			HISI_DMA_ERR(hw, "invalid csq_head: %u!", csq_head);
			count = 0;
			break;
		}
		if (unlikely(misc & CQE_STATUS_MASK))
			hw->status[csq_head] = FIELD_GET(CQE_STATUS_MASK,
							 misc);

		count++;
		cq_head++;
		if (cq_head == hw->cq_depth) {
			hw->cqe_vld = !hw->cqe_vld;
			cq_head = 0;
		}
	}

	if (count == 0)
		return;

	hw->cq_head = cq_head;
	hw->cq_sq_head = (csq_head + 1) & hw->sq_depth_mask;
	hw->cqs_completed += count;
	if (hw->cqs_completed >= HISI_DMA_CQ_RESERVED) {
		rte_write32(rte_cpu_to_le_32(cq_head), hw->cq_head_reg);
		hw->cqs_completed = 0;
	}
}
static inline uint16_t
hisi_dma_calc_cpls(struct hisi_dma_dev *hw, const uint16_t nb_cpls)
{
	uint16_t cpl_num;

	if (hw->cq_sq_head >= hw->sq_head)
		cpl_num = hw->cq_sq_head - hw->sq_head;
	else
		cpl_num = hw->sq_depth_mask + 1 - hw->sq_head + hw->cq_sq_head;
	if (cpl_num > nb_cpls)
		cpl_num = nb_cpls;

	return cpl_num;
}
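/*
 * rte_dma_completed() path: completions are counted up to the first failed
 * descriptor; the failure is signalled through has_error and its status
 * entry is left in place so that hisi_dma_completed_status() can still
 * report it.
 */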
static uint16_t
hisi_dma_completed(void *dev_private,
		   uint16_t vchan, const uint16_t nb_cpls,
		   uint16_t *last_idx, bool *has_error)
{
	struct hisi_dma_dev *hw = dev_private;
	uint16_t sq_head = hw->sq_head;
	uint16_t cpl_num, i;

	RTE_SET_USED(vchan);
	hisi_dma_scan_cq(hw);

	cpl_num = hisi_dma_calc_cpls(hw, nb_cpls);
	for (i = 0; i < cpl_num; i++) {
		if (hw->status[sq_head]) {
			*has_error = true;
			break;
		}
		sq_head = (sq_head + 1) & hw->sq_depth_mask;
	}
	*last_idx = hw->cridx + i - 1;
	if (i > 0) {
		hw->cridx += i;
		hw->sq_head = sq_head;
		hw->completed += i;
	}

	return i;
}
static enum rte_dma_status_code
hisi_dma_convert_status(uint16_t status)
{
	switch (status) {
	case HISI_DMA_STATUS_SUCCESS:
		return RTE_DMA_STATUS_SUCCESSFUL;
	case HISI_DMA_STATUS_INVALID_OPCODE:
		return RTE_DMA_STATUS_INVALID_OPCODE;
	case HISI_DMA_STATUS_INVALID_LENGTH:
		return RTE_DMA_STATUS_INVALID_LENGTH;
	case HISI_DMA_STATUS_USER_ABORT:
		return RTE_DMA_STATUS_USER_ABORT;
	case HISI_DMA_STATUS_REMOTE_READ_ERROR:
	case HISI_DMA_STATUS_AXI_READ_ERROR:
		return RTE_DMA_STATUS_BUS_READ_ERROR;
	case HISI_DMA_STATUS_AXI_WRITE_ERROR:
		return RTE_DMA_STATUS_BUS_WRITE_ERROR;
	case HISI_DMA_STATUS_DATA_POISON:
	case HISI_DMA_STATUS_REMOTE_DATA_POISION:
		return RTE_DMA_STATUS_DATA_POISION;
	case HISI_DMA_STATUS_SQE_READ_ERROR:
	case HISI_DMA_STATUS_SQE_READ_POISION:
		return RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR;
	case HISI_DMA_STATUS_LINK_DOWN_ERROR:
		return RTE_DMA_STATUS_DEV_LINK_ERROR;
	default:
		return RTE_DMA_STATUS_ERROR_UNKNOWN;
	}
}
static uint16_t
hisi_dma_completed_status(void *dev_private,
			  uint16_t vchan, const uint16_t nb_cpls,
			  uint16_t *last_idx, enum rte_dma_status_code *status)
{
	struct hisi_dma_dev *hw = dev_private;
	uint16_t sq_head = hw->sq_head;
	uint16_t cpl_num, i;

	RTE_SET_USED(vchan);
	hisi_dma_scan_cq(hw);

	cpl_num = hisi_dma_calc_cpls(hw, nb_cpls);
	for (i = 0; i < cpl_num; i++) {
		status[i] = hisi_dma_convert_status(hw->status[sq_head]);
		hw->errors += !!status[i];
		hw->status[sq_head] = HISI_DMA_STATUS_SUCCESS;
		sq_head = (sq_head + 1) & hw->sq_depth_mask;
	}
	*last_idx = hw->cridx + cpl_num - 1;
	if (likely(cpl_num > 0)) {
		hw->cridx += cpl_num;
		hw->sq_head = sq_head;
		hw->completed += cpl_num;
	}

	return cpl_num;
}
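/*
 * Free SQ slots left for the application. One slot is always kept unused
 * (see the full-ring check in hisi_dma_copy()), which is why the tail-ahead
 * case uses sq_depth_mask rather than the full ring size.
 */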
static uint16_t
hisi_dma_burst_capacity(const void *dev_private, uint16_t vchan)
{
	const struct hisi_dma_dev *hw = dev_private;
	uint16_t sq_head = hw->sq_head;
	uint16_t sq_tail = hw->sq_tail;

	RTE_SET_USED(vchan);

	return (sq_tail >= sq_head) ? hw->sq_depth_mask - sq_tail + sq_head :
				      sq_head - 1 - sq_tail;
}
static void
hisi_dma_gen_dev_name(const struct rte_pci_device *pci_dev,
		      uint8_t queue_id, char *dev_name, size_t size)
{
	char name[RTE_DEV_NAME_MAX_LEN] = { 0 };

	memset(dev_name, 0, size);
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
	(void)snprintf(dev_name, size, "%s-ch%u", name, queue_id);
}
/**
 * Hardware queue state machine:
 *   Unknown --dmadev_create (reset)--> IDLE
 *   IDLE    --dev_start (enable)-----> queue enabled (RUN/CPL)
 *   RUN     --completed all----------> CPL
 *   CPL     --dev_submit-------------> RUN
 *   enabled --dev_stop (reset)-------> IDLE
 */
static const struct rte_dma_dev_ops hisi_dmadev_ops = {
	.dev_info_get = hisi_dma_info_get,
	.dev_configure = hisi_dma_configure,
	.dev_start = hisi_dma_start,
	.dev_stop = hisi_dma_stop,
	.dev_close = hisi_dma_close,
	.vchan_setup = hisi_dma_vchan_setup,
	.stats_get = hisi_dma_stats_get,
	.stats_reset = hisi_dma_stats_reset,
	.vchan_status = hisi_dma_vchan_status,
	.dev_dump = hisi_dma_dump,
};
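/*
 * Each hardware queue is registered as its own dmadev, named
 * "<pci-address>-ch<queue_id>". The fast-path callbacks are wired into
 * fp_obj and the per-queue doorbell addresses are cached so the data path
 * never has to recompute register offsets.
 */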
static int
hisi_dma_create(struct rte_pci_device *pci_dev, uint8_t queue_id,
		uint8_t revision)
{
#define REG_PCI_BAR_INDEX 2

	char name[RTE_DEV_NAME_MAX_LEN];
	struct rte_dma_dev *dev;
	struct hisi_dma_dev *hw;
	int ret;

	hisi_dma_gen_dev_name(pci_dev, queue_id, name, sizeof(name));
	dev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node,
				   sizeof(*hw));
	if (dev == NULL) {
		HISI_DMA_LOG(ERR, "%s allocate dmadev fail!", name);
		return -EINVAL;
	}

	dev->device = &pci_dev->device;
	dev->dev_ops = &hisi_dmadev_ops;
	dev->fp_obj->dev_private = dev->data->dev_private;
	dev->fp_obj->copy = hisi_dma_copy;
	dev->fp_obj->submit = hisi_dma_submit;
	dev->fp_obj->completed = hisi_dma_completed;
	dev->fp_obj->completed_status = hisi_dma_completed_status;
	dev->fp_obj->burst_capacity = hisi_dma_burst_capacity;

	hw = dev->data->dev_private;
	hw->data = dev->data;
	hw->revision = revision;
	hw->reg_layout = hisi_dma_reg_layout(revision);
	hw->io_base = pci_dev->mem_resource[REG_PCI_BAR_INDEX].addr;
	hw->queue_id = queue_id;
	hw->sq_tail_reg = hisi_dma_queue_regaddr(hw,
						 HISI_DMA_QUEUE_SQ_TAIL_REG);
	hw->cq_head_reg = hisi_dma_queue_regaddr(hw,
						 HISI_DMA_QUEUE_CQ_HEAD_REG);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ret = hisi_dma_reset_hw(hw);
		if (ret) {
			HISI_DMA_LOG(ERR, "%s init device fail!", name);
			(void)rte_dma_pmd_release(name);
			return -EIO;
		}
	}

	dev->state = RTE_DMA_DEV_READY;
	HISI_DMA_LOG(DEBUG, "%s create dmadev success!", name);

	return 0;
}
static int
hisi_dma_check_revision(struct rte_pci_device *pci_dev, const char *name,
			uint8_t *out_revision)
{
	uint8_t revision;
	int ret;

	ret = rte_pci_read_config(pci_dev, &revision, 1,
				  HISI_DMA_PCI_REVISION_ID_REG);
	if (ret != 1) {
		HISI_DMA_LOG(ERR, "%s read PCI revision failed!", name);
		return -EINVAL;
	}
	if (hisi_dma_reg_layout(revision) == HISI_DMA_REG_LAYOUT_INVALID) {
		HISI_DMA_LOG(ERR, "%s revision: 0x%x not supported!",
			     name, revision);
		return -EINVAL;
	}

	*out_revision = revision;
	return 0;
}
static int
hisi_dma_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	char name[RTE_DEV_NAME_MAX_LEN] = { 0 };
	uint8_t revision;
	uint8_t i;
	int ret;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	if (pci_dev->mem_resource[2].addr == NULL) {
		HISI_DMA_LOG(ERR, "%s BAR2 is NULL!", name);
		return -ENODEV;
	}

	ret = hisi_dma_check_revision(pci_dev, name, &revision);
	if (ret)
		return ret;
	HISI_DMA_LOG(DEBUG, "%s read PCI revision: 0x%x", name, revision);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		hisi_dma_init_gbl(pci_dev->mem_resource[2].addr, revision);

	for (i = 0; i < HISI_DMA_MAX_HW_QUEUES; i++) {
		ret = hisi_dma_create(pci_dev, i, revision);
		if (ret) {
			HISI_DMA_LOG(ERR, "%s create dmadev %u failed!",
				     name, i);
			break;
		}
	}

	return ret;
}
static int
hisi_dma_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_DEV_NAME_MAX_LEN];
	uint8_t i;
	int ret;

	for (i = 0; i < HISI_DMA_MAX_HW_QUEUES; i++) {
		hisi_dma_gen_dev_name(pci_dev, i, name, sizeof(name));
		ret = rte_dma_pmd_release(name);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct rte_pci_id pci_id_hisi_dma_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HISI_DMA_DEVICE_ID) },
	{ .vendor_id = 0, }, /* sentinel */
};

static struct rte_pci_driver hisi_dma_pmd_drv = {
	.id_table = pci_id_hisi_dma_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = hisi_dma_probe,
	.remove = hisi_dma_remove,
};

RTE_PMD_REGISTER_PCI(dma_hisilicon, hisi_dma_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(dma_hisilicon, pci_id_hisi_dma_map);
RTE_PMD_REGISTER_KMOD_DEP(dma_hisilicon, "vfio-pci");