1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2015-2018 Atomic Rules LLC
/* Return the total number of hardware queues this MPU instance reports
 * (read directly from the MPU's hardware-info register block). */
11 ark_api_num_queues(struct ark_mpu_t *mpu)
13 return mpu->hw.num_queues;
/* Number of hardware queues available to each port.
 * NOTE(review): integer division — presumably hw.num_queues is an exact
 * multiple of ark_ports and ark_ports != 0; confirm against callers. */
17 ark_api_num_queues_per_port(struct ark_mpu_t *mpu, uint16_t ark_ports)
19 return mpu->hw.num_queues / ark_ports;
/* Sanity-check that the memory-mapped MPU module is present and compatible:
 * the module ID, module version, and hardware object size must all match
 * what this driver was built against.  On mismatch, log the observed
 * id/version bytes and the hardware geometry for diagnosis.
 * NOTE(review): return statements are outside this excerpt — presumably
 * returns nonzero/negative on mismatch and 0 on success; confirm. */
23 ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size)
27 version = mpu->id.vernum;
/* All three conditions must hold; any mismatch means wrong or missing HW. */
28 if (mpu->id.idnum != ARK_MPU_MODID ||
29 version != ARK_MPU_MODVER ||
30 mpu->hw.obj_size != obj_size) {
/* Dump the raw id/version registers as ASCII for easier field debugging. */
32 " MPU module not found as expected %08x"
33 " \"%c%c%c%c %c%c%c%c\"\n",
35 mpu->id.id[0], mpu->id.id[1],
36 mpu->id.id[2], mpu->id.id[3],
37 mpu->id.ver[0], mpu->id.ver[1],
38 mpu->id.ver[2], mpu->id.ver[3]);
/* Second log line: hardware geometry vs. the obj_size the caller expected. */
40 " MPU HW num_queues: %u hw_depth %u,"
41 " obj_size: %u, obj_per_mrr: %u"
42 " Expected size %u\n",
/* Halt the MPU by writing the STOP command to its command register. */
54 ark_mpu_stop(struct ark_mpu_t *mpu)
56 mpu->cfg.command = MPU_CMD_STOP;
/* Start the MPU by writing the RUN command to its command register. */
60 ark_mpu_start(struct ark_mpu_t *mpu)
62 mpu->cfg.command = MPU_CMD_RUN;
/* Reset the MPU: issue a soft RESET, poll the command register until the
 * hardware reports IDLE, and escalate to FORCE_RESET if the soft reset did
 * not complete.  Returns nonzero if the MPU is still not idle afterwards
 * (i.e. reset failed), 0 on success.
 * NOTE(review): the poll-loop body (delay/bound) is outside this excerpt —
 * presumably a bounded busy-wait with a short sleep; confirm. */
66 ark_mpu_reset(struct ark_mpu_t *mpu)
70 mpu->cfg.command = MPU_CMD_RESET;
/* Wait for the hardware to acknowledge the reset by returning to IDLE. */
73 while (mpu->cfg.command != MPU_CMD_IDLE) {
/* Soft reset timed out — try the heavier force-reset path. */
78 if (mpu->cfg.command != MPU_CMD_IDLE) {
79 mpu->cfg.command = MPU_CMD_FORCE_RESET;
/* Nonzero (failure) if even the force reset left the MPU non-idle. */
82 return mpu->cfg.command != MPU_CMD_IDLE;
/* Program the MPU's descriptor ring: base IOVA, size, and index mask, then
 * zero the software-producer / hardware-consumer indices.  ring_size must be
 * a power of two so that (ring_size - 1) forms a valid wrap-around mask. */
86 ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring, uint32_t ring_size,
/* Reject non-power-of-two sizes up front; the mask trick below requires it. */
91 if (!rte_is_power_of_2(ring_size)) {
92 ARK_PMD_LOG(ERR, "Invalid ring size for MPU %d\n",
97 mpu->cfg.ring_base = ring;
98 mpu->cfg.ring_size = ring_size;
99 mpu->cfg.ring_mask = ring_size - 1;
/* TX moves one object at a time; RX moves in hardware-burst (MRR) units.
 * NOTE(review): is_tx is presumably a parameter truncated from view. */
100 mpu->cfg.min_host_move = is_tx ? 1 : mpu->hw.obj_per_mrr;
101 mpu->cfg.min_hw_move = mpu->hw.obj_per_mrr;
/* Start with an empty ring: both indices at zero. */
102 mpu->cfg.sw_prod_index = 0;
103 mpu->cfg.hw_cons_index = 0;
/* Debug helper: log the current producer/consumer indices for queue qid,
 * tagged with a caller-supplied code string to identify the call site. */
108 ark_mpu_dump(struct ark_mpu_t *mpu, const char *code, uint16_t qid)
110 /* DUMP to see that we have started */
111 ARK_PMD_LOG(DEBUG, "MPU: %s Q: %3u sw_prod %u, hw_cons: %u\n",
113 mpu->cfg.sw_prod_index, mpu->cfg.hw_cons_index);
117 ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t q_id)
119 ARK_PMD_LOG(DEBUG, "MPU Setup Q: %u"
122 "ring_base", mpu->cfg.ring_base