1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/*
 * Query the AF over the mailbox for the number of free REE LFs (queues)
 * available to this VF, and store it in *nb_queues.
 * NOTE(review): this view is fragmentary — declarations, error checks and
 * the return path are elided between the visible lines.
 */
12 roc_ree_available_queues_get(struct roc_ree_vf *vf, uint16_t *nb_queues)
14 struct free_rsrcs_rsp *rsp;
15 struct dev *dev = vf->dev;
/* Ask the AF for its current free-resource counters */
18 mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
20 ret = mbox_process_msg(dev->mbox, (void *)&rsp);
/* Select the count for the REE block this VF is bound to (REE0 vs REE1) */
24 if (vf->block_address == RVU_BLOCK_ADDR_REE0)
25 *nb_queues = rsp->ree0;
/* presumably the else branch (the `else` line is elided in this view) */
27 *nb_queues = rsp->ree1;
/*
 * Read the maximum number of matches supported by the device from the
 * REE_AF_REEXM_MAX_MATCH AF register; result goes to *max_matches.
 * NOTE(review): the extraction of the value from `val` into *max_matches
 * is elided in this view.
 */
32 roc_ree_max_matches_get(struct roc_ree_vf *vf, uint8_t *max_matches)
37 ret = roc_ree_af_reg_read(vf, REE_AF_REEXM_MAX_MATCH, &val);
/*
 * Ask the AF to attach `nb_queues` REE LFs to this VF and record the
 * attached count in vf->nb_queues on success.
 * NOTE(review): error-return lines are elided in this view.
 */
46 roc_ree_queues_attach(struct roc_ree_vf *vf, uint8_t nb_queues)
48 struct rsrc_attach_req *req;
52 /* Ask AF to attach required LFs */
53 req = mbox_alloc_msg_attach_resources(mbox);
/* Allocation failure path (the surrounding `if` is elided here) */
55 plt_err("Could not allocate mailbox message");
/* Request `nb_queues` REE LFs on this VF's REE block */
60 req->reelfs = nb_queues;
61 req->ree_blkaddr = vf->block_address;
63 if (mbox_process(mbox) < 0)
66 /* Update number of attached queues */
67 vf->nb_queues = nb_queues;
/*
 * Ask the AF to detach all REE LFs from this VF.
 * NOTE(review): request-field setup and error-return lines are elided;
 * presumably vf->nb_queues is reset after the final comment below.
 */
73 roc_ree_queues_detach(struct roc_ree_vf *vf)
75 struct rsrc_detach_req *req;
79 req = mbox_alloc_msg_detach_resources(mbox);
/* Allocation failure path (the surrounding `if` is elided here) */
81 plt_err("Could not allocate mailbox message");
86 if (mbox_process(mbox) < 0)
89 /* Queues have been detached */
/*
 * Fetch the MSI-X vector offset of each attached REE LF from the AF and
 * cache it in vf->lf_msixoff[], choosing the REE0 or REE1 offset table
 * according to the VF's block address.
 * NOTE(review): error handling between mailbox call and loop is elided.
 */
96 roc_ree_msix_offsets_get(struct roc_ree_vf *vf)
98 struct msix_offset_rsp *rsp;
102 /* Get REE MSI-X vector offsets */
103 mbox = vf->dev->mbox;
104 mbox_alloc_msg_msix_offset(mbox);
106 ret = mbox_process_msg(mbox, (void *)&rsp);
/* One offset per attached queue (LF) */
110 for (i = 0; i < vf->nb_queues; i++) {
111 if (vf->block_address == RVU_BLOCK_ADDR_REE0)
112 vf->lf_msixoff[i] = rsp->ree0_lf_msixoff[i];
/* presumably the else branch (the `else` line is elided in this view) */
114 vf->lf_msixoff[i] = rsp->ree1_lf_msixoff[i];
115 plt_ree_dbg("lf_msixoff[%d] 0x%x", i, vf->lf_msixoff[i]);
/*
 * Helper: kick the already-composed mailbox message on channel 0 and
 * block until the AF responds. Logs on failure to get a response.
 */
122 ree_send_mbox_msg(struct roc_ree_vf *vf)
124 struct mbox *mbox = vf->dev->mbox;
/* Trigger transmission of the queued message */
127 mbox_msg_send(mbox, 0);
/* Wait (blocking) for the AF's reply on the same channel */
129 ret = mbox_wait_for_rsp(mbox, 0);
131 plt_err("Could not get mailbox response");
/*
 * Configure one REE LF via the REE_CONFIG_LF mailbox message:
 * queue index `lf`, priority flag `pri`, and instruction queue `size`.
 * NOTE(review): assignments of req->lf and req->size are elided here.
 */
139 roc_ree_config_lf(struct roc_ree_vf *vf, uint8_t lf, uint8_t pri, uint32_t size)
141 struct ree_lf_req_msg *req;
145 mbox = vf->dev->mbox;
146 req = mbox_alloc_msg_ree_config_lf(mbox);
/* Allocation failure path (the surrounding `if` is elided here) */
148 plt_err("Could not allocate mailbox message");
/* Normalize priority to a strict 0/1 flag */
153 req->pri = pri ? 1 : 0;
155 req->blkaddr = vf->block_address;
157 ret = mbox_process(mbox);
159 plt_err("Could not get mailbox response");
/*
 * Read an REE AF register through the mailbox: compose an
 * MBOX_MSG_REE_RD_WR_REGISTER request (read direction), send it, then
 * locate the response in the mailbox rx area and extract the value.
 * NOTE(review): the is_write=0 assignment and the *val extraction from
 * the response are elided in this view.
 */
166 roc_ree_af_reg_read(struct roc_ree_vf *vf, uint64_t reg, uint64_t *val)
168 struct ree_rd_wr_reg_msg *msg;
169 struct mbox_dev *mdev;
173 mbox = vf->dev->mbox;
174 mdev = &mbox->dev[0];
/* Raw alloc on channel 0; request and response share the same layout */
175 msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
176 mbox, 0, sizeof(*msg), sizeof(*msg));
/* Allocation failure path (the surrounding `if` is elided here) */
178 plt_err("Could not allocate mailbox message");
/* Fill the generic mailbox header by hand (raw message, no helper) */
182 msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
183 msg->hdr.sig = MBOX_REQ_SIG;
184 msg->hdr.pcifunc = vf->dev->pf_func;
186 msg->reg_offset = reg;
188 msg->blkaddr = vf->block_address;
190 ret = ree_send_mbox_msg(vf);
/* Response sits just past the aligned mbox header in the rx region */
194 off = mbox->rx_start +
195 RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
196 msg = (struct ree_rd_wr_reg_msg *)((uintptr_t)mdev->mbase + off);
/*
 * Write an REE AF register through the mailbox: compose an
 * MBOX_MSG_REE_RD_WR_REGISTER request (write direction) and send it.
 * NOTE(review): the is_write=1 and value assignments are elided here.
 */
204 roc_ree_af_reg_write(struct roc_ree_vf *vf, uint64_t reg, uint64_t val)
206 struct ree_rd_wr_reg_msg *msg;
209 mbox = vf->dev->mbox;
/* Raw alloc on channel 0; request and response share the same layout */
210 msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
211 mbox, 0, sizeof(*msg), sizeof(*msg));
/* Allocation failure path (the surrounding `if` is elided here) */
213 plt_err("Could not allocate mailbox message");
/* Fill the generic mailbox header by hand (raw message, no helper) */
217 msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
218 msg->hdr.sig = MBOX_REQ_SIG;
219 msg->hdr.pcifunc = vf->dev->pf_func;
221 msg->reg_offset = reg;
223 msg->blkaddr = vf->block_address;
/* Fire-and-wait; propagate the mailbox status to the caller */
225 return ree_send_mbox_msg(vf);
/*
 * Retrieve the rule database (and, in a second request, the incremental
 * rule db) from the AF into caller-supplied buffers, validating that
 * each destination buffer is large enough before copying.
 * NOTE(review): heavily elided — the chunked-read loop structure, NULL
 * checks, and error returns are not visible in this view; the first
 * request presumably iterates in REE_RULE_DB-sized chunks (rule_db_ptr
 * advances by rsp->len each pass).
 */
229 roc_ree_rule_db_get(struct roc_ree_vf *vf, char *rule_db, uint32_t rule_db_len,
230 char *rule_dbi, uint32_t rule_dbi_len)
232 struct ree_rule_db_get_req_msg *req;
233 struct ree_rule_db_get_rsp_msg *rsp;
234 char *rule_db_ptr = (char *)rule_db;
239 mbox = vf->dev->mbox;
/* NULL-pointer guard message (the surrounding `if` is elided here) */
241 plt_err("Couldn't return rule db due to NULL pointer");
/* --- First request: fetch the main rule db --- */
246 req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
247 mbox, 0, sizeof(*req), sizeof(*rsp));
249 plt_err("Could not allocate mailbox message");
253 req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
254 req->hdr.sig = MBOX_REQ_SIG;
255 req->hdr.pcifunc = vf->dev->pf_func;
256 req->blkaddr = vf->block_address;
259 ret = mbox_process_msg(mbox, (void *)&rsp);
/* Guard against overflowing the caller's rule_db buffer */
262 if (rule_db_len < len + rsp->len) {
263 plt_err("Rule db size is too small");
266 mbox_memcpy(rule_db_ptr, rsp->rule_db, rsp->len);
/* Advance the write cursor past the chunk just copied */
268 rule_db_ptr = rule_db_ptr + rsp->len;
/* --- Second request: fetch the incremental rule db (dbi) --- */
273 req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
274 mbox, 0, sizeof(*req), sizeof(*rsp));
276 plt_err("Could not allocate mailbox message");
280 req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
281 req->hdr.sig = MBOX_REQ_SIG;
282 req->hdr.pcifunc = vf->dev->pf_func;
283 req->blkaddr = vf->block_address;
287 ret = mbox_process_msg(mbox, (void *)&rsp);
/* Guard against overflowing the caller's rule_dbi buffer */
290 if (rule_dbi_len < rsp->len) {
291 plt_err("Rule dbi size is too small");
294 mbox_memcpy(rule_dbi, rsp->rule_db, rsp->len);
/*
 * Query the AF for the lengths of the rule database (*rule_db_len) and
 * the incremental rule database (*rule_dbi_len). Either output pointer
 * may be NULL if the caller does not need that length.
 * NOTE(review): the response-size argument of mbox_alloc_msg_rsp and
 * error returns are elided in this view.
 */
300 roc_ree_rule_db_len_get(struct roc_ree_vf *vf, uint32_t *rule_db_len,
301 uint32_t *rule_dbi_len)
303 struct ree_rule_db_len_rsp_msg *rsp;
304 struct ree_req_msg *req;
308 mbox = vf->dev->mbox;
309 req = (struct ree_req_msg *)mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
/* Allocation failure path (the surrounding `if` is elided here) */
312 plt_err("Could not allocate mailbox message");
/* Fill the generic mailbox header by hand (raw message, no helper) */
316 req->hdr.id = MBOX_MSG_REE_RULE_DB_LEN_GET;
317 req->hdr.sig = MBOX_REQ_SIG;
318 req->hdr.pcifunc = vf->dev->pf_func;
319 req->blkaddr = vf->block_address;
320 ret = mbox_process_msg(mbox, (void *)&rsp);
/* Only write back the lengths the caller asked for */
323 if (rule_db_len != NULL)
324 *rule_db_len = rsp->len;
325 if (rule_dbi_len != NULL)
326 *rule_dbi_len = rsp->inc_len;
/*
 * Helper: program a rule database blob (`db`, `db_len` bytes) into the
 * AF in REE_RULE_DB_REQ_BLOCK_SIZE chunks over repeated
 * MBOX_MSG_REE_RULE_DB_PROG messages. `inc` marks an incremental
 * database; the last parameter (elided from this view) presumably marks
 * the dbi vs db variant — TODO confirm against the full source.
 * NOTE(review): the enclosing while-loop header, the short-final-chunk
 * assignment, and the offset update are elided between visible lines.
 */
332 ree_db_msg(struct roc_ree_vf *vf, const char *db, uint32_t db_len, int inc,
335 uint32_t len_left = db_len, offset = 0;
336 struct ree_rule_db_prog_req_msg *req;
337 const char *rule_db_ptr = db;
342 mbox = vf->dev->mbox;
344 req = (struct ree_rule_db_prog_req_msg *)mbox_alloc_msg_rsp(
345 mbox, 0, sizeof(*req), sizeof(*rsp));
/* Allocation failure path (the surrounding `if` is elided here) */
347 plt_err("Could not allocate mailbox message");
350 req->hdr.id = MBOX_MSG_REE_RULE_DB_PROG;
351 req->hdr.sig = MBOX_REQ_SIG;
352 req->hdr.pcifunc = vf->dev->pf_func;
/* Position of this chunk within the whole database */
353 req->offset = offset;
354 req->total_len = db_len;
/* Default to a full block; shortened below for the final chunk */
355 req->len = REE_RULE_DB_REQ_BLOCK_SIZE;
356 req->is_incremental = inc;
358 req->blkaddr = vf->block_address;
/* Final (short) chunk: body elided in this view */
360 if (len_left < REE_RULE_DB_REQ_BLOCK_SIZE) {
364 mbox_memcpy(req->rule_db, rule_db_ptr, req->len);
365 ret = mbox_process_msg(mbox, (void *)&rsp);
367 plt_err("Programming mailbox processing failed");
/* Advance past the chunk just sent */
370 len_left -= req->len;
372 rule_db_ptr = rule_db_ptr + req->len;
/*
 * Program a complete rule database into the device. If a non-empty
 * incremental db (rule_dbi) is supplied, it is programmed first, then
 * the main rule db. Rejects an empty or NULL main db, and a NULL dbi
 * when incremental mode was requested.
 * NOTE(review): error-return lines after each guard are elided.
 */
378 roc_ree_rule_db_prog(struct roc_ree_vf *vf, const char *rule_db,
379 uint32_t rule_db_len, const char *rule_dbi,
380 uint32_t rule_dbi_len)
384 if (rule_db_len == 0) {
385 plt_err("Couldn't program empty rule db");
/* A non-zero dbi length selects incremental programming */
388 inc = (rule_dbi_len != 0);
389 if ((rule_db == NULL) || (inc && (rule_dbi == NULL))) {
390 plt_err("Couldn't program NULL rule db");
/* Incremental db goes first (last arg 1 = dbi — TODO confirm) */
394 ret = ree_db_msg(vf, rule_dbi, rule_dbi_len, inc, 1);
398 return ree_db_msg(vf, rule_db, rule_db_len, inc, 0);
/*
 * Map the device's PF number to its RVU REE block address (REE0/REE1).
 * NOTE(review): the `if (pf == REE0_PF)` line and the fallthrough
 * return (presumably 0 for "not an REE PF") are elided in this view.
 */
402 ree_get_blkaddr(struct dev *dev)
406 pf = dev_get_pf(dev->pf_func);
408 return RVU_BLOCK_ADDR_REE0;
409 else if (pf == REE1_PF)
410 return RVU_BLOCK_ADDR_REE1;
/*
 * Return the BAR2 base address of the LF backing queue pair `qp_id`.
 */
416 roc_ree_qp_get_base(struct roc_ree_vf *vf, uint16_t qp_id)
418 return REE_LF_BAR2(vf, qp_id);
/*
 * MISC-error interrupt handler for a single REE LF. `param` is the LF's
 * BAR2 base address; the handler reads the pending MISC_INT bits, logs
 * them, and writes them back to clear (W1C semantics assumed — TODO
 * confirm against the register spec).
 */
422 roc_ree_lf_err_intr_handler(void *param)
424 uintptr_t base = (uintptr_t)param;
/* LF index is encoded in bits [19:12] of the BAR2 base address */
428 lf_id = (base >> 12) & 0xFF;
430 intr = plt_read64(base + REE_LF_MISC_INT);
434 plt_ree_dbg("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
436 /* Clear interrupt */
437 plt_write64(intr, base + REE_LF_MISC_INT);
/*
 * Disable MISC error interrupts on one LF and detach its IRQ handler
 * from the given MSI-X vector.
 */
441 roc_ree_lf_err_intr_unregister(struct roc_ree_vf *vf, uint16_t msix_off,
444 struct rte_pci_device *pci_dev = vf->pci_dev;
446 /* Disable error interrupts */
447 plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
/* Remove the handler registered for this LF's base/vector pair */
449 dev_irq_unregister(pci_dev->intr_handle,
450 roc_ree_lf_err_intr_handler, (void *)base, msix_off);
/*
 * Unregister the error interrupt handler of every attached queue and
 * clear the registered flag.
 */
454 roc_ree_err_intr_unregister(struct roc_ree_vf *vf)
459 for (i = 0; i < vf->nb_queues; i++) {
460 base = REE_LF_BAR2(vf, i);
461 roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[i], base);
464 vf->err_intr_registered = 0;
/*
 * Register the MISC-error interrupt handler for one LF: mask errors
 * first, attach the IRQ handler on `msix_off`, then unmask via the
 * W1S enable register.
 * NOTE(review): the error check after dev_irq_register and the return
 * are elided in this view.
 */
468 roc_ree_lf_err_intr_register(struct roc_ree_vf *vf, uint16_t msix_off,
471 struct rte_pci_device *pci_dev = vf->pci_dev;
474 /* Disable error interrupts */
475 plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
477 /* Register error interrupt handler */
478 ret = dev_irq_register(pci_dev->intr_handle,
479 roc_ree_lf_err_intr_handler, (void *)base,
484 /* Enable error interrupts */
485 plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1S);
/*
 * Register error interrupts for every attached queue. First validates
 * that all cached MSI-X offsets are usable, then registers per-LF
 * handlers; on a mid-loop failure it rolls back the ones already
 * registered (intr_unregister label, elided in this view).
 */
491 roc_ree_err_intr_register(struct roc_ree_vf *vf)
/* Validate all MSI-X offsets up front so we fail before registering any */
496 for (i = 0; i < vf->nb_queues; i++) {
497 if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
498 plt_err("Invalid REE LF MSI-X offset: 0x%x",
/* Register a handler per queue; bail out with rollback on failure */
504 for (i = 0; i < vf->nb_queues; i++) {
505 base = REE_LF_BAR2(vf, i);
506 ret = roc_ree_lf_err_intr_register(vf, vf->lf_msixoff[i], base);
508 goto intr_unregister;
511 vf->err_intr_registered = 1;
515 /* Unregister the ones already registered */
516 for (j = 0; j < i; j++) {
517 base = REE_LF_BAR2(vf, j);
518 roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[j], base);
/*
 * Bring up one instruction queue: configure the LF (priority + size),
 * program the queue's DMA base address into SBUF_ADDR, then set the
 * enable bit in LF_ENA. The ordering constraint is noted inline.
 * NOTE(review): the return value of roc_ree_config_lf is not checked on
 * the visible line — possibly handled on an elided line; verify.
 */
524 roc_ree_iq_enable(struct roc_ree_vf *vf, const struct roc_ree_qp *qp,
525 uint8_t pri, uint32_t size_div2)
529 /* Set instruction queue size and priority */
530 roc_ree_config_lf(vf, qp->id, pri, size_div2);
532 /* Set instruction queue base address */
533 /* Should be written after SBUF_CTL and before LF_ENA */
/* Read-modify-write: replace only the PTR field, keep other bits */
535 val = plt_read64(qp->base + REE_LF_SBUF_ADDR);
536 val &= ~REE_LF_SBUF_ADDR_PTR_MASK;
/* DMA address is 128-byte aligned, hence the >> 7 before field-packing */
537 val |= FIELD_PREP(REE_LF_SBUF_ADDR_PTR_MASK, qp->iq_dma_addr >> 7);
538 plt_write64(val, qp->base + REE_LF_SBUF_ADDR);
540 /* Enable instruction queue */
542 val = plt_read64(qp->base + REE_LF_ENA);
543 val &= ~REE_LF_ENA_ENA_MASK;
544 val |= FIELD_PREP(REE_LF_ENA_ENA_MASK, 1);
545 plt_write64(val, qp->base + REE_LF_ENA);
/*
 * Disable one instruction queue by clearing the enable bit in LF_ENA
 * (read-modify-write so other fields in the register are preserved).
 */
551 roc_ree_iq_disable(struct roc_ree_qp *qp)
555 /* Stop instruction execution */
556 val = plt_read64(qp->base + REE_LF_ENA);
557 val &= ~REE_LF_ENA_ENA_MASK;
/* FIELD_PREP with 0 is a no-op; kept symmetric with the enable path */
558 val |= FIELD_PREP(REE_LF_ENA_ENA_MASK, 0);
559 plt_write64(val, qp->base + REE_LF_ENA);
/*
 * Initialize a ROC REE VF: validate arguments, init the underlying
 * device, resolve the REE block address, discover available queues and
 * the max-match limit, and clamp both against per-VF limits.
 * NOTE(review): error-return statements after each plt_err and the
 * final success return are elided in this view.
 */
563 roc_ree_dev_init(struct roc_ree_vf *vf)
565 struct plt_pci_device *pci_dev;
568 uint8_t max_matches = 0;
569 uint16_t nb_queues = 0;
/* Basic argument validation before touching anything */
572 if (vf == NULL || vf->pci_dev == NULL)
/* Private state must fit in the caller-provided opaque area */
575 PLT_STATIC_ASSERT(sizeof(struct ree) <= ROC_REE_MEM_SZ);
577 ree = roc_ree_to_ree_priv(vf);
578 memset(ree, 0, sizeof(*ree));
581 pci_dev = vf->pci_dev;
584 /* Initialize device */
585 rc = dev_init(dev, pci_dev);
587 plt_err("Failed to init roc device");
591 /* Get REE block address */
592 vf->block_address = ree_get_blkaddr(dev);
/* A zero block address means this PF is not an REE PF */
593 if (!vf->block_address) {
594 plt_err("Could not determine block PF number");
598 /* Get number of queues available on the device */
599 rc = roc_ree_available_queues_get(vf, &nb_queues);
601 plt_err("Could not determine the number of queues available");
605 /* Don't exceed the limits set per VF */
606 nb_queues = RTE_MIN(nb_queues, REE_MAX_QUEUES_PER_VF);
608 if (nb_queues == 0) {
609 plt_err("No free queues available on the device");
613 vf->max_queues = nb_queues;
615 plt_ree_dbg("Max queues supported by device: %d", vf->max_queues);
617 /* Get number of maximum matches supported on the device */
618 rc = roc_ree_max_matches_get(vf, &max_matches);
620 plt_err("Could not determine the maximum matches supported");
623 /* Don't exceed the limits set per VF */
624 max_matches = RTE_MIN(max_matches, REE_MAX_MATCHES_PER_VF);
625 if (max_matches == 0) {
626 plt_err("Could not determine the maximum matches supported");
630 vf->max_matches = max_matches;
632 plt_ree_dbg("Max matches supported by device: %d", vf->max_matches);
/*
 * Tear down the VF by finalizing the underlying device; propagates
 * dev_fini's status. NOTE(review): any pre-teardown steps between the
 * signature and the return are elided in this view.
 */
638 roc_ree_dev_fini(struct roc_ree_vf *vf)
646 return dev_fini(vf->dev, vf->pci_dev);