2 * SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2010-2014 Intel Corporation
9 #include <rte_bus_vdev.h>
10 #include <rte_errno.h>
11 #include <rte_cycles.h>
12 #include <rte_memzone.h>
14 #include "opdl_evdev.h"
15 #include "opdl_ring.h"
/*
 * NOTE(review): this extract is missing interleaved source lines, so the
 * fragments below do not form a complete body. Comments describe only what
 * the visible lines show -- confirm against the full file.
 */
/* Validate an enqueue attempt on a port and update per-port statistics.
 * Visible behavior: when validation is enabled, each event's queue_id is
 * checked against the port's expected next external queue id and a PMD
 * error is logged on mismatch; RX/async ports account claim statistics
 * and timestamp the claim window with rte_rdtsc().
 */
19 static __rte_always_inline uint32_t
20 enqueue_check(struct opdl_port *p,
21 const struct rte_event ev[],
/* Optional validation pass: every event must target the expected queue. */
27 if (p->opdl->do_validation) {
29 for (i = 0; i < num; i++) {
30 if (ev[i].queue_id != p->next_external_qid) {
31 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
32 "ERROR - port:[%u] - event wants"
33 " to enq to q_id[%u],"
34 " but should be [%u]",
35 opdl_pmd_dev_id(p->opdl),
38 p->next_external_qid);
/* Statistics accounting applies to pure-RX and async ports. */
45 if (p->p_type == OPDL_PURE_RX_PORT ||
46 p->p_type == OPDL_ASYNC_PORT) {
49 p->port_stat[claim_pkts_requested] += num;
50 p->port_stat[claim_pkts_granted] += num_events;
51 p->port_stat[claim_non_empty]++;
/* Start of a non-empty claim window; matched by the cycle total below. */
52 p->start_cycles = rte_rdtsc();
54 p->port_stat[claim_empty]++;
/* Close an open claim window and accumulate its cycle count. */
58 if (p->start_cycles) {
59 uint64_t end_cycles = rte_rdtsc();
60 p->port_stat[total_cycles] +=
61 end_cycles - p->start_cycles;
/* Fast (no-validation) path: presumably only the first event's queue id
 * is sanity-checked -- TODO confirm against the full source. */
66 ev[0].queue_id != p->next_external_qid) {
/*
 * NOTE(review): sampled extract -- interleaved source lines are missing.
 */
/* Post-dequeue bookkeeping for a port: when validation is enabled, the
 * dequeued events' queue ids are rewritten (visible fragment assigns from
 * the queue's external_qid) and claim statistics are updated, mirroring
 * the accounting in enqueue_check().
 */
75 static __rte_always_inline void
76 update_on_dequeue(struct opdl_port *p,
77 struct rte_event ev[],
81 if (p->opdl->do_validation) {
83 for (i = 0; i < num; i++)
85 p->opdl->queue[p->queue_id].external_qid;
/* Claim statistics: requested vs. granted, plus empty/non-empty counts. */
89 p->port_stat[claim_pkts_requested] += num;
90 p->port_stat[claim_pkts_granted] += num_events;
91 p->port_stat[claim_non_empty]++;
92 p->start_cycles = rte_rdtsc();
94 p->port_stat[claim_empty]++;
/* Non-validation path: same external_qid assignment -- TODO confirm. */
100 p->opdl->queue[p->queue_id].external_qid;
/* Error-stub enqueue handler, installed on ports that must never enqueue
 * (e.g. pure TX ports in opdl_add_event_handlers). Body not visible in
 * this extract -- presumably logs/sets rte_errno and returns 0; confirm.
 */
112 opdl_rx_error_enqueue(struct opdl_port *p,
113 const struct rte_event ev[],
128 * This function handles enqueue for a single input stage_inst with
129 * threadsafe disabled or enabled. eg 1 thread using a stage_inst or
130 * multiple threads sharing a stage_inst
/* NOTE(review): sampled extract -- some body lines are missing. */
134 opdl_rx_enqueue(struct opdl_port *p,
135 const struct rte_event ev[],
138 uint16_t enqueued = 0;
/* Push the burst into the opdl ring owned by this port's enqueue stage. */
140 enqueued = opdl_ring_input(opdl_stage_get_opdl_ring(p->enq_stage_inst),
/* Validate and account via enqueue_check(); failure handling not visible. */
144 if (!enqueue_check(p, ev, num, enqueued))
/* Error-stub dequeue handler, installed on ports that must never dequeue
 * (e.g. pure RX ports in opdl_add_event_handlers). Body not visible in
 * this extract -- presumably logs/sets rte_errno and returns 0; confirm.
 */
160 opdl_tx_error_dequeue(struct opdl_port *p,
161 struct rte_event ev[],
174 * TX single threaded claim
176 * This function handles dequeue for a single worker stage_inst with
177 * threadsafe disabled. eg 1 thread using an stage_inst
/* NOTE(review): sampled extract -- some body lines are missing. */
181 opdl_tx_dequeue_single_thread(struct opdl_port *p,
182 struct rte_event ev[],
187 struct opdl_ring *ring;
/* Copy events out of the ring owned by this port's dequeue stage. */
189 ring = opdl_stage_get_opdl_ring(p->deq_stage_inst);
191 returned = opdl_ring_copy_to_burst(ring,
/* Fix up queue ids / stats on the dequeued burst. */
197 update_on_dequeue(p, ev, num, returned);
203 * TX multi threaded claim
205 * This function handles dequeue for multiple worker stage_inst with
206 * threadsafe disabled. eg multiple stage_inst each with its own instance
/* NOTE(review): sampled extract -- some body lines are missing. */
210 opdl_tx_dequeue_multi_inst(struct opdl_port *p,
211 struct rte_event ev[],
214 uint32_t num_events = 0;
/* Claim a burst of slots from this instance's dequeue stage. */
216 num_events = opdl_stage_claim(p->deq_stage_inst,
223 update_on_dequeue(p, ev, num, num_events);
/* Disclaim exactly what was claimed; 'false' presumably means non-blocking
 * -- TODO confirm against opdl_stage_disclaim()'s signature. */
225 return opdl_stage_disclaim(p->deq_stage_inst, num_events, false);
230 * Worker thread claim
/* Dequeue path for regular worker ports: bounds-check the request against
 * the port's maximum depth, then claim events from the dequeue stage.
 * NOTE(review): sampled extract -- some body lines are missing.
 */
235 opdl_claim(struct opdl_port *p, struct rte_event ev[], uint16_t num)
237 uint32_t num_events = 0;
/* Reject oversized requests up front with a PMD error log. */
239 if (unlikely(num > MAX_OPDL_CONS_Q_DEPTH)) {
240 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
241 "Attempt to dequeue num of events larger than port (%d) max",
242 opdl_pmd_dev_id(p->opdl),
249 num_events = opdl_stage_claim(p->deq_stage_inst,
257 update_on_dequeue(p, ev, num, num_events);
263 * Worker thread disclaim
/* Enqueue path for regular worker ports: write events back into the slots
 * previously claimed on the shared enq/deq stage, then release the claim.
 * NOTE(review): sampled extract -- some body lines are missing.
 */
267 opdl_disclaim(struct opdl_port *p, const struct rte_event ev[], uint16_t num)
269 uint16_t enqueued = 0;
/* Copy each event back into its claimed slot (atomic variant visible). */
273 for (i = 0; i < num; i++)
274 opdl_ring_cas_slot(p->enq_stage_inst, &ev[i],
277 enqueued = opdl_stage_disclaim(p->enq_stage_inst,
/* Validate and account the release via the common enqueue check. */
281 return enqueue_check(p, ev, num, enqueued);
/* Select which stage instance of a queue's i-th port is relevant for
 * dependency wiring: START/MIDDLE queues feed via the enqueue stage,
 * otherwise (END) the dequeue stage is used.
 * NOTE(review): sampled extract -- brace/else lines are missing here.
 */
284 static __rte_always_inline struct opdl_stage *
285 stage_for_port(struct opdl_queue *q, unsigned int i)
287 if (q->q_pos == OPDL_Q_POS_START || q->q_pos == OPDL_Q_POS_MIDDLE)
288 return q->ports[i]->enq_stage_inst;
290 return q->ports[i]->deq_stage_inst;
/* Wire queue q_id's stages to depend on queue deps_q_id's stages.
 * Steps visible in this extract: (1) sanity-check that every stage of both
 * queues lives on the same opdl ring (dumping all rings on mismatch),
 * (2) gather the dependent queue's stage instances, (3) register those as
 * dependencies for each stage of this queue via opdl_stage_deps_add().
 * NOTE(review): sampled extract -- some lines (returns, braces) missing.
 */
293 static int opdl_add_deps(struct opdl_evdev *device,
299 struct opdl_ring *ring;
300 struct opdl_queue *queue = &device->queue[q_id];
301 struct opdl_queue *queue_deps = &device->queue[deps_q_id];
302 struct opdl_stage *dep_stages[OPDL_PORTS_MAX];
304 /* sanity check that all stages are for same opdl ring */
305 for (i = 0; i < queue->nb_ports; i++) {
306 struct opdl_ring *r =
307 opdl_stage_get_opdl_ring(stage_for_port(queue, i));
308 for (j = 0; j < queue_deps->nb_ports; j++) {
309 struct opdl_ring *rj =
310 opdl_stage_get_opdl_ring(
311 stage_for_port(queue_deps, j));
/* Mismatch: log and dump every opdl ring for diagnosis. */
313 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
314 "Stages and dependents"
315 " are not for same opdl ring",
316 opdl_pmd_dev_id(device));
318 k < device->nb_opdls; k++) {
319 opdl_ring_dump(device->opdl[k],
327 /* Gather all stages instance in deps */
328 for (i = 0; i < queue_deps->nb_ports; i++)
329 dep_stages[i] = stage_for_port(queue_deps, i);
332 /* Add all deps for each port->stage_inst in this queue */
333 for (i = 0; i < queue->nb_ports; i++) {
335 ring = opdl_stage_get_opdl_ring(stage_for_port(queue, i));
337 status = opdl_stage_deps_add(ring,
338 stage_for_port(queue, i),
339 queue->ports[i]->num_instance,
340 queue->ports[i]->instance_id,
342 queue_deps->nb_ports);
/* Install enqueue/dequeue function pointers on every configured port
 * according to its type:
 *   PURE_RX  -> real enqueue, error-stub dequeue
 *   PURE_TX  -> error-stub enqueue, single- or multi-instance dequeue
 *   REGULAR  -> claim/disclaim worker pair
 *   ASYNC    -> real enqueue, single-instance dequeue
 * NOTE(review): sampled extract -- some lines (else braces, return) missing.
 */
351 opdl_add_event_handlers(struct rte_eventdev *dev)
355 struct opdl_evdev *device = opdl_pmd_priv(dev);
358 for (i = 0; i < device->max_port_nb; i++) {
360 struct opdl_port *port = &device->ports[i];
362 if (port->configured) {
363 if (port->p_type == OPDL_PURE_RX_PORT) {
364 port->enq = opdl_rx_enqueue;
365 port->deq = opdl_tx_error_dequeue;
367 } else if (port->p_type == OPDL_PURE_TX_PORT) {
369 port->enq = opdl_rx_error_enqueue;
371 if (port->num_instance == 1)
373 opdl_tx_dequeue_single_thread;
375 port->deq = opdl_tx_dequeue_multi_inst;
377 } else if (port->p_type == OPDL_REGULAR_PORT) {
379 port->enq = opdl_disclaim;
380 port->deq = opdl_claim;
382 } else if (port->p_type == OPDL_ASYNC_PORT) {
384 port->enq = opdl_rx_enqueue;
386 /* Always single instance */
387 port->deq = opdl_tx_dequeue_single_thread;
/* Fallback: unknown port type is a configuration error. */
389 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
390 "port:[%u] has invalid port type - ",
391 opdl_pmd_dev_id(port->opdl),
396 port->initialized = 1;
/* NOTE(review): raw fprintf(stdout) bypasses the PMD log framework --
 * consider PMD_DRV_LOG here; left unchanged (surrounding lines missing). */
401 fprintf(stdout, "Success - enqueue/dequeue handler(s) added\n");
/* Walk every initialized queue and wire stage dependencies with
 * opdl_add_deps(): MIDDLE and END queues depend on their predecessor
 * (i-1); an END queue additionally closes the pipeline by making the
 * start queue depend on it (rx depends on tx).
 * NOTE(review): sampled extract -- some lines (start_qid capture, error
 * returns) are missing; 'start_qid' presumably records the START queue's
 * index in the invisible branch at line 420ff -- confirm.
 */
406 build_all_dependencies(struct rte_eventdev *dev)
411 struct opdl_evdev *device = opdl_pmd_priv(dev);
413 uint8_t start_qid = 0;
415 for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
416 struct opdl_queue *queue = &device->queue[i];
417 if (!queue->initialized)
420 if (queue->q_pos == OPDL_Q_POS_START) {
425 if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
426 err = opdl_add_deps(device, i, i-1);
428 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
429 "dependency addition for queue:[%u] - FAILED",
431 queue->external_qid);
436 if (queue->q_pos == OPDL_Q_POS_END) {
437 /* Add this dependency */
438 err = opdl_add_deps(device, i, i-1);
440 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
441 "dependency addition for queue:[%u] - FAILED",
443 queue->external_qid);
446 /* Add dependency for rx on tx */
447 err = opdl_add_deps(device, start_qid, i);
449 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
450 "dependency addition for queue:[%u] - FAILED",
452 queue->external_qid);
/* NOTE(review): fprintf(stdout) bypasses the PMD log framework. */
459 fprintf(stdout, "Success - dependencies built\n");
/* Validate the queue topology after linking: every initialized,
 * externally-visible queue must have at least one port, and the count of
 * non-internal queues must equal the configured max_queue_nb.
 * NOTE(review): sampled extract -- 'nb_iq' is presumably incremented for
 * internal queues in missing lines (e.g. at the OPDL_INVALID_QID branch)
 * -- confirm against the full file.
 */
464 check_queues_linked(struct rte_eventdev *dev)
469 struct opdl_evdev *device = opdl_pmd_priv(dev);
472 for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
473 struct opdl_queue *queue = &device->queue[i];
475 if (!queue->initialized)
478 if (queue->external_qid == OPDL_INVALID_QID)
481 if (queue->nb_ports == 0) {
482 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
483 "queue:[%u] has no associated ports",
/* Externally visible queue count must match the device configuration. */
491 if ((i - nb_iq) != device->max_queue_nb) {
492 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
493 "%u queues counted but should be %u",
496 device->max_queue_nb);
/* Tear down all opdl rings owned by the device and zero the whole queue
 * array so the device can be reconfigured from scratch.
 * NOTE(review): sampled extract -- nb_opdls/nb_queues reset lines are not
 * visible; presumably they are cleared in the missing lines.
 */
505 destroy_queues_and_rings(struct rte_eventdev *dev)
507 struct opdl_evdev *device = opdl_pmd_priv(dev);
509 for (uint32_t i = 0; i < device->nb_opdls; i++) {
511 opdl_ring_free(device->opdl[i]);
/* Wipe every queue slot, not just the initialized ones. */
514 memset(&device->queue,
516 sizeof(struct opdl_queue)
517 * RTE_EVENT_MAX_QUEUES_PER_DEV);
/* Index of the most recently created opdl ring on device d.
 * Fix (CERT PRE01-C): parenthesize the macro parameter so expressions
 * such as OPDL_ID(a ? x : y) expand correctly; the old expansion
 * (d->nb_opdls - 1) applied '->' only to the last token of the argument.
 */
#define OPDL_ID(d) ((d)->nb_opdls - 1)
/* Initialize the next free slot in device->queue[]: either a "dummy"
 * internal queue (ORDERED, OPDL_INVALID_QID external id) or a real queue
 * taking its type/external id from the queue metadata q_md[i], in which
 * case the external->internal id map is updated. The queue is bound to
 * the most recently created opdl ring (OPDL_ID) and marked configured.
 * NOTE(review): sampled extract -- the branch structure and nb_queues
 * increment are in missing lines; 'i' is presumably a q_md index sentinel
 * distinguishing dummy from real queues -- confirm.
 */
522 static __rte_always_inline void
523 initialise_queue(struct opdl_evdev *device,
527 struct opdl_queue *queue = &device->queue[device->nb_queues];
530 queue->q_type = OPDL_Q_TYPE_ORDERED;
531 queue->external_qid = OPDL_INVALID_QID;
533 queue->q_type = device->q_md[i].type;
534 queue->external_qid = device->q_md[i].ext_id;
535 /* Add ex->in for queues setup */
536 device->q_map_ex_to_in[queue->external_qid] = device->nb_queues;
538 queue->opdl_id = OPDL_ID(device);
541 queue->configured = 1;
/* Create the next opdl ring for the device, named "<service>_<index>",
 * sized by the device's event limit, with 2 slots of stage headroom per
 * port. On failure a PMD error is logged; on success nb_opdls is
 * presumably incremented in lines not visible here -- confirm.
 */
547 static __rte_always_inline int
548 create_opdl(struct opdl_evdev *device)
552 char name[RTE_MEMZONE_NAMESIZE];
554 snprintf(name, RTE_MEMZONE_NAMESIZE,
555 "%s_%u", device->service_name, device->nb_opdls);
557 device->opdl[device->nb_opdls] =
558 opdl_ring_create(name,
559 device->nb_events_limit,
560 sizeof(struct rte_event),
561 device->max_port_nb * 2,
564 if (!device->opdl[device->nb_opdls]) {
565 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
566 "opdl ring %u creation - FAILED",
567 opdl_pmd_dev_id(device),
/* Handle a SINGLE_LINK queue at q_md[index]: if the following metadata
 * slot is NOT also SINGLE_LINK, start a fresh opdl ring (with its dummy
 * lead-in queue) for the async continuation; two consecutive SINGLE_LINK
 * queues are rejected with a PMD error.
 * NOTE(review): sampled extract -- error-return lines are missing.
 */
576 static __rte_always_inline int
577 create_link_opdl(struct opdl_evdev *device, uint32_t index)
582 if (device->q_md[index + 1].type !=
583 OPDL_Q_TYPE_SINGLE_LINK) {
585 /* async queue with regular
589 /* create a new opdl ring */
590 err = create_opdl(device);
593 * dummy queue for new opdl
595 initialise_queue(device,
/* Two back-to-back SINGLE_LINK queues cannot be represented. */
602 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
603 "queue %u, two consecutive"
604 " SINGLE_LINK queues, not allowed",
605 opdl_pmd_dev_id(device),
/* Build the device's queues and opdl rings from the queue metadata:
 * verify the configured port count, create the first ring plus its dummy
 * lead-in queue, then walk q_md[] creating START/MIDDLE/END queues and
 * delegating SINGLE_LINK handling to create_link_opdl(). On any failure
 * everything is torn down via destroy_queues_and_rings().
 * NOTE(review): sampled extract -- the q_pos arguments to
 * initialise_queue() and several control lines are missing.
 */
614 create_queues_and_rings(struct rte_eventdev *dev)
618 struct opdl_evdev *device = opdl_pmd_priv(dev);
620 device->nb_queues = 0;
/* All ports must have been set up before queues/rings are built. */
622 if (device->nb_ports != device->max_port_nb) {
623 PMD_DRV_LOG(ERR, "Number ports setup:%u NOT EQUAL to max port"
624 " number:%u for this device",
626 device->max_port_nb);
631 /* We will have at least one opdl so create it now */
632 err = create_opdl(device);
637 /* Create 1st "dummy" queue */
638 initialise_queue(device,
642 for (uint32_t i = 0; i < device->nb_q_md; i++) {
/* Every metadata slot must have been populated by queue setup. */
645 if (!device->q_md[i].setup) {
647 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
648 "queue meta data slot %u"
649 " not setup - FAILING",
654 } else if (device->q_md[i].type !=
655 OPDL_Q_TYPE_SINGLE_LINK) {
/* Last non-link queue becomes the pipeline end; others are middle. */
657 if (!device->q_md[i + 1].setup) {
658 /* Create a simple ORDERED/ATOMIC
661 initialise_queue(device,
666 /* Create a simple ORDERED/ATOMIC
667 * queue in the middle
669 initialise_queue(device,
673 } else if (device->q_md[i].type ==
674 OPDL_Q_TYPE_SINGLE_LINK) {
676 /* create last queue for this opdl */
677 initialise_queue(device,
681 err = create_link_opdl(device, i);
/* Error path: unwind all rings/queues created so far. */
691 destroy_queues_and_rings(dev);
/* Initialize every port except the queue-zero (pure RX) ports handled by
 * initialise_queue_zero_ports(). For each port, by its queue's type/pos:
 *   - non-SINGLE_LINK MIDDLE queue -> REGULAR worker port, one shared
 *     stage used for both claim (deq) and disclaim (enq);
 *   - non-SINGLE_LINK END queue    -> PURE_TX port, dequeue stage only;
 *   - SINGLE_LINK queue            -> ASYNC port, dequeue stage on this
 *     ring plus an enqueue stage on the next ring (via the ex->in map).
 * A final pass fills in num_instance and flags ports/queues initialized.
 * NOTE(review): sampled extract -- many lines (nb_ports increments,
 * returns, some stage_add arguments) are missing; comments below describe
 * only the visible fragments.
 */
698 initialise_all_other_ports(struct rte_eventdev *dev)
701 struct opdl_stage *stage_inst = NULL;
703 struct opdl_evdev *device = opdl_pmd_priv(dev);
705 for (uint32_t i = 0; i < device->nb_ports; i++) {
706 struct opdl_port *port = &device->ports[i];
707 struct opdl_queue *queue = &device->queue[port->queue_id];
/* Queue-zero ports were handled earlier; skip them here. */
709 if (port->queue_id == 0) {
711 } else if (queue->q_type != OPDL_Q_TYPE_SINGLE_LINK) {
713 if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
715 /* Regular port with claim/disclaim */
716 stage_inst = opdl_stage_add(
717 device->opdl[queue->opdl_id],
/* Same stage instance serves both directions on a worker port. */
720 port->deq_stage_inst = stage_inst;
721 port->enq_stage_inst = stage_inst;
723 if (queue->q_type == OPDL_Q_TYPE_ATOMIC)
724 port->atomic_claim = true;
726 port->atomic_claim = false;
728 port->p_type = OPDL_REGULAR_PORT;
730 /* Add the port to the queue array of ports */
731 queue->ports[queue->nb_ports] = port;
732 port->instance_id = queue->nb_ports;
734 } else if (queue->q_pos == OPDL_Q_POS_END) {
737 stage_inst = opdl_stage_add(
738 device->opdl[queue->opdl_id],
/* Pure TX: dequeue only, no enqueue stage. */
741 port->deq_stage_inst = stage_inst;
742 port->enq_stage_inst = NULL;
743 port->p_type = OPDL_PURE_TX_PORT;
745 /* Add the port to the queue array of ports */
746 queue->ports[queue->nb_ports] = port;
747 port->instance_id = queue->nb_ports;
/* Ports may not link to a START/INVALID-positioned queue here. */
751 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
752 "port %u:, linked incorrectly"
753 " to a q_pos START/INVALID %u",
754 opdl_pmd_dev_id(port->opdl),
761 } else if (queue->q_type == OPDL_Q_TYPE_SINGLE_LINK) {
763 port->p_type = OPDL_ASYNC_PORT;
766 stage_inst = opdl_stage_add(
767 device->opdl[queue->opdl_id],
769 false); /* First stage */
770 port->deq_stage_inst = stage_inst;
772 /* Add the port to the queue array of ports */
773 queue->ports[queue->nb_ports] = port;
774 port->instance_id = queue->nb_ports;
/* SINGLE_LINK means exactly one port may be attached. */
777 if (queue->nb_ports > 1) {
778 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
779 "queue %u:, setup as SINGLE_LINK"
780 " but has more than one port linked",
781 opdl_pmd_dev_id(port->opdl),
782 queue->external_qid);
787 /* -- single instance rx for next opdl -- */
789 device->q_map_ex_to_in[queue->external_qid] + 1;
790 if (next_qid < RTE_EVENT_MAX_QUEUES_PER_DEV &&
791 device->queue[next_qid].configured) {
793 /* Remap the queue */
794 queue = &device->queue[next_qid];
/* Enqueue stage lives on the NEXT queue's (remapped) opdl ring. */
796 stage_inst = opdl_stage_add(
797 device->opdl[queue->opdl_id],
800 port->enq_stage_inst = stage_inst;
802 /* Add the port to the queue array of ports */
803 queue->ports[queue->nb_ports] = port;
804 port->instance_id = queue->nb_ports;
806 if (queue->nb_ports > 1) {
807 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
808 "dummy queue %u: for "
810 "SINGLE_LINK but has more "
811 "than one port linked",
812 opdl_pmd_dev_id(port->opdl),
818 /* Set this queue to initialized as it is never
819 * referenced by any ports
821 queue->initialized = 1;
826 /* Now that all ports are initialised we need to
827 * setup the last bit of stage md
830 for (uint32_t i = 0; i < device->nb_ports; i++) {
831 struct opdl_port *port = &device->ports[i];
832 struct opdl_queue *queue =
833 &device->queue[port->queue_id];
835 if (port->configured &&
836 (port->queue_id != OPDL_INVALID_QID)) {
837 if (queue->nb_ports == 0) {
838 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
839 "queue:[%u] has no ports"
841 opdl_pmd_dev_id(port->opdl),
/* num_instance lets multi-instance TX ports split claims fairly. */
847 port->num_instance = queue->nb_ports;
848 port->initialized = 1;
849 queue->initialized = 1;
851 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
852 "Port:[%u] not configured invalid"
853 " queue configuration",
854 opdl_pmd_dev_id(port->opdl),
/* Set up the pipeline's input (queue-zero) ports: count how many ports
 * are still unassigned (OPDL_INVALID_QID) and mark them PURE_RX, create
 * one shared input stage on the first opdl ring (thread-safe when more
 * than one RX port exists), then attach that stage to every queue-zero
 * port and register the ports on queue zero.
 * NOTE(review): sampled extract -- 'mt_rx' is presumably the count
 * incremented in missing lines of the first loop; queue->nb_ports
 * increments are also not visible -- confirm.
 */
865 initialise_queue_zero_ports(struct rte_eventdev *dev)
869 struct opdl_stage *stage_inst = NULL;
870 struct opdl_queue *queue = NULL;
872 struct opdl_evdev *device = opdl_pmd_priv(dev);
874 /* Assign queue zero and figure out how many Q0 ports we have */
875 for (uint32_t i = 0; i < device->nb_ports; i++) {
876 struct opdl_port *port = &device->ports[i];
877 if (port->queue_id == OPDL_INVALID_QID) {
879 port->external_qid = OPDL_INVALID_QID;
880 port->p_type = OPDL_PURE_RX_PORT;
885 /* Create the stage */
/* Thread-safe input stage only when multiple RX ports will share it. */
886 stage_inst = opdl_stage_add(device->opdl[0],
887 (mt_rx > 1 ? true : false),
891 /* Assign the new created input stage to all relevant ports */
892 for (uint32_t i = 0; i < device->nb_ports; i++) {
893 struct opdl_port *port = &device->ports[i];
894 if (port->queue_id == 0) {
895 queue = &device->queue[port->queue_id];
896 port->enq_stage_inst = stage_inst;
897 port->deq_stage_inst = NULL;
898 port->configured = 1;
899 port->initialized = 1;
901 queue->ports[queue->nb_ports] = port;
902 port->instance_id = queue->nb_ports;
913 assign_internal_queue_ids(struct rte_eventdev *dev)
916 struct opdl_evdev *device = opdl_pmd_priv(dev);
918 for (uint32_t i = 0; i < device->nb_ports; i++) {
919 struct opdl_port *port = &device->ports[i];
920 if (port->external_qid != OPDL_INVALID_QID) {
922 device->q_map_ex_to_in[port->external_qid];
924 /* Now do the external_qid of the next queue */
925 struct opdl_queue *queue =
926 &device->queue[port->queue_id];
927 if (queue->q_pos == OPDL_Q_POS_END)
928 port->next_external_qid =
929 device->queue[port->queue_id + 2].external_qid;
931 port->next_external_qid =
932 device->queue[port->queue_id + 1].external_qid;