event/dlb: add port link
diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index e98a438..29d5a0c 100644
@@ -152,6 +152,69 @@ dlb_free_qe_mem(struct dlb_port *qm_port)
        qm_port->consume_qe = NULL;
 }
 
+static int
+dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
+{
+       struct dlb_cq_pop_qe *qe;
+
+       qe = rte_zmalloc(mz_name,
+                       DLB_NUM_QES_PER_CACHE_LINE *
+                               sizeof(struct dlb_cq_pop_qe),
+                       RTE_CACHE_LINE_SIZE);
+
+       if (qe == NULL) {
+               DLB_LOG_ERR("dlb: no memory for consume_qe\n");
+               return -ENOMEM;
+       }
+
+       qm_port->consume_qe = qe;
+
+       qe->qe_valid = 0;
+       qe->qe_frag = 0;
+       qe->qe_comp = 0;
+       qe->cq_token = 1;
+       /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
+        * and so on.
+        */
+       qe->tokens = 0; /* set at run time */
+       qe->meas_lat = 0;
+       qe->no_dec = 0;
+       /* Completion IDs are disabled */
+       qe->cmp_id = 0;
+
+       return 0;
+}
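+
+/* Runtime-use sketch (illustrative, not part of this patch): at dequeue time
+ * the PMD patches qe->tokens with the 0-based token count and writes the
+ * 64-byte pop QE to the port's producer MMIO window, e.g.:
+ *
+ *     qe->tokens = num_to_return - 1;
+ *     movdir64b(port_pp_addr, qe);
+ *
+ * where movdir64b() and port_pp_addr stand in for the PMD's actual 64-byte
+ * store primitive and per-port producer address.
+ */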
+
+static int
+dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
+{
+       int ret, sz;
+
+       sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe);
+
+       qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
+
+       if (qm_port->qe4 == NULL) {
+               DLB_LOG_ERR("dlb: no qe4 memory\n");
+               ret = -ENOMEM;
+               goto error_exit;
+       }
+
+       ret = dlb_init_consume_qe(qm_port, mz_name);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", ret);
+               goto error_exit;
+       }
+
+       return 0;
+
+error_exit:
+
+       dlb_free_qe_mem(qm_port);
+
+       return ret;
+}
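+
+/* Sizing note (assuming the 16-byte QE format implied by the constants
+ * above): with DLB_NUM_QES_PER_CACHE_LINE == 4, qe4 is exactly one 64-byte
+ * cache line, i.e. the unit a single MOVDIR64 store transfers.
+ */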
+
 /* Wrapper for string to int conversion. Substituted for atoi(...), which is
  * unsafe.
  */
@@ -657,6 +720,748 @@ dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
        queue_conf->priority = 0;
 }
 
+static int
+dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
+                      struct dlb_eventdev_port *ev_port,
+                      uint32_t dequeue_depth,
+                      uint32_t cq_depth,
+                      uint32_t enqueue_depth,
+                      uint16_t rsvd_tokens,
+                      bool use_rsvd_token_scheme)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_create_ldb_port_args cfg = {0};
+       struct dlb_cmd_response response = {0};
+       int ret;
+       struct dlb_port *qm_port = NULL;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       uint32_t qm_port_id;
+
+       if (handle == NULL)
+               return -EINVAL;
+
+       if (cq_depth < DLB_MIN_LDB_CQ_DEPTH) {
+               DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n",
+                       DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH);
+               return -EINVAL;
+       }
+
+       if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
+               DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
+                           DLB_MIN_ENQUEUE_DEPTH);
+               return -EINVAL;
+       }
+
+       rte_spinlock_lock(&handle->resource_lock);
+
+       cfg.response = (uintptr_t)&response;
+
+       /* We round up to the next power of 2 if necessary */
+       cfg.cq_depth = rte_align32pow2(cq_depth);
+       cfg.cq_depth_threshold = rsvd_tokens;
+
+       cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
+
+       /* User controls the LDB high watermark via enqueue depth. The DIR high
+        * watermark is equal, unless the directed credit pool is too small.
+        */
+       cfg.ldb_credit_high_watermark = enqueue_depth;
+
+       /* If there are no directed ports, the kernel driver will ignore this
+        * port's directed credit settings. Don't use enqueue_depth if it would
+        * require more directed credits than are available.
+        */
+       cfg.dir_credit_high_watermark =
+               RTE_MIN(enqueue_depth,
+                       handle->cfg.num_dir_credits / dlb->num_ports);
+
+       cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
+       cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
+
+       cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
+       cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
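+
+       /* Worked example: with enqueue_depth = 64, the LDB credit high
+        * watermark is 64, the quantum is 32, and the low watermark is
+        * RTE_MIN(16, 32) = 16.
+        */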
+
+       /* Per QM values */
+
+       cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
+       cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
+
+       ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               goto error_exit;
+       }
+
+       qm_port_id = response.id;
+
+       DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n",
+                   ev_port->id, qm_port_id);
+
+       qm_port = &ev_port->qm_port;
+       qm_port->ev_port = ev_port; /* back ptr */
+       qm_port->dlb = dlb; /* back ptr */
+
+       /*
+        * Allocate and init local qe struct(s).
+        * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
+        */
+
+       snprintf(mz_name, sizeof(mz_name), "ldb_port%d",
+                ev_port->id);
+
+       ret = dlb_init_qe_mem(qm_port, mz_name);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
+               goto error_exit;
+       }
+
+       qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
+       qm_port->id = qm_port_id;
+
+       /* The credit window is one high water mark of QEs */
+       qm_port->ldb_pushcount_at_credit_expiry = 0;
+       qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
+       /* The credit window is one high water mark of QEs */
+       qm_port->dir_pushcount_at_credit_expiry = 0;
+       qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
+       /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
+        * the effective depth is smaller.
+        */
+       qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
+       qm_port->cq_idx = 0;
+       qm_port->cq_idx_unmasked = 0;
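+       /* Sparse CQ mode note (inferred from the 4x mask below): the device
+        * writes one QE per 64-byte cache line, i.e. four QE slots per CQ
+        * entry, so the CQ index space spans four times the configured depth.
+        */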
+       if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
+               qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
+       else
+               qm_port->cq_depth_mask = qm_port->cq_depth - 1;
+
+       qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
+       /* starting value of gen bit - it toggles at wrap time */
+       qm_port->gen_bit = 1;
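+       /* The gen bit scheme (as used by the dequeue path): gen_bit_shift is
+        * the number of CQ index bits, so the bit of cq_idx_unmasked at that
+        * position flips on every CQ wrap and is compared against the gen bit
+        * the hardware writes into each QE to detect stale entries.
+        */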
+
+       qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
+       qm_port->cq_rsvd_token_deficit = rsvd_tokens;
+       qm_port->int_armed = false;
+
+       /* Save off for later use in info and lookup APIs. */
+       qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0];
+
+       qm_port->dequeue_depth = dequeue_depth;
+
+       qm_port->owed_tokens = 0;
+       qm_port->issued_releases = 0;
+
+       /* update state */
+       qm_port->state = PORT_STARTED; /* enabled at create time */
+       qm_port->config_state = DLB_CONFIGURED;
+
+       qm_port->dir_credits = cfg.dir_credit_high_watermark;
+       qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
+
+       DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
+                   qm_port_id,
+                   cq_depth,
+                   qm_port->ldb_credits,
+                   qm_port->dir_credits);
+
+       rte_spinlock_unlock(&handle->resource_lock);
+
+       return 0;
+
+error_exit:
+       if (qm_port) {
+               dlb_free_qe_mem(qm_port);
+               qm_port->pp_mmio_base = 0;
+       }
+
+       rte_spinlock_unlock(&handle->resource_lock);
+
+       DLB_LOG_ERR("dlb: create ldb port failed!\n");
+
+       return ret;
+}
+
+static int
+dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
+                      struct dlb_eventdev_port *ev_port,
+                      uint32_t dequeue_depth,
+                      uint32_t cq_depth,
+                      uint32_t enqueue_depth,
+                      uint16_t rsvd_tokens,
+                      bool use_rsvd_token_scheme)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_create_dir_port_args cfg = {0};
+       struct dlb_cmd_response response = {0};
+       int ret;
+       struct dlb_port *qm_port = NULL;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       uint32_t qm_port_id;
+
+       if (dlb == NULL || handle == NULL)
+               return -EINVAL;
+
+       if (cq_depth < DLB_MIN_DIR_CQ_DEPTH) {
+               DLB_LOG_ERR("dlb: invalid cq_depth, must be at least %d\n",
+                           DLB_MIN_DIR_CQ_DEPTH);
+               return -EINVAL;
+       }
+
+       if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
+               DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
+                           DLB_MIN_ENQUEUE_DEPTH);
+               return -EINVAL;
+       }
+
+       rte_spinlock_lock(&handle->resource_lock);
+
+       /* Directed queues are configured at link time. */
+       cfg.queue_id = -1;
+
+       cfg.response = (uintptr_t)&response;
+
+       /* We round up to the next power of 2 if necessary */
+       cfg.cq_depth = rte_align32pow2(cq_depth);
+       cfg.cq_depth_threshold = rsvd_tokens;
+
+       /* User controls the LDB high watermark via enqueue depth. The DIR high
+        * watermark is equal, unless the directed credit pool is too small.
+        */
+       cfg.ldb_credit_high_watermark = enqueue_depth;
+
+       /* Don't use enqueue_depth if it would require more directed credits
+        * than are available.
+        */
+       cfg.dir_credit_high_watermark =
+               RTE_MIN(enqueue_depth,
+                       handle->cfg.num_dir_credits / dlb->num_ports);
+
+       cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
+       cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
+
+       cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
+       cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
+
+       /* Per QM values */
+
+       cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
+       cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
+
+       ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               goto error_exit;
+       }
+
+       qm_port_id = response.id;
+
+       DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n",
+                   ev_port->id, qm_port_id);
+
+       qm_port = &ev_port->qm_port;
+       qm_port->ev_port = ev_port; /* back ptr */
+       qm_port->dlb = dlb;  /* back ptr */
+
+       /*
+        * Init local qe struct(s).
+        * Note: MOVDIR64 requires the enqueue QE to be aligned
+        */
+
+       snprintf(mz_name, sizeof(mz_name), "dir_port%d",
+                ev_port->id);
+
+       ret = dlb_init_qe_mem(qm_port, mz_name);
+
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
+               goto error_exit;
+       }
+
+       qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
+       qm_port->id = qm_port_id;
+
+       /* The credit window is one high water mark of QEs */
+       qm_port->ldb_pushcount_at_credit_expiry = 0;
+       qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
+       /* The credit window is one high water mark of QEs */
+       qm_port->dir_pushcount_at_credit_expiry = 0;
+       qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
+       qm_port->cq_depth = cfg.cq_depth;
+       qm_port->cq_idx = 0;
+       qm_port->cq_idx_unmasked = 0;
+       if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
+               qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
+       else
+               qm_port->cq_depth_mask = cfg.cq_depth - 1;
+
+       qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
+       /* starting value of gen bit - it toggles at wrap time */
+       qm_port->gen_bit = 1;
+
+       qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
+       qm_port->cq_rsvd_token_deficit = rsvd_tokens;
+       qm_port->int_armed = false;
+
+       /* Save off for later use in info and lookup APIs. */
+       qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0];
+
+       qm_port->dequeue_depth = dequeue_depth;
+
+       qm_port->owed_tokens = 0;
+       qm_port->issued_releases = 0;
+
+       /* update state */
+       qm_port->state = PORT_STARTED; /* enabled at create time */
+       qm_port->config_state = DLB_CONFIGURED;
+
+       qm_port->dir_credits = cfg.dir_credit_high_watermark;
+       qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
+
+       DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n",
+                   qm_port_id,
+                   cq_depth,
+                   cfg.dir_credit_high_watermark,
+                   cfg.ldb_credit_high_watermark);
+
+       rte_spinlock_unlock(&handle->resource_lock);
+
+       return 0;
+
+error_exit:
+       if (qm_port) {
+               qm_port->pp_mmio_base = 0;
+               dlb_free_qe_mem(qm_port);
+       }
+
+       rte_spinlock_unlock(&handle->resource_lock);
+
+       DLB_LOG_ERR("dlb: create dir port failed!\n");
+
+       return ret;
+}
+
+static int32_t
+dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
+                       struct dlb_queue *queue,
+                       const struct rte_event_queue_conf *evq_conf)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_create_ldb_queue_args cfg;
+       struct dlb_cmd_response response = {0};
+       int32_t ret;
+       uint32_t qm_qid;
+       int sched_type = -1;
+
+       if (evq_conf == NULL)
+               return -EINVAL;
+
+       if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
+               if (evq_conf->nb_atomic_order_sequences != 0)
+                       sched_type = RTE_SCHED_TYPE_ORDERED;
+               else
+                       sched_type = RTE_SCHED_TYPE_PARALLEL;
+       } else
+               sched_type = evq_conf->schedule_type;
+
+       cfg.response = (uintptr_t)&response;
+       cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
+       cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
+       cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
+
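+       /* Sequence numbers bound the reorder window of ORDERED queues; they
+        * are unused by ATOMIC and PARALLEL queues, which instead receive a
+        * default per-QID inflight allocation below.
+        */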
+       if (sched_type != RTE_SCHED_TYPE_ORDERED) {
+               cfg.num_sequence_numbers = 0;
+               cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
+       }
+
+       ret = dlb_iface_ldb_queue_create(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return -EINVAL;
+       }
+
+       qm_qid = response.id;
+
+       /* Save off queue config for debug, resource lookups, and reconfig */
+       queue->num_qid_inflights = cfg.num_qid_inflights;
+       queue->num_atm_inflights = cfg.num_atomic_inflights;
+
+       queue->sched_type = sched_type;
+       queue->config_state = DLB_CONFIGURED;
+
+       DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
+                   qm_qid,
+                   cfg.num_atomic_inflights,
+                   cfg.num_sequence_numbers,
+                   cfg.num_qid_inflights);
+
+       return qm_qid;
+}
+
+static int32_t
+dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_get_sn_allocation_args cfg;
+       struct dlb_cmd_response response = {0};
+       int ret;
+
+       cfg.group = group;
+       cfg.response = (uintptr_t)&response;
+
+       ret = dlb_iface_get_sn_allocation(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return ret;
+       }
+
+       return response.id;
+}
+
+static int
+dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_set_sn_allocation_args cfg;
+       struct dlb_cmd_response response = {0};
+       int ret;
+
+       cfg.num = num;
+       cfg.group = group;
+       cfg.response = (uintptr_t)&response;
+
+       ret = dlb_iface_set_sn_allocation(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return ret;
+       }
+
+       return ret;
+}
+
+static int32_t
+dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_get_sn_occupancy_args cfg;
+       struct dlb_cmd_response response = {0};
+       int ret;
+
+       cfg.group = group;
+       cfg.response = (uintptr_t)&response;
+
+       ret = dlb_iface_get_sn_occupancy(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return ret;
+       }
+
+       return response.id;
+}
+
+/* Query the current sequence number allocations and, if they conflict with the
+ * requested LDB queue configuration, attempt to re-allocate sequence numbers.
+ * This is best-effort; if it fails, the PMD still attempts to configure the
+ * load-balanced queue, and that configuration will then fail and return the
+ * error to the caller.
+ */
+static void
+dlb_program_sn_allocation(struct dlb_eventdev *dlb,
+                         const struct rte_event_queue_conf *queue_conf)
+{
+       int grp_occupancy[DLB_NUM_SN_GROUPS];
+       int grp_alloc[DLB_NUM_SN_GROUPS];
+       int i, sequence_numbers;
+
+       sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
+
+       for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+               int total_slots;
+
+               grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
+               if (grp_alloc[i] <= 0) /* also guards the division below */
+                       return;
+
+               total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
+
+               grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
+               if (grp_occupancy[i] < 0)
+                       return;
+
+               /* DLB has at least one available slot for the requested
+                * sequence numbers, so no further configuration required.
+                */
+               if (grp_alloc[i] == sequence_numbers &&
+                   grp_occupancy[i] < total_slots)
+                       return;
+       }
+
+       /* None of the sequence number groups are configured for the requested
+        * sequence numbers, so we have to reconfigure one of them. This is
+        * only possible if a group is not in use.
+        */
+       for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+               if (grp_occupancy[i] == 0)
+                       break;
+       }
+
+       if (i == DLB_NUM_SN_GROUPS) {
+               DLB_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
+                           __func__, sequence_numbers);
+               return;
+       }
+
+       /* Attempt to configure slot i with the requested number of sequence
+        * numbers. Ignore the return value -- if this fails, the error will be
+        * caught during subsequent queue configuration.
+        */
+       dlb_set_sn_allocation(dlb, i, sequence_numbers);
+}
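+
+/* Illustrative arithmetic for the slot computation above: assuming
+ * DLB_MAX_LDB_SN_ALLOC were 1024 and a group were currently allocated 64
+ * sequence numbers per slot, the group would provide 1024 / 64 = 16 queue
+ * slots; a queue requesting 64-SN ordering fits if fewer than 16 slots are
+ * occupied.
+ */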
+
+static int
+dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
+                            struct dlb_eventdev_queue *ev_queue,
+                            const struct rte_event_queue_conf *queue_conf)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       int32_t qm_qid;
+
+       if (queue_conf->nb_atomic_order_sequences)
+               dlb_program_sn_allocation(dlb, queue_conf);
+
+       qm_qid = dlb_hw_create_ldb_queue(dlb,
+                                        &ev_queue->qm_queue,
+                                        queue_conf);
+       if (qm_qid < 0) {
+               DLB_LOG_ERR("Failed to create the load-balanced queue\n");
+
+               return qm_qid;
+       }
+
+       dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+       ev_queue->qm_queue.id = qm_qid;
+
+       return 0;
+}
+
+static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
+{
+       int i, num = 0;
+
+       for (i = 0; i < dlb->num_queues; i++) {
+               if (dlb->ev_queues[i].setup_done &&
+                   dlb->ev_queues[i].qm_queue.is_directed)
+                       num++;
+       }
+
+       return num;
+}
+
+static void
+dlb_queue_link_teardown(struct dlb_eventdev *dlb,
+                       struct dlb_eventdev_queue *ev_queue)
+{
+       struct dlb_eventdev_port *ev_port;
+       int i, j;
+
+       for (i = 0; i < dlb->num_ports; i++) {
+               ev_port = &dlb->ev_ports[i];
+
+               for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+                       if (!ev_port->link[j].valid ||
+                           ev_port->link[j].queue_id != ev_queue->id)
+                               continue;
+
+                       ev_port->link[j].valid = false;
+                       ev_port->num_links--;
+               }
+       }
+
+       ev_queue->num_links = 0;
+}
+
+static int
+dlb_eventdev_queue_setup(struct rte_eventdev *dev,
+                        uint8_t ev_qid,
+                        const struct rte_event_queue_conf *queue_conf)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       struct dlb_eventdev_queue *ev_queue;
+       int ret;
+
+       if (queue_conf == NULL)
+               return -EINVAL;
+
+       if (ev_qid >= dlb->num_queues)
+               return -EINVAL;
+
+       ev_queue = &dlb->ev_queues[ev_qid];
+
+       ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
+               RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+       ev_queue->id = ev_qid;
+       ev_queue->conf = *queue_conf;
+
+       if (!ev_queue->qm_queue.is_directed) {
+               ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
+       } else {
+               /* The directed queue isn't set up until link time, at which
+                * point we know its directed port ID. Directed queue setup
+                * will only fail if this queue is already set up or there are
+                * no directed queues left to configure.
+                */
+               ret = 0;
+
+               ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
+
+               if (ev_queue->setup_done ||
+                   dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
+                       ret = -EINVAL;
+       }
+
+       /* Tear down pre-existing port->queue links */
+       if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
+               dlb_queue_link_teardown(dlb, ev_queue);
+
+       if (!ret)
+               ev_queue->setup_done = true;
+
+       return ret;
+}
+
+static void
+dlb_port_link_teardown(struct dlb_eventdev *dlb,
+                      struct dlb_eventdev_port *ev_port)
+{
+       struct dlb_eventdev_queue *ev_queue;
+       int i;
+
+       for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+               if (!ev_port->link[i].valid)
+                       continue;
+
+               ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id];
+
+               ev_port->link[i].valid = false;
+               ev_port->num_links--;
+               ev_queue->num_links--;
+       }
+}
+
+static int
+dlb_eventdev_port_setup(struct rte_eventdev *dev,
+                       uint8_t ev_port_id,
+                       const struct rte_event_port_conf *port_conf)
+{
+       struct dlb_eventdev *dlb;
+       struct dlb_eventdev_port *ev_port;
+       bool use_rsvd_token_scheme;
+       uint32_t adj_cq_depth;
+       uint16_t rsvd_tokens;
+       int ret;
+
+       if (dev == NULL || port_conf == NULL) {
+               DLB_LOG_ERR("Null parameter\n");
+               return -EINVAL;
+       }
+
+       dlb = dlb_pmd_priv(dev);
+
+       if (ev_port_id >= DLB_MAX_NUM_PORTS)
+               return -EINVAL;
+
+       if (port_conf->dequeue_depth >
+               evdev_dlb_default_info.max_event_port_dequeue_depth ||
+           port_conf->enqueue_depth >
+               evdev_dlb_default_info.max_event_port_enqueue_depth)
+               return -EINVAL;
+
+       ev_port = &dlb->ev_ports[ev_port_id];
+       /* A port can only be set up once */
+       if (ev_port->setup_done) {
+               DLB_LOG_ERR("evport %d is already configured\n", ev_port_id);
+               return -EINVAL;
+       }
+
+       /* The reserved token interrupt arming scheme requires that one or more
+        * CQ tokens be reserved by the PMD. This limits the amount of CQ space
+        * usable by the DLB, so in order to give an *effective* CQ depth equal
+        * to the user-requested value, we double CQ depth and reserve half of
+        * its tokens. If the user requests the max CQ depth (256) then we
+        * cannot double it, so we reserve one token and give an effective
+        * depth of 255 entries.
+        */
+       use_rsvd_token_scheme = true;
+       rsvd_tokens = 1;
+       adj_cq_depth = port_conf->dequeue_depth;
+
+       if (use_rsvd_token_scheme && adj_cq_depth < 256) {
+               rsvd_tokens = adj_cq_depth;
+               adj_cq_depth *= 2;
+       }
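+
+       /* Worked example: a requested dequeue depth of 128 yields
+        * rsvd_tokens = 128 and adj_cq_depth = 256, for an effective CQ depth
+        * of 256 - 128 = 128. At the 256-entry maximum the depth cannot be
+        * doubled, so rsvd_tokens stays 1 and the effective depth is 255.
+        */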
+
+       ev_port->qm_port.is_directed = port_conf->event_port_cfg &
+               RTE_EVENT_PORT_CFG_SINGLE_LINK;
+
+       if (!ev_port->qm_port.is_directed) {
+               ret = dlb_hw_create_ldb_port(dlb,
+                                            ev_port,
+                                            port_conf->dequeue_depth,
+                                            adj_cq_depth,
+                                            port_conf->enqueue_depth,
+                                            rsvd_tokens,
+                                            use_rsvd_token_scheme);
+               if (ret < 0) {
+                       DLB_LOG_ERR("Failed to create the lB port ve portId=%d\n",
+                                   ev_port_id);
+                       return ret;
+               }
+       } else {
+               ret = dlb_hw_create_dir_port(dlb,
+                                            ev_port,
+                                            port_conf->dequeue_depth,
+                                            adj_cq_depth,
+                                            port_conf->enqueue_depth,
+                                            rsvd_tokens,
+                                            use_rsvd_token_scheme);
+               if (ret < 0) {
+                       DLB_LOG_ERR("Failed to create the DIR port\n");
+                       return ret;
+               }
+       }
+
+       /* Save off port config for reconfig */
+       dlb->ev_ports[ev_port_id].conf = *port_conf;
+
+       dlb->ev_ports[ev_port_id].id = ev_port_id;
+       dlb->ev_ports[ev_port_id].enq_configured = true;
+       dlb->ev_ports[ev_port_id].setup_done = true;
+       dlb->ev_ports[ev_port_id].inflight_max =
+               port_conf->new_event_threshold;
+       dlb->ev_ports[ev_port_id].implicit_release =
+               !(port_conf->event_port_cfg &
+                 RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
+       dlb->ev_ports[ev_port_id].outstanding_releases = 0;
+       dlb->ev_ports[ev_port_id].inflight_credits = 0;
+       dlb->ev_ports[ev_port_id].credit_update_quanta =
+               RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
+       dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */
+
+       /* Tear down pre-existing port->queue links */
+       if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+               dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]);
+
+       dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id];
+
+       return 0;
+}
+
 static int
 set_dev_id(const char *key __rte_unused,
           const char *value,
@@ -727,6 +1532,311 @@ set_num_atm_inflights(const char *key __rte_unused,
        return 0;
 }
 
+static int
+dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
+                      uint8_t queue_id,
+                      bool link_exists,
+                      int index)
+{
+       struct dlb_eventdev *dlb = ev_port->dlb;
+       struct dlb_eventdev_queue *ev_queue;
+       bool port_is_dir, queue_is_dir;
+
+       if (queue_id >= dlb->num_queues) {
+               DLB_LOG_ERR("queue_id %d > num queues %d\n",
+                           queue_id, dlb->num_queues);
+               rte_errno = -EINVAL;
+               return -1;
+       }
+
+       ev_queue = &dlb->ev_queues[queue_id];
+
+       if (!ev_queue->setup_done &&
+           ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED) {
+               DLB_LOG_ERR("setup not done and not previously configured\n");
+               rte_errno = -EINVAL;
+               return -1;
+       }
+
+       port_is_dir = ev_port->qm_port.is_directed;
+       queue_is_dir = ev_queue->qm_queue.is_directed;
+
+       if (port_is_dir != queue_is_dir) {
+               DLB_LOG_ERR("%s queue %u can't link to %s port %u\n",
+                           queue_is_dir ? "DIR" : "LDB", ev_queue->id,
+                           port_is_dir ? "DIR" : "LDB", ev_port->id);
+
+               rte_errno = -EINVAL;
+               return -1;
+       }
+
+       /* Check if there is space for the requested link */
+       if (!link_exists && index == -1) {
+               DLB_LOG_ERR("no space for new link\n");
+               rte_errno = -ENOSPC;
+               return -1;
+       }
+
+       /* Check if the directed port is already linked */
+       if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
+           !link_exists) {
+               DLB_LOG_ERR("Can't link DIR port %d to >1 queues\n",
+                           ev_port->id);
+               rte_errno = -EINVAL;
+               return -1;
+       }
+
+       /* Check if the directed queue is already linked */
+       if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
+           !link_exists) {
+               DLB_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
+                           ev_queue->id);
+               rte_errno = -EINVAL;
+               return -1;
+       }
+
+       return 0;
+}
+
+static int16_t
+dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
+                          uint32_t qm_port_id,
+                          uint16_t qm_qid,
+                          uint8_t priority)
+{
+       struct dlb_map_qid_args cfg;
+       struct dlb_cmd_response response = {0};
+       int32_t ret;
+
+       if (handle == NULL)
+               return -EINVAL;
+
+       /* Build message */
+       cfg.response = (uintptr_t)&response;
+       cfg.port_id = qm_port_id;
+       cfg.qid = qm_qid;
+       cfg.priority = EV_TO_DLB_PRIO(priority);
+
+       ret = dlb_iface_map_qid(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: map qid error, ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               DLB_LOG_ERR("dlb: device_id=%d grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
+                           handle->device_id,
+                           handle->domain_id, cfg.port_id,
+                           cfg.qid,
+                           cfg.priority);
+       } else {
+               DLB_LOG_DBG("dlb: mapped queue %d to qm_port %d\n",
+                           qm_qid, qm_port_id);
+       }
+
+       return ret;
+}
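+
+/* EV_TO_DLB_PRIO(), defined elsewhere in the PMD, compresses the eventdev
+ * 0-255 priority range into the smaller set of priority levels the DLB
+ * hardware supports, presumably by discarding the low-order bits.
+ */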
+
+static int
+dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
+                        struct dlb_eventdev_port *ev_port,
+                        struct dlb_eventdev_queue *ev_queue,
+                        uint8_t priority)
+{
+       int first_avail = -1;
+       int ret, i;
+
+       for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+               if (ev_port->link[i].valid) {
+                       if (ev_port->link[i].queue_id == ev_queue->id &&
+                           ev_port->link[i].priority == priority) {
+                               if (ev_port->link[i].mapped)
+                                       return 0; /* already mapped */
+                               first_avail = i;
+                       }
+               } else {
+                       if (first_avail == -1)
+                               first_avail = i;
+               }
+       }
+       if (first_avail == -1) {
+               DLB_LOG_ERR("dlb: qm_port %d has no available QID slots.\n",
+                           ev_port->qm_port.id);
+               return -EINVAL;
+       }
+
+       ret = dlb_hw_map_ldb_qid_to_port(&dlb->qm_instance,
+                                        ev_port->qm_port.id,
+                                        ev_queue->qm_queue.id,
+                                        priority);
+
+       if (!ret)
+               ev_port->link[first_avail].mapped = true;
+
+       return ret;
+}
+
+static int32_t
+dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_create_dir_queue_args cfg;
+       struct dlb_cmd_response response = {0};
+       int32_t ret;
+
+       cfg.response = (uintptr_t)&response;
+
+       /* The directed port is always configured before its queue */
+       cfg.port_id = qm_port_id;
+
+       ret = dlb_iface_dir_queue_create(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return -EINVAL;
+       }
+
+       return response.id;
+}
+
+static int
+dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
+                            struct dlb_eventdev_queue *ev_queue,
+                            struct dlb_eventdev_port *ev_port)
+{
+       int32_t qm_qid;
+
+       qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
+
+       if (qm_qid < 0) {
+               DLB_LOG_ERR("Failed to create the DIR queue\n");
+               return qm_qid;
+       }
+
+       dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+       ev_queue->qm_queue.id = qm_qid;
+
+       return 0;
+}
+
+static int
+dlb_do_port_link(struct rte_eventdev *dev,
+                struct dlb_eventdev_queue *ev_queue,
+                struct dlb_eventdev_port *ev_port,
+                uint8_t prio)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       int err;
+
+       /* Don't link until start time. */
+       if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+               return 0;
+
+       if (ev_queue->qm_queue.is_directed)
+               err = dlb_eventdev_dir_queue_setup(dlb, ev_queue, ev_port);
+       else
+               err = dlb_event_queue_join_ldb(dlb, ev_port, ev_queue, prio);
+
+       if (err) {
+               DLB_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
+                           ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
+                           ev_queue->id, ev_port->id);
+
+               rte_errno = err;
+               return -1;
+       }
+
+       return 0;
+}
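+
+/* Links requested while the eventdev is stopped are only recorded in
+ * ev_port->link[] by the caller; dlb_do_port_link() is expected to be
+ * replayed for each recorded link when the device is started.
+ */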
+
+static int
+dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
+                      const uint8_t queues[], const uint8_t priorities[],
+                      uint16_t nb_links)
+{
+       struct dlb_eventdev_port *ev_port = event_port;
+       struct dlb_eventdev *dlb;
+       int i, j;
+
+       if (ev_port == NULL) {
+               DLB_LOG_ERR("dlb: evport not setup\n");
+               rte_errno = -EINVAL;
+               return 0;
+       }
+
+       if (!ev_port->setup_done &&
+           ev_port->qm_port.config_state != DLB_PREV_CONFIGURED) {
+               DLB_LOG_ERR("dlb: evport not setup\n");
+               rte_errno = -EINVAL;
+               return 0;
+       }
+
+       /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
+        * queues pointer.
+        */
+       if (nb_links == 0) {
+               DLB_LOG_DBG("dlb: nb_links is 0\n");
+               return 0; /* Ignore and return success */
+       }
+
+       dlb = ev_port->dlb;
+
+       DLB_LOG_DBG("Linking %u queues to %s port %d\n",
+                   nb_links,
+                   ev_port->qm_port.is_directed ? "DIR" : "LDB",
+                   ev_port->id);
+
+       for (i = 0; i < nb_links; i++) {
+               struct dlb_eventdev_queue *ev_queue;
+               uint8_t queue_id, prio;
+               bool found = false;
+               int index = -1;
+
+               queue_id = queues[i];
+               prio = priorities[i];
+
+               /* Check if the link already exists. */
+               for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+                       if (ev_port->link[j].valid) {
+                               if (ev_port->link[j].queue_id == queue_id) {
+                                       found = true;
+                                       index = j;
+                                       break;
+                               }
+                       } else {
+                               if (index == -1)
+                                       index = j;
+                       }
+
+               /* No existing link and no free slot: cannot link */
+               if (index == -1)
+                       break;
+
+               /* Check if already linked at the requested priority */
+               if (found && ev_port->link[j].priority == prio)
+                       continue;
+
+               if (dlb_validate_port_link(ev_port, queue_id, found, index))
+                       break; /* return index of offending queue */
+
+               ev_queue = &dlb->ev_queues[queue_id];
+
+               if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
+                       break; /* return index of offending queue */
+
+               ev_queue->num_links++;
+
+               ev_port->link[index].queue_id = queue_id;
+               ev_port->link[index].priority = prio;
+               ev_port->link[index].valid = true;
+               /* If the link already existed, this was only a priority
+                * change, so don't count it as a new link.
+                */
+               if (!found)
+                       ev_port->num_links++;
+       }
+       return i;
+}
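+
+/* Application-side usage sketch (dev_id and port_id are hypothetical values);
+ * rte_event_port_link() resolves the port handle and invokes this callback:
+ *
+ *     uint8_t queues[2] = {0, 1};
+ *     uint8_t prios[2] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
+ *                         RTE_EVENT_DEV_PRIORITY_HIGHEST};
+ *     int n = rte_event_port_link(dev_id, port_id, queues, prios, 2);
+ *
+ * On partial failure n < 2 and rte_errno identifies why queues[n] did not
+ * link.
+ */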
+
 void
 dlb_entry_points_init(struct rte_eventdev *dev)
 {
@@ -735,6 +1845,9 @@ dlb_entry_points_init(struct rte_eventdev *dev)
                .dev_configure    = dlb_eventdev_configure,
                .queue_def_conf   = dlb_eventdev_queue_default_conf_get,
                .port_def_conf    = dlb_eventdev_port_default_conf_get,
+               .queue_setup      = dlb_eventdev_queue_setup,
+               .port_setup       = dlb_eventdev_port_setup,
+               .port_link        = dlb_eventdev_port_link,
                .dump             = dlb_eventdev_dump,
                .xstats_get       = dlb_eventdev_xstats_get,
                .xstats_get_names = dlb_eventdev_xstats_get_names,