+ DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
+ dpaa_intf->name, fd_offset,
+ fman_if_get_fdoff(dpaa_intf->fif));
+ }
+ DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
+ fman_if_get_sg_enable(dpaa_intf->fif),
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /* Check for push mode only; no error check for now */
+ if (!rxq->is_static &&
+ dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ struct qman_portal *qp;
+ int q_fd;
+
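+ /* Consume one of the limited push-mode slots
+ * (bounded by dpaa_push_mode_max_queue).
+ */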
+ dpaa_push_queue_idx++;
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
+ QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
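+ /* Stash annotation, data and FQ context lines into the
+ * dequeuing core's cache to speed up Rx processing.
+ */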
+ opts.fqd.context_a.stashing.exclusive = 0;
+ /* In a multicore scenario stashing becomes a bottleneck on LS1046,
+ * so do not enable stashing in that case.
+ */
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+ opts.fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts.fqd.context_a.stashing.context_cl =
+ DPAA_IF_RX_CONTEXT_STASH;
+
+ /* Create a channel and associate the given queue with it */
+ qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = rxq->ch_id;
+ opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+ flags = QMAN_INITFQ_FLAG_SCHED;
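+ /* SCHED places the FQ directly in the scheduled state so it
+ * can be dequeued via the pool channel right after init.
+ */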
+
+ /* Configure tail drop */
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
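+ /* DQRR prefetch is not usable on LS1043A, so fall back to
+ * the no-prefetch Rx callback on that SoC.
+ */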
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
+ } else {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+ rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ }
+
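+ /* Flag the queue as push-mode so this setup is not repeated */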
+ rxq->is_static = true;
+
+ /* Allocate qman specific portals */
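+ /* The portal fd returned in q_fd doubles as the Rx interrupt
+ * event fd for this queue.
+ */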
+ qp = fsl_qman_fq_portal_create(&q_fd);
+ if (!qp) {
+ DPAA_PMD_ERR("Unable to alloc fq portal");
+ return -1;
+ }
+ rxq->qp = qp;
+
+ /* Set up the device interrupt handler */
+ if (!dev->intr_handle) {
+ struct rte_dpaa_device *dpaa_dev;
+ struct rte_device *rdev = dev->device;
+
+ dpaa_dev = container_of(rdev, struct rte_dpaa_device,
+ device);
+ dev->intr_handle = &dpaa_dev->intr_handle;
+ dev->intr_handle->intr_vec = rte_zmalloc(NULL,
+ dpaa_push_mode_max_queue * sizeof(int), 0);
+ if (!dev->intr_handle->intr_vec) {
+ DPAA_PMD_ERR("intr_vec alloc failed");
+ return -ENOMEM;
+ }
+ dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
+ dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
+ }
+
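+ /* Register this queue's interrupt vector and event fd */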
+ dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
+ dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
+ dev->intr_handle->efds[queue_idx] = q_fd;
+ rxq->q_fd = q_fd;
+ }
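+ /* Cache the bpid table so the Rx path can map a bpid to its pool */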
+ rxq->bp_array = rte_dpaa_bpid_info;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ /* Configure the CGR size as per the number of Rx descriptors */
+ if (dpaa_intf->cgr_rx) {
+ struct qm_mcc_initcgr cgr_opts = {0};
+
+ /* Enable tail drop with cgr on this queue */
+ qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
+ ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
+ if (ret) {
+ DPAA_PMD_WARN(
+ "rx taildrop modify failed on fqid 0x%x (ret=%d)",
+ rxq->fqid, ret);
+ }