+ struct fman_if *fif = dev->process_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
+ struct qm_mcc_initfq opts = {0};
+ u32 flags = 0;
+ int ret;
+ u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
+
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
+ return -EINVAL;
+ }
+ rxq->nb_desc = UINT16_MAX;
+ rxq->offloads = rx_conf->offloads;
+
+ DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, rxq->fqid);
+
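+ /* Validate the buffer pool: without VSP profiles only a single
+ * mempool per interface is supported; with profiles, check that the
+ * pool's bpid is consistent with this queue's VSP.
+ */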
+ if (!fif->num_profiles) {
+ if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+ dpaa_intf->bp_info->mp != mp) {
+ DPAA_PMD_WARN("Multiple pools on same interface not"
+ " supported");
+ return -EINVAL;
+ }
+ } else {
+ if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
+ return -EINVAL;
+ }
+ }
+
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+ /* Max packet fits in a single buffer: nothing to configure */
+ } else if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ buffsz * DPAA_SGT_MAX_ENTRIES) {
+ DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+ "MaxSGlist %d",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz * DPAA_SGT_MAX_ENTRIES);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ } else {
+ DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ buffsz); /* buffsz already excludes RTE_PKTMBUF_HEADROOM */
+ }
+
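+ /* Record the mempool backing this queue; Rx buffers are drawn from it */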
+ dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ /* For a shared interface this is done in the kernel; skip it here. */
+ if (!fif->is_shared_mac)
+ dpaa_fman_if_pool_setup(dev);
+
+ if (fif->num_profiles) {
+ int8_t vsp_id = rxq->vsp_id;
+
+ if (vsp_id >= 0) {
+ ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
+ fif);
+ if (ret) {
+ DPAA_PMD_ERR("dpaa_port_vsp_update failed");
+ return ret;
+ }
+ } else {
+ DPAA_PMD_INFO("Base profile is associated to"
+ " RXQ fqid:%d\r\n", rxq->fqid);
+ if (fif->is_shared_mac) {
+ DPAA_PMD_ERR("Fatal: Base profile is associated"
+ " to shared interface on DPDK.");
+ return -EINVAL;
+ }
+ dpaa_intf->vsp_bpid[fif->base_profile_id] =
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
+ }
+ } else {
+ dpaa_intf->vsp_bpid[0] =
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
+ }
+
+ dpaa_intf->valid = 1;
+ DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
+ fman_if_get_sg_enable(fif),
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /* Check whether this queue can use push (static dequeue) mode;
+ * errors are not checked for now.
+ */
+ if (!rxq->is_static &&
+ dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ struct qman_portal *qp;
+ int q_fd;
+
+ dpaa_push_queue_idx++;
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
+ QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_a.stashing.exclusive = 0;
+ /* In a multicore scenario stashing becomes a bottleneck on LS1046,
+ * so do not enable stashing in that case.
+ */
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+ opts.fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts.fqd.context_a.stashing.context_cl =
+ DPAA_IF_RX_CONTEXT_STASH;
+
+ /* Create a channel and associate the given queue with it */
+ qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = rxq->ch_id;
+ opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ /* Configure tail drop */
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
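+ /* LS1043A cannot use the prefetch-based pull callback, so fall
+ * back to the non-prefetch variant on that SoC.
+ */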
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
+ } else {
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+ rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ }
+
+ rxq->is_static = true;
+
+ /* Allocate qman specific portals */
+ qp = fsl_qman_fq_portal_create(&q_fd);
+ if (!qp) {
+ DPAA_PMD_ERR("Unable to alloc fq portal");
+ return -1;
+ }
+ rxq->qp = qp;
+
+ /* Set up the device interrupt handler */
+ if (!dev->intr_handle) {
+ struct rte_dpaa_device *dpaa_dev;
+ struct rte_device *rdev = dev->device;
+
+ dpaa_dev = container_of(rdev, struct rte_dpaa_device,
+ device);
+ dev->intr_handle = &dpaa_dev->intr_handle;
+ dev->intr_handle->intr_vec = rte_zmalloc(NULL,
+ dpaa_push_mode_max_queue * sizeof(int), 0);
+ if (!dev->intr_handle->intr_vec) {
+ DPAA_PMD_ERR("intr_vec alloc failed");
+ return -ENOMEM;
+ }
+ dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
+ dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
+ }
+
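+ /* Map this queue to its event fd so Rx interrupts can be armed */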
+ dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
+ dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
+ dev->intr_handle->efds[queue_idx] = q_fd;
+ rxq->q_fd = q_fd;
+ }
+ rxq->bp_array = rte_dpaa_bpid_info;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ /* configure the CGR size as per the desc size */
+ if (dpaa_intf->cgr_rx) {
+ struct qm_mcc_initcgr cgr_opts = {0};
+
+ rxq->nb_desc = nb_desc;
+ /* Enable tail drop with cgr on this queue */
+ qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
+ ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
+ if (ret) {
+ DPAA_PMD_WARN(
+ "rx taildrop modify fail on fqid %d (ret=%d)",
+ rxq->fqid, ret);
+ }
+ }
+ /* By default, also direct error packets to the main queue */
+ fman_if_set_err_fqid(fif, rxq->fqid);
+ return 0;
+}
+
+int
+dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ u16 ch_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+ struct qm_mcc_initfq opts = {0};
+
+ if (dpaa_push_mode_max_queue)
+ DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
+ "PUSH mode already enabled for first %d queues.\n"
+ "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
+ dpaa_push_mode_max_queue);
+
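+ /* Start from the default poll-mode FQ configuration and adjust it
+ * for the requested event scheduling type below.
+ */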
+ dpaa_poll_queue_default_config(&opts);
+
+ switch (queue_conf->ev.sched_type) {
+ case RTE_SCHED_TYPE_ATOMIC:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+ /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
+ * configuration with HOLD_ACTIVE setting
+ */
+ opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
+ return -1;
+ default:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
+ break;
+ }
+
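+ /* Route the FQ to the event port's channel at the event priority */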
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = ch_id;
+ opts.fqd.dest.wq = queue_conf->ev.priority;
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
+
+ /* copy configuration which needs to be filled during dequeue */
+ memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
+ dev->data->rx_queues[eth_rx_queue_id] = rxq;
+
+ return ret;
+}
+
+int
+dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+
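+ /* Revert the FQ to its default poll-mode configuration */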
+ dpaa_poll_queue_default_config(&opts);
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
+ rxq->fqid, ret);
+ }
+
+ rxq->cb.dqrr_dpdk_cb = NULL;
+ dev->data->rx_queues[eth_rx_queue_id] = NULL;
+
+ return 0;
+}
+
+static void
+dpaa_eth_rx_queue_release(void *rxq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Validate the queue index before writing to the queue structure */
+ if (queue_idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+
+ /* Tx deferred start is not supported */
+ if (tx_conf->tx_deferred_start) {
+ DPAA_PMD_ERR("%p: Tx deferred start not supported", (void *)dev);
+ return -EINVAL;
+ }
+ txq->nb_desc = UINT16_MAX;
+ txq->offloads = tx_conf->offloads;
+
+ DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, txq->fqid);
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static uint32_t
+dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+ u32 frm_cnt = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
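+ /* Query the pending frame count for this FQ directly from QMan */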
+ if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
+ DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
+ rx_queue_id, frm_cnt);
+ }
+ return frm_cnt;
+}
+
+static int dpaa_link_down(struct rte_eth_dev *dev)
+{
+ struct fman_if *fif = dev->process_private;
+ struct __fman_if *__fif;
+
+ PMD_INIT_FUNC_TRACE();
+
+ __fif = container_of(fif, struct __fman_if, __if);
+
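+ /* With link status interrupts enabled only the link state is
+ * updated; otherwise the device is stopped to force the link down.
+ */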
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+ else
+ return dpaa_eth_dev_stop(dev);
+ return 0;
+}
+
+static int dpaa_link_up(struct rte_eth_dev *dev)
+{
+ struct fman_if *fif = dev->process_private;
+ struct __fman_if *__fif;
+
+ PMD_INIT_FUNC_TRACE();
+
+ __fif = container_of(fif, struct __fman_if, __if);
+
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+ else
+ dpaa_eth_dev_start(dev);
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(dpaa_intf->fc_conf)) {
+ dpaa_intf->fc_conf = rte_zmalloc(NULL,
+ sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+ if (!dpaa_intf->fc_conf) {
+ DPAA_PMD_ERR("unable to save flow control info");
+ return -ENOMEM;
+ }
+ }
+ net_fc = dpaa_intf->fc_conf;
+
+ if (fc_conf->high_water < fc_conf->low_water) {
+ DPAA_PMD_ERR("Incorrect Flow Control Configuration");
+ return -EINVAL;
+ }
+
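+ /* Thresholds are programmed only for Tx pause (or full) mode;
+ * RTE_FC_NONE leaves the hardware untouched.
+ */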
+ if (fc_conf->mode == RTE_FC_NONE) {
+ return 0;
+ } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
+ fc_conf->mode == RTE_FC_FULL) {
+ fman_if_set_fc_threshold(dev->process_private,
+ fc_conf->high_water,
+ fc_conf->low_water,
+ dpaa_intf->bp_info->bpid);
+ if (fc_conf->pause_time)
+ fman_if_set_fc_quanta(dev->process_private,
+ fc_conf->pause_time);
+ }
+
+ /* Save the information in dpaa device */
+ net_fc->pause_time = fc_conf->pause_time;
+ net_fc->high_water = fc_conf->high_water;
+ net_fc->low_water = fc_conf->low_water;
+ net_fc->send_xon = fc_conf->send_xon;
+ net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
+ net_fc->mode = fc_conf->mode;
+ net_fc->autoneg = fc_conf->autoneg;
+
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
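+ /* Prefer the cached configuration; otherwise derive the current
+ * state from the hardware threshold and quanta registers.
+ */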
+ if (net_fc) {
+ fc_conf->pause_time = net_fc->pause_time;
+ fc_conf->high_water = net_fc->high_water;
+ fc_conf->low_water = net_fc->low_water;
+ fc_conf->send_xon = net_fc->send_xon;
+ fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
+ fc_conf->mode = net_fc->mode;
+ fc_conf->autoneg = net_fc->autoneg;
+ return 0;
+ }
+ ret = fman_if_get_fc_threshold(dev->process_private);
+ if (ret) {
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->pause_time =
+ fman_if_get_fc_quanta(dev->process_private);
+ } else {
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+static int
+dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr,
+ uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = fman_if_add_mac_addr(dev->process_private,
+ addr->addr_bytes, index);
+
+ if (ret)
+ DPAA_PMD_ERR("Adding the MAC address failed: err = %d", ret);
+
+ return ret;
+}
+
+static void
+dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_clear_mac_addr(dev->process_private, index);
+}
+
+static int
+dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ int ret;