X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_ethdev.c;h=09a11d65af93009d391511f724c46b69f4f8796a;hb=c8b34b7e9af93017747d758c64777450368384f7;hp=92e680f98ae2a56932b400fc745ef92667bdad4a;hpb=f40adb40c1696e59aaef205494cb519d0bd58287;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 92e680f98a..09a11d65af 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -9,7 +9,7 @@
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -147,6 +147,12 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	PMD_INIT_FUNC_TRACE();
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
+		/* VLAN Filter not available */
+		if (!priv->max_vlan_filters) {
+			RTE_LOG(INFO, PMD, "VLAN filter not available\n");
+			goto next_mask;
+		}
+
 		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
@@ -157,7 +163,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
 				ret);
 	}
-
+next_mask:
 	if (mask & ETH_VLAN_EXTEND_MASK) {
 		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
 			RTE_LOG(INFO, PMD,
@@ -321,8 +327,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 
 	if (eth_conf->rxmode.jumbo_frame == 1) {
 		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
-			ret = dpaa2_dev_mtu_set(dev,
-					eth_conf->rxmode.max_rx_pkt_len);
+			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+				priv->token, eth_conf->rxmode.max_rx_pkt_len);
 			if (ret) {
 				PMD_INIT_LOG(ERR,
 					     "unable to set mtu. check config\n");
@@ -374,6 +380,25 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
+	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
+	 * to 0 for LS2 in the hardware thus disabling data/annotation
+	 * stashing. For LX2 this is fixed in hardware and thus hash result and
+	 * parse results can be received in FD using this option.
+	 */
+	if (dpaa2_svr_family == SVR_LX2160A) {
+		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+				       DPNI_FLCTYPE_HASH, true);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n",
+				     ret);
+			return ret;
+		}
+	}
+
+	if (eth_conf->rxmode.hw_vlan_filter)
+		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+
 	/* update the current status */
 	dpaa2_dev_link_update(dev, 0);
 
@@ -662,7 +687,7 @@ dpaa2_interrupt_handler(void *param)
 		dpaa2_dev_link_update(dev, 0);
 		/* calling all the apps registered for link status event */
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-					      NULL, NULL);
+					      NULL);
 	}
 out:
 	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
@@ -764,16 +789,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 			  "code = %d\n", ret);
 		return ret;
 	}
-	/* VLAN Offload Settings */
-	if (priv->max_vlan_filters) {
-		ret = dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
-		if (ret) {
-			PMD_INIT_LOG(ERR, "Error to dpaa2_vlan_offload_set:"
-				     "code = %d\n", ret);
-			return ret;
-		}
-	}
-
 	/* if the interrupts were configured on this devices*/
 	if (intr_handle && (intr_handle->fd) &&
@@ -974,7 +989,8 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+				+ VLAN_TAG_SIZE;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -992,11 +1008,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
 
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
 	/* Set the Max Rx frame length as 'mtu' +
 	 * Maximum Ethernet header length
 	 */
 	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
-					mtu + ETH_VLAN_HLEN);
+					frame_size);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "setting the max frame length failed");
 		return -1;
@@ -1667,6 +1685,8 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 
 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
 		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
+	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
+		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
 	else
 		return -EINVAL;
 
@@ -1676,6 +1696,11 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 	cfg.destination.id = dpcon_id;
 	cfg.destination.priority = queue_conf->ev.priority;
 
+	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
+		cfg.destination.hold_active = 1;
+	}
+
 	options |= DPNI_QUEUE_OPT_USER_CTX;
 	cfg.user_context = (uint64_t)(dpaa2_ethq);