X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_ethdev.c;h=bf4232a98ff0d174e80c2fe859b1a2937ff906d6;hb=a518584d1fcfe377c86f518c917a205b692c838c;hp=4095a068aaae265f7dd54eca6de13fe5337aa4d4;hpb=21c8bb4928c9dc61746eb0cf4079eb11124c5c3a;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 4095a068aa..bf4232a98f 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -50,7 +50,7 @@
 
 /* DPDK headers don't like -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include
 #include
@@ -60,7 +60,7 @@
 #include
 #include
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif
 
 #include "mlx5.h"
@@ -461,7 +461,7 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
 	if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
 		return -1;
 	tmp &= keep;
-	tmp |= flags;
+	tmp |= (flags & (~keep));
 	return priv_set_sysfs_ulong(priv, "flags", tmp);
 }
 
@@ -583,8 +583,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
 		  DEV_RX_OFFLOAD_UDP_CKSUM |
 		  DEV_RX_OFFLOAD_TCP_CKSUM) :
-		 0);
-	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+		 0) |
+		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0);
+	if (!priv->mps)
+		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
 	if (priv->hw_csum)
 		info->tx_offload_capa |=
 			(DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -724,6 +726,9 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	unsigned int i;
 	uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
 		mlx5_rx_burst;
+	unsigned int max_frame_len;
+	int rehash;
+	int restart = priv->started;
 
 	if (mlx5_is_secondary())
 		return -E_RTE_SECONDARY;
@@ -737,7 +742,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		goto out;
 	} else
 		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
-	priv->mtu = mtu;
 	/* Temporarily replace RX handler with a fake one, assuming it has not
 	 * been copied elsewhere. */
 	dev->rx_pkt_burst = removed_rx_burst;
@@ -745,28 +749,94 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	 * removed_rx_burst() instead. */
 	rte_wmb();
 	usleep(1000);
+	/* MTU does not include header and CRC. */
+	max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
+	/* Check if at least one queue is going to need a SGE update. */
+	for (i = 0; i != priv->rxqs_n; ++i) {
+		struct rxq *rxq = (*priv->rxqs)[i];
+		unsigned int mb_len;
+		unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
+		unsigned int sges_n;
+
+		if (rxq == NULL)
+			continue;
+		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+		/*
+		 * Determine the number of SGEs needed for a full packet
+		 * and round it to the next power of two.
+		 */
+		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		if (sges_n != rxq->sges_n)
+			break;
+	}
+	/*
+	 * If all queues have the right number of SGEs, a simple rehash
+	 * of their buffers is enough, otherwise SGE information can only
+	 * be updated in a queue by recreating it. All resources that depend
+	 * on queues (flows, indirection tables) must be recreated as well in
+	 * that case.
+	 */
+	rehash = (i == priv->rxqs_n);
+	if (!rehash) {
+		/* Clean up everything as with mlx5_dev_stop(). */
+		priv_special_flow_disable_all(priv);
+		priv_mac_addrs_disable(priv);
+		priv_destroy_hash_rxqs(priv);
+		priv_fdir_disable(priv);
+		priv_dev_interrupt_handler_uninstall(priv, dev);
+	}
+recover:
 	/* Reconfigure each RX queue. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		struct rxq *rxq = (*priv->rxqs)[i];
-		unsigned int mb_len;
-		unsigned int max_frame_len;
+		struct rxq_ctrl *rxq_ctrl =
+			container_of(rxq, struct rxq_ctrl, rxq);
 		int sp;
+		unsigned int mb_len;
+		unsigned int tmp;
 
 		if (rxq == NULL)
 			continue;
-		/* Calculate new maximum frame length according to MTU and
-		 * toggle scattered support (sp) if necessary. */
-		max_frame_len = (priv->mtu + ETHER_HDR_LEN +
-				 (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
 		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
 		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+		/* Toggle scattered support (sp) if necessary. */
 		sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
-		if (sp) {
-			ERROR("%p: RX scatter is not supported", (void *)dev);
-			ret = ENOTSUP;
-			goto out;
+		/* Provide new values to rxq_setup(). */
+		dev->data->dev_conf.rxmode.jumbo_frame = sp;
+		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
+		if (rehash)
+			ret = rxq_rehash(dev, rxq_ctrl);
+		else
+			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
					     rxq_ctrl->socket, NULL, rxq->mp);
+		if (!ret)
+			continue;
+		/* Attempt to roll back in case of error. */
+		tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
+		if (max_frame_len != tmp) {
+			max_frame_len = tmp;
+			goto recover;
 		}
+		/* Double fault, disable RX. */
+		break;
+	}
+	/*
+	 * Use a safe RX burst function in case of error, otherwise mimic
+	 * mlx5_dev_start().
+	 */
+	if (ret) {
+		ERROR("unable to reconfigure RX queues, RX disabled");
+		rx_func = removed_rx_burst;
+	} else if (restart &&
+		   !rehash &&
+		   !priv_create_hash_rxqs(priv) &&
+		   !priv_rehash_flows(priv)) {
+		if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
+			priv_fdir_enable(priv);
+		priv_dev_interrupt_handler_install(priv, dev);
 	}
+	priv->mtu = mtu;
 	/* Burst functions can now be called again. */
 	rte_wmb();
 	dev->rx_pkt_burst = rx_func;
@@ -1045,7 +1115,7 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
 	rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
 	priv->pending_alarm = 0;
 	priv->intr_handle.fd = 0;
-	priv->intr_handle.type = 0;
+	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
 }
 
 /**
@@ -1081,7 +1151,7 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
 /**
  * Change the link state (UP / DOWN).
  *
- * @param dev
+ * @param priv
  *   Pointer to Ethernet device structure.
 * @param up
 *   Nonzero for link up, otherwise link down.
@@ -1099,8 +1169,8 @@ priv_set_link(struct priv *priv, int up)
 		err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
 		if (err)
 			return err;
-		dev->rx_pkt_burst = mlx5_rx_burst;
-		dev->tx_pkt_burst = mlx5_tx_burst;
+		priv_select_tx_function(priv);
+		priv_select_rx_function(priv);
 	} else {
 		err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
 		if (err)
@@ -1242,11 +1312,11 @@ mlx5_secondary_data_setup(struct priv *priv)
 		txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl), 0,
					     primary_txq_ctrl->socket);
 		if (txq_ctrl != NULL) {
-			if (txq_setup(priv->dev,
-				      primary_txq_ctrl,
-				      primary_txq->elts_n,
-				      primary_txq_ctrl->socket,
-				      NULL) == 0) {
+			if (txq_ctrl_setup(priv->dev,
+					   primary_txq_ctrl,
+					   1 << primary_txq->elts_n,
+					   primary_txq_ctrl->socket,
+					   NULL) == 0) {
 				txq_ctrl->txq.stats.idx =
					primary_txq->stats.idx;
 				tx_queues[i] = &txq_ctrl->txq;
@@ -1263,7 +1333,9 @@ mlx5_secondary_data_setup(struct priv *priv)
 	}
 	/* RX queues. */
 	for (i = 0; i != nb_rx_queues; ++i) {
-		struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i];
+		struct rxq_ctrl *primary_rxq =
+			container_of((*sd->primary_priv->rxqs)[i],
+				     struct rxq_ctrl, rxq);
 
 		if (primary_rxq == NULL)
 			continue;
@@ -1290,13 +1362,11 @@ mlx5_secondary_data_setup(struct priv *priv)
 	rte_mb();
 	priv->dev->data = &sd->data;
 	rte_mb();
-	priv->dev->tx_pkt_burst = mlx5_tx_burst;
-	priv->dev->rx_pkt_burst = removed_rx_burst;
+	priv_select_tx_function(priv);
+	priv_select_rx_function(priv);
 	priv_unlock(priv);
 end:
 	/* More sanity checks. */
-	assert(priv->dev->tx_pkt_burst == mlx5_tx_burst);
-	assert(priv->dev->rx_pkt_burst == removed_rx_burst);
 	assert(priv->dev->data == &sd->data);
 	rte_spinlock_unlock(&sd->lock);
 	return priv;
@@ -1307,3 +1377,38 @@ error:
 	rte_spinlock_unlock(&sd->lock);
 	return NULL;
 }
+
+/**
+ * Configure the TX function to use.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+void
+priv_select_tx_function(struct priv *priv)
+{
+	priv->dev->tx_pkt_burst = mlx5_tx_burst;
+	/* Display warning for unsupported configurations. */
+	if (priv->sriov && priv->mps)
+		WARN("multi-packet send WQE cannot be used on a SR-IOV setup");
+	/* Select appropriate TX function. */
+	if ((priv->sriov == 0) && priv->mps && priv->txq_inline) {
+		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+		DEBUG("selected MPW inline TX function");
+	} else if ((priv->sriov == 0) && priv->mps) {
+		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+		DEBUG("selected MPW TX function");
+	}
+}
+
+/**
+ * Configure the RX function to use.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+void
+priv_select_rx_function(struct priv *priv)
+{
+	priv->dev->rx_pkt_burst = mlx5_rx_burst;
+}
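The MTU path above sizes RX scatter/gather from the new frame length and the mbuf data room: it rehashes a queue when the required SGE count is unchanged and recreates it otherwise. A minimal standalone sketch of that arithmetic follows; the constants and the local log2above() helper are simplified stand-ins chosen for illustration, not the DPDK definitions.

/*
 * Standalone sketch of the SGE sizing used in mlx5_dev_set_mtu() above.
 * Constants and the local log2above() helper are assumed values for
 * illustration only.
 */
#include <stdio.h>

#define ETHER_HDR_LEN 14u         /* assumed Ethernet header size */
#define ETHER_CRC_LEN 4u          /* assumed CRC size */
#define RTE_PKTMBUF_HEADROOM 128u /* assumed mbuf headroom */

/* Return log2 of the next power of two >= v (local helper). */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	unsigned int mtu = 9000;    /* example MTU */
	unsigned int mb_len = 2048; /* example mbuf data room size */
	/* MTU does not include the Ethernet header and CRC. */
	unsigned int max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
	unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
	/* SGEs needed for a full frame, rounded up to a power of two. */
	unsigned int sges_n = log2above((size / mb_len) + !!(size % mb_len));

	printf("frame %u bytes, mbuf room %u bytes -> %u SGE(s) (sges_n=%u)\n",
	       max_frame_len, mb_len, 1u << sges_n, sges_n);
	return 0;
}

With these example numbers a 9018-byte frame needs 5 data segments, rounded up to 8 (sges_n = 3); when sges_n matches the queue's current value the patch only calls rxq_rehash(), otherwise the queue is rebuilt through rxq_ctrl_setup().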