From: Adrien Mazarguil
Date: Wed, 8 Jun 2016 09:43:27 +0000 (+0200)
Subject: net/mlx: retrieve mbuf size through proper function
X-Git-Tag: spdx-start~6524
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=78d9c95d91567f238527e0e9a505041dceef24a3;p=dpdk.git

net/mlx: retrieve mbuf size through proper function

No need to allocate a mbuf for that.

Signed-off-by: Adrien Mazarguil
---

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 4b2c290415..9e94630aa0 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -196,7 +196,6 @@ struct rxq {
 	unsigned int sp:1; /* Use scattered RX elements. */
 	unsigned int csum:1; /* Enable checksum offloading. */
 	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
-	uint32_t mb_len; /* Length of a mp-issued mbuf. */
 	struct mlx4_rxq_stats stats; /* RX queue counters. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	struct ibv_exp_res_domain *rd; /* Resource Domain. */
@@ -3159,7 +3158,6 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rep->ol_flags = -1;
 #endif
 		assert(rep->buf_len == seg->buf_len);
-		assert(rep->buf_len == rxq->mb_len);
 		/* Reconfigure sge to use rep instead of seg. */
 		assert(sge->lkey == rxq->mr->lkey);
 		sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
@@ -3580,6 +3578,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 	unsigned int i, k;
 	struct ibv_exp_qp_attr mod;
 	struct ibv_recv_wr *bad_wr;
+	unsigned int mb_len;
 	int err;
 	int parent = (rxq == &priv->rxq_parent);
 
@@ -3588,6 +3587,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 		      (void *)dev, (void *)rxq);
 		return EINVAL;
 	}
+	mb_len = rte_pktmbuf_data_room_size(rxq->mp);
 	DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
 	/* Number of descriptors and mbufs currently allocated. */
 	desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1));
@@ -3602,9 +3602,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 		rxq->csum_l2tun = tmpl.csum_l2tun;
 	}
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc_n /= MLX4_PMD_SGE_WR_N;
 	} else
@@ -3795,7 +3796,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 	} attr;
 	enum ibv_exp_query_intf_status status;
 	struct ibv_recv_wr *bad_wr;
-	struct rte_mbuf *buf;
+	unsigned int mb_len;
 	int ret = 0;
 	int parent = (rxq == &priv->rxq_parent);
 
@@ -3811,31 +3812,22 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		desc = 1;
 		goto skip_mr;
 	}
+	mb_len = rte_pktmbuf_data_room_size(mp);
 	if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) {
 		ERROR("%p: invalid number of RX descriptors (must be a"
 		      " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N);
 		return EINVAL;
 	}
-	/* Get mbuf length. */
-	buf = rte_pktmbuf_alloc(mp);
-	if (buf == NULL) {
-		ERROR("%p: unable to allocate mbuf", (void *)dev);
-		return ENOMEM;
-	}
-	tmpl.mb_len = buf->buf_len;
-	assert((rte_pktmbuf_headroom(buf) +
-		rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
-	assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
-	rte_pktmbuf_free(buf);
 	/* Toggle RX checksum offload if hardware supports it. */
 	if (priv->hw_csum)
 		tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	if (priv->hw_csum_l2tun)
 		tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc /= MLX4_PMD_SGE_WR_N;
 	}
@@ -4872,6 +4864,7 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* Reconfigure each RX queue. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		struct rxq *rxq = (*priv->rxqs)[i];
+		unsigned int mb_len;
 		unsigned int max_frame_len;
 		int sp;
 
@@ -4881,7 +4874,9 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		 * toggle scattered support (sp) if necessary. */
 		max_frame_len = (priv->mtu + ETHER_HDR_LEN +
 				 (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
-		sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
+		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+		sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
 		/* Provide new values to rxq_setup(). */
 		dev->data->dev_conf.rxmode.jumbo_frame = sp;
 		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 243c35a7a7..47d0862ee3 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -717,6 +717,7 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* Reconfigure each RX queue. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		struct rxq *rxq = (*priv->rxqs)[i];
+		unsigned int mb_len;
 		unsigned int max_frame_len;
 		int sp;
 
@@ -726,7 +727,9 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		 * toggle scattered support (sp) if necessary. */
 		max_frame_len = (priv->mtu + ETHER_HDR_LEN +
 				 (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
-		sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
+		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+		sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
 		/* Provide new values to rxq_setup(). */
 		dev->data->dev_conf.rxmode.jumbo_frame = sp;
 		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index cbb017bb96..469ba987b0 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -973,6 +973,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 	struct rte_mbuf **pool;
 	unsigned int i, k;
 	struct ibv_exp_wq_attr mod;
+	unsigned int mb_len = rte_pktmbuf_data_room_size(rxq->mp);
 	int err;
 
 	DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
@@ -989,9 +990,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 		rxq->csum_l2tun = tmpl.csum_l2tun;
 	}
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc_n /= MLX5_PMD_SGE_WR_N;
 	} else
@@ -1156,7 +1158,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		struct ibv_exp_wq_init_attr wq;
 	} attr;
 	enum ibv_exp_query_intf_status status;
-	struct rte_mbuf *buf;
+	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
 	int ret = 0;
 	unsigned int i;
 	unsigned int cq_size = desc;
@@ -1167,26 +1169,16 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		      " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
 		return EINVAL;
 	}
-	/* Get mbuf length. */
-	buf = rte_pktmbuf_alloc(mp);
-	if (buf == NULL) {
-		ERROR("%p: unable to allocate mbuf", (void *)dev);
-		return ENOMEM;
-	}
-	tmpl.mb_len = buf->buf_len;
-	assert((rte_pktmbuf_headroom(buf) +
-		rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
-	assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
-	rte_pktmbuf_free(buf);
 	/* Toggle RX checksum offload if hardware supports it. */
 	if (priv->hw_csum)
 		tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	if (priv->hw_csum_l2tun)
 		tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc /= MLX5_PMD_SGE_WR_N;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index f64cb37a33..22d7f79884 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1005,7 +1005,6 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rep->ol_flags = -1;
 #endif
 		assert(rep->buf_len == seg->buf_len);
-		assert(rep->buf_len == rxq->mb_len);
 		/* Reconfigure sge to use rep instead of seg. */
 		assert(sge->lkey == rxq->mr->lkey);
 		sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d522f70657..dd3003cfb0 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -121,7 +121,6 @@ struct rxq {
 		struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
 		struct rxq_elt (*no_sp)[]; /* RX elements. */
 	} elts;
-	uint32_t mb_len; /* Length of a mp-issued mbuf. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	struct mlx5_rxq_stats stats; /* RX queue counters. */
 	struct ibv_exp_res_domain *rd; /* Resource Domain. */
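
For reference, the pattern removed above obtained the per-mbuf buffer size by allocating and immediately freeing an mbuf (which could fail with ENOMEM), whereas the replacement reads the size recorded in the mempool's private data via rte_pktmbuf_data_room_size(). Below is a minimal standalone sketch of both approaches; it is not part of the patch, and the helper name mbuf_data_room() is made up for illustration only.

	#include <assert.h>
	#include <rte_config.h>
	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	/* Illustrative helper: return the data room size of mbufs created
	 * from mempool "mp" (the value used above to decide whether
	 * scattered RX is needed for a given max_rx_pkt_len). */
	static unsigned int
	mbuf_data_room(struct rte_mempool *mp)
	{
		/* Old approach (removed by this patch): allocate a throwaway
		 * mbuf just to read buf_len, then free it.
		 *
		 *   struct rte_mbuf *buf = rte_pktmbuf_alloc(mp);
		 *   if (buf == NULL)
		 *           return 0;  // allocation can fail (ENOMEM)
		 *   unsigned int len = buf->buf_len;
		 *   rte_pktmbuf_free(buf);
		 *   return len;
		 */

		/* New approach: read the room size stored in the mempool's
		 * private area at pool creation time; no allocation needed
		 * and no failure path. */
		unsigned int mb_len = rte_pktmbuf_data_room_size(mp);

		/* The data room includes the headroom reserved in front of
		 * the packet data, hence the assertion kept by the patch. */
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		return mb_len;
	}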