diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 2b75c07fad..781ee256df 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -682,13 +682,12 @@ mlx4_rxq_detach(struct rxq *rxq)
 uint64_t
 mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
-			    DEV_RX_OFFLOAD_KEEP_CRC |
-			    DEV_RX_OFFLOAD_JUMBO_FRAME |
-			    DEV_RX_OFFLOAD_RSS_HASH;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+			    RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+			    RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 	if (priv->hw_csum)
-		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+		offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
 	return offloads;
 }
 
@@ -704,7 +703,7 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
 uint64_t
 mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	(void)priv;
 	return offloads;
@@ -753,6 +752,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	int ret;
 	uint32_t crc_present;
 	uint64_t offloads;
+	uint32_t max_rx_pktlen;
 
 	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
@@ -785,7 +785,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* By default, FCS (CRC) is stripped by hardware. */
 	crc_present = 0;
-	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
 		if (priv->hw_fcs_strip) {
 			crc_present = 1;
 		} else {
@@ -816,9 +816,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.elts = elts,
 		/* Toggle Rx checksum offload if hardware supports it. */
 		.csum = priv->hw_csum &&
-			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.csum_l2tun = priv->hw_csum_l2tun &&
-			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+			      (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
 		.crc_present = crc_present,
 		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
@@ -829,13 +829,11 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	dev->data->rx_queues[idx] = rxq;
 	/* Enable scattered packets support for this queue if necessary. */
 	MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
-	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
-	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
+	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+	if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		;
-	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		uint32_t size =
-			RTE_PKTMBUF_HEADROOM +
-			dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+		uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
 		uint32_t sges_n;
 
 		/*
@@ -847,21 +845,19 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		/* Make sure sges_n did not overflow. */
 		size = mb_len * (1 << rxq->sges_n);
 		size -= RTE_PKTMBUF_HEADROOM;
-		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+		if (size < max_rx_pktlen) {
 			rte_errno = EOVERFLOW;
 			ERROR("%p: too many SGEs (%u) needed to handle"
 			      " requested maximum packet size %u",
 			      (void *)dev,
-			      1 << sges_n,
-			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
+			      1 << sges_n, max_rx_pktlen);
 			goto error;
 		}
 	} else {
 		WARN("%p: the requested maximum Rx packet size (%u) is"
 		     " larger than a single mbuf (%u) and scattered"
 		     " mode has not been requested",
-		     (void *)dev,
-		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
+		     (void *)dev, max_rx_pktlen,
 		     mb_len - RTE_PKTMBUF_HEADROOM);
 	}
 	DEBUG("%p: maximum number of segments per packet: %u",
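For context, the patch replaces every read of the removed rxmode.max_rx_pkt_len field with a frame length derived from the device MTU: the MTU covers the L3-and-up payload, so the Ethernet header and CRC are added back to get the full on-wire frame size. Below is a minimal standalone sketch of that computation and of the single-mbuf-vs-scatter decision mirrored from mlx4_rx_queue_setup() above. The constants are copied from DPDK's rte_ether.h and rte_mbuf_core.h so the sketch needs no DPDK headers; the mtu and mb_len values and the main() wrapper are illustrative assumptions, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Values as defined in DPDK's rte_ether.h / rte_mbuf_core.h, inlined
 * here so the sketch is self-contained. */
#define RTE_ETHER_HDR_LEN 14     /* dst MAC + src MAC + EtherType */
#define RTE_ETHER_CRC_LEN 4      /* frame check sequence (FCS) */
#define RTE_PKTMBUF_HEADROOM 128 /* default mbuf headroom */

int
main(void)
{
	uint16_t mtu = 1500;    /* illustrative; normally dev->data->mtu */
	uint32_t mb_len = 2048; /* illustrative mbuf data-room size */
	uint32_t max_rx_pktlen;

	/* Same formula the patch introduces: full frame size is the MTU
	 * plus the Ethernet header and CRC. */
	max_rx_pktlen = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	printf("max_rx_pktlen = %u\n", max_rx_pktlen); /* 1518 for MTU 1500 */

	/* Mirrors the scatter decision in mlx4_rx_queue_setup(): a frame
	 * fits in a single mbuf only if it is no larger than the data
	 * room left after the headroom; otherwise scattered Rx
	 * (RTE_ETH_RX_OFFLOAD_SCATTER) must be enabled. */
	if (max_rx_pktlen <= mb_len - RTE_PKTMBUF_HEADROOM)
		printf("single-mbuf Rx is sufficient\n");
	else
		printf("scattered Rx offload needed\n");
	return 0;
}

With the illustrative values above, 1518 <= 2048 - 128 holds, so the queue would stay in single-mbuf mode; raising the MTU toward a jumbo-frame size flips the comparison, which is exactly the case the SGE sizing and the EOVERFLOW/WARN paths in the hunks above handle.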