X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=f9bc4739cdfc10b7e2b29c7d191c205160073439;hb=d02cb0691299e81108622e41c2027b77597fb04b;hp=691ea0713b8b0f92bbfb144f377e1cf15c81c3b2;hpb=974f1e7ef146ffa112ab1a2afaf5db0360bc159f;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 691ea0713b..f9bc4739cd 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -113,15 +113,20 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 			     DEV_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+	if (config->swp) {
+		if (config->hw_csum)
+			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		if (config->tso)
+			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	}
+
 	if (config->tunnel_en) {
 		if (config->hw_csum)
 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso)
 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
-		if (config->swp)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	return offloads;
 }
@@ -250,6 +255,9 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 	struct mlx5_txq_ctrl *txq_ctrl;
 	int already_mapped;
 	size_t page_size = sysconf(_SC_PAGESIZE);
+#ifndef RTE_ARCH_64
+	unsigned int lock_idx;
+#endif
 
 	memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
 	/*
@@ -276,7 +284,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 		}
 		/* new address in reserved UAR address space. */
 		addr = RTE_PTR_ADD(priv->uar_base,
-				   uar_va & (MLX5_UAR_SIZE - 1));
+				   uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
 		if (!already_mapped) {
 			pages[pages_n++] = uar_va;
 			/* fixed mmap to specified address in reserved
@@ -300,6 +308,12 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 		else
 			assert(txq_ctrl->txq.bf_reg ==
 			       RTE_PTR_ADD((void *)addr, off));
+#ifndef RTE_ARCH_64
+		/* Assign a UAR lock according to UAR page number */
+		lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
+			   MLX5_UAR_PAGE_NUM_MASK;
+		txq->uar_lock = &priv->uar_lock[lock_idx];
+#endif
 	}
 	return 0;
 }
@@ -429,7 +443,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		/* Move the QP to this state. */
 		.qp_state = IBV_QPS_INIT,
 		/* Primary port number. */
-		.port_num = priv->port
+		.port_num = 1,
 	};
 	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
 				   (IBV_QP_STATE | IBV_QP_PORT));
@@ -506,6 +520,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	rte_atomic32_inc(&txq_ibv->refcnt);
 	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
 		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+		DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
+			dev->data->port_id, txq_ctrl->uar_mmap_offset);
 	} else {
 		DRV_LOG(ERR,
 			"port %u failed to retrieve UAR info, invalid"
@@ -514,8 +530,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		rte_errno = EINVAL;
 		goto error;
 	}
-	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
 	txq_ibv->txq_ctrl = txq_ctrl;
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -553,12 +567,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 	if (!(*priv->txqs)[idx])
 		return NULL;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (txq_ctrl->ibv) {
+	if (txq_ctrl->ibv)
 		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
-		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-			dev->data->port_id, txq_ctrl->idx,
-			rte_atomic32_read(&txq_ctrl->ibv->refcnt));
-	}
 	return txq_ctrl->ibv;
 }
 
@@ -575,9 +585,6 @@ int
 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
 	assert(txq_ibv);
-	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		PORT_ID(txq_ibv->txq_ctrl->priv),
-		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
 		claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -716,7 +723,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 					   max_tso_inline);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	txq_ctrl->txq.tunnel_en = config->tunnel_en;
+	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
 	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
 				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
 				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
@@ -778,8 +785,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
 	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
-	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-		idx, rte_atomic32_read(&tmpl->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 error:
@@ -809,9 +814,6 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 				    txq);
 		mlx5_txq_ibv_get(dev, idx);
 		rte_atomic32_inc(&ctrl->refcnt);
-		DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
-			dev->data->port_id,
-			ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
 	}
 	return ctrl;
 }
@@ -837,8 +839,6 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-		txq->idx, rte_atomic32_read(&txq->refcnt));
 	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
 		txq->ibv = NULL;
 	if (priv->uar_base)