X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=d18561740f27e34b42da1fd2aa7f3d8009bfac4b;hb=fd350d3c9a13b5ebcb41741870c3f09b6920033f;hp=691ea0713b8b0f92bbfb144f377e1cf15c81c3b2;hpb=974f1e7ef146ffa112ab1a2afaf5db0360bc159f;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 691ea0713b..d18561740f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -102,7 +102,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 uint64_t
 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
 			     DEV_TX_OFFLOAD_VLAN_INSERT);
 	struct mlx5_dev_config *config = &priv->config;
@@ -113,16 +113,24 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 			     DEV_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+	if (config->swp) {
+		if (config->hw_csum)
+			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		if (config->tso)
+			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 		if (config->tso)
 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
-		if (config->swp)
-			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
-				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
 	}
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (config->dv_flow_en)
+		offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
+#endif
 	return offloads;
 }
 
@@ -147,7 +155,7 @@ int
 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		    unsigned int socket, const struct rte_eth_txconf *conf)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
@@ -205,7 +213,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 {
 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
 	struct mlx5_txq_ctrl *txq_ctrl;
-	struct priv *priv;
+	struct mlx5_priv *priv;
 	unsigned int i;
 
 	if (txq == NULL)
@@ -238,7 +246,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 int
 mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i, j;
 	uintptr_t pages[priv->txqs_n];
 	unsigned int pages_n = 0;
@@ -250,6 +258,9 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 	struct mlx5_txq_ctrl *txq_ctrl;
 	int already_mapped;
 	size_t page_size = sysconf(_SC_PAGESIZE);
+#ifndef RTE_ARCH_64
+	unsigned int lock_idx;
+#endif
 
 	memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
 	/*
@@ -276,7 +287,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 		}
 		/* new address in reserved UAR address space. */
 		addr = RTE_PTR_ADD(priv->uar_base,
-				   uar_va & (MLX5_UAR_SIZE - 1));
+				   uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
 		if (!already_mapped) {
 			pages[pages_n++] = uar_va;
 			/* fixed mmap to specified address in reserved
@@ -300,6 +311,12 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 		else
 			assert(txq_ctrl->txq.bf_reg ==
 			       RTE_PTR_ADD((void *)addr, off));
+#ifndef RTE_ARCH_64
+		/* Assign a UAR lock according to UAR page number */
+		lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
+			   MLX5_UAR_PAGE_NUM_MASK;
+		txq->uar_lock = &priv->uar_lock[lock_idx];
+#endif
 	}
 	return 0;
 }
@@ -337,7 +354,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
 struct mlx5_txq_ibv *
 mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -429,7 +446,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		/* Move the QP to this state. */
 		.qp_state = IBV_QPS_INIT,
 		/* Primary port number. */
-		.port_num = priv->port
+		.port_num = 1,
 	};
 	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
 				   (IBV_QP_STATE | IBV_QP_PORT));
@@ -506,6 +523,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	rte_atomic32_inc(&txq_ibv->refcnt);
 	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
 		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+		DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
+			dev->data->port_id, txq_ctrl->uar_mmap_offset);
 	} else {
 		DRV_LOG(ERR,
 			"port %u failed to retrieve UAR info, invalid"
@@ -514,8 +533,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		rte_errno = EINVAL;
 		goto error;
 	}
-	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
 	txq_ibv->txq_ctrl = txq_ctrl;
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -545,7 +562,7 @@ error:
 struct mlx5_txq_ibv *
 mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *txq_ctrl;
 
 	if (idx >= priv->txqs_n)
@@ -553,12 +570,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 	if (!(*priv->txqs)[idx])
 		return NULL;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (txq_ctrl->ibv) {
+	if (txq_ctrl->ibv)
 		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
-		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-			dev->data->port_id, txq_ctrl->idx,
-			rte_atomic32_read(&txq_ctrl->ibv->refcnt));
-	}
 	return txq_ctrl->ibv;
 }
 
@@ -575,9 +588,6 @@ int
 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
 	assert(txq_ibv);
-	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		PORT_ID(txq_ibv->txq_ctrl->priv),
-		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
 		claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -613,7 +623,7 @@ mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
 int
 mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	int ret = 0;
 	struct mlx5_txq_ibv *txq_ibv;
 
@@ -634,7 +644,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
 static void
 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 {
-	struct priv *priv = txq_ctrl->priv;
+	struct mlx5_priv *priv = txq_ctrl->priv;
 	struct mlx5_dev_config *config = &priv->config;
 	const unsigned int max_tso_inline =
 		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
@@ -716,7 +726,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 				   max_tso_inline);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	txq_ctrl->txq.tunnel_en = config->tunnel_en;
+	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
 	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
 				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
 				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
@@ -744,7 +754,7 @@ struct mlx5_txq_ctrl *
 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_txconf *conf)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *tmpl;
 
 	tmpl = rte_calloc_socket("TXQ", 1,
@@ -778,8 +788,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
 	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
-	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-		idx, rte_atomic32_read(&tmpl->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 error:
@@ -801,7 +809,7 @@ error:
 struct mlx5_txq_ctrl *
 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *ctrl = NULL;
 
 	if ((*priv->txqs)[idx]) {
@@ -809,9 +817,6 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 				    txq);
 		mlx5_txq_ibv_get(dev, idx);
 		rte_atomic32_inc(&ctrl->refcnt);
-		DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
-			dev->data->port_id,
-			ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
 	}
 	return ctrl;
 }
@@ -830,15 +835,13 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *txq;
 	size_t page_size = sysconf(_SC_PAGESIZE);
 
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
-		txq->idx, rte_atomic32_read(&txq->refcnt));
 	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
 		txq->ibv = NULL;
 	if (priv->uar_base)
@@ -869,7 +872,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *txq;
 
 	if (!(*priv->txqs)[idx])
@@ -890,7 +893,7 @@ mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_txq_verify(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *txq;
 	int ret = 0;
 
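
Reviewer note (not part of the patch): the first hunks make mlx5_get_tx_port_offloads() advertise the software-parser (SWP) tunnel offloads whenever config->swp is set, independently of config->tunnel_en, and advertise DEV_TX_OFFLOAD_MATCH_METADATA when DV flow support is compiled in. Below is a minimal sketch of how an application would consume these capability bits; it is illustrative only, uses the DEV_TX_OFFLOAD_* flag names of this DPDK era, and the function name, port_id value and 1/1 queue layout are arbitrary example choices, not taken from the patch.

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative sketch: request the tunnel TSO / outer checksum offloads
 * only if the PMD reports them in tx_offload_capa. */
static int
configure_with_tunnel_tso(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;
	uint64_t wanted = DEV_TX_OFFLOAD_IP_TNL_TSO |
			  DEV_TX_OFFLOAD_UDP_TNL_TSO |
			  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &dev_info);
	/* Keep only the bits the port actually advertises. */
	conf.txmode.offloads = dev_info.tx_offload_capa & wanted;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

With this patch, a port for which config->swp is set reports the bits above even when tunnel_en is off, so the mask ends up non-zero and the Tx queues are created with swp_en set in txq_set_params().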