X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_tx.h;h=c4b8271f6fb3fe3c779ead631a55eb72ee32501f;hb=e3032e9c73f4791e2cdbab5a37f322c288d7e2aa;hp=7fed0e7cb93b58538d95490d0c673ff8aa15fdaa;hpb=334ed198ab4d22f249d7b8274568253df33914a6;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 7fed0e7cb9..c4b8271f6f 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -14,6 +14,7 @@
 #include
 #include
+#include
 #include
 
 #include "mlx5.h"
 
@@ -160,10 +161,7 @@ struct mlx5_txq_data {
 	int32_t ts_offset; /* Timestamp field dynamic offset. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
-#ifndef RTE_ARCH_64
-	rte_spinlock_t *uar_lock;
-	/* UAR access lock required for 32bit implementations */
-#endif
+	struct mlx5_uar_data uar_data;
 	struct rte_mbuf *elts[0];
 	/* Storage for queued packets, must be the last field. */
 } __rte_cache_aligned;
@@ -184,7 +182,6 @@ struct mlx5_txq_ctrl {
 	struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
-	void *bf_reg; /* BlueFlame register from Verbs. */
 	uint16_t dump_file_n; /* Number of dump files. */
 	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 	uint32_t hairpin_status; /* Hairpin binding status. */
@@ -204,7 +201,6 @@ int mlx5_tx_hairpin_queue_setup
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
 void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
 int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
@@ -225,8 +221,6 @@ void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
 
 /* mlx5_tx.c */
 
-uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
-			  uint16_t pkts_n);
 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
 			       unsigned int olx __rte_unused);
 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
@@ -289,114 +283,10 @@ MLX5_TXOFF_PRE_DECL(mci_mpw);
 MLX5_TXOFF_PRE_DECL(mc_mpw);
 MLX5_TXOFF_PRE_DECL(i_mpw);
 
-static __rte_always_inline uint64_t *
+static __rte_always_inline struct mlx5_uar_data *
 mlx5_tx_bfreg(struct mlx5_txq_data *txq)
 {
-	return MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
-}
-
-/**
- * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
- * 64bit architectures.
- *
- * @param val
- *   value to write in CPU endian format.
- * @param addr
- *   Address to write to.
- * @param lock
- *   Address of the lock to use for that UAR access.
- */
-static __rte_always_inline void
-__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
-			   rte_spinlock_t *lock __rte_unused)
-{
-#ifdef RTE_ARCH_64
-	*(uint64_t *)addr = val;
-#else /* !RTE_ARCH_64 */
-	rte_spinlock_lock(lock);
-	*(uint32_t *)addr = val;
-	rte_io_wmb();
-	*((uint32_t *)addr + 1) = val >> 32;
-	rte_spinlock_unlock(lock);
-#endif
-}
-
-/**
- * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
- * 64bit architectures while guaranteeing the order of execution with the
- * code being executed.
- *
- * @param val
- *   value to write in CPU endian format.
- * @param addr
- *   Address to write to.
- * @param lock
- *   Address of the lock to use for that UAR access.
- */
-static __rte_always_inline void
-__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
-{
-	rte_io_wmb();
-	__mlx5_uar_write64_relaxed(val, addr, lock);
-}
-
-/* Assist macros, used instead of directly calling the functions they wrap. */
-#ifdef RTE_ARCH_64
-#define mlx5_uar_write64_relaxed(val, dst, lock) \
-		__mlx5_uar_write64_relaxed(val, dst, NULL)
-#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
-#else
-#define mlx5_uar_write64_relaxed(val, dst, lock) \
-		__mlx5_uar_write64_relaxed(val, dst, lock)
-#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
-#endif
-
-/**
- * Query LKey from a packet buffer for Tx.
- *
- * @param txq
- *   Pointer to Tx queue structure.
- * @param mb
- *   Pointer to mbuf.
- *
- * @return
- *   Searched LKey on success, UINT32_MAX on no match.
- */
-static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-{
-	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
-	struct mlx5_txq_ctrl *txq_ctrl =
-			container_of(txq, struct mlx5_txq_ctrl, txq);
-
-	/* Take slower bottom-half on miss. */
-	return mlx5_mr_mb2mr(mr_ctrl, mb, &txq_ctrl->priv->mp_id);
-}
-
-/**
- * Ring TX queue doorbell and flush the update if requested.
- *
- * @param txq
- *   Pointer to TX queue structure.
- * @param wqe
- *   Pointer to the last WQE posted in the NIC.
- * @param cond
- *   Request for write memory barrier after BlueFlame update.
- */
-static __rte_always_inline void
-mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
-		       int cond)
-{
-	uint64_t *dst = mlx5_tx_bfreg(txq);
-	volatile uint64_t *src = ((volatile uint64_t *)wqe);
-
-	rte_io_wmb();
-	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
-	/* Ensure ordering between DB record and BF copy. */
-	rte_wmb();
-	mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
-	if (cond)
-		rte_wmb();
+	return &MLX5_PROC_PRIV(txq->port_id)->uar_table[txq->idx];
 }
 
 /**
@@ -410,7 +300,8 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
 static __rte_always_inline void
 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 {
-	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
+	mlx5_doorbell_ring(mlx5_tx_bfreg(txq), *(volatile uint64_t *)wqe,
+			   txq->wqe_ci, txq->qp_db, 1);
 }
 
 /**
@@ -1178,7 +1069,6 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 	uint8_t *psrc;
 
 	MLX5_ASSERT(len);
-	MLX5_ASSERT(must <= len);
 	do {
 		/* Allow zero length packets, must check first. */
 		dlen = rte_pktmbuf_data_len(loc->mbuf);
@@ -1205,9 +1095,11 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 			if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
 				/*
 				 * Copy only the minimal required
-				 * part of the data buffer.
+				 * part of the data buffer. Limit amount
+				 * of data to be copied to the length of
+				 * available space.
 				 */
-				len = diff;
+				len = RTE_MIN(len, diff);
 			}
 		}
 		continue;
@@ -1370,7 +1262,7 @@ mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
 {
 	MLX5_ASSERT(len);
 	dseg->bcount = rte_cpu_to_be_32(len);
-	dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+	dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
 	dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
 }
 
@@ -1406,7 +1298,7 @@ mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
 	MLX5_ASSERT(len);
 	if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
 		dseg->bcount = rte_cpu_to_be_32(len);
-		dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+		dseg->lkey = mlx5_mr_mb2mr(&txq->mr_ctrl, loc->mbuf);
 		dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
 		return;
 
@@ -1816,7 +1708,6 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
 		     inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
 		     inlen > (dlen + vlan)))
 		return MLX5_TXCMP_CODE_ERROR;
-	MLX5_ASSERT(inlen >= txq->inlen_mode);
 	/*
 	 * Check whether there are enough free WQEBBs:
 	 * - Control Segment
@@ -2040,7 +1931,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
 
 		MLX5_ASSERT(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
 		MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
-		inlen = txq->inlen_mode;
+		inlen = RTE_MIN(txq->inlen_mode, inlen);
 	} else if (vlan && !txq->vlan_en) {
 		/*
 		 * VLAN insertion is requested and hardware does not
@@ -2053,6 +1944,8 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
 	} else {
 		goto do_first;
 	}
+	if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+		goto do_build;
 	/*
 	 * Now we know the minimal amount of data is requested
 	 * to inline. Check whether we should inline the buffers
@@ -2085,6 +1978,8 @@ do_first:
 			mbuf = NEXT(mbuf);
 			/* There should be not end of packet. */
 			MLX5_ASSERT(mbuf);
+			if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+				break;
 			nxlen = inlen + rte_pktmbuf_data_len(mbuf);
 		} while (unlikely(nxlen < txq->inlen_send));
 	}
@@ -2112,6 +2007,7 @@ do_align:
 	 * Estimate the number of Data Segments conservatively,
 	 * supposing no any mbufs is being freed during inlining.
 	 */
+do_build:
 	MLX5_ASSERT(inlen <= txq->inlen_send);
 	ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
 				       MLX5_ESEG_MIN_INLINE_SIZE +
@@ -3683,8 +3579,10 @@ enter_send_single:
 	 * packets are coming and the write barrier will be issued on
 	 * the next burst (after descriptor writing, at least).
 	 */
-	mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
-			       (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
+	mlx5_doorbell_ring(mlx5_tx_bfreg(txq),
+			   *(volatile uint64_t *)loc.wqe_last, txq->wqe_ci,
+			   txq->qp_db, !txq->db_nc &&
+			   (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
 	/* Not all of the mbufs may be stored into elts yet. */
 	part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
 	if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
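
Note: the hunks above drop the PMD-local doorbell helpers (__mlx5_uar_write64_relaxed(), __mlx5_uar_write64(), the mlx5_uar_write64* assist macros and mlx5_tx_dbrec_cond_wmb()) in favor of mlx5_doorbell_ring() operating on struct mlx5_uar_data, whose definitions live in the mlx5 common code and are not shown in this diff. The sketch below only reconstructs, from the code removed here, the sequence the shared helper is expected to cover; sketch_uar_data, sketch_doorbell_ring and the db/sl_p field names are hypothetical stand-ins, not the actual drivers/common/mlx5 definitions.

#include <stdint.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_io.h>
#include <rte_spinlock.h>

/*
 * Illustrative stand-in for the UAR descriptor this diff switches to.
 * Only the two pieces of state the removed Tx code needed are modeled.
 */
struct sketch_uar_data {
	uint64_t *db;		/* UAR/BlueFlame register address. */
	rte_spinlock_t *sl_p;	/* Serializes split writes on 32-bit CPUs. */
};

/*
 * Doorbell sequence equivalent to the removed mlx5_tx_dbrec_cond_wmb():
 *  1. rte_io_wmb() so the WQE contents are visible before the DB record.
 *  2. Store the WQE counter into the doorbell record in big-endian.
 *  3. rte_wmb() to order the DB record against the BlueFlame copy.
 *  4. Write the first 8 bytes of the last WQE to the UAR register;
 *     32-bit CPUs cannot do this in one store, so the removed code split
 *     it into two 32-bit writes under a spinlock, reproduced here.
 *  5. Optionally flush with a final rte_wmb() when 'cond' is set.
 */
static inline void
sketch_doorbell_ring(struct sketch_uar_data *uar, uint64_t wqe_first8,
		     uint32_t wqe_ci, volatile uint32_t *db_rec, int cond)
{
	rte_io_wmb();
	*db_rec = rte_cpu_to_be_32(wqe_ci);
	/* Ensure ordering between DB record and BlueFlame copy. */
	rte_wmb();
#ifdef RTE_ARCH_64
	*uar->db = wqe_first8;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(uar->sl_p);
	*(volatile uint32_t *)uar->db = (uint32_t)wqe_first8;
	rte_io_wmb();
	*((volatile uint32_t *)uar->db + 1) = (uint32_t)(wqe_first8 >> 32);
	rte_spinlock_unlock(uar->sl_p);
#endif
	if (cond)
		rte_wmb();
}

Keeping the 32-bit split write and its spinlock behind one shared helper is what lets this diff drop the uar_lock field from struct mlx5_txq_data, the bf_reg pointer from struct mlx5_txq_ctrl, and the per-PMD assist macros.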