X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_tx.h;h=89dac0c65a748acca19c99140e73a69b400c3af8;hb=c06f77aecee404c4bfb43a6e6c5f1f4949ca4264;hp=ad13b5e608a8b6a571375948d5ef9edc724e4097;hpb=5dfa003db53f6e901dce02dab594e230555c8f33;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index ad13b5e608..89dac0c65a 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -138,6 +138,8 @@ struct mlx5_txq_data {
 	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
 	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
 	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
+	uint16_t rt_timestamp:1; /* Realtime timestamp format. */
+	uint16_t wait_on_time:1; /* WQE with timestamp is supported. */
 	uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
 	uint16_t inlen_send; /* Ordinary send data inline size. */
 	uint16_t inlen_empw; /* eMPW max packet size to inline. */
@@ -157,6 +159,7 @@ struct mlx5_txq_data {
 	volatile uint32_t *cq_db; /* Completion queue doorbell. */
 	uint16_t port_id; /* Port ID of device. */
 	uint16_t idx; /* Queue index. */
+	uint64_t rt_timemask; /* Scheduling timestamp mask. */
 	uint64_t ts_mask; /* Timestamp flag dynamic mask. */
 	int32_t ts_offset; /* Timestamp field dynamic offset. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
@@ -166,17 +169,12 @@ struct mlx5_txq_data {
 	/* Storage for queued packets, must be the last field. */
 } __rte_cache_aligned;
 
-enum mlx5_txq_type {
-	MLX5_TXQ_TYPE_STANDARD, /* Standard Tx queue. */
-	MLX5_TXQ_TYPE_HAIRPIN, /* Hairpin Tx queue. */
-};
-
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
 	uint32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
-	enum mlx5_txq_type type; /* The txq ctrl type. */
+	bool is_hairpin; /* Whether TxQ type is Hairpin. */
 	unsigned int max_inline_data; /* Max inline data. */
 	unsigned int max_tso_header; /* Max TSO header size. */
 	struct mlx5_txq_obj *obj; /* Verbs/DevX queue object. */
@@ -221,8 +219,6 @@ void mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev);
 
 /* mlx5_tx.c */
 
-uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
-			  uint16_t pkts_n);
 void mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
 			       unsigned int olx __rte_unused);
 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
@@ -779,7 +775,7 @@ mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
  * compile time and may be used for optimization.
  */
 static __rte_always_inline void
-mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
+mlx5_tx_qseg_init(struct mlx5_txq_data *restrict txq,
 		  struct mlx5_txq_local *restrict loc __rte_unused,
 		  struct mlx5_wqe *restrict wqe,
 		  unsigned int wci,
@@ -794,6 +790,43 @@ mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
 	qs->reserved1 = RTE_BE32(0);
 }
 
+/**
+ * Build the Wait on Time Segment with the specified timestamp value.
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param loc
+ *   Pointer to burst routine local context.
+ * @param wqe
+ *   Pointer to WQE to fill with built Wait Segment.
+ * @param ts
+ *   Timestamp value to wait for.
+ * @param olx
+ *   Configured Tx offloads mask. It is fully defined at
+ *   compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq,
+		  struct mlx5_txq_local *restrict loc __rte_unused,
+		  struct mlx5_wqe *restrict wqe,
+		  uint64_t ts,
+		  unsigned int olx __rte_unused)
+{
+	struct mlx5_wqe_wseg *ws;
+
+	ws = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
+	ws->operation = rte_cpu_to_be_32(MLX5_WAIT_COND_CYCLIC_BIGGER);
+	ws->lkey = RTE_BE32(0);
+	ws->va_high = RTE_BE32(0);
+	ws->va_low = RTE_BE32(0);
+	if (txq->rt_timestamp) {
+		ts = ts % (uint64_t)NS_PER_S
+		   | (ts / (uint64_t)NS_PER_S) << 32;
+	}
+	ws->value = rte_cpu_to_be_64(ts);
+	ws->mask = txq->rt_timemask;
+}
+
 /**
  * Build the Ethernet Segment without inlined data.
  * Supports Software Parser, Checksums and VLAN insertion Tx offload features.
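The realtime branch in mlx5_tx_wseg_init() above packs a nanosecond clock value into the device real-time format: whole seconds in the upper 32 bits, the nanosecond remainder in the lower 32 bits. A minimal standalone sketch of that packing, assuming NS_PER_S is one billion as in the PMD headers (ns_to_rt_format() is an illustrative helper, not a PMD function):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NS_PER_S 1000000000ULL /* assumed to match the PMD definition */

	/* Pack nanoseconds as done above when txq->rt_timestamp is set:
	 * seconds go to the upper 32 bits, the remainder to the lower 32.
	 */
	static uint64_t ns_to_rt_format(uint64_t ts)
	{
		return ts % NS_PER_S | (ts / NS_PER_S) << 32;
	}

	int main(void)
	{
		uint64_t ts = 5 * NS_PER_S + 123; /* 5 s + 123 ns */

		/* Prints 0x000000050000007b. */
		printf("0x%016" PRIx64 "\n", ns_to_rt_format(ts));
		return 0;
	}

The mask written from txq->rt_timemask then limits which bits of this packed value take part in the cyclic MLX5_WAIT_COND_CYCLIC_BIGGER comparison.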
@@ -1071,7 +1104,6 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 	uint8_t *psrc;
 
 	MLX5_ASSERT(len);
-	MLX5_ASSERT(must <= len);
 	do {
 		/* Allow zero length packets, must check first. */
 		dlen = rte_pktmbuf_data_len(loc->mbuf);
@@ -1098,9 +1130,11 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 			if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
 				/*
 				 * Copy only the minimal required
-				 * part of the data buffer.
+				 * part of the data buffer. Limit the
+				 * amount of data to be copied to the
+				 * length of the available space.
 				 */
-				len = diff;
+				len = RTE_MIN(len, diff);
 			}
 		}
 		continue;
@@ -1624,9 +1658,9 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
 {
 	if (MLX5_TXOFF_CONFIG(TXPP) &&
 	    loc->mbuf->ol_flags & txq->ts_mask) {
+		struct mlx5_dev_ctx_shared *sh;
 		struct mlx5_wqe *wqe;
 		uint64_t ts;
-		int32_t wci;
 
 		/*
 		 * Estimate the required space quickly and roughly.
@@ -1638,13 +1672,32 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
 			return MLX5_TXCMP_CODE_EXIT;
 		/* Convert the timestamp into completion to wait. */
 		ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
-		wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
-		if (unlikely(wci < 0))
-			return MLX5_TXCMP_CODE_SINGLE;
-		/* Build the WAIT WQE with specified completion. */
 		wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
-		mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
-		mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
+		sh = txq->sh;
+		if (txq->wait_on_time) {
+			/* The wait on time capability should be used. */
+			ts -= sh->txpp.skew;
+			mlx5_tx_cseg_init(txq, loc, wqe,
+					  1 + sizeof(struct mlx5_wqe_wseg) /
+					      MLX5_WSEG_SIZE,
+					  MLX5_OPCODE_WAIT |
+					  MLX5_OPC_MOD_WAIT_TIME << 24, olx);
+			mlx5_tx_wseg_init(txq, loc, wqe, ts, olx);
+		} else {
+			/* Legacy cross-channel operation should be used. */
+			int32_t wci;
+
+			wci = mlx5_txpp_convert_tx_ts(sh, ts);
+			if (unlikely(wci < 0))
+				return MLX5_TXCMP_CODE_SINGLE;
+			/* Build the WAIT WQE with specified completion. */
+			mlx5_tx_cseg_init(txq, loc, wqe,
+					  1 + sizeof(struct mlx5_wqe_qseg) /
+					      MLX5_WSEG_SIZE,
+					  MLX5_OPCODE_WAIT |
+					  MLX5_OPC_MOD_WAIT_CQ_PI << 24, olx);
+			mlx5_tx_qseg_init(txq, loc, wqe, wci, olx);
+		}
 		++txq->wqe_ci;
 		--loc->wqe_free;
 		return MLX5_TXCMP_CODE_MULTI;
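The timestamp consumed above via txq->ts_offset and txq->ts_mask is the one an application stores in the standard Tx timestamp dynamic field. A hedged application-side sketch using only the public names from rte_mbuf_dyn.h (the PMD registers the field and flag when Tx scheduling is enabled; tx_sched_init() and tx_schedule() are hypothetical helpers, not PMD API):

	#include <stdint.h>
	#include <rte_bitops.h>
	#include <rte_mbuf.h>
	#include <rte_mbuf_dyn.h>

	static int ts_offset = -1; /* dynamic field offset, resolved at init */
	static uint64_t ts_flag;   /* ol_flags bit requesting a scheduled send */

	static int
	tx_sched_init(void)
	{
		int off, bit;

		off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
					       NULL);
		bit = rte_mbuf_dynflag_lookup(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME,
					      NULL);
		if (off < 0 || bit < 0)
			return -1; /* The PMD did not register Tx scheduling. */
		ts_offset = off;
		ts_flag = RTE_BIT64(bit);
		return 0;
	}

	/* Request transmission of the mbuf not earlier than the given time. */
	static void
	tx_schedule(struct rte_mbuf *m, uint64_t when)
	{
		*RTE_MBUF_DYNFIELD(m, ts_offset, uint64_t *) = when;
		m->ol_flags |= ts_flag;
	}

With the flag set, the burst routine takes the scheduling path above; without it, the packet is sent immediately.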
@@ -1709,7 +1762,6 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
 		     inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
 		     inlen > (dlen + vlan)))
 		return MLX5_TXCMP_CODE_ERROR;
-	MLX5_ASSERT(inlen >= txq->inlen_mode);
 	/*
 	 * Check whether there are enough free WQEBBs:
 	 * - Control Segment
@@ -1933,7 +1985,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
 		MLX5_ASSERT(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
 		MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
-		inlen = txq->inlen_mode;
+		inlen = RTE_MIN(txq->inlen_mode, inlen);
 	} else if (vlan && !txq->vlan_en) {
 		/*
 		 * VLAN insertion is requested and hardware does not
@@ -1946,6 +1998,8 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
 	} else {
 		goto do_first;
 	}
+	if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+		goto do_build;
 	/*
 	 * Now we know the minimal amount of data is requested
 	 * to inline. Check whether we should inline the buffers
@@ -1978,6 +2032,8 @@ do_first:
 			mbuf = NEXT(mbuf);
 			/* There should be no end of packet. */
 			MLX5_ASSERT(mbuf);
+			if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE)
+				break;
 			nxlen = inlen + rte_pktmbuf_data_len(mbuf);
 		} while (unlikely(nxlen < txq->inlen_send));
 	}
@@ -2005,6 +2061,7 @@ do_align:
 	 * Estimate the number of Data Segments conservatively,
 	 * supposing no mbufs are being freed during inlining.
 	 */
+do_build:
 	MLX5_ASSERT(inlen <= txq->inlen_send);
 	ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
 				       MLX5_ESEG_MIN_INLINE_SIZE +
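One cross-check of the WAIT WQE sizing introduced above: with the Control Segment always occupying one 16-byte data segment, the legacy cross-channel WQE (Control plus QP segment) spans 2 DS, while the wait-on-time WQE (Control plus Wait segment) spans 3 DS; that is what the 1 + sizeof(...) / MLX5_WSEG_SIZE expressions evaluate to. A hypothetical compile-time check, assuming the segment layouts from mlx5_prm.h are in scope:

	#include <assert.h>

	/* Sanity sketch only; segment sizes assumed from mlx5_prm.h, where
	 * a Wait Segment spans two 16-byte WQE segments and a QP Segment one.
	 */
	static_assert(1 + sizeof(struct mlx5_wqe_qseg) / MLX5_WSEG_SIZE == 2,
		      "legacy WAIT WQE is Control + QP segment, 2 DS");
	static_assert(1 + sizeof(struct mlx5_wqe_wseg) / MLX5_WSEG_SIZE == 3,
		      "wait-on-time WAIT WQE is Control + Wait segment, 3 DS");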