for (i = 0; i < txq_entries; ++i) {
sw_ring[i].tsoh = rte_malloc_socket("sfc-txq-tsoh-obj",
SFC_TSOH_STD_LEN,
- SFC_TX_SEG_BOUNDARY,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (sw_ring[i].tsoh == NULL)
goto fail_alloc_tsoh_objs;
uint8_t *tsoh;
const struct tcp_hdr *th;
efsys_dma_addr_t header_paddr;
- efsys_dma_addr_t paddr_next_frag;
uint16_t packet_id;
uint32_t sent_seq;
struct rte_mbuf *m = *in_seg;
return EMSGSIZE;
header_paddr = rte_pktmbuf_mtophys(m);
- paddr_next_frag = P2ROUNDUP(header_paddr + 1, SFC_TX_SEG_BOUNDARY);
/*
* Sometimes headers may be split across multiple mbufs. In such cases
* we need to glue those pieces and store them in some temporary place.
* Also, packet headers must be contiguous in memory, so that
- * they can be referred to with a single DMA descriptor. Hence, handle
- * the case where the original header crosses a 4K memory boundary
+ * they can be referred to with a single DMA descriptor. EF10 has no
+ * limitations on address boundaries crossing by DMA descriptor data.
*/
- if ((m->data_len < header_len) ||
- ((paddr_next_frag - header_paddr) < header_len)) {
+ if (m->data_len < header_len) {
sfc_tso_prepare_header(txq, in_seg, in_off, idx, header_len);
tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_txq_info *txq_info;
struct sfc_evq *evq;
struct sfc_txq *txq;
txq->ptr_mask = txq_info->entries - 1;
txq->free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
+ txq->dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
txq->hw_index = sw_index;
txq->flags = tx_conf->txq_flags;
txq->evq = evq;
/*
 * Initialise the transmit datapath state for the adapter.
 * NOTE(review): this span is a unified-diff hunk — lines prefixed '+' are
 * patch additions and some context lines between the visible ones are
 * elided. Comments below describe only what is visible in this hunk.
 */
int
sfc_tx_init(struct sfc_adapter *sa)
{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	unsigned int sw_index;
	int rc = 0;
+	/*
+	 * The datapath implementation assumes absence of boundary
+	 * limits on Tx DMA descriptors. Addition of these checks on
+	 * datapath would simply make the datapath slower.
+	 */
+	if (encp->enc_tx_dma_desc_boundary != 0) {
+		rc = ENOTSUP;
+		goto fail_tx_dma_desc_boundary;
+	}
+
	/* Validate the requested Tx mode before touching any queue state. */
	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
	if (rc != 0)
		goto fail_check_mode;
	/* (diff context elided here: queue setup loop and success return) */
	sa->txq_count = 0;
	/* Both failure labels funnel into the same diagnostic exit path. */
fail_check_mode:
+fail_tx_dma_desc_boundary:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
efsys_dma_addr_t frag_addr = next_frag;
size_t frag_len;
- next_frag = RTE_ALIGN(frag_addr + 1,
- SFC_TX_SEG_BOUNDARY);
- frag_len = MIN(next_frag - frag_addr, seg_len);
+ /*
+ * It is assumed here that there is no
+ * limitation on address boundary
+ * crossing by DMA descriptor.
+ */
+ frag_len = MIN(seg_len, txq->dma_desc_size_max);
+ next_frag += frag_len;
seg_len -= frag_len;
pkt_len -= frag_len;