1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2012-2019 Solarflare Communications Inc.
/*
 * Per-queue statistic bump macro (fragmented view: the enclosing
 * EFSYS_OPT_QSTATS #if/#else and the do/while(0) wrapper lines are
 * elided).  The first definition increments the queue's counter array;
 * the second, empty variant presumably compiles the calls away when
 * queue stats are disabled — confirm against the full source.
 */
14 #define EFX_TX_QSTAT_INCR(_etp, _stat) \
16 (_etp)->et_stat[_stat]++; \
17 _NOTE(CONSTANTCONDITION) \
20 #define EFX_TX_QSTAT_INCR(_etp, _stat)
/*
 * Return-type line of a function whose name and body are elided from
 * this view (original numbering jumps from 23 to 38) — presumably
 * ef10_tx_init; verify against the full source.
 */
23 __checkReturn efx_rc_t
/*
 * TX queue create (fragmented view: many original lines are elided and
 * the function-name line is missing — presumably ef10_tx_qcreate;
 * confirm against the full source).  Visible behaviour:
 *  - reject inner-checksum offload flags when the NIC reports no
 *    tunnel encapsulation support;
 *  - initialise the queue via MCDI INIT_TXQ;
 *  - clear the per-port TX push collector with a no-op option
 *    descriptor (see the bug29981 note below).
 */
38 __checkReturn efx_rc_t
41 __in unsigned int index,
42 __in unsigned int label,
43 __in efsys_mem_t *esmp,
49 __out unsigned int *addedp)
51 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
/* Inner checksum offload is only valid with tunnel encap support. */
58 inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
59 if (((flags & inner_csum) != 0) &&
60 (encp->enc_tunnel_encapsulations_supported == 0)) {
65 if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
70 * A previous user of this TX queue may have written a descriptor to the
71 * TX push collector, but not pushed the doorbell (e.g. after a crash).
72 * The next doorbell write would then push the stale descriptor.
74 * Ensure the (per network port) TX push collector is cleared by writing
75 * a no-op TX option descriptor. See bug29981 for details.
78 ef10_tx_qdesc_checksum_create(etp, flags, &desc);
80 EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
81 ef10_tx_qpush(etp, *addedp, 0);
/* Common failure probe; earlier failN unwind labels are elided. */
88 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Enable PIO on a TXQ (fragmented view; function-name line elided —
 * presumably ef10_tx_qpio_enable).  Visible behaviour: sub-allocate a
 * PIO block from a piobuf, link the piobuf to this queue, and compute
 * the offset used for CPU writes through the write-combined mapping.
 * A non-zero et_pio_size appears to mean "already enabled" — confirm.
 */
102 __checkReturn efx_rc_t
106 efx_nic_t *enp = etp->et_enp;
107 efx_piobuf_handle_t handle;
110 if (etp->et_pio_size != 0) {
115 /* Sub-allocate a PIO block from a piobuf */
116 if ((rc = ef10_nic_pio_alloc(enp,
121 &etp->et_pio_size)) != 0) {
124 EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
126 /* Link the piobuf to this TXQ */
127 if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
132 * et_pio_offset is the offset of the sub-allocated block within the
133 * hardware PIO buffer. It is used as the buffer address in the PIO
136 * et_pio_write_offset is the offset of the sub-allocated block from the
137 * start of the write-combined memory mapping, and is used for writing
138 * data into the PIO buffer.
140 etp->et_pio_write_offset =
141 (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
142 ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
/*
 * Failure path: release the sub-allocated PIO block and mark PIO
 * disabled on this queue (et_pio_size = 0).
 */
148 (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
151 etp->et_pio_size = 0;
153 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Disable PIO on a TXQ (fragmented view: return type, parameter list
 * and some body lines are elided).  If PIO is enabled (et_pio_size
 * non-zero), unlink the piobuf from the queue, free the sub-allocated
 * PIO block, and clear the queue's PIO bookkeeping.
 */
159 ef10_tx_qpio_disable(
162 efx_nic_t *enp = etp->et_enp;
164 if (etp->et_pio_size != 0) {
165 /* Unlink the piobuf from this TXQ */
166 if (ef10_nic_pio_unlink(enp, etp->et_index) != 0)
169 /* Free the sub-allocated PIO block */
170 (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
172 etp->et_pio_size = 0;
173 etp->et_pio_write_offset = 0;
/*
 * Copy packet data into the queue's PIO buffer through the
 * write-combined BAR mapping (fragmented view; presumably
 * ef10_tx_qpio_write).  Length must be a whole number of 64-bit
 * qwords; the write range is bounds-checked against the queue's
 * sub-allocated PIO block, and PIO must be enabled (et_pio_size != 0).
 */
177 __checkReturn efx_rc_t
180 __in_ecount(length) uint8_t *buffer,
184 efx_nic_t *enp = etp->et_enp;
185 efsys_bar_t *esbp = enp->en_esbp;
186 uint32_t write_offset;
187 uint32_t write_offset_limit;
191 EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
/* PIO not enabled on this queue — error path (body elided). */
193 if (etp->et_pio_size == 0) {
/* Would overrun the sub-allocated PIO block — error path (elided). */
197 if (offset + length > etp->et_pio_size) {
203 * Writes to PIO buffers must be 64 bit aligned, and multiples of
206 write_offset = etp->et_pio_write_offset + offset;
207 write_offset_limit = write_offset + length;
208 eqp = (efx_qword_t *)buffer;
209 while (write_offset < write_offset_limit) {
210 EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
212 write_offset += sizeof (efx_qword_t);
220 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Post a PIO option descriptor that transmits pkt_length bytes from
 * the queue's PIO buffer (fragmented view; presumably
 * ef10_tx_qpio_post).  Fails when the ring would overfill or when PIO
 * is not enabled on this queue; on success the descriptor is written
 * at the next ring slot and *addedp presumably advances (elided).
 */
225 __checkReturn efx_rc_t
228 __in size_t pkt_length,
229 __in unsigned int completed,
230 __inout unsigned int *addedp)
232 efx_qword_t pio_desc;
235 unsigned int added = *addedp;
/* Ring full: adding one more descriptor would exceed the queue limit. */
239 if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
/* PIO not enabled on this queue — error path (body elided). */
244 if (etp->et_pio_size == 0) {
249 id = added++ & etp->et_mask;
250 offset = id * sizeof (efx_qword_t);
252 EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
253 unsigned int, id, uint32_t, etp->et_pio_offset,
/* Option descriptor: PIO type, byte count, PIO buffer address. */
256 EFX_POPULATE_QWORD_5(pio_desc,
257 ESF_DZ_TX_DESC_IS_OPT, 1,
258 ESF_DZ_TX_OPTION_TYPE, 1,
259 ESF_DZ_TX_PIO_CONT, 0,
260 ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
261 ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
263 EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
265 EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
273 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Post a batch of DMA descriptors, one per buffer fragment
 * (fragmented view; presumably ef10_tx_qpost).  Rejects the batch up
 * front if it would overfill the ring, then writes a TYPE=0 qword
 * descriptor for each fragment, clearing CONT on end-of-packet.
 */
278 __checkReturn efx_rc_t
281 __in_ecount(ndescs) efx_buffer_t *eb,
282 __in unsigned int ndescs,
283 __in unsigned int completed,
284 __inout unsigned int *addedp)
286 unsigned int added = *addedp;
/* Ring full: the whole batch must fit or nothing is posted. */
290 if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
295 for (i = 0; i < ndescs; i++) {
296 efx_buffer_t *ebp = &eb[i];
297 efsys_dma_addr_t addr = ebp->eb_addr;
298 size_t size = ebp->eb_size;
299 boolean_t eop = ebp->eb_eop;
304 /* No limitations on boundary crossing */
306 etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
308 id = added++ & etp->et_mask;
309 offset = id * sizeof (efx_qword_t);
311 EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
312 unsigned int, id, efsys_dma_addr_t, addr,
313 size_t, size, boolean_t, eop);
/* DMA descriptor: 64-bit address split across two 32-bit fields. */
315 EFX_POPULATE_QWORD_5(qword,
316 ESF_DZ_TX_KER_TYPE, 0,
317 ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
318 ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
319 ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
320 ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
322 EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
325 EFX_TX_QSTAT_INCR(etp, TX_POST);
331 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Ring the TX doorbell (fragmented view; presumably ef10_tx_qpush).
 * When safe, the descriptor at 'pushed' is written into the doorbell
 * alongside the new write pointer so the hardware can transmit without
 * a descriptor fetch.  TSO option descriptors must never be pushed
 * (bug 65776 below); for those only the wptr DWORD is written.
 */
337 * This improves performance by, when possible, pushing a TX descriptor at the
338 * same time as the doorbell. The descriptor must be added to the TXQ, so that
339 * can be used if the hardware decides not to use the pushed descriptor.
344 __in unsigned int added,
345 __in unsigned int pushed)
347 efx_nic_t *enp = etp->et_enp;
354 wptr = added & etp->et_mask;
355 id = pushed & etp->et_mask;
356 offset = id * sizeof (efx_qword_t);
/* Re-read the descriptor to be pushed from the ring memory. */
358 EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
361 * Bug 65776: TSO option descriptors cannot be pushed if pacer bypass is
362 * enabled on the event queue this transmit queue is attached to.
364 * To ensure the code is safe, it is easiest to simply test the type of
365 * the descriptor to push, and only push it is if it not a TSO option
368 if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
369 (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
370 ESE_DZ_TX_OPTION_DESC_TSO)) {
371 /* Push the descriptor and update the wptr. */
372 EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
373 ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
374 ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
376 /* Ensure ordering of memory (descriptors) and PIO (doorbell) */
377 EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
378 EF10_TXQ_DESC_SIZE, wptr, id);
379 EFSYS_PIO_WRITE_BARRIER();
380 EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
381 etp->et_index, &oword);
386 * Only update the wptr. This is signalled to the hardware by
387 * only writing one DWORD of the doorbell register.
389 EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
390 dword = oword.eo_dword[2];
392 /* Ensure ordering of memory (descriptors) and PIO (doorbell) */
393 EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
394 EF10_TXQ_DESC_SIZE, wptr, id);
395 EFSYS_PIO_WRITE_BARRIER();
396 EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
397 etp->et_index, &dword, B_FALSE);
/*
 * Copy a batch of pre-built descriptors into the ring (fragmented
 * view; presumably ef10_tx_qdesc_post).  NOTE(review): the ring
 * offset here is computed with sizeof (efx_desc_t) whereas the other
 * post paths use sizeof (efx_qword_t) — presumably these are the same
 * size; verify in the full source.
 */
401 __checkReturn efx_rc_t
404 __in_ecount(ndescs) efx_desc_t *ed,
405 __in unsigned int ndescs,
406 __in unsigned int completed,
407 __inout unsigned int *addedp)
409 unsigned int added = *addedp;
/* Ring full: the whole batch must fit or nothing is posted. */
412 if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
415 for (i = 0; i < ndescs; i++) {
416 efx_desc_t *edp = &ed[i];
420 id = added++ & etp->et_mask;
421 offset = id * sizeof (efx_desc_t);
423 EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
426 EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
427 unsigned int, added, unsigned int, ndescs);
429 EFX_TX_QSTAT_INCR(etp, TX_POST);
/*
 * Build a single DMA (KER_TYPE=0) TX descriptor for the given buffer
 * address and size into *edp (fragmented view: some parameter and
 * body lines are elided).  CONT is clear on end-of-packet, set
 * otherwise; the 64-bit address is split across two 32-bit fields.
 */
436 ef10_tx_qdesc_dma_create(
438 __in efsys_dma_addr_t addr,
441 __out efx_desc_t *edp)
443 _NOTE(ARGUNUSED(etp))
445 /* No limitations on boundary crossing */
446 EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
448 EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
449 efsys_dma_addr_t, addr,
450 size_t, size, boolean_t, eop);
452 EFX_POPULATE_QWORD_5(edp->ed_eq,
453 ESF_DZ_TX_KER_TYPE, 0,
454 ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
455 ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
456 ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
457 ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
/*
 * Build a (single-descriptor, v1) TSO option descriptor into *edp
 * carrying the initial IPv4 ID, TCP sequence number and TCP flags
 * (fragmented view: some parameter/body lines are elided).
 */
461 ef10_tx_qdesc_tso_create(
463 __in uint16_t ipv4_id,
464 __in uint32_t tcp_seq,
465 __in uint8_t tcp_flags,
466 __out efx_desc_t *edp)
468 _NOTE(ARGUNUSED(etp))
470 EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
471 uint16_t, ipv4_id, uint32_t, tcp_seq,
474 EFX_POPULATE_QWORD_5(edp->ed_eq,
475 ESF_DZ_TX_DESC_IS_OPT, 1,
476 ESF_DZ_TX_OPTION_TYPE,
477 ESE_DZ_TX_OPTION_DESC_TSO,
478 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
479 ESF_DZ_TX_TSO_IP_ID, ipv4_id,
480 ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
/*
 * Build the FATSOv2 option-descriptor pair into edp[0] (FATSO2A:
 * inner IP ID + TCP seqno) and edp[1] (FATSO2B: TCP MSS + outer IP
 * ID).  Caller must supply at least EFX_TX_FATSOV2_OPT_NDESCS
 * descriptors (fragmented view: some parameter/body lines elided).
 */
484 ef10_tx_qdesc_tso2_create(
486 __in uint16_t ipv4_id,
487 __in uint16_t outer_ipv4_id,
488 __in uint32_t tcp_seq,
489 __in uint16_t tcp_mss,
490 __out_ecount(count) efx_desc_t *edp,
493 _NOTE(ARGUNUSED(etp, count))
495 EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
496 uint16_t, ipv4_id, uint32_t, tcp_seq,
499 EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
501 EFX_POPULATE_QWORD_5(edp[0].ed_eq,
502 ESF_DZ_TX_DESC_IS_OPT, 1,
503 ESF_DZ_TX_OPTION_TYPE,
504 ESE_DZ_TX_OPTION_DESC_TSO,
505 ESF_DZ_TX_TSO_OPTION_TYPE,
506 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
507 ESF_DZ_TX_TSO_IP_ID, ipv4_id,
508 ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
509 EFX_POPULATE_QWORD_5(edp[1].ed_eq,
510 ESF_DZ_TX_DESC_IS_OPT, 1,
511 ESF_DZ_TX_OPTION_TYPE,
512 ESE_DZ_TX_OPTION_DESC_TSO,
513 ESF_DZ_TX_TSO_OPTION_TYPE,
514 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
515 ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
516 ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
/*
 * Build a VLAN option descriptor into *edp.  VLAN_OP is 1 (insert tag)
 * for a non-zero TCI and 0 otherwise, with the TCI placed in TAG1
 * (fragmented view: some parameter/body lines are elided).
 */
520 ef10_tx_qdesc_vlantci_create(
523 __out efx_desc_t *edp)
525 _NOTE(ARGUNUSED(etp))
527 EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
530 EFX_POPULATE_QWORD_4(edp->ed_eq,
531 ESF_DZ_TX_DESC_IS_OPT, 1,
532 ESF_DZ_TX_OPTION_TYPE,
533 ESE_DZ_TX_OPTION_DESC_VLAN,
534 ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
535 ESF_DZ_TX_VLAN_TAG1, tci);
/*
 * Build a CRC/checksum option descriptor into *edp, mapping the
 * EFX_TXQ_CKSUM_* flag bits onto the outer and inner IP / TCP-UDP
 * checksum-offload enables (fragmented view: some parameter/body
 * lines are elided).
 */
539 ef10_tx_qdesc_checksum_create(
542 __out efx_desc_t *edp)
544 _NOTE(ARGUNUSED(etp));
546 EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
549 EFX_POPULATE_QWORD_6(edp->ed_eq,
550 ESF_DZ_TX_DESC_IS_OPT, 1,
551 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
552 ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
553 (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
554 ESF_DZ_TX_OPTION_IP_CSUM,
555 (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
556 ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
557 (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
558 ESF_DZ_TX_OPTION_INNER_IP_CSUM,
559 (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
/*
 * TX pacing request (fragmented view; function name elided —
 * presumably ef10_tx_qpace).  Both visible arguments are marked
 * unused; the elided body presumably just sets an error code and
 * falls to the failure probe — confirm against the full source.
 */
563 __checkReturn efx_rc_t
566 __in unsigned int ns)
571 _NOTE(ARGUNUSED(etp, ns))
572 _NOTE(CONSTANTCONDITION)
582 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Flush (tear down) the TXQ via MCDI FINI_TXQ (fragmented view;
 * presumably ef10_tx_qflush).  EALREADY from the MC is propagated as
 * a non-error status so callers do not wait for a flush-done event
 * that will never arrive after an MC reboot.
 */
587 __checkReturn efx_rc_t
591 efx_nic_t *enp = etp->et_enp;
594 if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
601 * EALREADY is not an error, but indicates that the MC has rebooted and
602 * that the TXQ has already been destroyed. Callers need to know that
603 * the TXQ flush has completed to avoid waiting until timeout for a
604 * flush done event that will not be delivered.
607 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Lone ARGUNUSED annotation from a function whose signature and body
 * are elided (original line 617) — presumably an empty
 * ef10_tx_qenable; verify against the full source.
 */
617 _NOTE(ARGUNUSED(etp))
/*
 * Accumulate and reset per-queue TX statistics (fragmented view:
 * return type and parameter lines partly elided).  For each of the
 * TX_NQSTATS counters, add the queue's running count into the
 * caller's efsys_stat_t array and zero the queue counter.  Compiled
 * only under EFSYS_OPT_QSTATS (see the #endif below).
 */
623 ef10_tx_qstats_update(
625 __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
629 for (id = 0; id < TX_NQSTATS; id++) {
630 efsys_stat_t *essp = &stat[id];
632 EFSYS_STAT_INCR(essp, etp->et_stat[id]);
633 etp->et_stat[id] = 0;
637 #endif /* EFSYS_OPT_QSTATS */
639 #endif /* EFX_OPTS_EF10() */