/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2012-2018 Solarflare Communications Inc.
 * All rights reserved.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_QSTATS
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif
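/*
 * The do { ... } while (B_FALSE) wrapper makes the macro expand to a single
 * statement. A minimal sketch (hypothetical flag "posted", not from this
 * file):
 *
 *	if (posted)
 *		EFX_TX_QSTAT_INCR(etp, TX_POST);
 *	else
 *		handle_error();
 *
 * parses the same way whether or not EFSYS_OPT_QSTATS is enabled; without
 * the wrapper, the two-statement expansion would break the if/else.
 */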
static	__checkReturn	efx_rc_t
efx_mcdi_init_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t ndescs,
	__in		uint32_t target_evq,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		uint16_t flags,
	__in		efsys_mem_t *esmp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
			    MC_CMD_INIT_TXQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
	if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) {
		rc = EINVAL;
		goto fail1;
	}

	npages = EFX_TXQ_NBUFS(ndescs);
	if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
		rc = EINVAL;
		goto fail2;
	}
	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
	MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
	    INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
	    INIT_TXQ_IN_CRC_MODE, 0,
	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
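/*
 * Sizing sketch (illustrative, not from this file): each TX descriptor is an
 * 8-byte efx_qword_t, so a 512-entry ring needs 512 * 8 = 4096 bytes of DMA
 * memory. Assuming 4KiB buffer-table pages, EFX_TXQ_NBUFS(512) would then
 * yield npages == 1, and the loop above would program a single page address.
 */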
static	__checkReturn	efx_rc_t
efx_mcdi_fini_txq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
			    MC_CMD_FINI_TXQ_OUT_LEN)];
	efx_rc_t rc;
	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_TXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);
	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
	__checkReturn	efx_rc_t
ef10_tx_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_tx_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}
	__checkReturn	efx_rc_t
ef10_tx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint16_t flags,
	__in		efx_evq_t *eep,
	__in		efx_txq_t *etp,
	__out		unsigned int *addedp)
{
	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
	uint16_t inner_csum;
	efx_desc_t desc;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))

	inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
	if (((flags & inner_csum) != 0) &&
	    (encp->enc_tunnel_encapsulations_supported == 0)) {
		rc = EINVAL;
		goto fail1;
	}

	if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
	    flags, esmp)) != 0)
		goto fail2;
	/*
	 * A previous user of this TX queue may have written a descriptor to the
	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
	 * The next doorbell write would then push the stale descriptor.
	 *
	 * Ensure the (per network port) TX push collector is cleared by writing
	 * a no-op TX option descriptor. See bug29981 for details.
	 */
	*addedp = 1;
	ef10_tx_qdesc_checksum_create(etp, flags, &desc);

	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
	ef10_tx_qpush(etp, *addedp, 0);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
		void
ef10_tx_qdestroy(
	__in		efx_txq_t *etp)
{
	_NOTE(ARGUNUSED(etp))
}
	__checkReturn	efx_rc_t
ef10_tx_qpio_enable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_piobuf_handle_t handle;
	efx_rc_t rc;

	if (etp->et_pio_size != 0) {
		rc = EALREADY;
		goto fail1;
	}

	/* Sub-allocate a PIO block from a piobuf */
	if ((rc = ef10_nic_pio_alloc(enp,
		    &etp->et_pio_bufnum,
		    &handle,
		    &etp->et_pio_blknum,
		    &etp->et_pio_offset,
		    &etp->et_pio_size)) != 0) {
		goto fail2;
	}
	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);

	/* Link the piobuf to this TXQ */
	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
		goto fail3;
	}

	/*
	 * et_pio_offset is the offset of the sub-allocated block within the
	 * hardware PIO buffer. It is used as the buffer address in the PIO
	 * option descriptor.
	 *
	 * et_pio_write_offset is the offset of the sub-allocated block from the
	 * start of the write-combined memory mapping, and is used for writing
	 * data into the PIO buffer.
	 */
	etp->et_pio_write_offset =
	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
fail2:
	EFSYS_PROBE(fail2);
	etp->et_pio_size = 0;
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
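/*
 * Worked example of the mapping arithmetic (hypothetical values, not from
 * this file): assuming ER_DZ_TX_PIOBUF_STEP were 8192, with
 * et_pio_bufnum == 2 and a block sub-allocated at et_pio_offset == 256,
 *
 *	et_pio_write_offset = 2 * 8192 + ER_DZ_TX_PIOBUF_OFST + 256
 *
 * is where the host writes packet data, while the PIO option descriptor
 * carries only the buffer-relative et_pio_offset (256).
 */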
			void
ef10_tx_qpio_disable(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;

	if (etp->et_pio_size != 0) {
		/* Unlink the piobuf from this TXQ */
		ef10_nic_pio_unlink(enp, etp->et_index);

		/* Free the sub-allocated PIO block */
		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
		etp->et_pio_size = 0;
		etp->et_pio_write_offset = 0;
	}
}
	__checkReturn	efx_rc_t
ef10_tx_qpio_write(
	__in			efx_txq_t *etp,
	__in_ecount(length)	uint8_t *buffer,
	__in			size_t length,
	__in			size_t offset)
{
	efx_nic_t *enp = etp->et_enp;
	efsys_bar_t *esbp = enp->en_esbp;
	uint32_t write_offset;
	uint32_t write_offset_limit;
	efx_qword_t *eqp;
	efx_rc_t rc;

	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail1;
	}

	if (offset + length > etp->et_pio_size) {
		rc = ENOSPC;
		goto fail2;
	}

	/*
	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
	 * 64 bits.
	 */
	write_offset = etp->et_pio_write_offset + offset;
	write_offset_limit = write_offset + length;
	eqp = (efx_qword_t *)buffer;
	while (write_offset < write_offset_limit) {
		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
		eqp++;
		write_offset += sizeof (efx_qword_t);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
	__checkReturn	efx_rc_t
ef10_tx_qpio_post(
	__in			efx_txq_t *etp,
	__in			size_t pkt_length,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	efx_qword_t pio_desc;
	unsigned int id;
	size_t offset;
	unsigned int added = *addedp;
	efx_rc_t rc;

	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}

	if (etp->et_pio_size == 0) {
		rc = ENOENT;
		goto fail2;
	}
	id = added++ & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
	    unsigned int, id, uint32_t, etp->et_pio_offset,
	    size_t, pkt_length);

	EFX_POPULATE_QWORD_5(pio_desc,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, 1,
	    ESF_DZ_TX_PIO_CONT, 0,
	    ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
	    ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);

	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);

	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);

	*addedp = added;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
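/*
 * PIO transmit sketch (hypothetical caller; the names and locking are
 * assumptions, and the packet copy must be padded to a multiple of 8 bytes):
 *
 *	unsigned int added = txq_added;
 *
 *	if (ef10_tx_qpio_write(etp, padded_pkt, padded_len, 0) == 0 &&
 *	    ef10_tx_qpio_post(etp, pkt_len, txq_completed, &added) == 0)
 *		ef10_tx_qpush(etp, added, added - 1);
 */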
	__checkReturn		efx_rc_t
ef10_tx_qpost(
	__in			efx_txq_t *etp,
	__in_ecount(ndescs)	efx_buffer_t *eb,
	__in			unsigned int ndescs,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}
	for (i = 0; i < ndescs; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t addr = ebp->eb_addr;
		size_t size = ebp->eb_size;
		boolean_t eop = ebp->eb_eop;
		unsigned int id;
		size_t offset;
		efx_qword_t qword;

		/* No limitations on boundary crossing */
		EFSYS_ASSERT(size <=
		    etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_qword_t);

		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
		    unsigned int, id, efsys_dma_addr_t, addr,
		    size_t, size, boolean_t, eop);

		EFX_POPULATE_QWORD_5(qword,
		    ESF_DZ_TX_KER_TYPE, 0,
		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
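/*
 * DMA transmit sketch (hypothetical caller): post a two-fragment packet and
 * ring the doorbell, pushing the first new descriptor. txq_added and
 * txq_completed are assumed ring counters owned by the caller.
 *
 *	efx_buffer_t eb[2] = {
 *		{ .eb_addr = hdr_addr, .eb_size = hdr_len, .eb_eop = B_FALSE },
 *		{ .eb_addr = pay_addr, .eb_size = pay_len, .eb_eop = B_TRUE },
 *	};
 *	unsigned int added = txq_added;
 *
 *	if (ef10_tx_qpost(etp, eb, 2, txq_completed, &added) == 0)
 *		ef10_tx_qpush(etp, added, txq_added);
 */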
/*
 * This improves performance by, when possible, pushing a TX descriptor at the
 * same time as the doorbell. The descriptor must be added to the TXQ, so that
 * it can be used if the hardware decides not to use the pushed descriptor.
 */
			void
ef10_tx_qpush(
	__in		efx_txq_t *etp,
	__in		unsigned int added,
	__in		unsigned int pushed)
{
	efx_nic_t *enp = etp->et_enp;
	unsigned int wptr;
	unsigned int id;
	size_t offset;
	efx_qword_t desc;
	efx_oword_t oword;
	wptr = added & etp->et_mask;
	id = pushed & etp->et_mask;
	offset = id * sizeof (efx_qword_t);

	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);

	/*
	 * Bug 65776: TSO option descriptors cannot be pushed if pacer bypass is
	 * enabled on the event queue this transmit queue is attached to.
	 *
	 * To ensure the code is safe, it is easiest to simply test the type of
	 * the descriptor to push, and only push it if it is not a TSO option
	 * descriptor.
	 */
	if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
	    (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
	    ESE_DZ_TX_OPTION_DESC_TSO)) {
		/* Push the descriptor and update the wptr. */
		EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
		    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &oword);
	} else {
		efx_dword_t dword;
		/*
		 * Only update the wptr. This is signalled to the hardware by
		 * only writing one DWORD of the doorbell register.
		 */
		EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
		dword = oword.eo_dword[2];

		/* Ensure ordering of memory (descriptors) and PIO (doorbell) */
		EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
		    wptr, id);
		EFSYS_PIO_WRITE_BARRIER();
		EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
		    etp->et_index, &dword, B_FALSE);
	}
}
	__checkReturn		efx_rc_t
ef10_tx_qdesc_post(
	__in			efx_txq_t *etp,
	__in_ecount(ndescs)	efx_desc_t *ed,
	__in			unsigned int ndescs,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	efx_rc_t rc;

	if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
		rc = ENOSPC;
		goto fail1;
	}
	for (i = 0; i < ndescs; i++) {
		efx_desc_t *edp = &ed[i];
		unsigned int id;
		size_t offset;

		id = added++ & etp->et_mask;
		offset = id * sizeof (efx_desc_t);

		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
	}

	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
	    unsigned int, added, unsigned int, ndescs);

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
	void
ef10_tx_qdesc_dma_create(
	__in	efx_txq_t *etp,
	__in	efsys_dma_addr_t addr,
	__in	size_t size,
	__in	boolean_t eop,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	/* No limitations on boundary crossing */
	EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);

	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
	    efsys_dma_addr_t, addr,
	    size_t, size, boolean_t, eop);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_KER_TYPE, 0,
	    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
	    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
	    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
	    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
}
	void
ef10_tx_qdesc_tso_create(
	__in	efx_txq_t *etp,
	__in	uint16_t ipv4_id,
	__in	uint32_t tcp_seq,
	__in	uint8_t tcp_flags,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint8_t, tcp_flags);

	EFX_POPULATE_QWORD_5(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
}
	void
ef10_tx_qdesc_tso2_create(
	__in			efx_txq_t *etp,
	__in			uint16_t ipv4_id,
	__in			uint16_t outer_ipv4_id,
	__in			uint32_t tcp_seq,
	__in			uint16_t tcp_mss,
	__out_ecount(count)	efx_desc_t *edp,
	__in			int count)
{
	_NOTE(ARGUNUSED(etp, count))

	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
	    uint16_t, ipv4_id, uint32_t, tcp_seq,
	    uint16_t, tcp_mss);

	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);

	EFX_POPULATE_QWORD_6(edp[0].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
	    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
	    ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
	    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_TSO,
	    ESF_DZ_TX_TSO_OPTION_TYPE,
	    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
	    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
}
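/*
 * FATSOv2 sketch (hypothetical caller): a TSO send is the two option
 * descriptors built above followed by the usual DMA descriptors, posted in
 * one batch. With a single data fragment, three descriptors are posted:
 *
 *	efx_desc_t ed[3];
 *	unsigned int added = txq_added;
 *
 *	ef10_tx_qdesc_tso2_create(etp, ipv4_id, outer_ipv4_id, tcp_seq,
 *	    tcp_mss, &ed[0], EFX_TX_FATSOV2_OPT_NDESCS);
 *	ef10_tx_qdesc_dma_create(etp, frag_addr, frag_len, B_TRUE, &ed[2]);
 *	(void) ef10_tx_qdesc_post(etp, ed, 3, txq_completed, &added);
 */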
	void
ef10_tx_qdesc_vlantci_create(
	__in	efx_txq_t *etp,
	__in	uint16_t tci,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp))

	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
	    uint16_t, tci);

	EFX_POPULATE_QWORD_4(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE,
	    ESE_DZ_TX_OPTION_DESC_VLAN,
	    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
	    ESF_DZ_TX_VLAN_TAG1, tci);
}
	void
ef10_tx_qdesc_checksum_create(
	__in	efx_txq_t *etp,
	__in	uint16_t flags,
	__out	efx_desc_t *edp)
{
	_NOTE(ARGUNUSED(etp));

	EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
	    uint32_t, flags);

	EFX_POPULATE_QWORD_6(edp->ed_eq,
	    ESF_DZ_TX_DESC_IS_OPT, 1,
	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
	    ESF_DZ_TX_OPTION_INNER_IP_CSUM,
	    (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
}
	__checkReturn	efx_rc_t
ef10_tx_qpace(
	__in		efx_txq_t *etp,
	__in		unsigned int ns)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(etp, ns))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
	__checkReturn	efx_rc_t
ef10_tx_qflush(
	__in		efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_rc_t rc;

	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
		goto fail1;

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the TXQ has already been destroyed. Callers need to know that
	 * the TXQ flush has completed to avoid waiting until timeout for a
	 * flush done event that will not be delivered.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
			void
ef10_tx_qenable(
	__in		efx_txq_t *etp)
{
	_NOTE(ARGUNUSED(etp))
}
#if EFSYS_OPT_QSTATS
			void
ef10_tx_qstats_update(
	__in				efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}

#endif /* EFSYS_OPT_QSTATS */
#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */