/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */
11 #include <rte_common.h>
12 #include <rte_ethdev.h>
13 #include <rte_ethdev_driver.h>
16 #include "txgbe_logs.h"
17 #include "base/txgbe.h"
18 #include "txgbe_ethdev.h"
19 #include "txgbe_rxtx.h"
22 txgbe_is_vf(struct rte_eth_dev *dev)
24 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
26 switch (hw->mac.type) {
27 case txgbe_mac_raptor_vf:
35 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
37 return DEV_RX_OFFLOAD_VLAN_STRIP;
41 txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
44 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
45 struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
47 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
48 DEV_RX_OFFLOAD_UDP_CKSUM |
49 DEV_RX_OFFLOAD_TCP_CKSUM |
50 DEV_RX_OFFLOAD_KEEP_CRC |
51 DEV_RX_OFFLOAD_JUMBO_FRAME |
52 DEV_RX_OFFLOAD_VLAN_FILTER |
53 DEV_RX_OFFLOAD_RSS_HASH |
54 DEV_RX_OFFLOAD_SCATTER;
56 if (!txgbe_is_vf(dev))
57 offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
58 DEV_RX_OFFLOAD_QINQ_STRIP |
59 DEV_RX_OFFLOAD_VLAN_EXTEND);
62 * RSC is only supported by PF devices in a non-SR-IOV
65 if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
66 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
68 if (hw->mac.type == txgbe_mac_raptor)
69 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
71 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
77 txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
85 txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
87 uint64_t tx_offload_capa;
90 DEV_TX_OFFLOAD_VLAN_INSERT |
91 DEV_TX_OFFLOAD_IPV4_CKSUM |
92 DEV_TX_OFFLOAD_UDP_CKSUM |
93 DEV_TX_OFFLOAD_TCP_CKSUM |
94 DEV_TX_OFFLOAD_SCTP_CKSUM |
95 DEV_TX_OFFLOAD_TCP_TSO |
96 DEV_TX_OFFLOAD_UDP_TSO |
97 DEV_TX_OFFLOAD_UDP_TNL_TSO |
98 DEV_TX_OFFLOAD_IP_TNL_TSO |
99 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
100 DEV_TX_OFFLOAD_GRE_TNL_TSO |
101 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
102 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
103 DEV_TX_OFFLOAD_MULTI_SEGS;
105 if (!txgbe_is_vf(dev))
106 tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
108 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
110 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
112 return tx_offload_capa;
116 txgbe_set_rx_function(struct rte_eth_dev *dev)
122 * txgbe_get_rscctl_maxdesc
124 * @pool Memory pool of the Rx queue
126 static inline uint32_t
127 txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
129 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
132 RTE_IPV4_MAX_PKT_LEN /
133 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
136 return TXGBE_RXCFG_RSCMAX_16;
137 else if (maxdesc >= 8)
138 return TXGBE_RXCFG_RSCMAX_8;
139 else if (maxdesc >= 4)
140 return TXGBE_RXCFG_RSCMAX_4;
142 return TXGBE_RXCFG_RSCMAX_1;
146 * txgbe_set_rsc - configure RSC related port HW registers
148 * Configures the port's RSC related registers.
152 * Returns 0 in case of success or a non-zero error code
155 txgbe_set_rsc(struct rte_eth_dev *dev)
157 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
158 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
159 struct rte_eth_dev_info dev_info = { 0 };
160 bool rsc_capable = false;
166 dev->dev_ops->dev_infos_get(dev, &dev_info);
167 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
170 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
171 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
176 /* RSC global configuration */
178 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
179 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
180 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
185 rfctl = rd32(hw, TXGBE_PSRCTL);
186 if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
187 rfctl &= ~TXGBE_PSRCTL_RSCDIA;
189 rfctl |= TXGBE_PSRCTL_RSCDIA;
190 wr32(hw, TXGBE_PSRCTL, rfctl);
192 /* If LRO hasn't been requested - we are done here. */
193 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
196 /* Set PSRCTL.RSCACK bit */
197 rdrxctl = rd32(hw, TXGBE_PSRCTL);
198 rdrxctl |= TXGBE_PSRCTL_RSCACK;
199 wr32(hw, TXGBE_PSRCTL, rdrxctl);
201 /* Per-queue RSC configuration */
202 for (i = 0; i < dev->data->nb_rx_queues; i++) {
203 struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
205 rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
207 rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
209 rd32(hw, TXGBE_ITR(rxq->reg_idx));
212 * txgbe PMD doesn't support header-split at the moment.
214 srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
215 srrctl |= TXGBE_RXCFG_HDRLEN(128);
218 * TODO: Consider setting the Receive Descriptor Minimum
219 * Threshold Size for an RSC case. This is not an obviously
220 * beneficiary option but the one worth considering...
223 srrctl |= TXGBE_RXCFG_RSCENA;
224 srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
225 srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
226 psrtype |= TXGBE_POOLRSS_L4HDR;
229 * RSC: Set ITR interval corresponding to 2K ints/s.
231 * Full-sized RSC aggregations for a 10Gb/s link will
232 * arrive at about 20K aggregation/s rate.
234 * 2K inst/s rate will make only 10% of the
235 * aggregations to be closed due to the interrupt timer
236 * expiration for a streaming at wire-speed case.
238 * For a sparse streaming case this setting will yield
239 * at most 500us latency for a single RSC aggregation.
241 eitr &= ~TXGBE_ITR_IVAL_MASK;
242 eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
243 eitr |= TXGBE_ITR_WRDSA;
245 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
246 wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
247 wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
250 * RSC requires the mapping of the queue to the
253 txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
258 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
264 * Initializes Receive Unit.
267 txgbe_dev_rx_init(struct rte_eth_dev *dev)
270 struct txgbe_rx_queue *rxq;
279 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
282 PMD_INIT_FUNC_TRACE();
283 hw = TXGBE_DEV_HW(dev);
286 * Make sure receives are disabled while setting
287 * up the RX context (registers, descriptor rings, etc.).
289 wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
290 wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
292 /* Enable receipt of broadcasted frames */
293 fctrl = rd32(hw, TXGBE_PSRCTL);
294 fctrl |= TXGBE_PSRCTL_BCA;
295 wr32(hw, TXGBE_PSRCTL, fctrl);
298 * Configure CRC stripping, if any.
300 hlreg0 = rd32(hw, TXGBE_SECRXCTL);
301 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
302 hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
304 hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
305 wr32(hw, TXGBE_SECRXCTL, hlreg0);
308 * Configure jumbo frame support, if any.
310 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
311 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
312 TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
314 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
315 TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
319 * If loopback mode is configured, set LPBK bit.
321 hlreg0 = rd32(hw, TXGBE_PSRCTL);
322 if (hw->mac.type == txgbe_mac_raptor &&
323 dev->data->dev_conf.lpbk_mode)
324 hlreg0 |= TXGBE_PSRCTL_LBENA;
326 hlreg0 &= ~TXGBE_PSRCTL_LBENA;
328 wr32(hw, TXGBE_PSRCTL, hlreg0);
331 * Assume no header split and no VLAN strip support
332 * on any Rx queue first .
334 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
336 /* Setup RX queues */
337 for (i = 0; i < dev->data->nb_rx_queues; i++) {
338 rxq = dev->data->rx_queues[i];
341 * Reset crc_len in case it was changed after queue setup by a
344 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
345 rxq->crc_len = RTE_ETHER_CRC_LEN;
349 /* Setup the Base and Length of the Rx Descriptor Rings */
350 bus_addr = rxq->rx_ring_phys_addr;
351 wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
352 (uint32_t)(bus_addr & BIT_MASK32));
353 wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
354 (uint32_t)(bus_addr >> 32));
355 wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
356 wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
358 srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
360 /* Set if packets are dropped when no descriptors available */
362 srrctl |= TXGBE_RXCFG_DROP;
365 * Configure the RX buffer size in the PKTLEN field of
366 * the RXCFG register of the queue.
367 * The value is in 1 KB resolution. Valid values can be from
370 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
371 RTE_PKTMBUF_HEADROOM);
372 buf_size = ROUND_UP(buf_size, 0x1 << 10);
373 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
375 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
377 /* It adds dual VLAN length for supporting dual VLAN */
378 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
379 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
380 dev->data->scattered_rx = 1;
381 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
382 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
385 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
386 dev->data->scattered_rx = 1;
389 * Setup the Checksum Register.
390 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
391 * Enable IP/L4 checksum computation by hardware if requested to do so.
393 rxcsum = rd32(hw, TXGBE_PSRCTL);
394 rxcsum |= TXGBE_PSRCTL_PCSD;
395 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
396 rxcsum |= TXGBE_PSRCTL_L4CSUM;
398 rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
400 wr32(hw, TXGBE_PSRCTL, rxcsum);
402 if (hw->mac.type == txgbe_mac_raptor) {
403 rdrxctl = rd32(hw, TXGBE_SECRXCTL);
404 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
405 rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
407 rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
408 wr32(hw, TXGBE_SECRXCTL, rdrxctl);
411 rc = txgbe_set_rsc(dev);
415 txgbe_set_rx_function(dev);
421 * Initializes Transmit Unit.
424 txgbe_dev_tx_init(struct rte_eth_dev *dev)
427 struct txgbe_tx_queue *txq;
431 PMD_INIT_FUNC_TRACE();
432 hw = TXGBE_DEV_HW(dev);
434 /* Setup the Base and Length of the Tx Descriptor Rings */
435 for (i = 0; i < dev->data->nb_tx_queues; i++) {
436 txq = dev->data->tx_queues[i];
438 bus_addr = txq->tx_ring_phys_addr;
439 wr32(hw, TXGBE_TXBAL(txq->reg_idx),
440 (uint32_t)(bus_addr & BIT_MASK32));
441 wr32(hw, TXGBE_TXBAH(txq->reg_idx),
442 (uint32_t)(bus_addr >> 32));
443 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
444 TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
445 /* Setup the HW Tx Head and TX Tail descriptor pointers */
446 wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
447 wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);