/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2016 Intel Corporation
 */

#include <inttypes.h>

#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_net.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|     GLORT      | PKT HDR & TYPE |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.glort,
			rxd->d.data);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|   VLAN & LEN   |     STATUS     |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.vlan_len,
			rxd->d.staterr);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|    RESERVED    |    RSS_HASH    |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", 0, rxd->d.rss);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|            TIME TAG             |");
	PMD_RX_LOG(DEBUG, "|       0x%016"PRIx64"        |", rxd->q.timestamp);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
}
#endif /* RTE_LIBRTE_FM10K_DEBUG_RX */
#define FM10K_TX_OFFLOAD_MASK (  \
		PKT_TX_VLAN_PKT |        \
		PKT_TX_IP_CKSUM |        \
		PKT_TX_L4_MASK |         \
		PKT_TX_TCP_SEG)

#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
		(PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
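/*
 * Illustration (values are an example, not taken from this file): a mbuf
 * carrying PKT_TX_OUTER_IP_CKSUM has a bit set in PKT_TX_OFFLOAD_MASK but
 * not in FM10K_TX_OFFLOAD_MASK, so it survives the XOR above and
 * (ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) is non-zero; fm10k_prep_pkts()
 * below uses exactly this test to reject such packets before they reach
 * the hardware ring.
 */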
/* @note: When this function is changed, make corresponding change to
 * fm10k_dev_supported_ptypes_get()
 */
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
	static const uint32_t
		ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
			__rte_cache_aligned = {
		[FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
		[FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT,
		[FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	};

	m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
						>> FM10K_RXD_PKTTYPE_SHIFT];

	if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
		m->ol_flags |= PKT_RX_RSS_HASH;

	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	else
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
}
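/*
 * Reading of the status bits above (inferred from the flag names, not
 * restated from fm10k documentation): IPCS/L4CS mean the hardware actually
 * ran a checksum check, IPE/L4E mean that check failed.  Only the
 * combination "checked and failed" is reported as *_CKSUM_BAD; everything
 * else is reported as *_CKSUM_GOOD.
 */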
uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	int alloc = 0;
	uint16_t next_dd;
	int ret;

	next_dd = q->next_dd;

	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_pkts; ++count) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif
		rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(mbuf, &desc);
#endif

		mbuf->hash.rss = desc.d.rss;
		/**
		 * Packets received by the fm10k device always carry at least
		 * one VLAN tag. For packets arriving without a VLAN tag,
		 * the port default VLAN tag is used.
		 * So the PKT_RX_VLAN flag is always set and vlan_tci
		 * is valid for every RX packet's mbuf.
		 */
		mbuf->ol_flags |= PKT_RX_VLAN;
		mbuf->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is an unused field in the fm10k driver,
		 * so it is reused here to store the sglort value.
		 */
		if (q->rx_ftag_en)
			mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);

		rx_pkts[count] = mbuf;
		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint16_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	return count;
}
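/*
 * Replenish arithmetic, with illustrative numbers (not taken from the
 * driver): for nb_desc = 512 and alloc_thresh = 64, mbufs are refilled and
 * the tail register is written only in 64-descriptor batches.  Once
 * next_dd passes next_trigger (or the ring wraps, alloc == 1), 64 fresh
 * mbufs are bulk-allocated into sw_ring[next_alloc .. next_trigger], the
 * tail is written with next_trigger, and next_trigger advances by
 * alloc_thresh, wrapping back to alloc_thresh - 1 at the end of the ring.
 */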
uint16_t
fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	uint16_t nb_rcv, nb_seg;
	int alloc = 0;
	uint16_t next_dd;
	struct rte_mbuf *first_seg = q->pkt_first_seg;
	struct rte_mbuf *last_seg = q->pkt_last_seg;
	int ret;

	next_dd = q->next_dd;
	nb_rcv = 0;

	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_seg; count++) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif

		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}

		/* Fill data length */
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of
		 * segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
		if (!first_seg) {
			first_seg = mbuf;
			first_seg->pkt_len = desc.w.length;
		} else {
			first_seg->pkt_len =
					(uint16_t)(first_seg->pkt_len +
					rte_pktmbuf_data_len(mbuf));
			first_seg->nb_segs++;
			last_seg->next = mbuf;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
			last_seg = mbuf;
			continue;
		}

		first_seg->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(first_seg, &desc);
#endif
		first_seg->hash.rss = desc.d.rss;
		/**
		 * Packets received by the fm10k device always carry at least
		 * one VLAN tag. For packets arriving without a VLAN tag,
		 * the port default VLAN tag is used.
		 * So the PKT_RX_VLAN flag is always set and vlan_tci
		 * is valid for every RX packet's mbuf.
		 */
		first_seg->ol_flags |= PKT_RX_VLAN;
		first_seg->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is an unused field in the fm10k driver,
		 * so it is reused here to store the sglort value.
		 */
		if (q->rx_ftag_en)
			first_seg->vlan_tci_outer =
				rte_le_to_cpu_16(desc.w.sglort);

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch((char *)first_seg->buf_addr +
			first_seg->data_off);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rcv++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint16_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	q->pkt_first_seg = first_seg;
	q->pkt_last_seg = last_seg;

	return nb_rcv;
}
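/*
 * Note: pkt_first_seg/pkt_last_seg park a partially reassembled packet on
 * the queue.  If a burst ends before the EOP descriptor of a multi-segment
 * packet is seen, the chain built so far is saved here and stitching
 * resumes on the next call.
 */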
int
fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint16_t desc;
	int ret;

	if (unlikely(offset >= rxq->nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
		return 0;
	}

	desc = rxq->next_dd + offset;
	if (desc >= rxq->nb_desc)
		desc -= rxq->nb_desc;

	rxdp = &rxq->hw_ring[desc];

	ret = !!(rxdp->w.status &
		rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));

	return ret;
}
int
fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint16_t nb_hold, trigger_last;
	uint16_t desc;
	int ret;

	if (unlikely(offset >= rxq->nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
		return 0;
	}

	if (rxq->next_trigger < rxq->alloc_thresh)
		trigger_last = rxq->next_trigger +
					rxq->nb_desc - rxq->alloc_thresh;
	else
		trigger_last = rxq->next_trigger - rxq->alloc_thresh;

	if (rxq->next_dd < trigger_last)
		nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
	else
		nb_hold = rxq->next_dd - trigger_last;

	if (offset >= rxq->nb_desc - nb_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->next_dd + offset;
	if (desc >= rxq->nb_desc)
		desc -= rxq->nb_desc;

	rxdp = &rxq->hw_ring[desc];

	ret = !!(rxdp->w.status &
		rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));

	return ret;
}
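/*
 * Worked example for the nb_hold computation above (illustrative values):
 * with nb_desc = 512, alloc_thresh = 64 and next_trigger = 64, the last
 * tail write happened at trigger_last = 0.  If next_dd = 10, then
 * nb_hold = 10 descriptors have been consumed by software but not yet
 * handed back to the hardware, so only nb_desc - nb_hold = 502 entries can
 * be probed; larger offsets report RTE_ETH_RX_DESC_UNAVAIL.
 */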
int
fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	volatile struct fm10k_tx_desc *txdp;
	struct fm10k_tx_queue *txq = tx_queue;
	uint16_t desc;
	uint16_t next_rs = txq->nb_desc;
	struct fifo rs_tracker = txq->rs_tracker;
	struct fifo *r = &rs_tracker;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	desc = txq->next_free + offset;
	/* go to next desc that has the RS bit */
	desc = (desc / txq->rs_thresh + 1) *
		txq->rs_thresh - 1;

	if (desc >= txq->nb_desc) {
		desc -= txq->nb_desc;
		if (desc >= txq->nb_desc)
			desc -= txq->nb_desc;
	}

	r->head = r->list;
	for ( ; r->head != r->endp; ) {
		if (*r->head >= desc && *r->head < next_rs)
			next_rs = *r->head;
		++r->head;
	}

	txdp = &txq->hw_ring[next_rs];
	if (txdp->flags & FM10K_TXD_FLAG_DONE)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}
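/*
 * Example of the RS rounding above (illustrative numbers): with
 * rs_thresh = 32, a query that lands on descriptor 70 is rounded up to the
 * next RS boundary, (70 / 32 + 1) * 32 - 1 = 95, and the answer is taken
 * from the DONE bit of the closest tracked RS descriptor at or after that
 * position.
 */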
/*
 * Free multiple TX mbufs at a time if they come from the same pool
 *
 * @txep: pointer into the software ring at the first entry to free
 * @num: number of descriptors to free
 *
 */
static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
{
	struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
	int i;
	int nb_free = 0;

	if (unlikely(num == 0))
		return;

	m = rte_pktmbuf_prefree_seg(txep[0]);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < num; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
			txep[i] = NULL;
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < num; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
			txep[i] = NULL;
		}
	}
}
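/*
 * Design note: transmitted mbufs usually come from a single mempool, so
 * consecutive segments are batched into one rte_mempool_put_bulk() call;
 * the batch is only flushed and restarted when a segment from a different
 * pool shows up.  rte_pktmbuf_prefree_seg() returns NULL for mbufs that
 * still hold references, and those are simply skipped.
 */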
static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
	uint16_t next_rs, count = 0;

	next_rs = fifo_peek(&q->rs_tracker);
	if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
		return;

	/* the DONE flag is set on this descriptor so remove the ID
	 * from the RS bit tracker and free the buffers */
	fifo_remove(&q->rs_tracker);

	/* wrap around? if so, free buffers from last_free up to but NOT
	 * including nb_desc */
	if (q->last_free > next_rs) {
		count = q->nb_desc - q->last_free;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free = 0;
	}

	/* adjust free descriptor count before the next loop */
	q->nb_free += count + (next_rs + 1 - q->last_free);

	/* free buffers from last_free, up to and including next_rs */
	if (q->last_free <= next_rs) {
		count = next_rs - q->last_free + 1;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free += count;
	}

	if (q->last_free == q->nb_desc)
		q->last_free = 0;
}
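/*
 * Example of the two-step free above (illustrative numbers): with
 * nb_desc = 256, last_free = 250 and next_rs = 5, the first block frees
 * descriptors 250..255 (count = 6) and wraps last_free to 0, the second
 * frees 0..5, and nb_free grows by 6 + (5 + 1 - 0) = 12 overall.
 */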
static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags, hdrlen;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* only set the RS flag on the last descriptor if at least rs_thresh
	 * descriptors have been used since the RS flag was last set */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->nb_free -= mb->nb_segs;

	q->hw_ring[q->next_free].flags = 0;
	if (q->tx_ftag_en)
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing. */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	q->sw_ring[q->next_free] = mb;
	q->hw_ring[q->next_free].buffer_addr =
			rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
	q->hw_ring[q->next_free].buflen =
			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));

	if (mb->ol_flags & PKT_TX_TCP_SEG) {
		hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
			mb->l3_len + mb->l4_len;
		if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
			hdrlen += sizeof(struct fm10k_ftag);

		if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
				(hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
				(mb->tso_segsz >= FM10K_TSO_MINMSS))) {
			q->hw_ring[q->next_free].mss = mb->tso_segsz;
			q->hw_ring[q->next_free].hdrlen = hdrlen;
		}
	}

	if (++q->next_free == q->nb_desc)
		q->next_free = 0;

	/* fill up the rings */
	for (mb = mb->next; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		q->hw_ring[q->next_free].flags = 0;
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}

	q->hw_ring[last_id].flags |= flags;
}
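/*
 * Design note: LAST is set on every packet's final descriptor, but RS
 * (request a completion writeback) is only set roughly once every
 * rs_thresh descriptors and recorded in the rs_tracker FIFO, so
 * tx_free_descriptors() reclaims completed buffers in rs_thresh-sized
 * batches rather than per packet.
 */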
uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct fm10k_tx_queue *q = tx_queue;
	struct rte_mbuf *mb;
	uint16_t count;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* running low on descriptors? try to free some... */
		if (q->nb_free < q->free_thresh)
			tx_free_descriptors(q);

		/* make sure there are enough free descriptors to transmit the
		 * entire packet before doing anything */
		if (q->nb_free < mb->nb_segs)
			break;

		/* sanity check to make sure the mbuf is valid */
		if ((mb->nb_segs == 0) ||
				((mb->nb_segs > 1) && (mb->next == NULL)))
			break;

		/* process the packet */
		tx_xmit_pkt(q, mb);
	}

	/* update the tail pointer if any packets were processed */
	if (likely(count > 0))
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);

	return count;
}
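/*
 * Only one doorbell write per burst: the tail register is updated once,
 * after the loop, for every descriptor filled in this call, keeping MMIO
 * writes off the per-packet path.
 */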
uint16_t
fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	int i, ret;
	struct rte_mbuf *m;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		if ((m->ol_flags & PKT_TX_TCP_SEG) &&
				(m->tso_segsz < FM10K_TSO_MINMSS)) {
			rte_errno = EINVAL;
			return i;
		}

		if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);