/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|     GLORT      | PKT HDR & TYPE |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.glort,
			rxd->d.data);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|   VLAN & LEN   |     STATUS     |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.vlan_len,
			rxd->d.staterr);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|    RESERVED    |    RSS_HASH    |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", 0, rxd->d.rss);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|            TIME TAG             |");
	PMD_RX_LOG(DEBUG, "|       0x%016"PRIx64"        |", rxd->q.timestamp);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
}
#endif /* RTE_LIBRTE_FM10K_DEBUG_RX */
/* @note: When this function is changed, make corresponding change to
 * fm10k_dev_supported_ptypes_get()
 */
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
	static const uint32_t
		ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
			__rte_cache_aligned = {
		[FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
		[FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
					RTE_PTYPE_L3_IPV4_EXT,
		[FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
					RTE_PTYPE_L3_IPV6_EXT,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
					RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
					RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
					RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
					RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	};
	m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
						>> FM10K_RXD_PKTTYPE_SHIFT];

	if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
		m->ol_flags |= PKT_RX_RSS_HASH;
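
	/* The hardware reports a checksum error only when it both validated
	 * the checksum (IPCS/L4CS) and found an error (IPE/L4E); require both
	 * status bits so packets whose checksum was never validated are not
	 * flagged as bad. */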
	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
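
/*
 * Receive a burst of packets from an RX queue; each packet occupies a
 * single descriptor and a single mbuf.
 *
 * @rx_queue: RX queue handle (struct fm10k_rx_queue)
 * @rx_pkts: array that receives the mbuf pointers
 * @nb_pkts: maximum number of packets to receive
 */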
uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	uint16_t next_dd;
	int alloc = 0;
	int ret;

	next_dd = q->next_dd;

	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_pkts; ++count) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif
		rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(mbuf, &desc);
#endif

		mbuf->hash.rss = desc.d.rss;
		/**
		 * Packets from the fm10k device always carry at least one
		 * VLAN tag; packets arriving without a VLAN tag get the port
		 * default VLAN tag. So the PKT_RX_VLAN_PKT flag is always set
		 * and vlan_tci is valid for every RX packet's mbuf.
		 */
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
		mbuf->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is unused by the fm10k driver, so it
		 * is reused here to store the sglort value.
		 */
		mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);

		rx_pkts[count] = mbuf;
		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);
		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}
	}

	q->next_dd = next_dd;
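
	/*
	 * Replenish the RX ring in bursts: once the queue has consumed past
	 * the trigger position (or the ring has wrapped), grab a bulk of
	 * alloc_thresh fresh mbufs from the mempool, rewrite the consumed
	 * descriptors, and only then advance the hardware tail pointer.
	 */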
	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint8_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	return count;
}
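
/*
 * Receive a burst of packets from an RX queue, reassembling packets that
 * span multiple descriptors into chained mbufs.
 *
 * @rx_queue: RX queue handle (struct fm10k_rx_queue)
 * @rx_pkts: array that receives the first-segment mbuf pointers
 * @nb_pkts: maximum number of packets to receive
 */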
uint16_t
fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	uint16_t nb_rcv, nb_seg;
	uint16_t next_dd;
	int alloc = 0;
	int ret;
	struct rte_mbuf *first_seg = q->pkt_first_seg;
	struct rte_mbuf *last_seg = q->pkt_last_seg;

	next_dd = q->next_dd;
	nb_rcv = 0;

	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_seg; count++) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif

		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);
		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}

		/* Fill data length */
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of
		 * segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg = mbuf;
			first_seg->pkt_len = desc.w.length;
		} else {
			first_seg->pkt_len =
					(uint16_t)(first_seg->pkt_len +
					rte_pktmbuf_data_len(mbuf));
			first_seg->nb_segs++;
			last_seg->next = mbuf;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
			last_seg = mbuf;
			continue;
		}

		first_seg->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(first_seg, &desc);
#endif

		first_seg->hash.rss = desc.d.rss;
		/**
		 * Packets from the fm10k device always carry at least one
		 * VLAN tag; packets arriving without a VLAN tag get the port
		 * default VLAN tag. So the PKT_RX_VLAN_PKT flag is always set
		 * and vlan_tci is valid for every RX packet's mbuf.
		 */
		first_seg->ol_flags |= PKT_RX_VLAN_PKT;
		first_seg->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is unused by the fm10k driver, so it
		 * is reused here to store the sglort value.
		 */
		first_seg->vlan_tci_outer =
			rte_le_to_cpu_16(desc.w.sglort);

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch((char *)first_seg->buf_addr +
			first_seg->data_off);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rcv++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint8_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	q->pkt_first_seg = first_seg;
	q->pkt_last_seg = last_seg;

	return nb_rcv;
}
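
/*
 * Check whether the RX descriptor at the given offset from the next
 * descriptor to be processed has already been written back by hardware.
 *
 * @rx_queue: RX queue handle (struct fm10k_rx_queue)
 * @offset: descriptor offset from the current read position
 */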
int
fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint16_t desc;
	int ret;

	if (unlikely(offset >= rxq->nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
		return 0;
	}

	desc = rxq->next_dd + offset;
	if (desc >= rxq->nb_desc)
		desc -= rxq->nb_desc;
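
	/* The DD (descriptor done) bit is set by hardware when it writes the
	 * descriptor back, so a set bit means a packet is available at this
	 * offset. */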
	rxdp = &rxq->hw_ring[desc];

	ret = !!(rxdp->w.status &
			rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));

	return ret;
}

/*
 * Free multiple TX mbufs at a time if they are in the same mempool
 *
 * @txep: pointer to the first software ring entry to free
 * @num: number of descriptors to free
 */
static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
{
	struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
	int i;
	int nb_free = 0;

	if (unlikely(num == 0))
		return;
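
	/*
	 * __rte_pktmbuf_prefree_seg() returns the mbuf only when it may
	 * actually be returned to its pool (its reference count dropped to
	 * zero); a NULL return means the segment is still in use elsewhere
	 * and must not be freed here.
	 */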
	m = __rte_pktmbuf_prefree_seg(txep[0]);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < num; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i]);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < num; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i]);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}
}
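
/*
 * Reclaim completed TX descriptors for a queue: once hardware has marked
 * the next RS-tracked descriptor as DONE, free the transmitted mbufs from
 * last_free up to that descriptor (handling ring wrap-around) and update
 * the free descriptor count.
 */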
static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
	uint16_t next_rs, count = 0;

	next_rs = fifo_peek(&q->rs_tracker);
	if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
		return;

	/* the DONE flag is set on this descriptor so remove the ID
	 * from the RS bit tracker and free the buffers */
	fifo_remove(&q->rs_tracker);

	/* wrap around? if so, free buffers from last_free up to but NOT
	 * including nb_desc */
	if (q->last_free > next_rs) {
		count = q->nb_desc - q->last_free;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free = 0;
	}

	/* adjust free descriptor count before the next loop */
	q->nb_free += count + (next_rs + 1 - q->last_free);

	/* free buffers from last_free, up to and including next_rs */
	if (q->last_free <= next_rs) {
		count = next_rs - q->last_free + 1;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free += count;
	}

	if (q->last_free == q->nb_desc)
		q->last_free = 0;
}
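
/*
 * Place a single packet (all of its segments) onto the TX ring.
 * The caller must guarantee that enough free descriptors are available.
 */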
static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags, hdrlen;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* but only set the RS flag on the last descriptor if rs_thresh
	 * descriptors will be used since the RS flag was last set */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->nb_free -= mb->nb_segs;

	q->hw_ring[q->next_free].flags = 0;
	if (q->tx_ftag_en)
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing. */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	q->sw_ring[q->next_free] = mb;
	q->hw_ring[q->next_free].buffer_addr =
			rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
	q->hw_ring[q->next_free].buflen =
			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));

	if (mb->ol_flags & PKT_TX_TCP_SEG) {
		hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
			mb->l3_len + mb->l4_len;
		if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
			hdrlen += sizeof(struct fm10k_ftag);
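
		/* Program the TSO context only when the header length and MSS
		 * are within the limits supported by the hardware. */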
		if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
				(hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
				(mb->tso_segsz >= FM10K_TSO_MINMSS))) {
			q->hw_ring[q->next_free].mss = mb->tso_segsz;
			q->hw_ring[q->next_free].hdrlen = hdrlen;
		}
	}

	if (++q->next_free == q->nb_desc)
		q->next_free = 0;

	/* fill up the rings */
	for (mb = mb->next; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		q->hw_ring[q->next_free].flags = 0;
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}

	q->hw_ring[last_id].flags |= flags;
}
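
/*
 * Transmit a burst of packets on a TX queue, reclaiming completed
 * descriptors as needed and updating the hardware tail pointer once at
 * the end of the burst.
 *
 * @tx_queue: TX queue handle (struct fm10k_tx_queue)
 * @tx_pkts: array of packets to transmit
 * @nb_pkts: number of packets in the array
 */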
uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct fm10k_tx_queue *q = tx_queue;
	struct rte_mbuf *mb;
	uint16_t count;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* running low on descriptors? try to free some... */
		if (q->nb_free < q->free_thresh)
			tx_free_descriptors(q);

		/* make sure there are enough free descriptors to transmit the
		 * entire packet before doing anything */
		if (q->nb_free < mb->nb_segs)
			break;

		/* sanity check to make sure the mbuf is valid */
		if ((mb->nb_segs == 0) ||
				((mb->nb_segs > 1) && (mb->next == NULL)))
			break;

		/* process the packet */
		tx_xmit_pkt(q, mb);
	}

	/* update the tail pointer if any packets were processed */
	if (likely(count > 0))
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);

	return count;
}