/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_common.h>

#include "fm10k.h"
#include "base/fm10k_type.h"
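
/*
 * Optionally prefetch packet data before the application touches it;
 * compiles to a no-op when RTE_PMD_PACKET_PREFETCH is disabled.
 */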
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif

static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
#ifndef RTE_LIBRTE_FM10K_DEBUG_RX
	RTE_SET_USED(rxd);
#endif

	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|     GLORT      | PKT HDR & TYPE |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.glort,
			rxd->d.data);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|   VLAN & LEN   |     STATUS     |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.vlan_len,
			rxd->d.staterr);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|    RESERVED    |    RSS_HASH    |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", 0, rxd->d.rss);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|            TIME TAG             |");
	PMD_RX_LOG(DEBUG, "|       0x%016lx        |", rxd->q.timestamp);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
}
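
/*
 * Derive mbuf offload flags from the RX descriptor: RSS hash validity,
 * IP/L4 checksum errors, VLAN presence, and the L3 packet type.
 */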
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
	uint16_t ptype;
	static const uint16_t pt_lut[] = { 0,
		PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT,
		PKT_RX_IPV6_HDR, PKT_RX_IPV6_HDR_EXT,
		0, 0, 0
	};
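
	/*
	 * A non-zero RSS type in pkt_info means the hardware computed a
	 * valid RSS hash for this packet.
	 */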
	if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
		m->ol_flags |= PKT_RX_RSS_HASH;

	/*
	 * A checksum is reported bad only when the hardware both attempted
	 * the check (IPCS/L4CS) and flagged an error (IPE/L4E).
	 */
	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (d->d.staterr & FM10K_RXD_STATUS_VEXT)
		m->ol_flags |= PKT_RX_VLAN_PKT;

	if (unlikely(d->d.staterr & FM10K_RXD_STATUS_HBO))
		m->ol_flags |= PKT_RX_HBUF_OVERFLOW;

	if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE))
		m->ol_flags |= PKT_RX_RECIP_ERR;

	/* map the L3 packet type to the matching offload flag */
	ptype = (d->d.data & FM10K_RXD_PKTTYPE_MASK_L3) >>
						FM10K_RXD_PKTTYPE_SHIFT;
	m->ol_flags |= pt_lut[(uint8_t)ptype];
}
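
/*
 * Receive a burst of packets. Descriptors are scanned from next_dd until
 * one is found without the DD (descriptor done) bit set; completed mbufs
 * are handed to the caller and the ring is replenished from the mempool
 * once the refill trigger has been crossed.
 */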
uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	int alloc = 0;
	uint16_t next_dd;
	int ret;

	next_dd = q->next_dd;

	/* never receive more packets than the refill threshold in one burst */
	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_pkts; ++count) {
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
			break;
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif

		rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(mbuf, &desc);
#endif

		mbuf->hash.rss = desc.d.rss;

		rx_pkts[count] = mbuf;
		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}
	}

	q->next_dd = next_dd;

	/* replenish once the scan has passed the trigger or the ring wrapped */
	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint8_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	return count;
}
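
/*
 * Scattered variant of the receive path: a packet larger than one mbuf
 * arrives as a chain of descriptors, only the last of which has EOP set.
 * Segments are linked through mbuf->next and the completed chain is
 * returned to the caller as a single packet.
 */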
uint16_t
fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	uint16_t nb_rcv, nb_seg;
	int alloc = 0;
	uint16_t next_dd;
	struct rte_mbuf *first_seg = q->pkt_first_seg;
	struct rte_mbuf *last_seg = q->pkt_last_seg;
	int ret;

	next_dd = q->next_dd;
	nb_rcv = 0;

	/* never process more segments than the refill threshold in one burst */
	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_seg; count++) {
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
			break;
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif

		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}

		/* Fill data length */
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of
		 * segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
		if (!first_seg) {
			first_seg = mbuf;
			first_seg->pkt_len = desc.w.length;
		} else {
			first_seg->pkt_len =
					(uint16_t)(first_seg->pkt_len +
					rte_pktmbuf_data_len(mbuf));
			first_seg->nb_segs++;
			last_seg->next = mbuf;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
			last_seg = mbuf;
			continue;
		}

		/* Initialize the first segment, which represents the packet. */
		first_seg->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(first_seg, &desc);
#endif
		first_seg->hash.rss = desc.d.rss;

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch((char *)first_seg->buf_addr +
			first_seg->data_off);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rcv++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint8_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	/* save any partially-received packet so the next call can resume it */
	q->pkt_first_seg = first_seg;
	q->pkt_last_seg = last_seg;

	return nb_rcv;
}
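
/*
 * Free the transmitted mbufs behind the oldest batch tracked by the RS-bit
 * FIFO, provided the hardware has marked the batch's last descriptor DONE.
 */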
static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
	uint16_t next_rs, count = 0;

	next_rs = fifo_peek(&q->rs_tracker);
	if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
		return;

	/* the DONE flag is set on this descriptor so remove the ID
	 * from the RS bit tracker and free the buffers */
	fifo_remove(&q->rs_tracker);

	/* wrap around? if so, free buffers from last_free up to but NOT
	 * including nb_desc */
	if (q->last_free > next_rs) {
		count = q->nb_desc - q->last_free;
		while (q->last_free < q->nb_desc) {
			rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
			q->sw_ring[q->last_free] = NULL;
			++q->last_free;
		}
		q->last_free = 0;
	}

	/* adjust free descriptor count before the next loop */
	q->nb_free += count + (next_rs + 1 - q->last_free);

	/* free buffers from last_free, up to and including next_rs */
	while (q->last_free <= next_rs) {
		rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
		q->sw_ring[q->last_free] = NULL;
		++q->last_free;
	}

	if (q->last_free == q->nb_desc)
		q->last_free = 0;
}
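
/*
 * Place one packet (possibly multi-segment) on the TX ring. The caller
 * must already have verified that enough free descriptors are available
 * for every segment of the mbuf chain.
 */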
static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* but only set the RS flag on the last descriptor if rs_thresh
	 * descriptors will be used since the RS flag was last set */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->hw_ring[last_id].flags = flags;
	q->nb_free -= mb->nb_segs;

	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing. */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	/* fill up the rings */
	for (; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}
}
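
/*
 * Transmit a burst of packets. The loop stops early if the ring runs out
 * of free descriptors (after trying to reclaim completed ones) or if an
 * invalid mbuf chain is encountered; returns the number of packets queued.
 */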
uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct fm10k_tx_queue *q = tx_queue;
	struct rte_mbuf *mb;
	uint16_t count;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* running low on descriptors? try to free some... */
		if (q->nb_free < q->free_trigger)
			tx_free_descriptors(q);

		/* make sure there are enough free descriptors to transmit the
		 * entire packet before doing anything */
		if (q->nb_free < mb->nb_segs)
			break;

		/* sanity check to make sure the mbuf is valid */
		if ((mb->nb_segs == 0) ||
		    ((mb->nb_segs > 1) && (mb->next == NULL)))
			break;

		/* process the packet */
		tx_xmit_pkt(q, mb);
	}

	/* update the tail pointer if any packets were processed */
	if (likely(count > 0))
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);

	return count;
}
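
/*
 * Note: applications do not call these handlers directly; the driver
 * installs them as the device's rx_pkt_burst/tx_pkt_burst callbacks
 * (see fm10k_ethdev.c), and they are reached through rte_eth_rx_burst()
 * and rte_eth_tx_burst().
 */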