drivers/net/enic/enic_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include "enic_rxtx_common.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

/* Dummy receive function used in place of the actual function so that
 * reconfiguration operations can be performed safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
                     __rte_unused struct rte_mbuf **rx_pkts,
                     __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
               uint16_t nb_pkts)
{
        struct vnic_rq *sop_rq = rx_queue;
        struct vnic_rq *data_rq;
        struct vnic_rq *rq;
        struct enic *enic = vnic_dev_priv(sop_rq->vdev);
        uint16_t cq_idx;
        uint16_t rq_idx, max_rx;
        uint16_t rq_num;
        struct rte_mbuf *nmb, *rxmb;
        uint16_t nb_rx = 0;
        struct vnic_cq *cq;
        volatile struct cq_desc *cqd_ptr;
        uint8_t color;
        uint8_t tnl;
        uint16_t seg_length;
        struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
        struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

        cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;          /* index of cqd, rqd, mbuf_table */
        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
        color = cq->last_color;

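        /*
         * When Rx scatter is enabled, each ethdev Rx queue is backed by two
         * hardware RQs: the start-of-packet (SOP) RQ, which receives the
         * first segment of every packet, and a data RQ for any remaining
         * segments.
         */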
        data_rq = &enic->rq[sop_rq->data_queue_idx];

        /* Receive until the end of the ring, at most. */
        max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);

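        /*
         * Completions are detected via the color bit: the adapter toggles
         * the color it writes on each pass over the CQ ring, and
         * cq->last_color (flipped below when cq_idx wraps) holds the color
         * of descriptors that have already been consumed. A descriptor
         * whose color still matches last_color is stale and ends the burst.
         */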
        while (max_rx) {
                volatile struct rq_enet_desc *rqd_ptr;
                struct cq_desc cqd;
                uint8_t packet_error;
                uint16_t ciflags;

                max_rx--;

                /* Check for pkts available */
                if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
                        break;

                /* Get the cq descriptor and extract rq info from it */
                cqd = *cqd_ptr;
                rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
                rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

                rq = &enic->rq[rq_num];
                rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

                /* allocate a new mbuf */
                nmb = rte_mbuf_raw_alloc(rq->mp);
                if (nmb == NULL) {
                        rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
                        break;
                }

                /* A packet error means descriptor and data are untrusted */
                packet_error = enic_cq_rx_check_err(&cqd);

                /* Get the mbuf to return and replace with one just allocated */
                rxmb = rq->mbuf_ring[rq_idx];
                rq->mbuf_ring[rq_idx] = nmb;
                cq_idx++;

                /* Prefetch next mbuf & desc while processing current one */
                cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
                rte_enic_prefetch(cqd_ptr);

                ciflags = enic_cq_rx_desc_ciflags(
                        (struct cq_enet_rq_desc *)&cqd);

                /* Push descriptor for newly allocated mbuf */
                nmb->data_off = RTE_PKTMBUF_HEADROOM;
                /*
                 * Only the address needs to be refilled. The length_type of
                 * the descriptor is set during initialization
                 * (enic_alloc_rx_queue_mbufs) and does not change.
                 */
                rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
                                                    RTE_PKTMBUF_HEADROOM);

                /* Fill in the rest of the mbuf */
                seg_length = enic_cq_rx_desc_n_bytes(&cqd);

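                /*
                 * A completion on the SOP RQ starts a new packet; a
                 * completion on the data RQ appends this segment to the
                 * chain started by the most recent SOP descriptor.
                 */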
                if (rq->is_sop) {
                        first_seg = rxmb;
                        first_seg->pkt_len = seg_length;
                } else {
                        first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
                                                        + seg_length);
                        first_seg->nb_segs++;
                        last_seg->next = rxmb;
                }

                rxmb->port = enic->port_id;
                rxmb->data_len = seg_length;

                rq->rx_nb_hold++;

                if (!(enic_cq_rx_desc_eop(ciflags))) {
                        last_seg = rxmb;
                        continue;
                }

                /*
                 * When overlay offload is enabled, CQ.fcoe indicates the
                 * packet is tunnelled.
                 */
                tnl = enic->overlay_offload &&
                        (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
                /* cq rx flags are only valid if eop bit is set */
                first_seg->packet_type =
                        enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
                enic_cq_rx_to_pkt_flags(&cqd, first_seg);

                /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
                if (tnl) {
                        first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
                                                    RTE_PTYPE_L4_MASK);
                }
                if (unlikely(packet_error)) {
                        rte_pktmbuf_free(first_seg);
                        rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
                        continue;
                }

                /* prefetch mbuf data for caller */
                rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));

                /* store the mbuf address into the next entry of the array */
                rx_pkts[nb_rx++] = first_seg;
        }
        if (unlikely(cq_idx == cq->ring.desc_count)) {
                cq_idx = 0;
                cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
        }

        sop_rq->pkt_first_seg = first_seg;
        sop_rq->pkt_last_seg = last_seg;

        cq->to_clean = cq_idx;

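        /*
         * Return the refilled buffers to the adapter in batches: once more
         * than rx_free_thresh descriptors have been consumed, advance the
         * posted index of the data RQ first and then the SOP RQ, ringing
         * the doorbell for each.
         */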
        if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
            sop_rq->rx_free_thresh) {
                if (data_rq->in_use) {
                        data_rq->posted_index =
                                enic_ring_add(data_rq->ring.desc_count,
                                              data_rq->posted_index,
                                              data_rq->rx_nb_hold);
                        data_rq->rx_nb_hold = 0;
                }
                sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
                                                     sop_rq->posted_index,
                                                     sop_rq->rx_nb_hold);
                sop_rq->rx_nb_hold = 0;

                rte_mb();
                if (data_rq->in_use)
                        iowrite32_relaxed(data_rq->posted_index,
                                          &data_rq->ctrl->posted_index);
                rte_compiler_barrier();
                iowrite32_relaxed(sop_rq->posted_index,
                                  &sop_rq->ctrl->posted_index);
        }

        return nb_rx;
}

uint16_t
enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        struct rte_mbuf *mb, **rx, **rxmb;
        uint16_t cq_idx, nb_rx, max_rx;
        struct cq_enet_rq_desc *cqd;
        struct rq_enet_desc *rqd;
        unsigned int port_id;
        struct vnic_cq *cq;
        struct vnic_rq *rq;
        struct enic *enic;
        uint8_t color;
        bool overlay;
        bool tnl;

        rq = rx_queue;
        enic = vnic_dev_priv(rq->vdev);
        cq = &enic->cq[enic_cq_rq(enic, rq->index)];
        cq_idx = cq->to_clean;

        /*
         * Fill up the reserve of free mbufs. Below, we restock the receive
         * ring with these mbufs to avoid allocation failures.
         */
        if (rq->num_free_mbufs == 0) {
                if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
                                         ENIC_RX_BURST_MAX))
                        return 0;
                rq->num_free_mbufs = ENIC_RX_BURST_MAX;
        }

        /* Receive until the end of the ring, at most. */
        max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
        max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);

        cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
        color = cq->last_color;
        rxmb = rq->mbuf_ring + cq_idx;
        port_id = enic->port_id;
        overlay = enic->overlay_offload;

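        /*
         * In the no-scatter path the CQ, the RQ and the mbuf ring all use
         * the same index, so completions can be walked with plain pointer
         * arithmetic and the consumed mbufs replaced in bulk afterwards.
         */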
        rx = rx_pkts;
        while (max_rx) {
                max_rx--;
                if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
                        break;
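                /*
                 * A truncated completion means the frame did not fit into
                 * the single receive buffer (no scatter here): drop it and
                 * count it as an Rx error.
                 */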
                if (unlikely(cqd->bytes_written_flags &
                             CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
                        rte_pktmbuf_free(*rxmb++);
                        rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
                        cqd++;
                        continue;
                }

                mb = *rxmb++;
                /* prefetch mbuf data for caller */
                rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));
                mb->data_len = cqd->bytes_written_flags &
                        CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
                mb->pkt_len = mb->data_len;
                mb->port = port_id;
                tnl = overlay && (cqd->completed_index_flags &
                                  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
                mb->packet_type =
                        enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
                                                     tnl);
                enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
                /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
                if (tnl) {
                        mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
                                             RTE_PTYPE_L4_MASK);
                }
                cqd++;
                *rx++ = mb;
        }
        /* Number of descriptors visited */
        nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
        if (nb_rx == 0)
                return 0;
        rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
        rxmb = rq->mbuf_ring + cq_idx;
        cq_idx += nb_rx;
        rq->rx_nb_hold += nb_rx;
        if (unlikely(cq_idx == cq->ring.desc_count)) {
                cq_idx = 0;
                cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
        }
        cq->to_clean = cq_idx;

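        /*
         * Refill the slots that were just consumed with mbufs taken from
         * the reserve; only the buffer address of each RQ descriptor needs
         * to be rewritten, the length/type fields were programmed at queue
         * setup and do not change.
         */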
        memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
               sizeof(struct rte_mbuf *) * nb_rx);
        rq->num_free_mbufs -= nb_rx;
        while (nb_rx) {
                nb_rx--;
                mb = *rxmb++;
                mb->data_off = RTE_PKTMBUF_HEADROOM;
                rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
                rqd++;
        }
        if (rq->rx_nb_hold > rq->rx_free_thresh) {
                rq->posted_index = enic_ring_add(rq->ring.desc_count,
                                                 rq->posted_index,
                                                 rq->rx_nb_hold);
                rq->rx_nb_hold = 0;
                rte_wmb();
                iowrite32_relaxed(rq->posted_index,
                                  &rq->ctrl->posted_index);
        }

        return rx - rx_pkts;
}

static inline void enic_free_wq_bufs(struct vnic_wq *wq,
                                     uint16_t completed_index)
{
        struct rte_mbuf *buf;
        struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
        unsigned int nb_to_free, nb_free = 0, i;
        struct rte_mempool *pool;
        unsigned int tail_idx;
        unsigned int desc_count = wq->ring.desc_count;

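        /*
         * Free transmitted mbufs from tail_idx up to and including the
         * completion index reported by the adapter. Segments that come
         * from the same mempool are batched and returned with a single
         * rte_mempool_put_bulk() call.
         */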
        nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
                                   + 1;
        tail_idx = wq->tail_idx;
        pool = wq->bufs[tail_idx]->pool;
        for (i = 0; i < nb_to_free; i++) {
                buf = wq->bufs[tail_idx];
                m = rte_pktmbuf_prefree_seg(buf);
                if (unlikely(m == NULL)) {
                        tail_idx = enic_ring_incr(desc_count, tail_idx);
                        continue;
                }

                if (likely(m->pool == pool)) {
                        RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
                        free[nb_free++] = m;
                } else {
                        rte_mempool_put_bulk(pool, (void *)free, nb_free);
                        free[0] = m;
                        nb_free = 1;
                        pool = m->pool;
                }
                tail_idx = enic_ring_incr(desc_count, tail_idx);
        }

        if (nb_free > 0)
                rte_mempool_put_bulk(pool, (void **)free, nb_free);

        wq->tail_idx = tail_idx;
        wq->ring.desc_avail += nb_to_free;
}

unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
        uint16_t completed_index;

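        /*
         * The adapter writes the index of the most recently completed WQ
         * descriptor into the cqmsg memzone; only the low 16 bits carry
         * the index.
         */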
        completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

        if (wq->last_completed_index != completed_index) {
                enic_free_wq_bufs(wq, completed_index);
                wq->last_completed_index = completed_index;
        }
        return 0;
}

uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        int32_t ret;
        uint16_t i;
        uint64_t ol_flags;
        struct rte_mbuf *m;

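        /*
         * Validate offload requests before transmit: reject oversized
         * packets, TSO requests whose MSS plus header length exceed the
         * adapter's maximum packet size, and offload flags this WQ does
         * not support. rte_net_intel_cksum_prepare() fills in the
         * pseudo-header checksum that the hardware checksum engine expects.
         */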
        for (i = 0; i != nb_pkts; i++) {
                m = tx_pkts[i];
                ol_flags = m->ol_flags;
                if (!(ol_flags & PKT_TX_TCP_SEG)) {
                        if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                                rte_errno = EINVAL;
                                return i;
                        }
                } else {
                        uint16_t header_len;

                        header_len = m->l2_len + m->l3_len + m->l4_len;
                        if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) {
                                rte_errno = EINVAL;
                                return i;
                        }
                }

                if (ol_flags & wq->tx_offload_notsup_mask) {
                        rte_errno = ENOTSUP;
                        return i;
                }
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
                        rte_errno = -ret;
                        return i;
                }
#endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
                        rte_errno = -ret;
                        return i;
                }
        }

        return i;
}

uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        uint16_t index;
        unsigned int pkt_len, data_len;
        unsigned int nb_segs;
        struct rte_mbuf *tx_pkt;
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        struct enic *enic = vnic_dev_priv(wq->vdev);
        unsigned short vlan_id;
        uint64_t ol_flags;
        uint64_t ol_flags_mask;
        unsigned int wq_desc_avail;
        int head_idx;
        unsigned int desc_count;
        struct wq_enet_desc *descs, *desc_p, desc_tmp;
        uint16_t mss;
        uint8_t vlan_tag_insert;
        uint8_t eop, cq;
        uint64_t bus_addr;
        uint8_t offload_mode;
        uint16_t header_len;
        uint64_t tso;
        rte_atomic64_t *tx_oversized;

        enic_cleanup_wq(enic, wq);
        wq_desc_avail = vnic_wq_desc_avail(wq);
        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;
        ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
        tx_oversized = &enic->soft_stats.tx_oversized;

        nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

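        /*
         * One WQ descriptor is built per mbuf segment; EOP is set on the
         * last segment of each packet. wq->cq_pend counts descriptors
         * queued since the last completion request, and a completion
         * (cq = 1) is requested on an EOP descriptor once that count
         * reaches ENIC_WQ_CQ_THRESH, limiting completion overhead.
         */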
        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
                pkt_len = tx_pkt->pkt_len;
                data_len = tx_pkt->data_len;
                ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
                tso = ol_flags & PKT_TX_TCP_SEG;

                /* drop packet if it's too big to send */
                if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        rte_pktmbuf_free(tx_pkt);
                        rte_atomic64_inc(tx_oversized);
                        continue;
                }

                if (nb_segs > wq_desc_avail) {
                        if (index > 0)
                                goto post;
                        goto done;
                }

                mss = 0;
                vlan_id = tx_pkt->vlan_tci;
                vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN);
                bus_addr = (dma_addr_t)
                           (tx_pkt->buf_iova + tx_pkt->data_off);

                descs = (struct wq_enet_desc *)wq->ring.descs;
                desc_p = descs + head_idx;

                eop = (data_len == pkt_len);
                offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
                header_len = 0;

                if (tso) {
                        header_len = tx_pkt->l2_len + tx_pkt->l3_len +
                                     tx_pkt->l4_len;

                        /* Drop if non-TCP packet or TSO seg size is too big */
                        if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
                            header_len) > ENIC_TX_MAX_PKT_SIZE))) {
                                rte_pktmbuf_free(tx_pkt);
                                rte_atomic64_inc(tx_oversized);
                                continue;
                        }

                        offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
                        mss = tx_pkt->tso_segsz;
                        /* For tunnel, need the size of outer+inner headers */
                        if (ol_flags & PKT_TX_TUNNEL_MASK) {
                                header_len += tx_pkt->outer_l2_len +
                                        tx_pkt->outer_l3_len;
                        }
                }

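                /*
                 * In checksum-offload mode (no TSO) the descriptor's MSS
                 * field carries the checksum-request flags instead of a
                 * segment size: ENIC_CALC_IP_CKSUM for the IP header
                 * checksum and ENIC_CALC_TCP_UDP_CKSUM for the L4 checksum.
                 */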
                if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
                        if (ol_flags & PKT_TX_IP_CKSUM)
                                mss |= ENIC_CALC_IP_CKSUM;

                        /* NIC uses just 1 bit for UDP and TCP */
                        switch (ol_flags & PKT_TX_L4_MASK) {
                        case PKT_TX_TCP_CKSUM:
                        case PKT_TX_UDP_CKSUM:
                                mss |= ENIC_CALC_TCP_UDP_CKSUM;
                                break;
                        }
                }
                wq->cq_pend++;
                cq = 0;
                if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
                        cq = 1;
                        wq->cq_pend = 0;
                }
                wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
                                 offload_mode, eop, cq, 0, vlan_tag_insert,
                                 vlan_id, 0);

                *desc_p = desc_tmp;
                wq->bufs[head_idx] = tx_pkt;
                head_idx = enic_ring_incr(desc_count, head_idx);
                wq_desc_avail--;

                if (!eop) {
                        for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
                            tx_pkt->next) {
                                data_len = tx_pkt->data_len;

                                wq->cq_pend++;
                                cq = 0;
                                if (tx_pkt->next == NULL) {
                                        eop = 1;
                                        if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
                                                cq = 1;
                                                wq->cq_pend = 0;
                                        }
                                }
                                desc_p = descs + head_idx;
                                bus_addr = (dma_addr_t)(tx_pkt->buf_iova
                                           + tx_pkt->data_off);
                                wq_enet_desc_enc((struct wq_enet_desc *)
                                                 &desc_tmp, bus_addr, data_len,
                                                 mss, 0, offload_mode, eop, cq,
                                                 0, vlan_tag_insert, vlan_id,
                                                 0);

                                *desc_p = desc_tmp;
                                wq->bufs[head_idx] = tx_pkt;
                                head_idx = enic_ring_incr(desc_count, head_idx);
                                wq_desc_avail--;
                        }
                }
        }
 post:
        rte_wmb();
        iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
        wq->ring.desc_avail = wq_desc_avail;
        wq->head_idx = head_idx;

        return index;
}

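/*
 * Simplified transmit path: every packet is expected to be a single
 * segment and uses exactly one descriptor. The mask below preserves the
 * EOP and CQ-entry bits already present in the descriptor and clears the
 * rest, so only the address, length, VLAN and checksum fields have to be
 * written per packet.
 */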
static void enqueue_simple_pkts(struct rte_mbuf **pkts,
                                struct wq_enet_desc *desc,
                                uint16_t n,
                                struct enic *enic)
{
        struct rte_mbuf *p;
        uint16_t mss;

        while (n) {
                n--;
                p = *pkts++;
                desc->address = p->buf_iova + p->data_off;
                desc->length = p->pkt_len;
                /* VLAN insert */
                desc->vlan_tag = p->vlan_tci;
                desc->header_length_flags &=
                        ((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
                         (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
                if (p->ol_flags & PKT_TX_VLAN) {
                        desc->header_length_flags |=
                                1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
                }
                /*
                 * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
                 * is 0, so no need to set offload_mode.
                 */
                mss = 0;
                if (p->ol_flags & PKT_TX_IP_CKSUM)
                        mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
                if (p->ol_flags & PKT_TX_L4_MASK)
                        mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
                desc->mss_loopback = mss;

                /*
                 * The app should not send oversized packets, and
                 * tx_pkt_prepare includes a check as well. But some apps
                 * ignore the device max size and tx_pkt_prepare. Oversized
                 * packets cause WQ errors and the NIC ends up disabling the
                 * whole WQ. So truncate such packets here.
                 */
                if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        desc->length = ENIC_TX_MAX_PKT_SIZE;
                        rte_atomic64_inc(&enic->soft_stats.tx_oversized);
                }
                desc++;
        }
}

uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts)
{
        unsigned int head_idx, desc_count;
        struct wq_enet_desc *desc;
        struct vnic_wq *wq;
        struct enic *enic;
        uint16_t rem, n;

        wq = (struct vnic_wq *)tx_queue;
        enic = vnic_dev_priv(wq->vdev);
        enic_cleanup_wq(enic, wq);
        /* Will enqueue this many packets in this call */
        nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
        if (nb_pkts == 0)
                return 0;

        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;

        /* Descriptors until the end of the ring */
        n = desc_count - head_idx;
        n = RTE_MIN(nb_pkts, n);

        /* Save mbuf pointers to free later */
        memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);

        /* Enqueue until the ring end */
        rem = nb_pkts - n;
        desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
        enqueue_simple_pkts(tx_pkts, desc, n, enic);

        /* Wrap to the start of the ring */
        if (rem) {
                tx_pkts += n;
                memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
                desc = (struct wq_enet_desc *)wq->ring.descs;
                enqueue_simple_pkts(tx_pkts, desc, rem, enic);
        }
        rte_wmb();

        /* Update head_idx and desc_avail */
        wq->ring.desc_avail -= nb_pkts;
        head_idx += nb_pkts;
        if (head_idx >= desc_count)
                head_idx -= desc_count;
        wq->head_idx = head_idx;
        iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
        return nb_pkts;
}