net/enic: support scatter Rx in MTU update
drivers/net/enic/enic_rxtx.c
/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

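/*
 * Note: rte_prefetch0() pulls a line into all cache levels, which suits
 * descriptors touched immediately below; rte_prefetch1() stops short of
 * the L1 cache, which suits packet data handed off to the caller.
 */

/* Helpers that decode fields of the Rx completion (CQ) descriptor. */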
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
			   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Use a lookup table to translate Rx CQ flags to an mbuf packet type. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
			  | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
			  | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
			  | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
			  | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
			  | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
			  | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
			  | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
			  | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
			  | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
			  | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
			  | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
			  | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}
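
/*
 * Table index bit layout, inferred from the entries above and the
 * CQ_ENET_RQ_DESC_FLAGS_* masks: 0x40 = IPv4 fragment, 0x20 = IPv4,
 * 0x10 = IPv6, 0x04 = TCP, 0x02 = UDP.  For example, index 0x62 is an
 * IPv4 fragment carrying UDP, which the table reports as L4_UDP.
 */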

static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0;
	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);

	mbuf->ol_flags = 0;

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN stripping */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
	} else {
		mbuf->vlan_tci = 0;
	}

	/* RSS flag */
	if (enic_cq_rx_desc_rss_type(cqrd)) {
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
		(mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
		uint32_t l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
		if (l4_flags == RTE_PTYPE_L4_UDP ||
		    l4_flags == RTE_PTYPE_L4_TCP) {
			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
		}
	}

 mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}
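
/*
 * Note on the mapping above: PKT_RX_VLAN_STRIPPED indicates the tag was
 * removed from the packet data and saved in mbuf->vlan_tci, and checksum
 * flags are reported only for IPv4 packets whose checksums the NIC
 * actually computed (csum_not_calc clear).
 */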

/*
 * Dummy receive function, swapped in for the real one so that
 * reconfiguration operations can proceed safely while queues are polled.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}
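
/*
 * Receive burst handler.  With scatter Rx, a packet can span multiple
 * receive descriptors: the start-of-packet (SOP) queue supplies the head
 * segment and a companion data queue supplies the rest.  Both queues
 * complete through the SOP queue's CQ, and segments are linked into an
 * mbuf chain until a completion carrying the end-of-packet (EOP) flag
 * finishes the packet.
 */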
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

	data_rq = &enic->rq[sop_rq->data_queue_idx];

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for pkts available; the color bit flips on each
		 * wrap of the CQ ring, so a descriptor still carrying the
		 * previous color has not been written by hardware yet.
		 */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means the descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch the next CQ descriptor while processing the
		 * current one.
		 */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
					RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd_ptr, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				nmb->buf_len - RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->next = NULL;
		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* CQ Rx flags are only valid if the EOP bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

	/* Post the replenished buffers back to hardware once enough have
	 * accumulated; the data RQ (if in use) is posted before the SOP RQ.
	 */
	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		rte_mb();
		if (data_rq->in_use)
			iowrite32(data_rq->posted_index,
				  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}
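
/*
 * Illustrative sketch, not part of the driver: how an application polls
 * this burst handler through the normal ethdev API.  The guard macro and
 * function name are hypothetical; the example assumes a port that has
 * already been configured and started on an enic device.
 */
#ifdef ENIC_RXTX_USAGE_EXAMPLE	/* never defined; example only */
static void
example_rx_poll(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	/* rte_eth_rx_burst() dispatches to enic_recv_pkts() for enic. */
	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);	/* consume, then free */
}
#endif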

/*
 * Free transmitted mbufs up through the given completed index.  Frees are
 * batched per mempool so that most mbufs go back in one bulk put.
 */
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		/* A NULL return means the mbuf is still referenced
		 * elsewhere and must not be freed here.
		 */
		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			/* Mempool changed: flush the batch and start a
			 * new one for the new pool.
			 */
			rte_mempool_put_bulk(pool, (void **)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	/* The NIC writes the latest completed index into a message area in
	 * host memory; read it rather than polling CQ descriptors.
	 */
	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}

uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		nb_segs = tx_pkt->nb_segs;
		/* Stop if the packet does not fit in the remaining
		 * descriptors; post whatever was queued so far.
		 */
		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		mss = 0;
		vlan_id = 0;
		vlan_tag_insert = 0;
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_physaddr + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);

		if (ol_flags & ol_flags_mask) {
			if (ol_flags & PKT_TX_VLAN_PKT) {
				vlan_tag_insert = 1;
				vlan_id = tx_pkt->vlan_tci;
			}

			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* The NIC uses a single bit for both TCP and UDP
			 * checksum offload.
			 */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
				 eop, 0, vlan_tag_insert, vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			/* Multi-segment packet: queue one descriptor per
			 * remaining segment, marking EOP on the last.
			 */
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, 0, eop, eop, 0,
						 vlan_tag_insert, vlan_id, 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count, head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	rte_wmb();
	iowrite32(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}
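
/*
 * Illustrative sketch, not part of the driver: requesting checksum
 * offload before handing a burst to the PMD.  The guard macro and
 * function name are hypothetical; the mbuf API also expects l2_len and
 * l3_len to be set when checksum offload flags are used.
 */
#ifdef ENIC_RXTX_USAGE_EXAMPLE	/* never defined; example only */
static uint16_t
example_tx_send(uint8_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t i;

	for (i = 0; i < nb; i++)
		pkts[i]->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	/* rte_eth_tx_burst() dispatches to enic_xmit_pkts() for enic. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb);
}
#endif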